
Comparing jsr166/src/jsr166e/ConcurrentHashMapV8.java (file contents):
Revision 1.13 by jsr166, Wed Aug 31 00:22:11 2011 UTC vs.
Revision 1.80 by jsr166, Sat Nov 24 03:46:28 2012 UTC

# Line 5 | Line 5
5   */
6  
7   package jsr166e;
8 < import jsr166e.LongAdder;
8 >
9 > import java.util.Comparator;
10 > import java.util.Arrays;
11   import java.util.Map;
12   import java.util.Set;
13   import java.util.Collection;
# Line 19 | Line 21 | import java.util.Enumeration;
21   import java.util.ConcurrentModificationException;
22   import java.util.NoSuchElementException;
23   import java.util.concurrent.ConcurrentMap;
24 + import java.util.concurrent.ThreadLocalRandom;
25 + import java.util.concurrent.locks.LockSupport;
26 + import java.util.concurrent.locks.AbstractQueuedSynchronizer;
27 + import java.util.concurrent.atomic.AtomicReference;
28 +
51   import java.io.Serializable;
52  
53   /**
# Line 33 | Line 62 | import java.io.Serializable;
62   * interoperable with {@code Hashtable} in programs that rely on its
63   * thread safety but not on its synchronization details.
64   *
65 < * <p> Retrieval operations (including {@code get}) generally do not
65 > * <p>Retrieval operations (including {@code get}) generally do not
66   * block, so may overlap with update operations (including {@code put}
67   * and {@code remove}). Retrievals reflect the results of the most
68   * recently <em>completed</em> update operations holding upon their
69 < * onset.  For aggregate operations such as {@code putAll} and {@code
70 < * clear}, concurrent retrievals may reflect insertion or removal of
71 < * only some entries.  Similarly, Iterators and Enumerations return
72 < * elements reflecting the state of the hash table at some point at or
73 < * since the creation of the iterator/enumeration.  They do
74 < * <em>not</em> throw {@link ConcurrentModificationException}.
75 < * However, iterators are designed to be used by only one thread at a
76 < * time.  Bear in mind that the results of aggregate status methods
77 < * including {@code size}, {@code isEmpty}, and {@code containsValue}
78 < * are typically useful only when a map is not undergoing concurrent
79 < * updates in other threads.  Otherwise the results of these methods
80 < * reflect transient states that may be adequate for monitoring
81 < * purposes, but not for program control.
82 < *
83 < * <p> Resizing this or any other kind of hash table is a relatively
84 < * slow operation, so, when possible, it is a good idea to provide
85 < * estimates of expected table sizes in constructors. Also, for
86 < * compatibility with previous versions of this class, constructors
87 < * may optionally specify an expected {@code concurrencyLevel} as an
88 < * additional hint for internal sizing.
69 > * onset. (More formally, an update operation for a given key bears a
70 > * <em>happens-before</em> relation with any (non-null) retrieval for
71 > * that key reporting the updated value.)  For aggregate operations
72 > * such as {@code putAll} and {@code clear}, concurrent retrievals may
73 > * reflect insertion or removal of only some entries.  Similarly,
74 > * Iterators and Enumerations return elements reflecting the state of
75 > * the hash table at some point at or since the creation of the
76 > * iterator/enumeration.  They do <em>not</em> throw {@link
77 > * ConcurrentModificationException}.  However, iterators are designed
78 > * to be used by only one thread at a time.  Bear in mind that the
79 > * results of aggregate status methods including {@code size}, {@code
80 > * isEmpty}, and {@code containsValue} are typically useful only when
81 > * a map is not undergoing concurrent updates in other threads.
82 > * Otherwise the results of these methods reflect transient states
83 > * that may be adequate for monitoring or estimation purposes, but not
84 > * for program control.
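A minimal sketch of the happens-before guarantee above (names are placeholders, not from this file): writes made to a value object before it is put into the map are visible to any thread whose retrieval returns that object.

    final ConcurrentHashMapV8<String, int[]> map =
        new ConcurrentHashMapV8<String, int[]>();

    // writer thread
    int[] data = new int[1];
    data[0] = 42;              // writes performed before the insertion...
    map.put("answer", data);

    // reader thread
    int[] seen = map.get("answer");
    if (seen != null)
        assert seen[0] == 42;  // ...are visible once get() reports the mapping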
85 > *
86 > * <p>The table is dynamically expanded when there are too many
87 > * collisions (i.e., keys that have distinct hash codes but fall into
88 > * the same slot modulo the table size), with the expected average
89 > * effect of maintaining roughly two bins per mapping (corresponding
90 > * to a 0.75 load factor threshold for resizing). There may be much
91 > * variance around this average as mappings are added and removed, but
92 > * overall, this maintains a commonly accepted time/space tradeoff for
93 > * hash tables.  However, resizing this or any other kind of hash
94 > * table may be a relatively slow operation. When possible, it is a
95 > * good idea to provide a size estimate as an optional {@code
96 > * initialCapacity} constructor argument. An additional optional
97 > * {@code loadFactor} constructor argument provides a further means of
98 > * customizing initial table capacity by specifying the table density
99 > * to be used in calculating the amount of space to allocate for the
100 > * given number of elements.  Also, for compatibility with previous
101 > * versions of this class, constructors may optionally specify an
102 > * expected {@code concurrencyLevel} as an additional hint for
103 > * internal sizing.  Note that using many keys with exactly the same
104 > * {@code hashCode()} is a sure way to slow down performance of any
105 > * hash table.
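A brief sizing sketch using the constructor forms described above (the numbers are placeholders): pre-sizing avoids resizes while an expected number of mappings is loaded.

    // accommodate roughly 10000 mappings at the default 0.75 density;
    // the concurrencyLevel argument is only a compatibility hint
    ConcurrentHashMapV8<String, String> m =
        new ConcurrentHashMapV8<String, String>(10000, 0.75f, 16);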
106 > *
107 > * <p>A {@link Set} projection of a ConcurrentHashMapV8 may be created
108 > * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
109 > * (using {@link #keySet(Object)}) when only keys are of interest, and the
110 > * mapped values are (perhaps transiently) not used or all take the
111 > * same mapping value.
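A short sketch of both projections (assuming the linked newKeySet and keySet(Object) forms; names and values are placeholders):

    // a concurrent set created directly, backed by a ConcurrentHashMapV8
    Set<String> created = ConcurrentHashMapV8.<String>newKeySet();
    created.add("a");

    // a set view of an existing map; add uses the given mapped value
    ConcurrentHashMapV8<String, Boolean> flags =
        new ConcurrentHashMapV8<String, Boolean>();
    Set<String> viewed = flags.keySet(Boolean.TRUE);
    viewed.add("b");   // equivalent to flags.put("b", Boolean.TRUE)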
112 > *
113 > * <p>A ConcurrentHashMapV8 can be used as a scalable frequency map (a
114 > * form of histogram or multiset) by using {@link LongAdder} values
115 > * and initializing via {@link #computeIfAbsent}. For example, to add
116 > * a count to a {@code ConcurrentHashMapV8<String,LongAdder> freqs}, you
117 > * can use {@code freqs.computeIfAbsent(key, k -> new
118 > * LongAdder()).increment();}
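Because this class uses nested functional interfaces during the jsr166e transition (see the note below), the same idiom can be written without lambdas; a sketch assuming the nested Fun interface and the two-argument computeIfAbsent form, with a placeholder key:

    ConcurrentHashMapV8<String, LongAdder> freqs =
        new ConcurrentHashMapV8<String, LongAdder>();
    String word = "example";   // placeholder key
    freqs.computeIfAbsent(word, new ConcurrentHashMapV8.Fun<String, LongAdder>() {
        public LongAdder apply(String k) { return new LongAdder(); }
    }).increment();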
119   *
120   * <p>This class and its views and iterators implement all of the
121   * <em>optional</em> methods of the {@link Map} and {@link Iterator}
122   * interfaces.
123   *
124 < * <p> Like {@link Hashtable} but unlike {@link HashMap}, this class
124 > * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
125   * does <em>not</em> allow {@code null} to be used as a key or value.
126   *
127 + * <p>ConcurrentHashMapV8s support parallel operations using the {@link
128 + * ForkJoinPool#commonPool}. (Tasks that may be used in other contexts
129 + * are available in class {@link ForkJoinTasks}). These operations are
130 + * designed to be safely, and often sensibly, applied even with maps
131 + * that are being concurrently updated by other threads; for example,
132 + * when computing a snapshot summary of the values in a shared
133 + * registry.  There are three kinds of operation, each with four
134 + * forms, accepting functions with Keys, Values, Entries, and (Key,
135 + * Value) arguments and/or return values. (The first three forms are
136 + * also available via the {@link #keySet()}, {@link #values()} and
137 + * {@link #entrySet()} views). Because the elements of a
138 + * ConcurrentHashMapV8 are not ordered in any particular way, and may be
139 + * processed in different orders in different parallel executions, the
140 + * correctness of supplied functions should not depend on any
141 + * ordering, or on any other objects or values that may transiently
142 + * change while computation is in progress; and except for forEach
143 + * actions, should ideally be side-effect-free.
144 + *
145 + * <ul>
146 + * <li> forEach: Perform a given action on each element.
147 + * A variant form applies a given transformation on each element
148 + * before performing the action.</li>
149 + *
150 + * <li> search: Return the first available non-null result of
151 + * applying a given function on each element; skipping further
152 + * search when a result is found.</li>
153 + *
154 + * <li> reduce: Accumulate each element.  The supplied reduction
155 + * function cannot rely on ordering (more formally, it should be
156 + * both associative and commutative).  There are five variants:
157 + *
158 + * <ul>
159 + *
160 + * <li> Plain reductions. (There is not a form of this method for
161 + * (key, value) function arguments since there is no corresponding
162 + * return type.)</li>
163 + *
164 + * <li> Mapped reductions that accumulate the results of a given
165 + * function applied to each element.</li>
166 + *
167 + * <li> Reductions to scalar doubles, longs, and ints, using a
168 + * given basis value.</li>
169 + *
170 + * </ul>
171 + * </li>
172 + * </ul>
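For example, a forEach action over (key, value) pairs might look like the following sketch (the method and nested interface names are assumed from the four-form scheme above; the map is a placeholder):

    ConcurrentHashMapV8<String, String> registry = ...;
    registry.forEach(new ConcurrentHashMapV8.BiAction<String, String>() {
        public void apply(String k, String v) {
            System.out.println(k + " -> " + v);  // side effects are fine in forEach
        }
    });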
173 + *
174 + * <p>The concurrency properties of bulk operations follow
175 + * from those of ConcurrentHashMapV8: Any non-null result returned
176 + * from {@code get(key)} and related access methods bears a
177 + * happens-before relation with the associated insertion or
178 + * update.  The result of any bulk operation reflects the
179 + * composition of these per-element relations (but is not
180 + * necessarily atomic with respect to the map as a whole unless it
181 + * is somehow known to be quiescent).  Conversely, because keys
182 + * and values in the map are never null, null serves as a reliable
183 + * atomic indicator of the current lack of any result.  To
184 + * maintain this property, null serves as an implicit basis for
185 + * all non-scalar reduction operations. For the double, long, and
186 + * int versions, the basis should be one that, when combined with
187 + * any other value, returns that other value (more formally, it
188 + * should be the identity element for the reduction). Most common
189 + * reductions have these properties; for example, computing a sum
190 + * with basis 0 or a minimum with basis MAX_VALUE.
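A sketch of a scalar reduction under this identity-basis rule (the method and helper-interface names are assumed from this class's transitional API; the map is a placeholder): summing Long values with basis 0.

    ConcurrentHashMapV8<String, Long> counts = ...;
    long total = counts.reduceValuesToLong(
        new ConcurrentHashMapV8.ObjectToLong<Long>() {
            public long apply(Long v) { return v.longValue(); }
        },
        0L,                                   // identity basis for a sum
        new ConcurrentHashMapV8.LongByLongToLong() {
            public long apply(long a, long b) { return a + b; }
        });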
191 + *
192 + * <p>Search and transformation functions provided as arguments
193 + * should similarly return null to indicate the lack of any result
194 + * (in which case it is not used). In the case of mapped
195 + * reductions, this also enables transformations to serve as
196 + * filters, returning null (or, in the case of primitive
197 + * specializations, the identity basis) if the element should not
198 + * be combined. You can create compound transformations and
199 + * filterings by composing them yourself under this "null means
200 + * there is nothing there now" rule before using them in search or
201 + * reduce operations.
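A sketch of this null-means-no-result convention in a search (the searchValues form and nested Fun interface are assumed; the map is a placeholder): return the first value longer than ten characters, or null if there is none.

    ConcurrentHashMapV8<String, String> m = ...;
    String hit = m.searchValues(new ConcurrentHashMapV8.Fun<String, String>() {
        public String apply(String v) {
            return (v.length() > 10) ? v : null;   // null: no result here, keep searching
        }
    });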
202 + *
203 + * <p>Methods accepting and/or returning Entry arguments maintain
204 + * key-value associations. They may be useful for example when
205 + * finding the key for the greatest value. Note that "plain" Entry
206 + * arguments can be supplied using {@code new
207 + * AbstractMap.SimpleEntry(k,v)}.
208 + *
209 + * <p>Bulk operations may complete abruptly, throwing an
210 + * exception encountered in the application of a supplied
211 + * function. Bear in mind when handling such exceptions that other
212 + * concurrently executing functions could also have thrown
213 + * exceptions, or would have done so if the first exception had
214 + * not occurred.
215 + *
216 + * <p>Parallel speedups for bulk operations compared to sequential
217 + * processing are common but not guaranteed.  Operations involving
218 + * brief functions on small maps may execute more slowly than
219 + * sequential loops if the underlying work to parallelize the
220 + * computation is more expensive than the computation itself.
221 + * Similarly, parallelization may not lead to much actual parallelism
222 + * if all processors are busy performing unrelated tasks.
223 + *
224 + * <p>All arguments to all task methods must be non-null.
225 + *
226 + * <p><em>jsr166e note: During transition, this class
227 + * uses nested functional interfaces with different names but the
228 + * same forms as those expected for JDK8.</em>
229 + *
230   * <p>This class is a member of the
231   * <a href="{@docRoot}/../technotes/guides/collections/index.html">
232   * Java Collections Framework</a>.
233   *
72 * <p><em>jsr166e note: This class is a candidate replacement for
73 * java.util.concurrent.ConcurrentHashMap.<em>
74 *
234   * @since 1.5
235   * @author Doug Lea
236   * @param <K> the type of keys maintained by this map
237   * @param <V> the type of mapped values
238   */
239   public class ConcurrentHashMapV8<K, V>
240 <        implements ConcurrentMap<K, V>, Serializable {
240 >    implements ConcurrentMap<K, V>, Serializable {
241      private static final long serialVersionUID = 7249069246763182397L;
242  
243      /**
244 <     * A function computing a mapping from the given key to a value,
245 <     * or {@code null} if there is no mapping. This is a place-holder
246 <     * for an upcoming JDK8 interface.
247 <     */
248 <    public static interface MappingFunction<K, V> {
249 <        /**
250 <         * Returns a value for the given key, or null if there is no
251 <         * mapping. If this function throws an (unchecked) exception,
252 <         * the exception is rethrown to its caller, and no mapping is
253 <         * recorded.  Because this function is invoked within
254 <         * atomicity control, the computation should be short and
255 <         * simple. The most common usage is to construct a new object
256 <         * serving as an initial mapped value.
244 >     * A partitionable iterator. A Spliterator can be traversed
245 >     * directly, but can also be partitioned (before traversal) by
246 >     * creating another Spliterator that covers a non-overlapping
247 >     * portion of the elements, and so may be amenable to parallel
248 >     * execution.
249 >     *
250 >     * <p>This interface exports a subset of expected JDK8
251 >     * functionality.
252 >     *
253 >     * <p>Sample usage: Here is one (of the several) ways to compute
254 >     * the sum of the values held in a map using the ForkJoin
255 >     * framework. As illustrated here, Spliterators are well suited to
256 >     * designs in which a task repeatedly splits off half its work
257 >     * into forked subtasks until small enough to process directly,
258 >     * and then joins these subtasks. Variants of this style can also
259 >     * be used in completion-based designs.
260 >     *
261 >     * <pre>
262 >     * {@code ConcurrentHashMapV8<String, Long> m = ...
263 >     * // split as if we have 8 * parallelism, for load balance
264 >     * int n = m.size();
265 >     * int p = aForkJoinPool.getParallelism() * 8;
266 >     * int split = (n < p)? n : p;
267 >     * long sum = aForkJoinPool.invoke(new SumValues(m.valueSpliterator(), split, null));
268 >     * // ...
269 >     * static class SumValues extends RecursiveTask<Long> {
270 >     *   final Spliterator<Long> s;
271 >     *   final int split;             // split while > 1
272 >     *   final SumValues nextJoin;    // records forked subtasks to join
273 >     *   SumValues(Spliterator<Long> s, int split, SumValues nextJoin) {
274 >     *     this.s = s; this.split = split; this.nextJoin = nextJoin;
275 >     *   }
276 >     *   public Long compute() {
277 >     *     long sum = 0;
278 >     *     SumValues subtasks = null; // fork subtasks
279 >     *     for (int h = split >>> 1; h > 0; h >>>= 1)
280 >     *       (subtasks = new SumValues(s.split(), h, subtasks)).fork();
281 >     *     while (s.hasNext())        // directly process remaining elements
282 >     *       sum += s.next();
283 >     *     for (SumValues t = subtasks; t != null; t = t.nextJoin)
284 >     *       sum += t.join();         // collect subtask results
285 >     *     return sum;
286 >     *   }
287 >     * }
288 >     * }</pre>
289 >     */
290 >    public static interface Spliterator<T> extends Iterator<T> {
291 >        /**
292 >         * Returns a Spliterator covering approximately half of the
293 >         * elements, guaranteed not to overlap with those subsequently
294 >         * returned by this Spliterator.  After invoking this method,
295 >         * the current Spliterator will <em>not</em> produce any of
296 >         * the elements of the returned Spliterator, but the two
297 >         * Spliterators together will produce all of the elements that
298 >         * would have been produced by this Spliterator had this
299 >         * method not been called. The exact number of elements
300 >         * produced by the returned Spliterator is not guaranteed, and
301 >         * may be zero (i.e., with {@code hasNext()} reporting {@code
302 >         * false}) if this Spliterator cannot be further split.
303           *
304 <         * @param key the (non-null) key
305 <         * @return a value, or null if none
304 >         * @return a Spliterator covering approximately half of the
305 >         * elements
306 >         * @throws IllegalStateException if this Spliterator has
307 >         * already commenced traversing elements
308           */
309 <        V map(K key);
309 >        Spliterator<T> split();
310      }
311  
312 +
313      /*
314       * Overview:
315       *
316       * The primary design goal of this hash table is to maintain
317       * concurrent readability (typically method get(), but also
318       * iterators and related methods) while minimizing update
319 <     * contention.
319 >     * contention. Secondary goals are to keep space consumption about
320 >     * the same or better than java.util.HashMap, and to support high
321 >     * initial insertion rates on an empty table by many threads.
322       *
323       * Each key-value mapping is held in a Node.  Because Node fields
324       * can contain special values, they are defined using plain Object
325       * types. Similarly in turn, all internal methods that use them
326 <     * work off Object types. All public generic-typed methods relay
327 <     * in/out of these internal methods, supplying casts as needed.
326 >     * work off Object types. And similarly, so do the internal
327 >     * methods of auxiliary iterator and view classes.  All public
328 >     * generic typed methods relay in/out of these internal methods,
329 >     * supplying null-checks and casts as needed. This also allows
330 >     * many of the public methods to be factored into a smaller number
331 >     * of internal methods (although sadly not so for the five
332 >     * variants of put-related operations). The validation-based
333 >     * approach explained below leads to a lot of code sprawl because
334 >     * retry-control precludes factoring into smaller methods.
335       *
336       * The table is lazily initialized to a power-of-two size upon the
337 <     * first insertion.  Each bin in the table contains a (typically
338 <     * short) list of Nodes.  Table accesses require volatile/atomic
339 <     * reads, writes, and CASes.  Because there is no other way to
340 <     * arrange this without adding further indirections, we use
341 <     * intrinsics (sun.misc.Unsafe) operations.  The lists of nodes
342 <     * within bins are always accurately traversable under volatile
343 <     * reads, so long as lookups check hash code and non-nullness of
344 <     * key and value before checking key equality. (All valid hash
345 <     * codes are nonnegative. Negative values are reserved for special
346 <     * forwarding nodes; see below.)
347 <     *
348 <     * A bin may be locked during update (insert, delete, and replace)
349 <     * operations.  We do not want to waste the space required to
350 <     * associate a distinct lock object with each bin, so instead use
351 <     * the first node of a bin list itself as a lock, using builtin
352 <     * "synchronized" locks. These save space and we can live with
353 <     * only plain block-structured lock/unlock operations. Using the
354 <     * first node of a list as a lock does not by itself suffice
355 <     * though: When a node is locked, any update must first validate
356 <     * that it is still the first node, and retry if not. (Because new
357 <     * nodes are always appended to lists, once a node is first in a
358 <     * bin, it remains first until deleted or the bin becomes
359 <     * invalidated.)  However, update operations can and sometimes do
360 <     * still traverse the bin until the point of update, which helps
361 <     * reduce cache misses on retries.  This is a converse of sorts to
362 <     * the lazy locking technique described by Herlihy & Shavit. If
363 <     * there is no existing node during a put operation, then one can
364 <     * be CAS'ed in (without need for lock except in computeIfAbsent);
365 <     * the CAS serves as validation. This is on average the most
366 <     * common case for put operations -- under random hash codes, the
367 <     * distribution of nodes in bins follows a Poisson distribution
368 <     * (see http://en.wikipedia.org/wiki/Poisson_distribution) with a
369 <     * parameter of 0.5 on average under the default loadFactor of
370 <     * 0.75.  The expected number of locks covering different elements
371 <     * (i.e., bins with 2 or more nodes) is approximately 10% at
372 <     * steady state under default settings.  Lock contention
373 <     * probability for two threads accessing arbitrary distinct
374 <     * elements is, roughly, 1 / (8 * #elements).
375 <     *
376 <     * The table is resized when occupancy exceeds a threshold.  Only
377 <     * a single thread performs the resize (using field "resizing", to
378 <     * arrange exclusion), but the table otherwise remains usable for
379 <     * both reads and updates. Resizing proceeds by transferring bins,
380 <     * one by one, from the table to the next table.  Upon transfer,
381 <     * the old table bin contains only a special forwarding node (with
382 <     * negative hash code ("MOVED")) that contains the next table as
337 >     * first insertion.  Each bin in the table normally contains a
338 >     * list of Nodes (most often, the list has only zero or one Node).
339 >     * Table accesses require volatile/atomic reads, writes, and
340 >     * CASes.  Because there is no other way to arrange this without
341 >     * adding further indirections, we use intrinsics
342 >     * (sun.misc.Unsafe) operations.  The lists of nodes within bins
343 >     * are always accurately traversable under volatile reads, so long
344 >     * as lookups check hash code and non-nullness of value before
345 >     * checking key equality.
346 >     *
347 >     * We use the top two bits of Node hash fields for control
348 >     * purposes -- they are available anyway because of addressing
349 >     * constraints.  As explained further below, these top bits are
350 >     * used as follows:
351 >     *  00 - Normal
352 >     *  01 - Locked
353 >     *  11 - Locked and may have a thread waiting for lock
354 >     *  10 - Node is a forwarding node
355 >     *
356 >     * The lower 30 bits of each Node's hash field contain a
357 >     * transformation of the key's hash code, except for forwarding
358 >     * nodes, for which the lower bits are zero (and so always have
359 >     * hash field == MOVED).
360 >     *
361 >     * Insertion (via put or its variants) of the first node in an
362 >     * empty bin is performed by just CASing it to the bin.  This is
363 >     * by far the most common case for put operations under most
364 >     * key/hash distributions.  Other update operations (insert,
365 >     * delete, and replace) require locks.  We do not want to waste
366 >     * the space required to associate a distinct lock object with
367 >     * each bin, so instead use the first node of a bin list itself as
368 >     * a lock. Blocking support for these locks relies on the builtin
369 >     * "synchronized" monitors.  However, we also need a tryLock
370 >     * construction, so we overlay these by using bits of the Node
371 >     * hash field for lock control (see above), and so normally use
372 >     * builtin monitors only for blocking and signalling using
373 >     * wait/notifyAll constructions. See Node.tryAwaitLock.
374 >     *
375 >     * Using the first node of a list as a lock does not by itself
376 >     * suffice though: When a node is locked, any update must first
377 >     * validate that it is still the first node after locking it, and
378 >     * retry if not. Because new nodes are always appended to lists,
379 >     * once a node is first in a bin, it remains first until deleted
380 >     * or the bin becomes invalidated (upon resizing).  However,
381 >     * operations that only conditionally update may inspect nodes
382 >     * until the point of update. This is a converse of sorts to the
383 >     * lazy locking technique described by Herlihy & Shavit.
384 >     *
385 >     * The main disadvantage of per-bin locks is that other update
386 >     * operations on other nodes in a bin list protected by the same
387 >     * lock can stall, for example when user equals() or mapping
388 >     * functions take a long time.  However, statistically, under
389 >     * random hash codes, this is not a common problem.  Ideally, the
390 >     * frequency of nodes in bins follows a Poisson distribution
391 >     * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
392 >     * parameter of about 0.5 on average, given the resizing threshold
393 >     * of 0.75, although with a large variance because of resizing
394 >     * granularity. Ignoring variance, the expected occurrences of
395 >     * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
396 >     * first values are:
397 >     *
398 >     * 0:    0.60653066
399 >     * 1:    0.30326533
400 >     * 2:    0.07581633
401 >     * 3:    0.01263606
402 >     * 4:    0.00157952
403 >     * 5:    0.00015795
404 >     * 6:    0.00001316
405 >     * 7:    0.00000094
406 >     * 8:    0.00000006
407 >     * more: less than 1 in ten million
408 >     *
409 >     * Lock contention probability for two threads accessing distinct
410 >     * elements is roughly 1 / (8 * #elements) under random hashes.
411 >     *
412 >     * Actual hash code distributions encountered in practice
413 >     * sometimes deviate significantly from uniform randomness.  This
414 >     * includes the case when N > (1<<30), so some keys MUST collide.
415 >     * Similarly for dumb or hostile usages in which multiple keys are
416 >     * designed to have identical hash codes. Also, although we guard
417 >     * against the worst effects of this (see method spread), sets of
418 >     * hashes may differ only in bits that do not impact their bin
419 >     * index for a given power-of-two mask.  So we use a secondary
420 >     * strategy that applies when the number of nodes in a bin exceeds
421 >     * a threshold, and at least one of the keys implements
422 >     * Comparable.  These TreeBins use a balanced tree to hold nodes
423 >     * (a specialized form of red-black trees), bounding search time
424 >     * to O(log N).  Each search step in a TreeBin is around twice as
425 >     * slow as in a regular list, but given that N cannot exceed
426 >     * (1<<64) (before running out of addresses) this bounds search
427 >     * steps, lock hold times, etc, to reasonable constants (roughly
428 >     * 100 nodes inspected per operation worst case) so long as keys
429 >     * are Comparable (which is very common -- String, Long, etc).
430 >     * TreeBin nodes (TreeNodes) also maintain the same "next"
431 >     * traversal pointers as regular nodes, so can be traversed in
432 >     * iterators in the same way.
433 >     *
434 >     * The table is resized when occupancy exceeds a percentage
435 >     * threshold (nominally, 0.75, but see below).  Only a single
436 >     * thread performs the resize (using field "sizeCtl", to arrange
437 >     * exclusion), but the table otherwise remains usable for reads
438 >     * and updates. Resizing proceeds by transferring bins, one by
439 >     * one, from the table to the next table.  Because we are using
440 >     * power-of-two expansion, the elements from each bin must either
441 >     * stay at same index, or move with a power of two offset. We
442 >     * eliminate unnecessary node creation by catching cases where old
443 >     * nodes can be reused because their next fields won't change.  On
444 >     * average, only about one-sixth of them need cloning when a table
445 >     * doubles. The nodes they replace will be garbage collectable as
446 >     * soon as they are no longer referenced by any reader thread that
447 >     * may be in the midst of concurrently traversing table.  Upon
448 >     * transfer, the old table bin contains only a special forwarding
449 >     * node (with hash field "MOVED") that contains the next table as
450       * its key. On encountering a forwarding node, access and update
451 <     * operations restart, using the new table. To ensure concurrent
452 <     * readability of traversals, transfers must proceed from the last
453 <     * bin (table.length - 1) up towards the first.  Any traversal
454 <     * starting from the first bin can then arrange to move to the new
455 <     * table for the rest of the traversal without revisiting nodes.
456 <     * This constrains bin transfers to a particular order, and so can
457 <     * block indefinitely waiting for the next lock, and other threads
458 <     * cannot help with the transfer. However, expected stalls are
459 <     * infrequent enough to not warrant the additional overhead and
460 <     * complexity of access and iteration schemes that could admit
461 <     * out-of-order or concurrent bin transfers.
462 <     *
463 <     * A similar traversal scheme (not yet implemented) can apply to
464 <     * partial traversals during partitioned aggregate operations.
465 <     * Also, read-only operations give up if ever forwarded to a null
466 <     * table, which provides support for shutdown-style clearing,
467 <     * which is also not currently implemented.
451 >     * operations restart, using the new table.
452 >     *
453 >     * Each bin transfer requires its bin lock. However, unlike other
454 >     * cases, a transfer can skip a bin if it fails to acquire its
455 >     * lock, and revisit it later (unless it is a TreeBin). Method
456 >     * rebuild maintains a buffer of TRANSFER_BUFFER_SIZE bins that
457 >     * have been skipped because of failure to acquire a lock, and
458 >     * blocks only if none are available (i.e., only very rarely).
459 >     * The transfer operation must also ensure that all accessible
460 >     * bins in both the old and new table are usable by any traversal.
461 >     * When there are no lock acquisition failures, this is arranged
462 >     * simply by proceeding from the last bin (table.length - 1) up
463 >     * towards the first.  Upon seeing a forwarding node, traversals
464 >     * (see class Iter) arrange to move to the new table
465 >     * without revisiting nodes.  However, when any node is skipped
466 >     * during a transfer, all earlier table bins may have become
467 >     * visible, so are initialized with a reverse-forwarding node back
468 >     * to the old table until the new ones are established. (This
469 >     * sometimes requires transiently locking a forwarding node, which
470 >     * is possible under the above encoding.) These more expensive
471 >     * mechanics trigger only when necessary.
472 >     *
473 >     * The traversal scheme also applies to partial traversals of
474 >     * ranges of bins (via an alternate Traverser constructor)
475 >     * to support partitioned aggregate operations.  Also, read-only
476 >     * operations give up if ever forwarded to a null table, which
477 >     * provides support for shutdown-style clearing, which is also not
478 >     * currently implemented.
479 >     *
480 >     * Lazy table initialization minimizes footprint until first use,
481 >     * and also avoids resizings when the first operation is from a
482 >     * putAll, constructor with map argument, or deserialization.
483 >     * These cases attempt to override the initial capacity settings,
484 >     * but harmlessly fail to take effect in cases of races.
485       *
486       * The element count is maintained using a LongAdder, which avoids
487       * contention on updates but can encounter cache thrashing if read
488 <     * too frequently during concurrent updates. To avoid reading so
489 <     * often, resizing is normally attempted only upon adding to a bin
490 <     * already holding two or more nodes. Under the default threshold
491 <     * (0.75), and uniform hash distributions, the probability of this
492 <     * occurring at threshold is around 13%, meaning that only about 1
493 <     * in 8 puts check threshold (and after resizing, many fewer do
494 <     * so). But this approximation has high variance for small table
495 <     * sizes, so we check on any collision for sizes <= 64.  Further,
496 <     * to increase the probability that a resize occurs soon enough, we
497 <     * offset the threshold (see THRESHOLD_OFFSET) by the expected
498 <     * number of puts between checks. This is currently set to 8, in
499 <     * accord with the default load factor. In practice, this is
500 <     * rarely overridden, and in any case is close enough to other
501 <     * plausible values not to waste dynamic probability computation
502 <     * for more precision.
488 >     * too frequently during concurrent access. To avoid reading so
489 >     * often, resizing is attempted either when a bin lock is
490 >     * contended, or upon adding to a bin already holding two or more
491 >     * nodes (checked before adding in the xIfAbsent methods, after
492 >     * adding in others). Under uniform hash distributions, the
493 >     * probability of this occurring at threshold is around 13%,
494 >     * meaning that only about 1 in 8 puts check threshold (and after
495 >     * resizing, many fewer do so). But this approximation has high
496 >     * variance for small table sizes, so we check on any collision
497 >     * for sizes <= 64. The bulk putAll operation further reduces
498 >     * contention by only committing count updates upon these size
499 >     * checks.
500 >     *
501 >     * Maintaining API and serialization compatibility with previous
502 >     * versions of this class introduces several oddities. Mainly: We
503 >     * leave untouched but unused constructor arguments referring to
504 >     * concurrencyLevel. We accept a loadFactor constructor argument,
505 >     * but apply it only to initial table capacity (which is the only
506 >     * time that we can guarantee to honor it). We also declare an
507 >     * unused "Segment" class that is instantiated in minimal form
508 >     * only when serializing.
509       */
510  
511      /* ---------------- Constants -------------- */
512  
513      /**
514 <     * The smallest allowed table capacity.  Must be a power of 2, at
515 <     * least 2.
514 >     * The largest possible table capacity.  This value must be
515 >     * exactly 1<<30 to stay within Java array allocation and indexing
516 >     * bounds for power of two table sizes, and is further required
517 >     * because the top two bits of 32bit hash fields are used for
518 >     * control purposes.
519       */
520 <    static final int MINIMUM_CAPACITY = 2;
520 >    private static final int MAXIMUM_CAPACITY = 1 << 30;
521  
522      /**
523 <     * The largest allowed table capacity.  Must be a power of 2, at
524 <     * most 1<<30.
523 >     * The default initial table capacity.  Must be a power of 2
524 >     * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
525       */
526 <    static final int MAXIMUM_CAPACITY = 1 << 30;
526 >    private static final int DEFAULT_CAPACITY = 16;
527  
528      /**
529 <     * The default initial table capacity.  Must be a power of 2, at
530 <     * least MINIMUM_CAPACITY and at most MAXIMUM_CAPACITY.
529 >     * The largest possible (non-power of two) array size.
530 >     * Needed by toArray and related methods.
531       */
532 <    static final int DEFAULT_CAPACITY = 16;
532 >    static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
533  
534      /**
535 <     * The default load factor for this table, used when not otherwise
536 <     * specified in a constructor.
535 >     * The default concurrency level for this table. Unused but
536 >     * defined for compatibility with previous versions of this class.
537       */
538 <    static final float DEFAULT_LOAD_FACTOR = 0.75f;
538 >    private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
539  
540      /**
541 <     * The default concurrency level for this table. Unused, but
542 <     * defined for compatibility with previous versions of this class.
541 >     * The load factor for this table. Overrides of this value in
542 >     * constructors affect only the initial table capacity.  The
543 >     * actual floating point value isn't normally used -- it is
544 >     * simpler to use expressions such as {@code n - (n >>> 2)} for
545 >     * the associated resizing threshold.
546       */
547 <    static final int DEFAULT_CONCURRENCY_LEVEL = 16;
547 >    private static final float LOAD_FACTOR = 0.75f;
548  
549      /**
550 <     * The count value to offset thresholds to compensate for checking
551 <     * for resizing only when inserting into bins with two or more
552 <     * elements. See above for explanation.
550 >     * The buffer size for skipped bins during transfers. The
551 >     * value is arbitrary but should be large enough to avoid
552 >     * most locking stalls during resizes.
553       */
554 <    static final int THRESHOLD_OFFSET = 8;
554 >    private static final int TRANSFER_BUFFER_SIZE = 32;
555  
556      /**
557 <     * Special node hash value indicating to use table in node.key
558 <     * Must be negative.
557 >     * The bin count threshold for using a tree rather than list for a
558 >     * bin.  The value reflects the approximate break-even point for
559 >     * using tree-based operations.
560       */
561 <    static final int MOVED = -1;
561 >    private static final int TREE_THRESHOLD = 8;
562 >
563 >    /*
564 >     * Encodings for special uses of Node hash fields. See above for
565 >     * explanation.
566 >     */
567 >    static final int MOVED     = 0x80000000; // hash field for forwarding nodes
568 >    static final int LOCKED    = 0x40000000; // set/tested only as a bit
569 >    static final int WAITING   = 0xc0000000; // both bits set/tested together
570 >    static final int HASH_BITS = 0x3fffffff; // usable bits of normal node hash
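A small illustrative helper (not part of the class) showing how these encodings are typically tested, given a node hash h and a power-of-two table length n:

    static String describeHash(int h, int n) {
        if (h == MOVED)                            // top bits 10, low bits zero
            return "forwarding node";
        String state = ((h & WAITING) == WAITING) ? "locked, waiter present"
                     : ((h & LOCKED) != 0)        ? "locked"
                     : "normal";
        return state + ", bin " + ((h & HASH_BITS) & (n - 1));
    }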
571  
572      /* ---------------- Fields -------------- */
573  
574      /**
575       * The array of bins. Lazily initialized upon first insertion.
576 <     * Size is always a power of two. Accessed directly by inner
254 <     * classes.
576 >     * Size is always a power of two. Accessed directly by iterators.
577       */
578      transient volatile Node[] table;
579  
580 <    /** The counter maintaining number of elements. */
580 >    /**
581 >     * The counter maintaining number of elements.
582 >     */
583      private transient final LongAdder counter;
584 <    /** Nonzero when table is being initialized or resized. Updated via CAS. */
585 <    private transient volatile int resizing;
586 <    /** The target load factor for the table. */
587 <    private transient float loadFactor;
588 <    /** The next element count value upon which to resize the table. */
589 <    private transient int threshold;
590 <    /** The initial capacity of the table. */
591 <    private transient int initCap;
584 >
585 >    /**
586 >     * Table initialization and resizing control.  When negative, the
587 >     * table is being initialized or resized. Otherwise, when table is
588 >     * null, holds the initial table size to use upon creation, or 0
589 >     * for default. After initialization, holds the next element count
590 >     * value upon which to resize the table.
591 >     */
592 >    private transient volatile int sizeCtl;
593  
594      // views
595 <    transient Set<K> keySet;
596 <    transient Set<Map.Entry<K,V>> entrySet;
597 <    transient Collection<V> values;
595 >    private transient KeySetView<K,V> keySet;
596 >    private transient ValuesView<K,V> values;
597 >    private transient EntrySetView<K,V> entrySet;
598  
599      /** For serialization compatibility. Null unless serialized; see below */
600 <    Segment<K,V>[] segments;
600 >    private Segment<K,V>[] segments;
601  
602 <    /**
603 <     * Applies a supplemental hash function to a given hashCode, which
604 <     * defends against poor quality hash functions.  The result must
605 <     * be non-negative, and for reasonable performance must have good
606 <     * avalanche properties; i.e., that each bit of the argument
607 <     * affects each bit (except sign bit) of the result.
602 >    /* ---------------- Table element access -------------- */
603 >
604 >    /*
605 >     * Volatile access methods are used for table elements as well as
606 >     * elements of in-progress next table while resizing.  Uses are
607 >     * null checked by callers, and implicitly bounds-checked, relying
608 >     * on the invariants that tab arrays have non-zero size, and all
609 >     * indices are masked with (tab.length - 1) which is never
610 >     * negative and always less than length. Note that, to be correct
611 >     * wrt arbitrary concurrency errors by users, bounds checks must
612 >     * operate on local variables, which accounts for some odd-looking
613 >     * inline assignments below.
614       */
615 <    private static final int spread(int h) {
616 <        // Apply base step of MurmurHash; see http://code.google.com/p/smhasher/
617 <        h ^= h >>> 16;
618 <        h *= 0x85ebca6b;
619 <        h ^= h >>> 13;
620 <        h *= 0xc2b2ae35;
621 <        return (h >>> 16) ^ (h & 0x7fffffff); // mask out sign bit
615 >
616 >    static final Node tabAt(Node[] tab, int i) { // used by Iter
617 >        return (Node)UNSAFE.getObjectVolatile(tab, ((long)i<<ASHIFT)+ABASE);
618 >    }
619 >
620 >    private static final boolean casTabAt(Node[] tab, int i, Node c, Node v) {
621 >        return UNSAFE.compareAndSwapObject(tab, ((long)i<<ASHIFT)+ABASE, c, v);
622      }
623  
624 +    private static final void setTabAt(Node[] tab, int i, Node v) {
625 +        UNSAFE.putObjectVolatile(tab, ((long)i<<ASHIFT)+ABASE, v);
626 +    }
627 +
628 +    /* ---------------- Nodes -------------- */
629 +
630      /**
631       * Key-value entry. Note that this is never exported out as a
632 <     * user-visible Map.Entry.
632 >     * user-visible Map.Entry (see MapEntry below). Nodes with a hash
633 >     * field of MOVED are special, and do not contain user keys or
634 >     * values.  Otherwise, keys are never null, and null val fields
635 >     * indicate that a node is in the process of being deleted or
636 >     * created. For purposes of read-only access, a key may be read
637 >     * before a val, but can only be used after checking val to be
638 >     * non-null.
639       */
640 <    static final class Node {
641 <        final int hash;
640 >    static class Node {
641 >        volatile int hash;
642          final Object key;
643          volatile Object val;
644          volatile Node next;
# Line 306 | Line 649 | public class ConcurrentHashMapV8<K, V>
649              this.val = val;
650              this.next = next;
651          }
309    }
652  
653 <    /*
654 <     * Volatile access methods are used for table elements as well as
655 <     * elements of in-progress next table while resizing.  Uses in
656 <     * access and update methods are null checked by callers, and
315 <     * implicitly bounds-checked, relying on the invariants that tab
316 <     * arrays have non-zero size, and all indices are masked with
317 <     * (tab.length - 1) which is never negative and always less than
318 <     * length. The "relaxed" non-volatile forms are used only during
319 <     * table initialization. The only other usage is in
320 <     * HashIterator.advance, which performs explicit checks.
321 <     */
653 >        /** CompareAndSet the hash field */
654 >        final boolean casHash(int cmp, int val) {
655 >            return UNSAFE.compareAndSwapInt(this, hashOffset, cmp, val);
656 >        }
657  
658 <    static final Node tabAt(Node[] tab, int i) { // used in HashIterator
659 <        return (Node)UNSAFE.getObjectVolatile(tab, ((long)i<<ASHIFT)+ABASE);
660 <    }
658 >        /** The number of spins before blocking for a lock */
659 >        static final int MAX_SPINS =
660 >            Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1;
661  
662 <    private static final boolean casTabAt(Node[] tab, int i, Node c, Node v) {
663 <        return UNSAFE.compareAndSwapObject(tab, ((long)i<<ASHIFT)+ABASE, c, v);
664 <    }
662 >        /**
663 >         * Spins a while if LOCKED bit set and this node is the first
664 >         * of its bin, and then sets WAITING bits on hash field and
665 >         * blocks (once) if they are still set.  It is OK for this
666 >         * method to return even if lock is not available upon exit,
667 >         * which enables these simple single-wait mechanics.
668 >         *
669 >         * The corresponding signalling operation is performed within
670 >         * callers: Upon detecting that WAITING has been set when
671 >         * unlocking lock (via a failed CAS from non-waiting LOCKED
672 >         * state), unlockers acquire the sync lock and perform a
673 >         * notifyAll.
674 >         *
675 >         * The initial sanity check on tab and bounds is not currently
676 >         * necessary in the only usages of this method, but enables
677 >         * use in other future contexts.
678 >         */
679 >        final void tryAwaitLock(Node[] tab, int i) {
680 >            if (tab != null && i >= 0 && i < tab.length) { // sanity check
681 >                int r = ThreadLocalRandom.current().nextInt(); // randomize spins
682 >                int spins = MAX_SPINS, h;
683 >                while (tabAt(tab, i) == this && ((h = hash) & LOCKED) != 0) {
684 >                    if (spins >= 0) {
685 >                        r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift
686 >                        if (r >= 0 && --spins == 0)
687 >                            Thread.yield();  // yield before block
688 >                    }
689 >                    else if (casHash(h, h | WAITING)) {
690 >                        synchronized (this) {
691 >                            if (tabAt(tab, i) == this &&
692 >                                (hash & WAITING) == WAITING) {
693 >                                try {
694 >                                    wait();
695 >                                } catch (InterruptedException ie) {
696 >                                    try {
697 >                                        Thread.currentThread().interrupt();
698 >                                    } catch (SecurityException ignore) {
699 >                                    }
700 >                                }
701 >                            }
702 >                            else
703 >                                notifyAll(); // possibly won race vs signaller
704 >                        }
705 >                        break;
706 >                    }
707 >                }
708 >            }
709 >        }
710  
711 <    private static final void setTabAt(Node[] tab, int i, Node v) {
712 <        UNSAFE.putObjectVolatile(tab, ((long)i<<ASHIFT)+ABASE, v);
713 <    }
711 >        // Unsafe mechanics for casHash
712 >        private static final sun.misc.Unsafe UNSAFE;
713 >        private static final long hashOffset;
714  
715 <    private static final Node relaxedTabAt(Node[] tab, int i) {
716 <        return (Node)UNSAFE.getObject(tab, ((long)i<<ASHIFT)+ABASE);
715 >        static {
716 >            try {
717 >                UNSAFE = getUnsafe();
718 >                Class<?> k = Node.class;
719 >                hashOffset = UNSAFE.objectFieldOffset
720 >                    (k.getDeclaredField("hash"));
721 >            } catch (Exception e) {
722 >                throw new Error(e);
723 >            }
724 >        }
725      }
726  
727 <    private static final void relaxedSetTabAt(Node[] tab, int i, Node v) {
728 <        UNSAFE.putObject(tab, ((long)i<<ASHIFT)+ABASE, v);
727 >    /* ---------------- TreeBins -------------- */
728 >
729 >    /**
730 >     * Nodes for use in TreeBins
731 >     */
732 >    static final class TreeNode extends Node {
733 >        TreeNode parent;  // red-black tree links
734 >        TreeNode left;
735 >        TreeNode right;
736 >        TreeNode prev;    // needed to unlink next upon deletion
737 >        boolean red;
738 >
739 >        TreeNode(int hash, Object key, Object val, Node next, TreeNode parent) {
740 >            super(hash, key, val, next);
741 >            this.parent = parent;
742 >        }
743      }
744  
745 <    /* ---------------- Access and update operations -------------- */
745 >    /**
746 >     * A specialized form of red-black tree for use in bins
747 >     * whose size exceeds a threshold.
748 >     *
749 >     * TreeBins use a special form of comparison for search and
750 >     * related operations (which is the main reason we cannot use
751 >     * existing collections such as TreeMaps). TreeBins contain
752 >     * Comparable elements, but may contain others, as well as
753 >     * elements that are Comparable but not necessarily Comparable<T>
754 >     * for the same T, so we cannot invoke compareTo among them. To
755 >     * handle this, the tree is ordered primarily by hash value, then
756 >     * by getClass().getName() order, and then by Comparator order
757 >     * among elements of the same class.  On lookup at a node, if
758 >     * elements are not comparable or compare as 0, both left and
759 >     * right children may need to be searched in the case of tied hash
760 >     * values. (This corresponds to the full list search that would be
761 >     * necessary if all elements were non-Comparable and had tied
762 >     * hashes.)  The red-black balancing code is updated from
763 >     * pre-jdk-collections
764 >     * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
765 >     * based in turn on Cormen, Leiserson, and Rivest "Introduction to
766 >     * Algorithms" (CLR).
767 >     *
768 >     * TreeBins also maintain a separate locking discipline than
769 >     * regular bins. Because they are forwarded via special MOVED
770 >     * nodes at bin heads (which can never change once established),
771 >     * we cannot use those nodes as locks. Instead, TreeBin
772 >     * extends AbstractQueuedSynchronizer to support a simple form of
773 >     * read-write lock. For update operations and table validation,
774 >     * the exclusive form of lock behaves in the same way as bin-head
775 >     * locks. However, lookups use shared read-lock mechanics to allow
776 >     * multiple readers in the absence of writers.  Additionally,
777 >     * these lookups do not ever block: While the lock is not
778 >     * available, they proceed along the slow traversal path (via
779 >     * next-pointers) until the lock becomes available or the list is
780 >     * exhausted, whichever comes first. (These cases are not fast,
781 >     * but maximize aggregate expected throughput.)  The AQS mechanics
782 >     * for doing this are straightforward.  The lock state is held as
783 >     * AQS getState().  Read counts are negative; the write count (1)
784 >     * is positive.  There are no signalling preferences among readers
785 >     * and writers. Since we don't need to export full Lock API, we
786 >     * just override the minimal AQS methods and use them directly.
787 >     */
788 >    static final class TreeBin extends AbstractQueuedSynchronizer {
789 >        private static final long serialVersionUID = 2249069246763182397L;
790 >        transient TreeNode root;  // root of tree
791 >        transient TreeNode first; // head of next-pointer list
792  
793 <   /** Implementation for get and containsKey */
794 <    private final Object internalGet(Object k) {
795 <        int h = spread(k.hashCode());
796 <        Node[] tab = table;
797 <        retry: while (tab != null) {
798 <            Node e = tabAt(tab, (tab.length - 1) & h);
799 <            while (e != null) {
800 <                int eh = e.hash;
801 <                if (eh == h) {
802 <                    Object ek = e.key, ev = e.val;
803 <                    if (ev != null && ek != null && (k == ek || k.equals(ek)))
804 <                        return ev;
805 <                }
806 <                else if (eh < 0) { // bin was moved during resize
807 <                    tab = (Node[])e.key;
808 <                    continue retry;
809 <                }
810 <                e = e.next;
793 >        /* AQS overrides */
794 >        public final boolean isHeldExclusively() { return getState() > 0; }
795 >        public final boolean tryAcquire(int ignore) {
796 >            if (compareAndSetState(0, 1)) {
797 >                setExclusiveOwnerThread(Thread.currentThread());
798 >                return true;
799 >            }
800 >            return false;
801 >        }
802 >        public final boolean tryRelease(int ignore) {
803 >            setExclusiveOwnerThread(null);
804 >            setState(0);
805 >            return true;
806 >        }
807 >        public final int tryAcquireShared(int ignore) {
808 >            for (int c;;) {
809 >                if ((c = getState()) > 0)
810 >                    return -1;
811 >                 if (compareAndSetState(c, c - 1))
812 >                    return 1;
813 >            }
814 >        }
815 >        public final boolean tryReleaseShared(int ignore) {
816 >            int c;
817 >            do {} while (!compareAndSetState(c = getState(), c + 1));
818 >            return c == -1;
819 >        }
820 >
821 >        /** From CLR */
822 >        private void rotateLeft(TreeNode p) {
823 >            if (p != null) {
824 >                TreeNode r = p.right, pp, rl;
825 >                if ((rl = p.right = r.left) != null)
826 >                    rl.parent = p;
827 >                if ((pp = r.parent = p.parent) == null)
828 >                    root = r;
829 >                else if (pp.left == p)
830 >                    pp.left = r;
831 >                else
832 >                    pp.right = r;
833 >                r.left = p;
834 >                p.parent = r;
835              }
364            break;
836          }
366        return null;
367    }
837  
838 +        /** From CLR */
839 +        private void rotateRight(TreeNode p) {
840 +            if (p != null) {
841 +                TreeNode l = p.left, pp, lr;
842 +                if ((lr = p.left = l.right) != null)
843 +                    lr.parent = p;
844 +                if ((pp = l.parent = p.parent) == null)
845 +                    root = l;
846 +                else if (pp.right == p)
847 +                    pp.right = l;
848 +                else
849 +                    pp.left = l;
850 +                l.right = p;
851 +                p.parent = l;
852 +            }
853 +        }
854  
855 <    /** Implementation for put and putIfAbsent */
856 <    private final Object internalPut(Object k, Object v, boolean replace) {
857 <        int h = spread(k.hashCode());
858 <        Object oldVal = null;  // the previous value or null if none
859 <        Node[] tab = table;
860 <        for (;;) {
861 <            Node e; int i;
862 <            if (tab == null)
863 <                tab = grow(0);
864 <            else if ((e = tabAt(tab, i = (tab.length - 1) & h)) == null) {
865 <                if (casTabAt(tab, i, null, new Node(h, k, v, null)))
855 >        /**
856 >         * Returns the TreeNode (or null if not found) for the given key
857 >         * starting at given root.
858 >         */
859 >        @SuppressWarnings("unchecked") final TreeNode getTreeNode
860 >            (int h, Object k, TreeNode p) {
861 >            Class<?> c = k.getClass();
862 >            while (p != null) {
863 >                int dir, ph;  Object pk; Class<?> pc;
864 >                if ((ph = p.hash) == h) {
865 >                    if ((pk = p.key) == k || k.equals(pk))
866 >                        return p;
867 >                    if (c != (pc = pk.getClass()) ||
868 >                        !(k instanceof Comparable) ||
869 >                        (dir = ((Comparable)k).compareTo((Comparable)pk)) == 0) {
870 >                        dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName());
871 >                        TreeNode r = null, s = null, pl, pr;
872 >                        if (dir >= 0) {
873 >                            if ((pl = p.left) != null && h <= pl.hash)
874 >                                s = pl;
875 >                        }
876 >                        else if ((pr = p.right) != null && h >= pr.hash)
877 >                            s = pr;
878 >                        if (s != null && (r = getTreeNode(h, k, s)) != null)
879 >                            return r;
880 >                    }
881 >                }
882 >                else
883 >                    dir = (h < ph) ? -1 : 1;
884 >                p = (dir > 0) ? p.right : p.left;
885 >            }
886 >            return null;
887 >        }
888 >
889 >        /**
890 >         * Wrapper for getTreeNode used by CHM.get. Tries to obtain
891 >         * read-lock to call getTreeNode, but on failure to get the
892 >         * lock, searches along next links.
893 >         */
894 >        final Object getValue(int h, Object k) {
895 >            Node r = null;
896 >            int c = getState(); // Must read lock state first
897 >            for (Node e = first; e != null; e = e.next) {
898 >                if (c <= 0 && compareAndSetState(c, c - 1)) {
899 >                    try {
900 >                        r = getTreeNode(h, k, root);
901 >                    } finally {
902 >                        releaseShared(0);
903 >                    }
904                      break;
905 +                }
906 +                else if ((e.hash & HASH_BITS) == h && k.equals(e.key)) {
907 +                    r = e;
908 +                    break;
909 +                }
910 +                else
911 +                    c = getState();
912 +            }
913 +            return r == null ? null : r.val;
914 +        }
915 +
916 +        /**
917 +         * Finds or adds a node.
918 +         * @return null if added
919 +         */
920 +        @SuppressWarnings("unchecked") final TreeNode putTreeNode
921 +            (int h, Object k, Object v) {
922 +            Class<?> c = k.getClass();
923 +            TreeNode pp = root, p = null;
924 +            int dir = 0;
925 +            while (pp != null) { // find existing node or leaf to insert at
926 +                int ph;  Object pk; Class<?> pc;
927 +                p = pp;
928 +                if ((ph = p.hash) == h) {
929 +                    if ((pk = p.key) == k || k.equals(pk))
930 +                        return p;
931 +                    if (c != (pc = pk.getClass()) ||
932 +                        !(k instanceof Comparable) ||
933 +                        (dir = ((Comparable)k).compareTo((Comparable)pk)) == 0) {
934 +                        dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName());
935 +                        TreeNode r = null, s = null, pl, pr;
936 +                        if (dir >= 0) {
937 +                            if ((pl = p.left) != null && h <= pl.hash)
938 +                                s = pl;
939 +                        }
940 +                        else if ((pr = p.right) != null && h >= pr.hash)
941 +                            s = pr;
942 +                        if (s != null && (r = getTreeNode(h, k, s)) != null)
943 +                            return r;
944 +                    }
945 +                }
946 +                else
947 +                    dir = (h < ph) ? -1 : 1;
948 +                pp = (dir > 0) ? p.right : p.left;
949 +            }
950 +
951 +            TreeNode f = first;
952 +            TreeNode x = first = new TreeNode(h, k, v, f, p);
953 +            if (p == null)
954 +                root = x;
955 +            else { // attach and rebalance; adapted from CLR
956 +                TreeNode xp, xpp;
957 +                if (f != null)
958 +                    f.prev = x;
959 +                if (dir <= 0)
960 +                    p.left = x;
961 +                else
962 +                    p.right = x;
963 +                x.red = true;
964 +                while (x != null && (xp = x.parent) != null && xp.red &&
965 +                       (xpp = xp.parent) != null) {
966 +                    TreeNode xppl = xpp.left;
967 +                    if (xp == xppl) {
968 +                        TreeNode y = xpp.right;
969 +                        if (y != null && y.red) {
970 +                            y.red = false;
971 +                            xp.red = false;
972 +                            xpp.red = true;
973 +                            x = xpp;
974 +                        }
975 +                        else {
976 +                            if (x == xp.right) {
977 +                                rotateLeft(x = xp);
978 +                                xpp = (xp = x.parent) == null ? null : xp.parent;
979 +                            }
980 +                            if (xp != null) {
981 +                                xp.red = false;
982 +                                if (xpp != null) {
983 +                                    xpp.red = true;
984 +                                    rotateRight(xpp);
985 +                                }
986 +                            }
987 +                        }
988 +                    }
989 +                    else {
990 +                        TreeNode y = xppl;
991 +                        if (y != null && y.red) {
992 +                            y.red = false;
993 +                            xp.red = false;
994 +                            xpp.red = true;
995 +                            x = xpp;
996 +                        }
997 +                        else {
998 +                            if (x == xp.left) {
999 +                                rotateRight(x = xp);
1000 +                                xpp = (xp = x.parent) == null ? null : xp.parent;
1001 +                            }
1002 +                            if (xp != null) {
1003 +                                xp.red = false;
1004 +                                if (xpp != null) {
1005 +                                    xpp.red = true;
1006 +                                    rotateLeft(xpp);
1007 +                                }
1008 +                            }
1009 +                        }
1010 +                    }
1011 +                }
1012 +                TreeNode r = root;
1013 +                if (r != null && r.red)
1014 +                    r.red = false;
1015 +            }
1016 +            return null;
1017 +        }
1018 +
1019 +        /**
1020 +         * Removes the given node, which must be present before this
1021 +         * call.  This is messier than typical red-black deletion code
1022 +         * because we cannot swap the contents of an interior node
1023 +         * with a leaf successor that is pinned by "next" pointers
1024 +         * that are accessible independently of lock. So instead we
1025 +         * swap the tree linkages.
1026 +         */
1027 +        final void deleteTreeNode(TreeNode p) {
1028 +            TreeNode next = (TreeNode)p.next; // unlink traversal pointers
1029 +            TreeNode pred = p.prev;
1030 +            if (pred == null)
1031 +                first = next;
1032 +            else
1033 +                pred.next = next;
1034 +            if (next != null)
1035 +                next.prev = pred;
1036 +            TreeNode replacement;
1037 +            TreeNode pl = p.left;
1038 +            TreeNode pr = p.right;
1039 +            if (pl != null && pr != null) {
1040 +                TreeNode s = pr, sl;
1041 +                while ((sl = s.left) != null) // find successor
1042 +                    s = sl;
1043 +                boolean c = s.red; s.red = p.red; p.red = c; // swap colors
1044 +                TreeNode sr = s.right;
1045 +                TreeNode pp = p.parent;
1046 +                if (s == pr) { // p was s's direct parent
1047 +                    p.parent = s;
1048 +                    s.right = p;
1049 +                }
1050 +                else {
1051 +                    TreeNode sp = s.parent;
1052 +                    if ((p.parent = sp) != null) {
1053 +                        if (s == sp.left)
1054 +                            sp.left = p;
1055 +                        else
1056 +                            sp.right = p;
1057 +                    }
1058 +                    if ((s.right = pr) != null)
1059 +                        pr.parent = s;
1060 +                }
1061 +                p.left = null;
1062 +                if ((p.right = sr) != null)
1063 +                    sr.parent = p;
1064 +                if ((s.left = pl) != null)
1065 +                    pl.parent = s;
1066 +                if ((s.parent = pp) == null)
1067 +                    root = s;
1068 +                else if (p == pp.left)
1069 +                    pp.left = s;
1070 +                else
1071 +                    pp.right = s;
1072 +                replacement = sr;
1073 +            }
1074 +            else
1075 +                replacement = (pl != null) ? pl : pr;
1076 +            TreeNode pp = p.parent;
1077 +            if (replacement == null) {
1078 +                if (pp == null) {
1079 +                    root = null;
1080 +                    return;
1081 +                }
1082 +                replacement = p;
1083              }
383            else if (e.hash < 0)
384                tab = (Node[])e.key;
1084              else {
1085 <                boolean validated = false;
1086 <                boolean checkSize = false;
1087 <                synchronized (e) {
1088 <                    if (tabAt(tab, i) == e) {
1089 <                        validated = true;
1090 <                        for (Node first = e;;) {
1091 <                            Object ek, ev;
1092 <                            if (e.hash == h &&
1093 <                                (ek = e.key) != null &&
1094 <                                (ev = e.val) != null &&
1095 <                                (k == ek || k.equals(ek))) {
1096 <                                oldVal = ev;
1097 <                                if (replace)
1098 <                                    e.val = v;
1099 <                                break;
1085 >                replacement.parent = pp;
1086 >                if (pp == null)
1087 >                    root = replacement;
1088 >                else if (p == pp.left)
1089 >                    pp.left = replacement;
1090 >                else
1091 >                    pp.right = replacement;
1092 >                p.left = p.right = p.parent = null;
1093 >            }
1094 >            if (!p.red) { // rebalance, from CLR
1095 >                TreeNode x = replacement;
1096 >                while (x != null) {
1097 >                    TreeNode xp, xpl;
1098 >                    if (x.red || (xp = x.parent) == null) {
1099 >                        x.red = false;
1100 >                        break;
1101 >                    }
1102 >                    if (x == (xpl = xp.left)) {
1103 >                        TreeNode sib = xp.right;
1104 >                        if (sib != null && sib.red) {
1105 >                            sib.red = false;
1106 >                            xp.red = true;
1107 >                            rotateLeft(xp);
1108 >                            sib = (xp = x.parent) == null ? null : xp.right;
1109 >                        }
1110 >                        if (sib == null)
1111 >                            x = xp;
1112 >                        else {
1113 >                            TreeNode sl = sib.left, sr = sib.right;
1114 >                            if ((sr == null || !sr.red) &&
1115 >                                (sl == null || !sl.red)) {
1116 >                                sib.red = true;
1117 >                                x = xp;
1118                              }
1119 <                            Node last = e;
1120 <                            if ((e = e.next) == null) {
1121 <                                last.next = new Node(h, k, v, null);
1122 <                                if (last != first || tab.length <= 64)
1123 <                                    checkSize = true;
1124 <                                break;
1119 >                            else {
1120 >                                if (sr == null || !sr.red) {
1121 >                                    if (sl != null)
1122 >                                        sl.red = false;
1123 >                                    sib.red = true;
1124 >                                    rotateRight(sib);
1125 >                                    sib = (xp = x.parent) == null ? null : xp.right;
1126 >                                }
1127 >                                if (sib != null) {
1128 >                                    sib.red = (xp == null) ? false : xp.red;
1129 >                                    if ((sr = sib.right) != null)
1130 >                                        sr.red = false;
1131 >                                }
1132 >                                if (xp != null) {
1133 >                                    xp.red = false;
1134 >                                    rotateLeft(xp);
1135 >                                }
1136 >                                x = root;
1137 >                            }
1138 >                        }
1139 >                    }
1140 >                    else { // symmetric
1141 >                        TreeNode sib = xpl;
1142 >                        if (sib != null && sib.red) {
1143 >                            sib.red = false;
1144 >                            xp.red = true;
1145 >                            rotateRight(xp);
1146 >                            sib = (xp = x.parent) == null ? null : xp.left;
1147 >                        }
1148 >                        if (sib == null)
1149 >                            x = xp;
1150 >                        else {
1151 >                            TreeNode sl = sib.left, sr = sib.right;
1152 >                            if ((sl == null || !sl.red) &&
1153 >                                (sr == null || !sr.red)) {
1154 >                                sib.red = true;
1155 >                                x = xp;
1156 >                            }
1157 >                            else {
1158 >                                if (sl == null || !sl.red) {
1159 >                                    if (sr != null)
1160 >                                        sr.red = false;
1161 >                                    sib.red = true;
1162 >                                    rotateLeft(sib);
1163 >                                    sib = (xp = x.parent) == null ? null : xp.left;
1164 >                                }
1165 >                                if (sib != null) {
1166 >                                    sib.red = (xp == null) ? false : xp.red;
1167 >                                    if ((sl = sib.left) != null)
1168 >                                        sl.red = false;
1169 >                                }
1170 >                                if (xp != null) {
1171 >                                    xp.red = false;
1172 >                                    rotateRight(xp);
1173 >                                }
1174 >                                x = root;
1175                              }
1176                          }
1177                      }
1178                  }
1179 <                if (validated) {
1180 <                    if (checkSize && tab.length < MAXIMUM_CAPACITY &&
1181 <                        resizing == 0 && counter.sum() >= threshold)
1182 <                        grow(0);
1183 <                    break;
1179 >            }
1180 >            if (p == replacement && (pp = p.parent) != null) {
1181 >                if (p == pp.left) // detach pointers
1182 >                    pp.left = null;
1183 >                else if (p == pp.right)
1184 >                    pp.right = null;
1185 >                p.parent = null;
1186 >            }
1187 >        }
1188 >    }
1189 >
1190 >    /* ---------------- Collision reduction methods -------------- */
1191 >
1192 >    /**
1193 >     * Spreads higher bits to lower, and also forces top 2 bits to 0.
1194 >     * Because the table uses power-of-two masking, sets of hashes
1195 >     * that vary only in bits above the current mask will always
1196 >     * collide. (Among known examples are sets of Float keys holding
1197 >     * consecutive whole numbers in small tables.)  To counter this,
1198 >     * we apply a transform that spreads the impact of higher bits
1199 >     * downward. There is a tradeoff between speed, utility, and
1200 >     * quality of bit-spreading. Because many common sets of hashes
1201 >     * are already reasonably distributed across bits (so don't benefit
1202 >     * from spreading), and because we use trees to handle large sets
1203 >     * of collisions in bins, we don't need excessively high quality.
1204 >     */
1205 >    private static final int spread(int h) {
1206 >        h ^= (h >>> 18) ^ (h >>> 12);
1207 >        return (h ^ (h >>> 10)) & HASH_BITS;
1208 >    }
1209 >
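
The transform above is easy to check in isolation. The sketch below replicates it; HASH_BITS is not defined in this hunk, so the value 0x3fffffff is assumed (consistent with the comment's note that the top 2 bits are forced to 0), and the Float example from the comment is used as the collision case.

    // Standalone sketch of the spread transform above (not part of the diff).
    // HASH_BITS is assumed to be 0x3fffffff, matching "forces top 2 bits to 0".
    public class SpreadDemo {
        static final int HASH_BITS = 0x3fffffff;    // assumed value

        static int spread(int h) {                  // same shape as the method above
            h ^= (h >>> 18) ^ (h >>> 12);
            return (h ^ (h >>> 10)) & HASH_BITS;
        }

        public static void main(String[] args) {
            int mask = 16 - 1;                      // a small 16-slot table
            for (float f = 1.0f; f <= 8.0f; f += 1.0f) {
                int h = Float.valueOf(f).hashCode();
                // Consecutive whole-number Float keys differ only above the mask,
                // so their raw hashes all pick the same slot; after spreading
                // they no longer all pile into one slot.
                System.out.println(f + ": raw slot " + (h & mask)
                                   + ", spread slot " + (spread(h) & mask));
            }
        }
    }
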
1210 >    /**
1211 >     * Replaces a list bin with a tree bin. Call only when locked.
1212 >     * Fails to replace if the given key is non-comparable or if the
1213 >     * table is being resized or needs resizing.
1214 >     */
1215 >    private final void replaceWithTreeBin(Node[] tab, int index, Object key) {
1216 >        if ((key instanceof Comparable) &&
1217 >            (tab.length >= MAXIMUM_CAPACITY || counter.sum() < (long)sizeCtl)) {
1218 >            TreeBin t = new TreeBin();
1219 >            for (Node e = tabAt(tab, index); e != null; e = e.next)
1220 >                t.putTreeNode(e.hash & HASH_BITS, e.key, e.val);
1221 >            setTabAt(tab, index, new Node(MOVED, t, null, null));
1222 >        }
1223 >    }
1224 >
1225 >    /* ---------------- Internal access and update methods -------------- */
1226 >
1227 >    /** Implementation for get and containsKey */
1228 >    private final Object internalGet(Object k) {
1229 >        int h = spread(k.hashCode());
1230 >        retry: for (Node[] tab = table; tab != null;) {
1231 >            Node e, p; Object ek, ev; int eh;      // locals to read fields once
1232 >            for (e = tabAt(tab, (tab.length - 1) & h); e != null; e = e.next) {
1233 >                if ((eh = e.hash) == MOVED) {
1234 >                    if ((ek = e.key) instanceof TreeBin)  // search TreeBin
1235 >                        return ((TreeBin)ek).getValue(h, k);
1236 >                    else {                        // restart with new table
1237 >                        tab = (Node[])ek;
1238 >                        continue retry;
1239 >                    }
1240                  }
1241 +                else if ((eh & HASH_BITS) == h && (ev = e.val) != null &&
1242 +                         ((ek = e.key) == k || k.equals(ek)))
1243 +                    return ev;
1244              }
1245 +            break;
1246          }
1247 <        if (oldVal == null)
421 <            counter.increment();
422 <        return oldVal;
1247 >        return null;
1248      }
1249  
1250      /**
# Line 430 | Line 1255 | public class ConcurrentHashMapV8<K, V>
1255      private final Object internalReplace(Object k, Object v, Object cv) {
1256          int h = spread(k.hashCode());
1257          Object oldVal = null;
1258 <        Node e; int i;
1259 <        Node[] tab = table;
1260 <        while (tab != null &&
1261 <               (e = tabAt(tab, i = (tab.length - 1) & h)) != null) {
1262 <            if (e.hash < 0)
1263 <                tab = (Node[])e.key;
1264 <            else {
1258 >        for (Node[] tab = table;;) {
1259 >            Node f; int i, fh; Object fk;
1260 >            if (tab == null ||
1261 >                (f = tabAt(tab, i = (tab.length - 1) & h)) == null)
1262 >                break;
1263 >            else if ((fh = f.hash) == MOVED) {
1264 >                if ((fk = f.key) instanceof TreeBin) {
1265 >                    TreeBin t = (TreeBin)fk;
1266 >                    boolean validated = false;
1267 >                    boolean deleted = false;
1268 >                    t.acquire(0);
1269 >                    try {
1270 >                        if (tabAt(tab, i) == f) {
1271 >                            validated = true;
1272 >                            TreeNode p = t.getTreeNode(h, k, t.root);
1273 >                            if (p != null) {
1274 >                                Object pv = p.val;
1275 >                                if (cv == null || cv == pv || cv.equals(pv)) {
1276 >                                    oldVal = pv;
1277 >                                    if ((p.val = v) == null) {
1278 >                                        deleted = true;
1279 >                                        t.deleteTreeNode(p);
1280 >                                    }
1281 >                                }
1282 >                            }
1283 >                        }
1284 >                    } finally {
1285 >                        t.release(0);
1286 >                    }
1287 >                    if (validated) {
1288 >                        if (deleted)
1289 >                            counter.add(-1L);
1290 >                        break;
1291 >                    }
1292 >                }
1293 >                else
1294 >                    tab = (Node[])fk;
1295 >            }
1296 >            else if ((fh & HASH_BITS) != h && f.next == null) // precheck
1297 >                break;                          // rules out possible existence
1298 >            else if ((fh & LOCKED) != 0) {
1299 >                checkForResize();               // try resizing if can't get lock
1300 >                f.tryAwaitLock(tab, i);
1301 >            }
1302 >            else if (f.casHash(fh, fh | LOCKED)) {
1303                  boolean validated = false;
1304                  boolean deleted = false;
1305 <                synchronized (e) {
1306 <                    if (tabAt(tab, i) == e) {
1305 >                try {
1306 >                    if (tabAt(tab, i) == f) {
1307                          validated = true;
1308 <                        Node pred = null;
446 <                        do {
1308 >                        for (Node e = f, pred = null;;) {
1309                              Object ek, ev;
1310 <                            if (e.hash == h &&
1311 <                                (ek = e.key) != null &&
1312 <                                (ev = e.val) != null &&
451 <                                (k == ek || k.equals(ek))) {
1310 >                            if ((e.hash & HASH_BITS) == h &&
1311 >                                ((ev = e.val) != null) &&
1312 >                                ((ek = e.key) == k || k.equals(ek))) {
1313                                  if (cv == null || cv == ev || cv.equals(ev)) {
1314                                      oldVal = ev;
1315                                      if ((e.val = v) == null) {
# Line 463 | Line 1324 | public class ConcurrentHashMapV8<K, V>
1324                                  break;
1325                              }
1326                              pred = e;
1327 <                        } while ((e = e.next) != null);
1327 >                            if ((e = e.next) == null)
1328 >                                break;
1329 >                        }
1330 >                    }
1331 >                } finally {
1332 >                    if (!f.casHash(fh | LOCKED, fh)) {
1333 >                        f.hash = fh;
1334 >                        synchronized (f) { f.notifyAll(); };
1335                      }
1336                  }
1337                  if (validated) {
1338                      if (deleted)
1339 <                        counter.decrement();
1339 >                        counter.add(-1L);
1340                      break;
1341                  }
1342              }
# Line 476 | Line 1344 | public class ConcurrentHashMapV8<K, V>
1344          return oldVal;
1345      }
1346  
1347 <    /** Implementation for computeIfAbsent and compute */
1348 <    @SuppressWarnings("unchecked")
1349 <    private final V internalCompute(K k,
1350 <                                    MappingFunction<? super K, ? extends V> f,
1351 <                                    boolean replace) {
1347 >    /*
1348 >     * Internal versions of the six insertion methods, each a
1349 >     * little more complicated than the last. All have
1350 >     * the same basic structure as the first (internalPut):
1351 >     *  1. If table uninitialized, create
1352 >     *  2. If bin empty, try to CAS new node
1353 >     *  3. If bin stale, use new table
1354 >     *  4. If bin converted to TreeBin, validate and relay to TreeBin methods
1355 >     *  5. Lock and validate; if valid, scan and add or update
1356 >     *
1357 >     * The others interweave other checks and/or alternative actions:
1358 >     *  * Plain put checks for and performs resize after insertion.
1359 >     *  * putIfAbsent prescans for mapping without lock (and fails to add
1360 >     *    if present), which also makes pre-emptive resize checks worthwhile.
1361 >     *  * computeIfAbsent extends the form used in putIfAbsent with additional
1362 >     *    mechanics to deal with calls, potential exceptions, and null
1363 >     *    returns from the function call.
1364 >     *  * compute uses the same function-call mechanics, but without
1365 >     *    the prescans.
1366 >     *  * merge acts as putIfAbsent in the absent case, but invokes the
1367 >     *    update function if present
1368 >     *  * putAll attempts to pre-allocate enough table space
1369 >     *    and more lazily performs count updates and checks.
1370 >     *
1371 >     * Someday when details settle down a bit more, it might be worth
1372 >     * some factoring to reduce sprawl.
1373 >     */
1374 >
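
The common loop shape listed above can be seen in miniature in the following toy sketch, which is not from the source: it keeps only steps 2 and 5 -- a CAS for an empty bin, and a lock-validate-scan for a populated one -- and replaces lazy table creation, forwarding nodes, tree bins, hash-bit locking, and resizing with a fixed table and plain synchronized blocks.

    import java.util.concurrent.atomic.AtomicReferenceArray;

    // Toy sketch of the retry-loop structure (hypothetical, heavily simplified).
    public class ToyBinMap<K, V> {
        static final class Node<K, V> {
            final int hash; final K key; V val; Node<K, V> next;
            Node(int hash, K key, V val, Node<K, V> next) {
                this.hash = hash; this.key = key; this.val = val; this.next = next;
            }
        }

        private final AtomicReferenceArray<Node<K, V>> table =
            new AtomicReferenceArray<Node<K, V>>(16);    // fixed size: no lazy init or resizing

        public V put(K k, V v) {
            int h = k.hashCode() & 0x7fffffff;
            for (;;) {                                   // retry loop, as in the real code
                int i = h & (table.length() - 1);
                Node<K, V> f = table.get(i);
                if (f == null) {                         // step 2: empty bin, try to CAS
                    if (table.compareAndSet(i, null, new Node<K, V>(h, k, v, null)))
                        return null;
                } else {
                    synchronized (f) {                   // step 5: lock bin head ...
                        if (table.get(i) == f) {         // ... validate, then scan and add or update
                            for (Node<K, V> e = f;;) {
                                if (e.hash == h && (e.key == k || k.equals(e.key))) {
                                    V old = e.val; e.val = v; return old;
                                }
                                if (e.next == null) {
                                    e.next = new Node<K, V>(h, k, v, null);
                                    return null;
                                }
                                e = e.next;
                            }
                        }
                    }                                    // validation failed: retry outer loop
                }
            }
        }

        public static void main(String[] args) {
            ToyBinMap<String, Integer> m = new ToyBinMap<String, Integer>();
            System.out.println(m.put("a", 1));           // null: new mapping
            System.out.println(m.put("a", 2));           // 1: previous value
        }
    }
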
1375 >    /** Implementation for put */
1376 >    private final Object internalPut(Object k, Object v) {
1377          int h = spread(k.hashCode());
1378 <        V val = null;
1379 <        boolean added = false;
1380 <        Node[] tab = table;
488 <        for (;;) {
489 <            Node e; int i;
1378 >        int count = 0;
1379 >        for (Node[] tab = table;;) {
1380 >            int i; Node f; int fh; Object fk;
1381              if (tab == null)
1382 <                tab = grow(0);
1383 <            else if ((e = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1384 <                Node node = new Node(h, k, null, null);
1385 <                boolean validated = false;
1386 <                synchronized (node) {
1387 <                    if (casTabAt(tab, i, null, node)) {
1388 <                        validated = true;
1389 <                        try {
1390 <                            val = f.map(k);
1391 <                            if (val != null) {
1392 <                                node.val = val;
1393 <                                added = true;
1382 >                tab = initTable();
1383 >            else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1384 >                if (casTabAt(tab, i, null, new Node(h, k, v, null)))
1385 >                    break;                   // no lock when adding to empty bin
1386 >            }
1387 >            else if ((fh = f.hash) == MOVED) {
1388 >                if ((fk = f.key) instanceof TreeBin) {
1389 >                    TreeBin t = (TreeBin)fk;
1390 >                    Object oldVal = null;
1391 >                    t.acquire(0);
1392 >                    try {
1393 >                        if (tabAt(tab, i) == f) {
1394 >                            count = 2;
1395 >                            TreeNode p = t.putTreeNode(h, k, v);
1396 >                            if (p != null) {
1397 >                                oldVal = p.val;
1398 >                                p.val = v;
1399                              }
1400 <                        } finally {
1401 <                            if (!added)
1402 <                                setTabAt(tab, i, null);
1400 >                        }
1401 >                    } finally {
1402 >                        t.release(0);
1403 >                    }
1404 >                    if (count != 0) {
1405 >                        if (oldVal != null)
1406 >                            return oldVal;
1407 >                        break;
1408 >                    }
1409 >                }
1410 >                else
1411 >                    tab = (Node[])fk;
1412 >            }
1413 >            else if ((fh & LOCKED) != 0) {
1414 >                checkForResize();
1415 >                f.tryAwaitLock(tab, i);
1416 >            }
1417 >            else if (f.casHash(fh, fh | LOCKED)) {
1418 >                Object oldVal = null;
1419 >                try {                        // needed in case equals() throws
1420 >                    if (tabAt(tab, i) == f) {
1421 >                        count = 1;
1422 >                        for (Node e = f;; ++count) {
1423 >                            Object ek, ev;
1424 >                            if ((e.hash & HASH_BITS) == h &&
1425 >                                (ev = e.val) != null &&
1426 >                                ((ek = e.key) == k || k.equals(ek))) {
1427 >                                oldVal = ev;
1428 >                                e.val = v;
1429 >                                break;
1430 >                            }
1431 >                            Node last = e;
1432 >                            if ((e = e.next) == null) {
1433 >                                last.next = new Node(h, k, v, null);
1434 >                                if (count >= TREE_THRESHOLD)
1435 >                                    replaceWithTreeBin(tab, i, k);
1436 >                                break;
1437 >                            }
1438 >                        }
1439 >                    }
1440 >                } finally {                  // unlock and signal if needed
1441 >                    if (!f.casHash(fh | LOCKED, fh)) {
1442 >                        f.hash = fh;
1443 >                        synchronized (f) { f.notifyAll(); };
1444 >                    }
1445 >                }
1446 >                if (count != 0) {
1447 >                    if (oldVal != null)
1448 >                        return oldVal;
1449 >                    if (tab.length <= 64)
1450 >                        count = 2;
1451 >                    break;
1452 >                }
1453 >            }
1454 >        }
1455 >        counter.add(1L);
1456 >        if (count > 1)
1457 >            checkForResize();
1458 >        return null;
1459 >    }
1460 >
1461 >    /** Implementation for putIfAbsent */
1462 >    private final Object internalPutIfAbsent(Object k, Object v) {
1463 >        int h = spread(k.hashCode());
1464 >        int count = 0;
1465 >        for (Node[] tab = table;;) {
1466 >            int i; Node f; int fh; Object fk, fv;
1467 >            if (tab == null)
1468 >                tab = initTable();
1469 >            else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1470 >                if (casTabAt(tab, i, null, new Node(h, k, v, null)))
1471 >                    break;
1472 >            }
1473 >            else if ((fh = f.hash) == MOVED) {
1474 >                if ((fk = f.key) instanceof TreeBin) {
1475 >                    TreeBin t = (TreeBin)fk;
1476 >                    Object oldVal = null;
1477 >                    t.acquire(0);
1478 >                    try {
1479 >                        if (tabAt(tab, i) == f) {
1480 >                            count = 2;
1481 >                            TreeNode p = t.putTreeNode(h, k, v);
1482 >                            if (p != null)
1483 >                                oldVal = p.val;
1484 >                        }
1485 >                    } finally {
1486 >                        t.release(0);
1487 >                    }
1488 >                    if (count != 0) {
1489 >                        if (oldVal != null)
1490 >                            return oldVal;
1491 >                        break;
1492 >                    }
1493 >                }
1494 >                else
1495 >                    tab = (Node[])fk;
1496 >            }
1497 >            else if ((fh & HASH_BITS) == h && (fv = f.val) != null &&
1498 >                     ((fk = f.key) == k || k.equals(fk)))
1499 >                return fv;
1500 >            else {
1501 >                Node g = f.next;
1502 >                if (g != null) { // at least 2 nodes -- search and maybe resize
1503 >                    for (Node e = g;;) {
1504 >                        Object ek, ev;
1505 >                        if ((e.hash & HASH_BITS) == h && (ev = e.val) != null &&
1506 >                            ((ek = e.key) == k || k.equals(ek)))
1507 >                            return ev;
1508 >                        if ((e = e.next) == null) {
1509 >                            checkForResize();
1510 >                            break;
1511 >                        }
1512 >                    }
1513 >                }
1514 >                if (((fh = f.hash) & LOCKED) != 0) {
1515 >                    checkForResize();
1516 >                    f.tryAwaitLock(tab, i);
1517 >                }
1518 >                else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) {
1519 >                    Object oldVal = null;
1520 >                    try {
1521 >                        if (tabAt(tab, i) == f) {
1522 >                            count = 1;
1523 >                            for (Node e = f;; ++count) {
1524 >                                Object ek, ev;
1525 >                                if ((e.hash & HASH_BITS) == h &&
1526 >                                    (ev = e.val) != null &&
1527 >                                    ((ek = e.key) == k || k.equals(ek))) {
1528 >                                    oldVal = ev;
1529 >                                    break;
1530 >                                }
1531 >                                Node last = e;
1532 >                                if ((e = e.next) == null) {
1533 >                                    last.next = new Node(h, k, v, null);
1534 >                                    if (count >= TREE_THRESHOLD)
1535 >                                        replaceWithTreeBin(tab, i, k);
1536 >                                    break;
1537 >                                }
1538 >                            }
1539 >                        }
1540 >                    } finally {
1541 >                        if (!f.casHash(fh | LOCKED, fh)) {
1542 >                            f.hash = fh;
1543 >                            synchronized (f) { f.notifyAll(); };
1544                          }
1545                      }
1546 +                    if (count != 0) {
1547 +                        if (oldVal != null)
1548 +                            return oldVal;
1549 +                        if (tab.length <= 64)
1550 +                            count = 2;
1551 +                        break;
1552 +                    }
1553                  }
1554 <                if (validated)
1554 >            }
1555 >        }
1556 >        counter.add(1L);
1557 >        if (count > 1)
1558 >            checkForResize();
1559 >        return null;
1560 >    }
1561 >
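
Together with internalReplace above, these internal methods back the map's standard ConcurrentMap operations. A quick usage sketch, exercising only methods inherited from java.util.concurrent.ConcurrentMap:

    import jsr166e.ConcurrentHashMapV8;

    // Usage sketch: standard ConcurrentMap operations backed by the internal
    // methods above (internalPut, internalPutIfAbsent, internalReplace).
    public class BasicOpsDemo {
        public static void main(String[] args) {
            ConcurrentHashMapV8<String, Integer> map =
                new ConcurrentHashMapV8<String, Integer>();
            System.out.println(map.put("a", 1));          // null: no previous mapping
            System.out.println(map.putIfAbsent("a", 2));  // 1: existing value is kept
            System.out.println(map.replace("a", 1, 3));   // true: old value matched
            System.out.println(map.remove("a", 2));       // false: value does not match
            System.out.println(map.get("a"));             // 3
        }
    }
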
1562 >    /** Implementation for computeIfAbsent */
1563 >    private final Object internalComputeIfAbsent(K k,
1564 >                                                 Fun<? super K, ?> mf) {
1565 >        int h = spread(k.hashCode());
1566 >        Object val = null;
1567 >        int count = 0;
1568 >        for (Node[] tab = table;;) {
1569 >            Node f; int i, fh; Object fk, fv;
1570 >            if (tab == null)
1571 >                tab = initTable();
1572 >            else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1573 >                Node node = new Node(fh = h | LOCKED, k, null, null);
1574 >                if (casTabAt(tab, i, null, node)) {
1575 >                    count = 1;
1576 >                    try {
1577 >                        if ((val = mf.apply(k)) != null)
1578 >                            node.val = val;
1579 >                    } finally {
1580 >                        if (val == null)
1581 >                            setTabAt(tab, i, null);
1582 >                        if (!node.casHash(fh, h)) {
1583 >                            node.hash = h;
1584 >                            synchronized (node) { node.notifyAll(); };
1585 >                        }
1586 >                    }
1587 >                }
1588 >                if (count != 0)
1589                      break;
1590              }
1591 <            else if (e.hash < 0)
1592 <                tab = (Node[])e.key;
1593 <            else if (Thread.holdsLock(e))
1594 <                throw new IllegalStateException("Recursive map computation");
1591 >            else if ((fh = f.hash) == MOVED) {
1592 >                if ((fk = f.key) instanceof TreeBin) {
1593 >                    TreeBin t = (TreeBin)fk;
1594 >                    boolean added = false;
1595 >                    t.acquire(0);
1596 >                    try {
1597 >                        if (tabAt(tab, i) == f) {
1598 >                            count = 1;
1599 >                            TreeNode p = t.getTreeNode(h, k, t.root);
1600 >                            if (p != null)
1601 >                                val = p.val;
1602 >                            else if ((val = mf.apply(k)) != null) {
1603 >                                added = true;
1604 >                                count = 2;
1605 >                                t.putTreeNode(h, k, val);
1606 >                            }
1607 >                        }
1608 >                    } finally {
1609 >                        t.release(0);
1610 >                    }
1611 >                    if (count != 0) {
1612 >                        if (!added)
1613 >                            return val;
1614 >                        break;
1615 >                    }
1616 >                }
1617 >                else
1618 >                    tab = (Node[])fk;
1619 >            }
1620 >            else if ((fh & HASH_BITS) == h && (fv = f.val) != null &&
1621 >                     ((fk = f.key) == k || k.equals(fk)))
1622 >                return fv;
1623              else {
1624 <                boolean validated = false;
1625 <                boolean checkSize = false;
1626 <                synchronized (e) {
1627 <                    if (tabAt(tab, i) == e) {
1628 <                        validated = true;
1629 <                        for (Node first = e;;) {
1630 <                            Object ek, ev, fv;
1631 <                            if (e.hash == h &&
1632 <                                (ek = e.key) != null &&
1624 >                Node g = f.next;
1625 >                if (g != null) {
1626 >                    for (Node e = g;;) {
1627 >                        Object ek, ev;
1628 >                        if ((e.hash & HASH_BITS) == h && (ev = e.val) != null &&
1629 >                            ((ek = e.key) == k || k.equals(ek)))
1630 >                            return ev;
1631 >                        if ((e = e.next) == null) {
1632 >                            checkForResize();
1633 >                            break;
1634 >                        }
1635 >                    }
1636 >                }
1637 >                if (((fh = f.hash) & LOCKED) != 0) {
1638 >                    checkForResize();
1639 >                    f.tryAwaitLock(tab, i);
1640 >                }
1641 >                else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) {
1642 >                    boolean added = false;
1643 >                    try {
1644 >                        if (tabAt(tab, i) == f) {
1645 >                            count = 1;
1646 >                            for (Node e = f;; ++count) {
1647 >                                Object ek, ev;
1648 >                                if ((e.hash & HASH_BITS) == h &&
1649 >                                    (ev = e.val) != null &&
1650 >                                    ((ek = e.key) == k || k.equals(ek))) {
1651 >                                    val = ev;
1652 >                                    break;
1653 >                                }
1654 >                                Node last = e;
1655 >                                if ((e = e.next) == null) {
1656 >                                    if ((val = mf.apply(k)) != null) {
1657 >                                        added = true;
1658 >                                        last.next = new Node(h, k, val, null);
1659 >                                        if (count >= TREE_THRESHOLD)
1660 >                                            replaceWithTreeBin(tab, i, k);
1661 >                                    }
1662 >                                    break;
1663 >                                }
1664 >                            }
1665 >                        }
1666 >                    } finally {
1667 >                        if (!f.casHash(fh | LOCKED, fh)) {
1668 >                            f.hash = fh;
1669 >                            synchronized (f) { f.notifyAll(); };
1670 >                        }
1671 >                    }
1672 >                    if (count != 0) {
1673 >                        if (!added)
1674 >                            return val;
1675 >                        if (tab.length <= 64)
1676 >                            count = 2;
1677 >                        break;
1678 >                    }
1679 >                }
1680 >            }
1681 >        }
1682 >        if (val != null) {
1683 >            counter.add(1L);
1684 >            if (count > 1)
1685 >                checkForResize();
1686 >        }
1687 >        return val;
1688 >    }
1689 >
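
A usage sketch of the absent-key path follows: the mapping function runs only when no value is present, and an existing mapping is returned without invoking it. The public computeIfAbsent wrapper and the nested Fun interface (whose apply method the code above calls) are assumptions here, not shown in this hunk.

    import jsr166e.ConcurrentHashMapV8;

    // Hypothetical usage of the public computeIfAbsent wrapper assumed above.
    public class ComputeIfAbsentDemo {
        public static void main(String[] args) {
            ConcurrentHashMapV8<String, Integer> map =
                new ConcurrentHashMapV8<String, Integer>();
            ConcurrentHashMapV8.Fun<String, Integer> length =
                new ConcurrentHashMapV8.Fun<String, Integer>() {
                    public Integer apply(String key) { return key.length(); }
                };
            System.out.println(map.computeIfAbsent("hello", length)); // 5: computed and stored
            System.out.println(map.computeIfAbsent("hello", length)); // 5: existing value, no call
        }
    }
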
1690 >    /** Implementation for compute */
1691 >    @SuppressWarnings("unchecked") private final Object internalCompute
1692 >        (K k, boolean onlyIfPresent, BiFun<? super K, ? super V, ? extends V> mf) {
1693 >        int h = spread(k.hashCode());
1694 >        Object val = null;
1695 >        int delta = 0;
1696 >        int count = 0;
1697 >        for (Node[] tab = table;;) {
1698 >            Node f; int i, fh; Object fk;
1699 >            if (tab == null)
1700 >                tab = initTable();
1701 >            else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1702 >                if (onlyIfPresent)
1703 >                    break;
1704 >                Node node = new Node(fh = h | LOCKED, k, null, null);
1705 >                if (casTabAt(tab, i, null, node)) {
1706 >                    try {
1707 >                        count = 1;
1708 >                        if ((val = mf.apply(k, null)) != null) {
1709 >                            node.val = val;
1710 >                            delta = 1;
1711 >                        }
1712 >                    } finally {
1713 >                        if (delta == 0)
1714 >                            setTabAt(tab, i, null);
1715 >                        if (!node.casHash(fh, h)) {
1716 >                            node.hash = h;
1717 >                            synchronized (node) { node.notifyAll(); };
1718 >                        }
1719 >                    }
1720 >                }
1721 >                if (count != 0)
1722 >                    break;
1723 >            }
1724 >            else if ((fh = f.hash) == MOVED) {
1725 >                if ((fk = f.key) instanceof TreeBin) {
1726 >                    TreeBin t = (TreeBin)fk;
1727 >                    t.acquire(0);
1728 >                    try {
1729 >                        if (tabAt(tab, i) == f) {
1730 >                            count = 1;
1731 >                            TreeNode p = t.getTreeNode(h, k, t.root);
1732 >                            Object pv = (p == null) ? null : p.val;
1733 >                            if ((val = mf.apply(k, (V)pv)) != null) {
1734 >                                if (p != null)
1735 >                                    p.val = val;
1736 >                                else {
1737 >                                    count = 2;
1738 >                                    delta = 1;
1739 >                                    t.putTreeNode(h, k, val);
1740 >                                }
1741 >                            }
1742 >                            else if (p != null) {
1743 >                                delta = -1;
1744 >                                t.deleteTreeNode(p);
1745 >                            }
1746 >                        }
1747 >                    } finally {
1748 >                        t.release(0);
1749 >                    }
1750 >                    if (count != 0)
1751 >                        break;
1752 >                }
1753 >                else
1754 >                    tab = (Node[])fk;
1755 >            }
1756 >            else if ((fh & LOCKED) != 0) {
1757 >                checkForResize();
1758 >                f.tryAwaitLock(tab, i);
1759 >            }
1760 >            else if (f.casHash(fh, fh | LOCKED)) {
1761 >                try {
1762 >                    if (tabAt(tab, i) == f) {
1763 >                        count = 1;
1764 >                        for (Node e = f, pred = null;; ++count) {
1765 >                            Object ek, ev;
1766 >                            if ((e.hash & HASH_BITS) == h &&
1767                                  (ev = e.val) != null &&
1768 <                                (k == ek || k.equals(ek))) {
1769 <                                if (replace && (fv = f.map(k)) != null)
1770 <                                    ev = e.val = fv;
1771 <                                val = (V)ev;
1768 >                                ((ek = e.key) == k || k.equals(ek))) {
1769 >                                val = mf.apply(k, (V)ev);
1770 >                                if (val != null)
1771 >                                    e.val = val;
1772 >                                else {
1773 >                                    delta = -1;
1774 >                                    Node en = e.next;
1775 >                                    if (pred != null)
1776 >                                        pred.next = en;
1777 >                                    else
1778 >                                        setTabAt(tab, i, en);
1779 >                                }
1780                                  break;
1781                              }
1782 <                            Node last = e;
1782 >                            pred = e;
1783                              if ((e = e.next) == null) {
1784 <                                if ((val = f.map(k)) != null) {
1785 <                                    last.next = new Node(h, k, val, null);
1786 <                                    added = true;
1787 <                                    if (last != first || tab.length <= 64)
1788 <                                        checkSize = true;
1784 >                                if (!onlyIfPresent && (val = mf.apply(k, null)) != null) {
1785 >                                    pred.next = new Node(h, k, val, null);
1786 >                                    delta = 1;
1787 >                                    if (count >= TREE_THRESHOLD)
1788 >                                        replaceWithTreeBin(tab, i, k);
1789                                  }
1790                                  break;
1791                              }
1792                          }
1793                      }
1794 +                } finally {
1795 +                    if (!f.casHash(fh | LOCKED, fh)) {
1796 +                        f.hash = fh;
1797 +                        synchronized (f) { f.notifyAll(); };
1798 +                    }
1799                  }
1800 <                if (validated) {
1801 <                    if (checkSize && tab.length < MAXIMUM_CAPACITY &&
1802 <                        resizing == 0 && counter.sum() >= threshold)
550 <                        grow(0);
1800 >                if (count != 0) {
1801 >                    if (tab.length <= 64)
1802 >                        count = 2;
1803                      break;
1804                  }
1805              }
1806          }
1807 <        if (added)
1808 <            counter.increment();
1807 >        if (delta != 0) {
1808 >            counter.add((long)delta);
1809 >            if (count > 1)
1810 >                checkForResize();
1811 >        }
1812          return val;
1813      }
1814  
1815 <    /*
1816 <     * Reclassifies nodes in each bin to new table.  Because we are
1817 <     * using power-of-two expansion, the elements from each bin must
1818 <     * either stay at same index, or move with a power of two
1819 <     * offset. We eliminate unnecessary node creation by catching
1820 <     * cases where old nodes can be reused because their next fields
1821 <     * won't change.  Statistically, at the default threshold, only
1822 <     * about one-sixth of them need cloning when a table doubles. The
1823 <     * nodes they replace will be garbage collectable as soon as they
1824 <     * are no longer referenced by any reader thread that may be in
1825 <     * the midst of concurrently traversing table.
1826 <     *
1827 <     * Transfers are done from the bottom up to preserve iterator
1828 <     * traversability. On each step, the old bin is locked,
1829 <     * moved/copied, and then replaced with a forwarding node.
1830 <     */
576 <    private static final void transfer(Node[] tab, Node[] nextTab) {
577 <        int n = tab.length;
578 <        int mask = nextTab.length - 1;
579 <        Node fwd = new Node(MOVED, nextTab, null, null);
580 <        for (int i = n - 1; i >= 0; --i) {
581 <            for (Node e;;) {
582 <                if ((e = tabAt(tab, i)) == null) {
583 <                    if (casTabAt(tab, i, e, fwd))
584 <                        break;
1815 >    /** Implementation for merge */
1816 >    @SuppressWarnings("unchecked") private final Object internalMerge
1817 >        (K k, V v, BiFun<? super V, ? super V, ? extends V> mf) {
1818 >        int h = spread(k.hashCode());
1819 >        Object val = null;
1820 >        int delta = 0;
1821 >        int count = 0;
1822 >        for (Node[] tab = table;;) {
1823 >            int i; Node f; int fh; Object fk, fv;
1824 >            if (tab == null)
1825 >                tab = initTable();
1826 >            else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1827 >                if (casTabAt(tab, i, null, new Node(h, k, v, null))) {
1828 >                    delta = 1;
1829 >                    val = v;
1830 >                    break;
1831                  }
1832 <                else {
1833 <                    int idx = e.hash & mask;
1834 <                    boolean validated = false;
1835 <                    synchronized (e) {
1836 <                        if (tabAt(tab, i) == e) {
1837 <                            validated = true;
1838 <                            Node lastRun = e;
1839 <                            for (Node p = e.next; p != null; p = p.next) {
1840 <                                int j = p.hash & mask;
1841 <                                if (j != idx) {
1842 <                                    idx = j;
1843 <                                    lastRun = p;
1832 >            }
1833 >            else if ((fh = f.hash) == MOVED) {
1834 >                if ((fk = f.key) instanceof TreeBin) {
1835 >                    TreeBin t = (TreeBin)fk;
1836 >                    t.acquire(0);
1837 >                    try {
1838 >                        if (tabAt(tab, i) == f) {
1839 >                            count = 1;
1840 >                            TreeNode p = t.getTreeNode(h, k, t.root);
1841 >                            val = (p == null) ? v : mf.apply((V)p.val, v);
1842 >                            if (val != null) {
1843 >                                if (p != null)
1844 >                                    p.val = val;
1845 >                                else {
1846 >                                    count = 2;
1847 >                                    delta = 1;
1848 >                                    t.putTreeNode(h, k, val);
1849                                  }
1850                              }
1851 <                            relaxedSetTabAt(nextTab, idx, lastRun);
1852 <                            for (Node p = e; p != lastRun; p = p.next) {
1853 <                                int h = p.hash;
603 <                                int j = h & mask;
604 <                                Node r = relaxedTabAt(nextTab, j);
605 <                                relaxedSetTabAt(nextTab, j,
606 <                                                new Node(h, p.key, p.val, r));
1851 >                            else if (p != null) {
1852 >                                delta = -1;
1853 >                                t.deleteTreeNode(p);
1854                              }
608                            setTabAt(tab, i, fwd);
1855                          }
1856 +                    } finally {
1857 +                        t.release(0);
1858                      }
1859 <                    if (validated)
1859 >                    if (count != 0)
1860                          break;
1861                  }
1862 +                else
1863 +                    tab = (Node[])fk;
1864 +            }
1865 +            else if ((fh & LOCKED) != 0) {
1866 +                checkForResize();
1867 +                f.tryAwaitLock(tab, i);
1868 +            }
1869 +            else if (f.casHash(fh, fh | LOCKED)) {
1870 +                try {
1871 +                    if (tabAt(tab, i) == f) {
1872 +                        count = 1;
1873 +                        for (Node e = f, pred = null;; ++count) {
1874 +                            Object ek, ev;
1875 +                            if ((e.hash & HASH_BITS) == h &&
1876 +                                (ev = e.val) != null &&
1877 +                                ((ek = e.key) == k || k.equals(ek))) {
1878 +                                val = mf.apply(v, (V)ev);
1879 +                                if (val != null)
1880 +                                    e.val = val;
1881 +                                else {
1882 +                                    delta = -1;
1883 +                                    Node en = e.next;
1884 +                                    if (pred != null)
1885 +                                        pred.next = en;
1886 +                                    else
1887 +                                        setTabAt(tab, i, en);
1888 +                                }
1889 +                                break;
1890 +                            }
1891 +                            pred = e;
1892 +                            if ((e = e.next) == null) {
1893 +                                val = v;
1894 +                                pred.next = new Node(h, k, val, null);
1895 +                                delta = 1;
1896 +                                if (count >= TREE_THRESHOLD)
1897 +                                    replaceWithTreeBin(tab, i, k);
1898 +                                break;
1899 +                            }
1900 +                        }
1901 +                    }
1902 +                } finally {
1903 +                    if (!f.casHash(fh | LOCKED, fh)) {
1904 +                        f.hash = fh;
1905 +                        synchronized (f) { f.notifyAll(); };
1906 +                    }
1907 +                }
1908 +                if (count != 0) {
1909 +                    if (tab.length <= 64)
1910 +                        count = 2;
1911 +                    break;
1912 +                }
1913              }
1914          }
1915 +        if (delta != 0) {
1916 +            counter.add((long)delta);
1917 +            if (count > 1)
1918 +                checkForResize();
1919 +        }
1920 +        return val;
1921      }
1922  
1923 +    /** Implementation for putAll */
1924 +    private final void internalPutAll(Map<?, ?> m) {
1925 +        tryPresize(m.size());
1926 +        long delta = 0L;     // number of uncommitted additions
1927 +        boolean npe = false; // to throw exception on exit for nulls
1928 +        try {                // to clean up counts on other exceptions
1929 +            for (Map.Entry<?, ?> entry : m.entrySet()) {
1930 +                Object k, v;
1931 +                if (entry == null || (k = entry.getKey()) == null ||
1932 +                    (v = entry.getValue()) == null) {
1933 +                    npe = true;
1934 +                    break;
1935 +                }
1936 +                int h = spread(k.hashCode());
1937 +                for (Node[] tab = table;;) {
1938 +                    int i; Node f; int fh; Object fk;
1939 +                    if (tab == null)
1940 +                        tab = initTable();
1941 +                    else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null){
1942 +                        if (casTabAt(tab, i, null, new Node(h, k, v, null))) {
1943 +                            ++delta;
1944 +                            break;
1945 +                        }
1946 +                    }
1947 +                    else if ((fh = f.hash) == MOVED) {
1948 +                        if ((fk = f.key) instanceof TreeBin) {
1949 +                            TreeBin t = (TreeBin)fk;
1950 +                            boolean validated = false;
1951 +                            t.acquire(0);
1952 +                            try {
1953 +                                if (tabAt(tab, i) == f) {
1954 +                                    validated = true;
1955 +                                    TreeNode p = t.getTreeNode(h, k, t.root);
1956 +                                    if (p != null)
1957 +                                        p.val = v;
1958 +                                    else {
1959 +                                        t.putTreeNode(h, k, v);
1960 +                                        ++delta;
1961 +                                    }
1962 +                                }
1963 +                            } finally {
1964 +                                t.release(0);
1965 +                            }
1966 +                            if (validated)
1967 +                                break;
1968 +                        }
1969 +                        else
1970 +                            tab = (Node[])fk;
1971 +                    }
1972 +                    else if ((fh & LOCKED) != 0) {
1973 +                        counter.add(delta);
1974 +                        delta = 0L;
1975 +                        checkForResize();
1976 +                        f.tryAwaitLock(tab, i);
1977 +                    }
1978 +                    else if (f.casHash(fh, fh | LOCKED)) {
1979 +                        int count = 0;
1980 +                        try {
1981 +                            if (tabAt(tab, i) == f) {
1982 +                                count = 1;
1983 +                                for (Node e = f;; ++count) {
1984 +                                    Object ek, ev;
1985 +                                    if ((e.hash & HASH_BITS) == h &&
1986 +                                        (ev = e.val) != null &&
1987 +                                        ((ek = e.key) == k || k.equals(ek))) {
1988 +                                        e.val = v;
1989 +                                        break;
1990 +                                    }
1991 +                                    Node last = e;
1992 +                                    if ((e = e.next) == null) {
1993 +                                        ++delta;
1994 +                                        last.next = new Node(h, k, v, null);
1995 +                                        if (count >= TREE_THRESHOLD)
1996 +                                            replaceWithTreeBin(tab, i, k);
1997 +                                        break;
1998 +                                    }
1999 +                                }
2000 +                            }
2001 +                        } finally {
2002 +                            if (!f.casHash(fh | LOCKED, fh)) {
2003 +                                f.hash = fh;
2004 +                                synchronized (f) { f.notifyAll(); };
2005 +                            }
2006 +                        }
2007 +                        if (count != 0) {
2008 +                            if (count > 1) {
2009 +                                counter.add(delta);
2010 +                                delta = 0L;
2011 +                                checkForResize();
2012 +                            }
2013 +                            break;
2014 +                        }
2015 +                    }
2016 +                }
2017 +            }
2018 +        } finally {
2019 +            if (delta != 0)
2020 +                counter.add(delta);
2021 +        }
2022 +        if (npe)
2023 +            throw new NullPointerException();
2024 +    }
2025 +
2026 +    /* ---------------- Table Initialization and Resizing -------------- */
2027 +
2028      /**
2029 <     * If not already resizing, initializes or creates next table and
2030 <     * transfers bins. Rechecks occupancy after a transfer to see if
621 <     * another resize is already needed because resizings are lagging
622 <     * additions.
623 <     *
624 <     * @param sizeHint overridden capacity target (nonzero only from putAll)
625 <     * @return current table
2029 >     * Returns a power of two table size for the given desired capacity.
2030 >     * See Hacker's Delight, sec 3.2
2031       */
2032 <    private final Node[] grow(int sizeHint) {
2033 <        if (resizing == 0 &&
2034 <            UNSAFE.compareAndSwapInt(this, resizingOffset, 0, 1)) {
2035 <            try {
2036 <                for (;;) {
2037 <                    int cap, n;
2038 <                    Node[] tab = table;
2039 <                    if (tab == null) {
2040 <                        int c = initCap;
2041 <                        if (c < sizeHint)
2042 <                            c = sizeHint;
2043 <                        if (c == DEFAULT_CAPACITY)
2044 <                            cap = c;
2045 <                        else if (c >= MAXIMUM_CAPACITY)
2046 <                            cap = MAXIMUM_CAPACITY;
2047 <                        else {
2048 <                            cap = MINIMUM_CAPACITY;
2049 <                            while (cap < c)
2050 <                                cap <<= 1;
2051 <                        }
2032 >    private static final int tableSizeFor(int c) {
2033 >        int n = c - 1;
2034 >        n |= n >>> 1;
2035 >        n |= n >>> 2;
2036 >        n |= n >>> 4;
2037 >        n |= n >>> 8;
2038 >        n |= n >>> 16;
2039 >        return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
2040 >    }
2041 >
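
The bit-smearing above rounds any requested capacity up to the next power of
two. A standalone sketch (not part of the class; MAX_CAP stands in for
MAXIMUM_CAPACITY, which is 1 << 30 here) showing the same computation on a few
sample inputs:

    // Standalone illustration of the power-of-two rounding used by tableSizeFor.
    public class TableSizeForDemo {
        static final int MAX_CAP = 1 << 30;

        static int tableSizeFor(int c) {
            int n = c - 1;        // subtract one so exact powers of two are preserved
            n |= n >>> 1;         // smear the highest set bit downward...
            n |= n >>> 2;
            n |= n >>> 4;
            n |= n >>> 8;
            n |= n >>> 16;        // ...until every lower bit is set
            return (n < 0) ? 1 : (n >= MAX_CAP) ? MAX_CAP : n + 1;
        }

        public static void main(String[] args) {
            System.out.println(tableSizeFor(10));  // 16
            System.out.println(tableSizeFor(16));  // 16
            System.out.println(tableSizeFor(17));  // 32
        }
    }
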
2042 >    /**
2043 >     * Initializes table, using the size recorded in sizeCtl.
2044 >     */
2045 >    private final Node[] initTable() {
2046 >        Node[] tab; int sc;
2047 >        while ((tab = table) == null) {
2048 >            if ((sc = sizeCtl) < 0)
2049 >                Thread.yield(); // lost initialization race; just spin
2050 >            else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
2051 >                try {
2052 >                    if ((tab = table) == null) {
2053 >                        int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
2054 >                        tab = table = new Node[n];
2055 >                        sc = n - (n >>> 2);
2056                      }
2057 <                    else if ((n = tab.length) < MAXIMUM_CAPACITY &&
2058 <                             (sizeHint <= 0 || n < sizeHint))
650 <                        cap = n << 1;
651 <                    else
652 <                        break;
653 <                    threshold = (int)(cap * loadFactor) - THRESHOLD_OFFSET;
654 <                    Node[] nextTab = new Node[cap];
655 <                    if (tab != null)
656 <                        transfer(tab, nextTab);
657 <                    table = nextTab;
658 <                    if (tab == null || cap >= MAXIMUM_CAPACITY ||
659 <                        ((sizeHint > 0) ? cap >= sizeHint :
660 <                         counter.sum() < threshold))
661 <                        break;
2057 >                } finally {
2058 >                    sizeCtl = sc;
2059                  }
2060 <            } finally {
664 <                resizing = 0;
2060 >                break;
2061              }
2062          }
2063 <        else if (table == null)
668 <            Thread.yield(); // lost initialization race; just spin
669 <        return table;
2063 >        return tab;
2064      }
2065  
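
initTable races competing threads on a single CAS of sizeCtl, where a negative
value means another thread is already initializing. A simplified, hypothetical
sketch of the same protocol, using AtomicInteger in place of the Unsafe-based
CAS and a plain Object[] in place of Node[]:

    import java.util.concurrent.atomic.AtomicInteger;

    // Hypothetical illustration of the sizeCtl protocol: a positive value is the
    // target capacity, -1 means initialization is in progress.
    class LazyTable {
        volatile Object[] table;
        final AtomicInteger sizeCtl = new AtomicInteger(16); // desired initial capacity

        Object[] initTable() {
            Object[] tab;
            while ((tab = table) == null) {
                int sc = sizeCtl.get();
                if (sc < 0)
                    Thread.yield();                    // lost the race; let the winner finish
                else if (sizeCtl.compareAndSet(sc, -1)) {
                    try {
                        if ((tab = table) == null) {
                            int n = (sc > 0) ? sc : 16;
                            tab = table = new Object[n];
                            sc = n - (n >>> 2);        // next resize trigger: 0.75 * n
                        }
                    } finally {
                        sizeCtl.set(sc);               // publish the threshold (or restore sc)
                    }
                    break;
                }
            }
            return tab;
        }
    }
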
2066      /**
2067 <     * Implementation for putAll and constructor with Map
2068 <     * argument. Tries to first override initial capacity or grow
2069 <     * based on map size to pre-allocate table space.
2067 >     * If table is too small and not already resizing, creates next
2068 >     * table and transfers bins.  Rechecks occupancy after a transfer
2069 >     * to see if another resize is already needed because resizings
2070 >     * are lagging additions.
2071       */
2072 <    private final void internalPutAll(Map<? extends K, ? extends V> m) {
2073 <        int s = m.size();
2074 <        grow((s >= (MAXIMUM_CAPACITY >>> 1)) ? s : s + (s >>> 1));
2075 <        for (Map.Entry<? extends K, ? extends V> e : m.entrySet()) {
2076 <            Object k = e.getKey();
2077 <            Object v = e.getValue();
2078 <            if (k == null || v == null)
2079 <                throw new NullPointerException();
2080 <            internalPut(k, v, true);
2072 >    private final void checkForResize() {
2073 >        Node[] tab; int n, sc;
2074 >        while ((tab = table) != null &&
2075 >               (n = tab.length) < MAXIMUM_CAPACITY &&
2076 >               (sc = sizeCtl) >= 0 && counter.sum() >= (long)sc &&
2077 >               UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
2078 >            try {
2079 >                if (tab == table) {
2080 >                    table = rebuild(tab);
2081 >                    sc = (n << 1) - (n >>> 1);
2082 >                }
2083 >            } finally {
2084 >                sizeCtl = sc;
2085 >            }
2086          }
2087      }
2088  
2089      /**
2090 <     * Implementation for clear. Steps through each bin, removing all nodes.
2090 >     * Tries to presize table to accommodate the given number of elements.
2091 >     *
2092 >     * @param size number of elements (doesn't need to be perfectly accurate)
2093       */
2094 <    private final void internalClear() {
2095 <        long deletions = 0L;
2096 <        int i = 0;
2097 <        Node[] tab = table;
2098 <        while (tab != null && i < tab.length) {
2099 <            Node e = tabAt(tab, i);
2100 <            if (e == null)
2101 <                ++i;
2102 <            else if (e.hash < 0)
2103 <                tab = (Node[])e.key;
2104 <            else {
2094 >    private final void tryPresize(int size) {
2095 >        int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
2096 >            tableSizeFor(size + (size >>> 1) + 1);
2097 >        int sc;
2098 >        while ((sc = sizeCtl) >= 0) {
2099 >            Node[] tab = table; int n;
2100 >            if (tab == null || (n = tab.length) == 0) {
2101 >                n = (sc > c) ? sc : c;
2102 >                if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
2103 >                    try {
2104 >                        if (table == tab) {
2105 >                            table = new Node[n];
2106 >                            sc = n - (n >>> 2);
2107 >                        }
2108 >                    } finally {
2109 >                        sizeCtl = sc;
2110 >                    }
2111 >                }
2112 >            }
2113 >            else if (c <= sc || n >= MAXIMUM_CAPACITY)
2114 >                break;
2115 >            else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
2116 >                try {
2117 >                    if (table == tab) {
2118 >                        table = rebuild(tab);
2119 >                        sc = (n << 1) - (n >>> 1);
2120 >                    }
2121 >                } finally {
2122 >                    sizeCtl = sc;
2123 >                }
2124 >            }
2125 >        }
2126 >    }
2127 >
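
For a sense of the sizing arithmetic: tryPresize pads the caller's estimate by
50% plus one before rounding up, so the resulting power-of-two table can absorb
the estimated number of mappings without crossing the 0.75 resize trigger. A
worked example with assumed numbers (not taken from the class):

    // Illustrative arithmetic only: why the estimate is padded before rounding.
    int size = 1000;                        // caller's estimate of the mapping count
    int padded = size + (size >>> 1) + 1;   // 1501
    int cap = 2048;                         // tableSizeFor(1501): next power of two
    int threshold = cap - (cap >>> 2);      // 1536, comfortably above the 1000 estimate
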
2128 >    /*
2129 >     * Moves and/or copies the nodes in each bin to new table. See
2130 >     * above for explanation.
2131 >     *
2132 >     * @return the new table
2133 >     */
2134 >    private static final Node[] rebuild(Node[] tab) {
2135 >        int n = tab.length;
2136 >        Node[] nextTab = new Node[n << 1];
2137 >        Node fwd = new Node(MOVED, nextTab, null, null);
2138 >        int[] buffer = null;       // holds bins to revisit; null until needed
2139 >        Node rev = null;           // reverse forwarder; null until needed
2140 >        int nbuffered = 0;         // the number of bins in buffer list
2141 >        int bufferIndex = 0;       // buffer index of current buffered bin
2142 >        int bin = n - 1;           // current non-buffered bin or -1 if none
2143 >
2144 >        for (int i = bin;;) {      // start upwards sweep
2145 >            int fh; Node f;
2146 >            if ((f = tabAt(tab, i)) == null) {
2147 >                if (bin >= 0) {    // Unbuffered; no lock needed (or available)
2148 >                    if (!casTabAt(tab, i, f, fwd))
2149 >                        continue;
2150 >                }
2151 >                else {             // transiently use a locked forwarding node
2152 >                    Node g = new Node(MOVED|LOCKED, nextTab, null, null);
2153 >                    if (!casTabAt(tab, i, f, g))
2154 >                        continue;
2155 >                    setTabAt(nextTab, i, null);
2156 >                    setTabAt(nextTab, i + n, null);
2157 >                    setTabAt(tab, i, fwd);
2158 >                    if (!g.casHash(MOVED|LOCKED, MOVED)) {
2159 >                        g.hash = MOVED;
2160 >                        synchronized (g) { g.notifyAll(); }
2161 >                    }
2162 >                }
2163 >            }
2164 >            else if ((fh = f.hash) == MOVED) {
2165 >                Object fk = f.key;
2166 >                if (fk instanceof TreeBin) {
2167 >                    TreeBin t = (TreeBin)fk;
2168 >                    boolean validated = false;
2169 >                    t.acquire(0);
2170 >                    try {
2171 >                        if (tabAt(tab, i) == f) {
2172 >                            validated = true;
2173 >                            splitTreeBin(nextTab, i, t);
2174 >                            setTabAt(tab, i, fwd);
2175 >                        }
2176 >                    } finally {
2177 >                        t.release(0);
2178 >                    }
2179 >                    if (!validated)
2180 >                        continue;
2181 >                }
2182 >            }
2183 >            else if ((fh & LOCKED) == 0 && f.casHash(fh, fh|LOCKED)) {
2184                  boolean validated = false;
2185 <                synchronized (e) {
2186 <                    if (tabAt(tab, i) == e) {
2185 >                try {              // split to lo and hi lists; copying as needed
2186 >                    if (tabAt(tab, i) == f) {
2187                          validated = true;
2188 <                        do {
2189 <                            if (e.val != null) {
709 <                                e.val = null;
710 <                                ++deletions;
711 <                            }
712 <                        } while ((e = e.next) != null);
713 <                        setTabAt(tab, i, null);
2188 >                        splitBin(nextTab, i, f);
2189 >                        setTabAt(tab, i, fwd);
2190                      }
2191 <                }
2192 <                if (validated) {
2193 <                    ++i;
2194 <                    if (deletions > THRESHOLD_OFFSET) { // bound lag in counts
719 <                        counter.add(-deletions);
720 <                        deletions = 0L;
2191 >                } finally {
2192 >                    if (!f.casHash(fh | LOCKED, fh)) {
2193 >                        f.hash = fh;
2194 >                        synchronized (f) { f.notifyAll(); };
2195                      }
2196                  }
2197 +                if (!validated)
2198 +                    continue;
2199 +            }
2200 +            else {
2201 +                if (buffer == null) // initialize buffer for revisits
2202 +                    buffer = new int[TRANSFER_BUFFER_SIZE];
2203 +                if (bin < 0 && bufferIndex > 0) {
2204 +                    int j = buffer[--bufferIndex];
2205 +                    buffer[bufferIndex] = i;
2206 +                    i = j;         // swap with another bin
2207 +                    continue;
2208 +                }
2209 +                if (bin < 0 || nbuffered >= TRANSFER_BUFFER_SIZE) {
2210 +                    f.tryAwaitLock(tab, i);
2211 +                    continue;      // no other options -- block
2212 +                }
2213 +                if (rev == null)   // initialize reverse-forwarder
2214 +                    rev = new Node(MOVED, tab, null, null);
2215 +                if (tabAt(tab, i) != f || (f.hash & LOCKED) == 0)
2216 +                    continue;      // recheck before adding to list
2217 +                buffer[nbuffered++] = i;
2218 +                setTabAt(nextTab, i, rev);     // install place-holders
2219 +                setTabAt(nextTab, i + n, rev);
2220 +            }
2221 +
2222 +            if (bin > 0)
2223 +                i = --bin;
2224 +            else if (buffer != null && nbuffered > 0) {
2225 +                bin = -1;
2226 +                i = buffer[bufferIndex = --nbuffered];
2227              }
2228 +            else
2229 +                return nextTab;
2230          }
725        if (deletions != 0L)
726            counter.add(-deletions);
2231      }
2232  
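
Bins that rebuild has already transferred are replaced by forwarding nodes
whose hash is MOVED and whose key field holds the next table, so readers that
land on one simply re-probe there. A simplified, hypothetical reader loop in
that spirit (tree bins, locking and hash spreading omitted; Node, MOVED and
tabAt are this class's own internals, and h is assumed to be an already-spread
hash):

    // Hypothetical sketch: how a reader follows a forwarding node during a resize.
    Object findValue(Node[] tab, int h, Object key) {
        retry: for (;;) {
            if (tab == null)
                return null;
            for (Node e = tabAt(tab, (tab.length - 1) & h); e != null; e = e.next) {
                if (e.hash == MOVED) {           // bin already moved by rebuild()
                    tab = (Node[]) e.key;        // a forwarder's key is the next table
                    continue retry;              // re-probe in the larger table
                }
                Object ek, ev;
                if (e.hash == h && (ev = e.val) != null &&
                    ((ek = e.key) == key || key.equals(ek)))
                    return ev;                   // live mapping found
            }
            return null;                         // end of bin: no mapping
        }
    }
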
2233      /**
2234 <     * Base class for key, value, and entry iterators, plus internal
2235 <     * implementations of public traversal-based methods, to avoid
732 <     * duplicating traversal code.
2234 >     * Splits a normal bin with list headed by e into lo and hi parts;
2235 >     * installs in given table.
2236       */
2237 <    class HashIterator {
2238 <        private Node next;          // the next entry to return
2239 <        private Node[] tab;         // current table; updated if resized
2240 <        private Node lastReturned;  // the last entry returned, for remove
2241 <        private Object nextVal;     // cached value of next
2242 <        private int index;          // index of bin to use next
2243 <        private int baseIndex;      // current index of initial table
2244 <        private final int baseSize; // initial table size
2237 >    private static void splitBin(Node[] nextTab, int i, Node e) {
2238 >        int bit = nextTab.length >>> 1; // bit to split on
2239 >        int runBit = e.hash & bit;
2240 >        Node lastRun = e, lo = null, hi = null;
2241 >        for (Node p = e.next; p != null; p = p.next) {
2242 >            int b = p.hash & bit;
2243 >            if (b != runBit) {
2244 >                runBit = b;
2245 >                lastRun = p;
2246 >            }
2247 >        }
2248 >        if (runBit == 0)
2249 >            lo = lastRun;
2250 >        else
2251 >            hi = lastRun;
2252 >        for (Node p = e; p != lastRun; p = p.next) {
2253 >            int ph = p.hash & HASH_BITS;
2254 >            Object pk = p.key, pv = p.val;
2255 >            if ((ph & bit) == 0)
2256 >                lo = new Node(ph, pk, pv, lo);
2257 >            else
2258 >                hi = new Node(ph, pk, pv, hi);
2259 >        }
2260 >        setTabAt(nextTab, i, lo);
2261 >        setTabAt(nextTab, i + bit, hi);
2262 >    }
2263  
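
Because the table always doubles, splitBin needs only one bit of each hash:
entries whose hash & bit is zero keep index i, the rest move to i + bit (the
old capacity). A standalone check of that invariant with arbitrary sample
values:

    // Standalone illustration: a hash h that mapped to bin i = h & (n - 1) in a
    // table of length n maps to either i or i + n once the table doubles.
    public class SplitIndexDemo {
        public static void main(String[] args) {
            int n = 16;                        // old capacity (the bit to split on)
            int[] hashes = { 3, 19, 35, 50 };  // arbitrary sample hashes
            for (int h : hashes) {
                int oldIndex = h & (n - 1);
                int newIndex = h & (2 * n - 1);
                int expected = ((h & n) == 0) ? oldIndex : oldIndex + n;
                System.out.println(h + ": " + oldIndex + " -> " + newIndex
                                   + " (expected " + expected + ")");
            }
        }
    }
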
2264 <        HashIterator() {
2265 <            Node[] t = tab = table;
2266 <            if (t == null)
2267 <                baseSize = 0;
2264 >    /**
2265 >     * Splits a tree bin into lo and hi parts; installs in given table.
2266 >     */
2267 >    private static void splitTreeBin(Node[] nextTab, int i, TreeBin t) {
2268 >        int bit = nextTab.length >>> 1;
2269 >        TreeBin lt = new TreeBin();
2270 >        TreeBin ht = new TreeBin();
2271 >        int lc = 0, hc = 0;
2272 >        for (Node e = t.first; e != null; e = e.next) {
2273 >            int h = e.hash & HASH_BITS;
2274 >            Object k = e.key, v = e.val;
2275 >            if ((h & bit) == 0) {
2276 >                ++lc;
2277 >                lt.putTreeNode(h, k, v);
2278 >            }
2279              else {
2280 <                baseSize = t.length;
2281 <                advance(null);
2280 >                ++hc;
2281 >                ht.putTreeNode(h, k, v);
2282              }
2283          }
2284 +        Node ln, hn; // throw away trees if too small
2285 +        if (lc <= (TREE_THRESHOLD >>> 1)) {
2286 +            ln = null;
2287 +            for (Node p = lt.first; p != null; p = p.next)
2288 +                ln = new Node(p.hash, p.key, p.val, ln);
2289 +        }
2290 +        else
2291 +            ln = new Node(MOVED, lt, null, null);
2292 +        setTabAt(nextTab, i, ln);
2293 +        if (hc <= (TREE_THRESHOLD >>> 1)) {
2294 +            hn = null;
2295 +            for (Node p = ht.first; p != null; p = p.next)
2296 +                hn = new Node(p.hash, p.key, p.val, hn);
2297 +        }
2298 +        else
2299 +            hn = new Node(MOVED, ht, null, null);
2300 +        setTabAt(nextTab, i + bit, hn);
2301 +    }
2302  
2303 <        public final boolean hasNext()         { return next != null; }
2304 <        public final boolean hasMoreElements() { return next != null; }
2305 <
2306 <        /**
2307 <         * Advances next.  Normally, iteration proceeds bin-by-bin
2308 <         * traversing lists.  However, if the table has been resized,
2309 <         * then all future steps must traverse both the bin at the
2310 <         * current index as well as at (index + baseSize); and so on
2311 <         * for further resizings. To paranoically cope with potential
2312 <         * (improper) sharing of iterators across threads, table reads
2313 <         * are bounds-checked.
2314 <         */
2315 <        final void advance(Node e) {
2316 <            for (;;) {
2317 <                Node[] t; int i; // for bounds checks
2318 <                if (e != null) {
2319 <                    Object ek = e.key, ev = e.val;
2320 <                    if (ev != null && ek != null) {
2321 <                        nextVal = ev;
2322 <                        next = e;
2323 <                        break;
2303 >    /**
2304 >     * Implementation for clear. Steps through each bin, removing all
2305 >     * nodes.
2306 >     */
2307 >    private final void internalClear() {
2308 >        long delta = 0L; // negative number of deletions
2309 >        int i = 0;
2310 >        Node[] tab = table;
2311 >        while (tab != null && i < tab.length) {
2312 >            int fh; Object fk;
2313 >            Node f = tabAt(tab, i);
2314 >            if (f == null)
2315 >                ++i;
2316 >            else if ((fh = f.hash) == MOVED) {
2317 >                if ((fk = f.key) instanceof TreeBin) {
2318 >                    TreeBin t = (TreeBin)fk;
2319 >                    t.acquire(0);
2320 >                    try {
2321 >                        if (tabAt(tab, i) == f) {
2322 >                            for (Node p = t.first; p != null; p = p.next) {
2323 >                                if (p.val != null) { // (currently always true)
2324 >                                    p.val = null;
2325 >                                    --delta;
2326 >                                }
2327 >                            }
2328 >                            t.first = null;
2329 >                            t.root = null;
2330 >                            ++i;
2331 >                        }
2332 >                    } finally {
2333 >                        t.release(0);
2334                      }
775                    e = e.next;
2335                  }
2336 <                else if (baseIndex < baseSize && (t = tab) != null &&
2337 <                         t.length > (i = index) && i >= 0) {
2338 <                    if ((e = tabAt(t, i)) != null && e.hash < 0) {
2339 <                        tab = (Node[])e.key;
2340 <                        e = null;
2336 >                else
2337 >                    tab = (Node[])fk;
2338 >            }
2339 >            else if ((fh & LOCKED) != 0) {
2340 >                counter.add(delta); // opportunistically update count
2341 >                delta = 0L;
2342 >                f.tryAwaitLock(tab, i);
2343 >            }
2344 >            else if (f.casHash(fh, fh | LOCKED)) {
2345 >                try {
2346 >                    if (tabAt(tab, i) == f) {
2347 >                        for (Node e = f; e != null; e = e.next) {
2348 >                            if (e.val != null) {  // (currently always true)
2349 >                                e.val = null;
2350 >                                --delta;
2351 >                            }
2352 >                        }
2353 >                        setTabAt(tab, i, null);
2354 >                        ++i;
2355 >                    }
2356 >                } finally {
2357 >                    if (!f.casHash(fh | LOCKED, fh)) {
2358 >                        f.hash = fh;
2359 >                        synchronized (f) { f.notifyAll(); };
2360                      }
783                    else if (i + baseSize < t.length)
784                        index += baseSize;    // visit forwarded upper slots
785                    else
786                        index = ++baseIndex;
787                }
788                else {
789                    next = null;
790                    break;
2361                  }
2362              }
2363          }
2364 +        if (delta != 0)
2365 +            counter.add(delta);
2366 +    }
2367  
2368 <        final Object nextKey() {
796 <            Node e = next;
797 <            if (e == null)
798 <                throw new NoSuchElementException();
799 <            Object k = e.key;
800 <            advance((lastReturned = e).next);
801 <            return k;
802 <        }
2368 >    /* ---------------- Table Traversal -------------- */
2369  
2370 <        final Object nextValue() {
2371 <            Node e = next;
2372 <            if (e == null)
2373 <                throw new NoSuchElementException();
2374 <            Object v = nextVal;
2375 <            advance((lastReturned = e).next);
2376 <            return v;
2370 >    /**
2371 >     * Encapsulates traversal for methods such as containsValue; also
2372 >     * serves as a base class for other iterators and bulk tasks.
2373 >     *
2374 >     * At each step, the iterator snapshots the key ("nextKey") and
2375 >     * value ("nextVal") of a valid node (i.e., one that, at point of
2376 >     * snapshot, has a non-null user value). Because val fields can
2377 >     * change (including to null, indicating deletion), field nextVal
2378 >     * might not be accurate at point of use, but still maintains the
2379 >     * weak consistency property of holding a value that was once
2380 >     * valid. To support iterator.remove, the nextKey field is not
2381 >     * updated (nulled out) when the iterator cannot advance.
2382 >     *
2383 >     * Internal traversals directly access these fields, as in:
2384 >     * {@code while (it.advance() != null) { process(it.nextKey); }}
2385 >     *
2386 >     * Exported iterators must track whether the iterator has advanced
2387 >     * (in hasNext vs next) (by setting/checking/nulling field
2388 >     * nextVal), and then extract key, value, or key-value pairs as
2389 >     * return values of next().
2390 >     *
2391 >     * The iterator visits once each still-valid node that was
2392 >     * reachable upon iterator construction. It might miss some that
2393 >     * were added to a bin after the bin was visited, which is OK wrt
2394 >     * consistency guarantees. Maintaining this property in the face
2395 >     * of possible ongoing resizes requires a fair amount of
2396 >     * bookkeeping state that is difficult to optimize away amidst
2397 >     * volatile accesses.  Even so, traversal maintains reasonable
2398 >     * throughput.
2399 >     *
2400 >     * Normally, iteration proceeds bin-by-bin traversing lists.
2401 >     * However, if the table has been resized, then all future steps
2402 >     * must traverse both the bin at the current index as well as at
2403 >     * (index + baseSize); and so on for further resizings. To
2404 >     * paranoically cope with potential sharing by users of iterators
2405 >     * across threads, iteration terminates if a bounds checks fails
2406 >     * for a table read.
2407 >     *
2408 >     * This class extends CountedCompleter to streamline parallel
2409 >     * iteration in bulk operations. This adds only a few fields of
2410 >     * space overhead, which is small enough not to matter in the
2411 >     * cases where it is not needed.  Because CountedCompleter is
2412 >     * Serializable, but iterators need not be, we need to add warning
2413 >     * suppressions.
2414 >     */
2415 >    @SuppressWarnings("serial") static class Traverser<K,V,R> extends CountedCompleter<R> {
2416 >        final ConcurrentHashMapV8<K, V> map;
2417 >        Node next;           // the next entry to use
2418 >        Object nextKey;      // cached key field of next
2419 >        Object nextVal;      // cached val field of next
2420 >        Node[] tab;          // current table; updated if resized
2421 >        int index;           // index of bin to use next
2422 >        int baseIndex;       // current index of initial table
2423 >        int baseLimit;       // index bound for initial table
2424 >        int baseSize;        // initial table size
2425 >        int batch;           // split control
2426 >
2427 >        /** Creates iterator for all entries in the table. */
2428 >        Traverser(ConcurrentHashMapV8<K, V> map) {
2429 >            this.map = map;
2430 >        }
2431 >
2432 >        /** Creates iterator for split() methods and task constructors */
2433 >        Traverser(ConcurrentHashMapV8<K,V> map, Traverser<K,V,?> it, int batch) {
2434 >            super(it);
2435 >            this.batch = batch;
2436 >            if ((this.map = map) != null && it != null) { // split parent
2437 >                Node[] t;
2438 >                if ((t = it.tab) == null &&
2439 >                    (t = it.tab = map.table) != null)
2440 >                    it.baseLimit = it.baseSize = t.length;
2441 >                this.tab = t;
2442 >                this.baseSize = it.baseSize;
2443 >                int hi = this.baseLimit = it.baseLimit;
2444 >                it.baseLimit = this.index = this.baseIndex =
2445 >                    (hi + it.baseIndex + 1) >>> 1;
2446 >            }
2447          }
2448  
2449 <        final WriteThroughEntry nextEntry() {
2449 >        /**
2450 >         * Advances next; returns nextVal or null if terminated.
2451 >         * See above for explanation.
2452 >         */
2453 >        final Object advance() {
2454              Node e = next;
2455 <            if (e == null)
2456 <                throw new NoSuchElementException();
2457 <            WriteThroughEntry entry =
2458 <                new WriteThroughEntry(e.key, nextVal);
2459 <            advance((lastReturned = e).next);
2460 <            return entry;
2455 >            Object ev = null;
2456 >            outer: do {
2457 >                if (e != null)                  // advance past used/skipped node
2458 >                    e = e.next;
2459 >                while (e == null) {             // get to next non-null bin
2460 >                    ConcurrentHashMapV8<K, V> m;
2461 >                    Node[] t; int b, i, n; Object ek; // checks must use locals
2462 >                    if ((t = tab) != null)
2463 >                        n = t.length;
2464 >                    else if ((m = map) != null && (t = tab = m.table) != null)
2465 >                        n = baseLimit = baseSize = t.length;
2466 >                    else
2467 >                        break outer;
2468 >                    if ((b = baseIndex) >= baseLimit ||
2469 >                        (i = index) < 0 || i >= n)
2470 >                        break outer;
2471 >                    if ((e = tabAt(t, i)) != null && e.hash == MOVED) {
2472 >                        if ((ek = e.key) instanceof TreeBin)
2473 >                            e = ((TreeBin)ek).first;
2474 >                        else {
2475 >                            tab = (Node[])ek;
2476 >                            continue;           // restarts due to null val
2477 >                        }
2478 >                    }                           // visit upper slots if present
2479 >                    index = (i += baseSize) < n ? i : (baseIndex = b + 1);
2480 >                }
2481 >                nextKey = e.key;
2482 >            } while ((ev = e.val) == null);    // skip deleted or special nodes
2483 >            next = e;
2484 >            return nextVal = ev;
2485          }
2486  
2487          public final void remove() {
2488 <            if (lastReturned == null)
2488 >            Object k = nextKey;
2489 >            if (k == null && (advance() == null || (k = nextKey) == null))
2490                  throw new IllegalStateException();
2491 <            ConcurrentHashMapV8.this.remove(lastReturned.key);
827 <            lastReturned = null;
2491 >            map.internalReplace(k, null, null);
2492          }
2493  
2494 <        /** Helper for serialization */
2495 <        final void writeEntries(java.io.ObjectOutputStream s)
832 <            throws java.io.IOException {
833 <            Node e;
834 <            while ((e = next) != null) {
835 <                s.writeObject(e.key);
836 <                s.writeObject(nextVal);
837 <                advance(e.next);
838 <            }
2494 >        public final boolean hasNext() {
2495 >            return nextVal != null || advance() != null;
2496          }
2497  
2498 <        /** Helper for containsValue */
842 <        final boolean containsVal(Object value) {
843 <            if (value != null) {
844 <                Node e;
845 <                while ((e = next) != null) {
846 <                    Object v = nextVal;
847 <                    if (value == v || value.equals(v))
848 <                        return true;
849 <                    advance(e.next);
850 <                }
851 <            }
852 <            return false;
853 <        }
2498 >        public final boolean hasMoreElements() { return hasNext(); }
2499  
2500 <        /** Helper for Map.hashCode */
856 <        final int mapHashCode() {
857 <            int h = 0;
858 <            Node e;
859 <            while ((e = next) != null) {
860 <                h += e.key.hashCode() ^ nextVal.hashCode();
861 <                advance(e.next);
862 <            }
863 <            return h;
864 <        }
2500 >        public void compute() { } // default no-op CountedCompleter body
2501  
2502 <        /** Helper for Map.toString */
2503 <        final String mapToString() {
2504 <            Node e = next;
2505 <            if (e == null)
2506 <                return "{}";
2507 <            StringBuilder sb = new StringBuilder();
2508 <            sb.append('{');
2509 <            for (;;) {
2510 <                sb.append(e.key   == this ? "(this Map)" : e.key);
2511 <                sb.append('=');
2512 <                sb.append(nextVal == this ? "(this Map)" : nextVal);
2513 <                advance(e.next);
2514 <                if ((e = next) != null)
2515 <                    sb.append(',').append(' ');
2516 <                else
2517 <                    return sb.append('}').toString();
2502 >        /**
2503 >         * Returns a batch value > 0 if this task should (and must) be
2504 >         * split; if so, adds to the pending count, and in any case
2505 >         * updates the batch value. The initial batch value is approx
2506 >         * exp2 of the number of times (minus one) to split task by
2507 >         * two before executing leaf action. This value is faster to
2508 >         * compute and more convenient to use as a guide to splitting
2509 >         * than is the depth, since it is used while dividing by two
2510 >         * anyway.
2511 >         */
2512 >        final int preSplit() {
2513 >            ConcurrentHashMapV8<K, V> m; int b; Node[] t;  ForkJoinPool pool;
2514 >            if ((b = batch) < 0 && (m = map) != null) { // force initialization
2515 >                if ((t = tab) == null && (t = tab = m.table) != null)
2516 >                    baseLimit = baseSize = t.length;
2517 >                if (t != null) {
2518 >                    long n = m.counter.sum();
2519 >                    int par = ((pool = getPool()) == null) ?
2520 >                        ForkJoinPool.getCommonPoolParallelism() :
2521 >                        pool.getParallelism();
2522 >                    int sp = par << 3; // slack of 8
2523 >                    b = (n <= 0L) ? 0 : (n < (long)sp) ? (int)n : sp;
2524 >                }
2525              }
2526 +            b = (b <= 1 || baseIndex == baseLimit) ? 0 : (b >>> 1);
2527 +            if ((batch = b) > 0)
2528 +                addToPendingCount(1);
2529 +            return b;
2530          }
2531 +
2532      }
2533  
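
Bulk reads such as containsValue are built on the advance() idiom described in
the Traverser comment above. A hypothetical package-level helper in that style
(containsValue below is the real in-class equivalent):

    // Hypothetical helper: advance() returns the snapshotted value (nextVal),
    // or null once the weakly consistent traversal is exhausted.
    static <K,V> void dumpAll(ConcurrentHashMapV8<K,V> map) {
        Traverser<K,V,Object> it = new Traverser<K,V,Object>(map);
        Object v;
        while ((v = it.advance()) != null)
            System.out.println(it.nextKey + " -> " + v);   // reflects some past, valid state
    }
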
2534      /* ---------------- Public operations -------------- */
2535  
2536      /**
2537 <     * Creates a new, empty map with the specified initial
890 <     * capacity, load factor and concurrency level.
891 <     *
892 <     * @param initialCapacity the initial capacity. The implementation
893 <     * performs internal sizing to accommodate this many elements.
894 <     * @param loadFactor  the load factor threshold, used to control resizing.
895 <     * Resizing may be performed when the average number of elements per
896 <     * bin exceeds this threshold.
897 <     * @param concurrencyLevel the estimated number of concurrently
898 <     * updating threads. The implementation may use this value as
899 <     * a sizing hint.
900 <     * @throws IllegalArgumentException if the initial capacity is
901 <     * negative or the load factor or concurrencyLevel are
902 <     * nonpositive.
2537 >     * Creates a new, empty map with the default initial table size (16).
2538       */
2539 <    public ConcurrentHashMapV8(int initialCapacity,
905 <                               float loadFactor, int concurrencyLevel) {
906 <        if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0)
907 <            throw new IllegalArgumentException();
908 <        this.initCap = initialCapacity;
909 <        this.loadFactor = loadFactor;
2539 >    public ConcurrentHashMapV8() {
2540          this.counter = new LongAdder();
2541      }
2542  
2543      /**
2544 <     * Creates a new, empty map with the specified initial capacity
2545 <     * and load factor and with the default concurrencyLevel (16).
2544 >     * Creates a new, empty map with an initial table size
2545 >     * accommodating the specified number of elements without the need
2546 >     * to dynamically resize.
2547       *
2548       * @param initialCapacity The implementation performs internal
2549       * sizing to accommodate this many elements.
2550 <     * @param loadFactor  the load factor threshold, used to control resizing.
2551 <     * Resizing may be performed when the average number of elements per
2552 <     * bin exceeds this threshold.
2550 >     * @throws IllegalArgumentException if the initial capacity of
2551 >     * elements is negative
2552 >     */
2553 >    public ConcurrentHashMapV8(int initialCapacity) {
2554 >        if (initialCapacity < 0)
2555 >            throw new IllegalArgumentException();
2556 >        int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
2557 >                   MAXIMUM_CAPACITY :
2558 >                   tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
2559 >        this.counter = new LongAdder();
2560 >        this.sizeCtl = cap;
2561 >    }
2562 >
2563 >    /**
2564 >     * Creates a new map with the same mappings as the given map.
2565 >     *
2566 >     * @param m the map
2567 >     */
2568 >    public ConcurrentHashMapV8(Map<? extends K, ? extends V> m) {
2569 >        this.counter = new LongAdder();
2570 >        this.sizeCtl = DEFAULT_CAPACITY;
2571 >        internalPutAll(m);
2572 >    }
2573 >
2574 >    /**
2575 >     * Creates a new, empty map with an initial table size based on
2576 >     * the given number of elements ({@code initialCapacity}) and
2577 >     * initial table density ({@code loadFactor}).
2578 >     *
2579 >     * @param initialCapacity the initial capacity. The implementation
2580 >     * performs internal sizing to accommodate this many elements,
2581 >     * given the specified load factor.
2582 >     * @param loadFactor the load factor (table density) for
2583 >     * establishing the initial table size
2584       * @throws IllegalArgumentException if the initial capacity of
2585       * elements is negative or the load factor is nonpositive
2586       *
2587       * @since 1.6
2588       */
2589      public ConcurrentHashMapV8(int initialCapacity, float loadFactor) {
2590 <        this(initialCapacity, loadFactor, DEFAULT_CONCURRENCY_LEVEL);
2590 >        this(initialCapacity, loadFactor, 1);
2591      }
2592  
2593      /**
2594 <     * Creates a new, empty map with the specified initial capacity,
2595 <     * and with default load factor (0.75) and concurrencyLevel (16).
2594 >     * Creates a new, empty map with an initial table size based on
2595 >     * the given number of elements ({@code initialCapacity}), table
2596 >     * density ({@code loadFactor}), and number of concurrently
2597 >     * updating threads ({@code concurrencyLevel}).
2598       *
2599       * @param initialCapacity the initial capacity. The implementation
2600 <     * performs internal sizing to accommodate this many elements.
2601 <     * @throws IllegalArgumentException if the initial capacity of
2602 <     * elements is negative.
2600 >     * performs internal sizing to accommodate this many elements,
2601 >     * given the specified load factor.
2602 >     * @param loadFactor the load factor (table density) for
2603 >     * establishing the initial table size
2604 >     * @param concurrencyLevel the estimated number of concurrently
2605 >     * updating threads. The implementation may use this value as
2606 >     * a sizing hint.
2607 >     * @throws IllegalArgumentException if the initial capacity is
2608 >     * negative or the load factor or concurrencyLevel are
2609 >     * nonpositive
2610       */
2611 <    public ConcurrentHashMapV8(int initialCapacity) {
2612 <        this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
2611 >    public ConcurrentHashMapV8(int initialCapacity,
2612 >                               float loadFactor, int concurrencyLevel) {
2613 >        if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
2614 >            throw new IllegalArgumentException();
2615 >        if (initialCapacity < concurrencyLevel)   // Use at least as many bins
2616 >            initialCapacity = concurrencyLevel;   // as estimated threads
2617 >        long size = (long)(1.0 + (long)initialCapacity / loadFactor);
2618 >        int cap = (size >= (long)MAXIMUM_CAPACITY) ?
2619 >            MAXIMUM_CAPACITY : tableSizeFor((int)size);
2620 >        this.counter = new LongAdder();
2621 >        this.sizeCtl = cap;
2622      }
2623  
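
A usage sketch of the sizing constructors with assumed numbers: presizing lets
a map be populated without intermediate resizes, and with these particular
arguments both forms round up to the same power-of-two table.

    ConcurrentHashMapV8<String, Long> counts =
        new ConcurrentHashMapV8<String, Long>(10000);            // room for ~10,000 mappings
    ConcurrentHashMapV8<String, Long> tuned =
        new ConcurrentHashMapV8<String, Long>(10000, 0.75f, 16); // legacy-style arguments
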
2624      /**
2625 <     * Creates a new, empty map with a default initial capacity (16),
2626 <     * load factor (0.75) and concurrencyLevel (16).
2625 >     * Creates a new {@link Set} backed by a ConcurrentHashMapV8
2626 >     * from the given type to {@code Boolean.TRUE}.
2627 >     *
2628 >     * @return the new set
2629       */
2630 <    public ConcurrentHashMapV8() {
2631 <        this(DEFAULT_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
2630 >    public static <K> KeySetView<K,Boolean> newKeySet() {
2631 >        return new KeySetView<K,Boolean>(new ConcurrentHashMapV8<K,Boolean>(),
2632 >                                      Boolean.TRUE);
2633      }
2634  
2635      /**
2636 <     * Creates a new map with the same mappings as the given map.
2637 <     * The map is created with a capacity of 1.5 times the number
955 <     * of mappings in the given map or 16 (whichever is greater),
956 <     * and a default load factor (0.75) and concurrencyLevel (16).
2636 >     * Creates a new {@link Set} backed by a ConcurrentHashMapV8
2637 >     * from the given type to {@code Boolean.TRUE}.
2638       *
2639 <     * @param m the map
2639 >     * @param initialCapacity The implementation performs internal
2640 >     * sizing to accommodate this many elements.
2641 >     * @throws IllegalArgumentException if the initial capacity of
2642 >     * elements is negative
2643 >     * @return the new set
2644       */
2645 <    public ConcurrentHashMapV8(Map<? extends K, ? extends V> m) {
2646 <        this(DEFAULT_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
2647 <        if (m == null)
963 <            throw new NullPointerException();
964 <        internalPutAll(m);
2645 >    public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
2646 >        return new KeySetView<K,Boolean>(new ConcurrentHashMapV8<K,Boolean>(initialCapacity),
2647 >                                      Boolean.TRUE);
2648      }
2649  
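
A usage sketch of the set view (assuming KeySetView's add method, which maps
each added element to the default Boolean.TRUE):

    KeySetView<String, Boolean> seen = ConcurrentHashMapV8.newKeySet();
    seen.add("alpha");                  // stored as "alpha" -> Boolean.TRUE
    boolean wasNew = seen.add("alpha"); // false: the element was already present
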
2650      /**
2651 <     * Returns {@code true} if this map contains no key-value mappings.
969 <     *
970 <     * @return {@code true} if this map contains no key-value mappings
2651 >     * {@inheritDoc}
2652       */
2653      public boolean isEmpty() {
2654          return counter.sum() <= 0L; // ignore transient negative values
2655      }
2656  
2657      /**
2658 <     * Returns the number of key-value mappings in this map.  If the
978 <     * map contains more than {@code Integer.MAX_VALUE} elements, returns
979 <     * {@code Integer.MAX_VALUE}.
980 <     *
981 <     * @return the number of key-value mappings in this map
2658 >     * {@inheritDoc}
2659       */
2660      public int size() {
2661          long n = counter.sum();
2662 <        return ((n >>> 31) == 0) ? (int)n : (n < 0L) ? 0 : Integer.MAX_VALUE;
2662 >        return ((n < 0L) ? 0 :
2663 >                (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
2664 >                (int)n);
2665 >    }
2666 >
2667 >    /**
2668 >     * Returns the number of mappings. This method should be used
2669 >     * instead of {@link #size} because a ConcurrentHashMapV8 may
2670 >     * contain more mappings than can be represented as an int. The
2671 >     * value returned is an estimate; the actual count may differ if
2672 >     * there are concurrent insertions or removals.
2673 >     *
2674 >     * @return the number of mappings
2675 >     */
2676 >    public long mappingCount() {
2677 >        long n = counter.sum();
2678 >        return (n < 0L) ? 0L : n; // ignore transient negative values
2679      }
2680  
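
A usage sketch contrasting the two count methods:

    ConcurrentHashMapV8<Long, Long> m = new ConcurrentHashMapV8<Long, Long>();
    long estimate = m.mappingCount();   // never saturates; still only an estimate under updates
    int bounded   = m.size();           // clamps to Integer.MAX_VALUE for very large maps
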
2681      /**
# Line 996 | Line 2689 | public class ConcurrentHashMapV8<K, V>
2689       *
2690       * @throws NullPointerException if the specified key is null
2691       */
2692 <    @SuppressWarnings("unchecked")
1000 <    public V get(Object key) {
2692 >    @SuppressWarnings("unchecked") public V get(Object key) {
2693          if (key == null)
2694              throw new NullPointerException();
2695          return (V)internalGet(key);
2696      }
2697  
2698      /**
2699 +     * Returns the value to which the specified key is mapped,
2700 +     * or the given defaultValue if this map contains no mapping for the key.
2701 +     *
2702 +     * @param key the key
2703 +     * @param defaultValue the value to return if this map contains
2704 +     * no mapping for the given key
2705 +     * @return the mapping for the key, if present; else the defaultValue
2706 +     * @throws NullPointerException if the specified key is null
2707 +     */
2708 +    @SuppressWarnings("unchecked") public V getValueOrDefault(Object key, V defaultValue) {
2709 +        if (key == null)
2710 +            throw new NullPointerException();
2711 +        V v = (V) internalGet(key);
2712 +        return v == null ? defaultValue : v;
2713 +    }
2714 +
2715 +    /**
2716       * Tests if the specified object is a key in this table.
2717       *
2718       * @param  key   possible key
2719       * @return {@code true} if and only if the specified object
2720       *         is a key in this table, as determined by the
2721 <     *         {@code equals} method; {@code false} otherwise.
2721 >     *         {@code equals} method; {@code false} otherwise
2722       * @throws NullPointerException if the specified key is null
2723       */
2724      public boolean containsKey(Object key) {
# Line 1020 | Line 2729 | public class ConcurrentHashMapV8<K, V>
2729  
2730      /**
2731       * Returns {@code true} if this map maps one or more keys to the
2732 <     * specified value. Note: This method requires a full internal
2733 <     * traversal of the hash table, and so is much slower than
1025 <     * method {@code containsKey}.
2732 >     * specified value. Note: This method may require a full traversal
2733 >     * of the map, and is much slower than method {@code containsKey}.
2734       *
2735       * @param value value whose presence in this map is to be tested
2736       * @return {@code true} if this map maps one or more keys to the
# Line 1032 | Line 2740 | public class ConcurrentHashMapV8<K, V>
2740      public boolean containsValue(Object value) {
2741          if (value == null)
2742              throw new NullPointerException();
2743 <        return new HashIterator().containsVal(value);
2743 >        Object v;
2744 >        Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
2745 >        while ((v = it.advance()) != null) {
2746 >            if (v == value || value.equals(v))
2747 >                return true;
2748 >        }
2749 >        return false;
2750      }
2751  
2752      /**
# Line 1058 | Line 2772 | public class ConcurrentHashMapV8<K, V>
2772       * Maps the specified key to the specified value in this table.
2773       * Neither the key nor the value can be null.
2774       *
2775 <     * <p> The value can be retrieved by calling the {@code get} method
2775 >     * <p>The value can be retrieved by calling the {@code get} method
2776       * with a key that is equal to the original key.
2777       *
2778       * @param key key with which the specified value is to be associated
# Line 1067 | Line 2781 | public class ConcurrentHashMapV8<K, V>
2781       *         {@code null} if there was no mapping for {@code key}
2782       * @throws NullPointerException if the specified key or value is null
2783       */
2784 <    @SuppressWarnings("unchecked")
1071 <    public V put(K key, V value) {
2784 >    @SuppressWarnings("unchecked") public V put(K key, V value) {
2785          if (key == null || value == null)
2786              throw new NullPointerException();
2787 <        return (V)internalPut(key, value, true);
2787 >        return (V)internalPut(key, value);
2788      }
2789  
2790      /**
# Line 1081 | Line 2794 | public class ConcurrentHashMapV8<K, V>
2794       *         or {@code null} if there was no mapping for the key
2795       * @throws NullPointerException if the specified key or value is null
2796       */
2797 <    @SuppressWarnings("unchecked")
1085 <    public V putIfAbsent(K key, V value) {
2797 >    @SuppressWarnings("unchecked") public V putIfAbsent(K key, V value) {
2798          if (key == null || value == null)
2799              throw new NullPointerException();
2800 <        return (V)internalPut(key, value, false);
2800 >        return (V)internalPutIfAbsent(key, value);
2801      }
2802  
2803      /**
# Line 1096 | Line 2808 | public class ConcurrentHashMapV8<K, V>
2808       * @param m mappings to be stored in this map
2809       */
2810      public void putAll(Map<? extends K, ? extends V> m) {
1099        if (m == null)
1100            throw new NullPointerException();
2811          internalPutAll(m);
2812      }
2813  
2814      /**
2815       * If the specified key is not already associated with a value,
2816 <     * computes its value using the given mappingFunction, and if
2817 <     * non-null, enters it into the map.  This is equivalent to
2818 <     *  <pre> {@code
2816 >     * computes its value using the given mappingFunction and enters
2817 >     * it into the map unless null.  This is equivalent to
2818 >     * <pre> {@code
2819       * if (map.containsKey(key))
2820       *   return map.get(key);
2821 <     * value = mappingFunction.map(key);
2821 >     * value = mappingFunction.apply(key);
2822       * if (value != null)
2823       *   map.put(key, value);
2824       * return value;}</pre>
2825       *
2826 <     * except that the action is performed atomically.  Some attempted
2827 <     * update operations on this map by other threads may be blocked
2828 <     * while computation is in progress, so the computation should be
2829 <     * short and simple, and must not attempt to update any other
2830 <     * mappings of this Map. The most appropriate usage is to
2826 >     * except that the action is performed atomically.  If the
2827 >     * function returns {@code null} no mapping is recorded. If the
2828 >     * function itself throws an (unchecked) exception, the exception
2829 >     * is rethrown to its caller, and no mapping is recorded.  Some
2830 >     * attempted update operations on this map by other threads may be
2831 >     * blocked while computation is in progress, so the computation
2832 >     * should be short and simple, and must not attempt to update any
2833 >     * other mappings of this Map. The most appropriate usage is to
2834       * construct a new object serving as an initial mapped value, or
2835       * memoized result, as in:
2836 +     *
2837       *  <pre> {@code
2838 <     * map.computeIfAbsent(key, new MappingFunction<K, V>() {
2838 >     * map.computeIfAbsent(key, new Fun<K, V>() {
2839       *   public V map(K k) { return new Value(f(k)); }});}</pre>
2840       *
2841       * @param key key with which the specified value is to be associated
2842       * @param mappingFunction the function to compute a value
2843       * @return the current (existing or computed) value associated with
2844 <     *         the specified key, or {@code null} if the computation
1131 <     *         returned {@code null}.
2844 >     *         the specified key, or null if the computed value is null
2845       * @throws NullPointerException if the specified key or mappingFunction
2846 <     *         is null,
2846 >     *         is null
2847       * @throws IllegalStateException if the computation detectably
2848       *         attempts a recursive update to this map that would
2849 <     *         otherwise never complete.
2849 >     *         otherwise never complete
2850       * @throws RuntimeException or Error if the mappingFunction does so,
2851 <     *         in which case the mapping is left unestablished.
2851 >     *         in which case the mapping is left unestablished
2852       */
2853 <    public V computeIfAbsent(K key, MappingFunction<? super K, ? extends V> mappingFunction) {
2853 >    @SuppressWarnings("unchecked") public V computeIfAbsent
2854 >        (K key, Fun<? super K, ? extends V> mappingFunction) {
2855          if (key == null || mappingFunction == null)
2856              throw new NullPointerException();
2857 <        return internalCompute(key, mappingFunction, false);
2857 >        return (V)internalComputeIfAbsent(key, mappingFunction);
2858      }
2859  
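A minimal usage sketch (the cache, the String/Integer element types, and the length computation are illustrative placeholders, not part of this class), assuming jsr166e is on the classpath:

    import jsr166e.ConcurrentHashMapV8;
    import jsr166e.ConcurrentHashMapV8.Fun;

    class LengthCache {
        static final ConcurrentHashMapV8<String, Integer> CACHE =
            new ConcurrentHashMapV8<String, Integer>();

        // Computes and caches the length of a key; the mapping function runs at
        // most once per key, and a null result would leave the map unchanged.
        static int lengthOf(String key) {
            return CACHE.computeIfAbsent(key, new Fun<String, Integer>() {
                public Integer apply(String k) { return Integer.valueOf(k.length()); }
            });
        }
    }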
2860      /**
2861 <     * Computes the value associated with the given key using the given
2862 <     * mappingFunction, and if non-null, enters it into the map.  This
1149 <     * is equivalent to
2861 >     * If the given key is present, computes a new mapping value given a key and
2862 >     * its current mapped value. This is equivalent to
2863       *  <pre> {@code
2864 <     * value = mappingFunction.map(key);
2865 <     * if (value != null)
2866 <     *   map.put(key, value);
2867 <     * else
2868 <     *   value = map.get(key);
2869 <     * return value;}</pre>
2864 >     *   if (map.containsKey(key)) {
2865 >     *     value = remappingFunction.apply(key, map.get(key));
2866 >     *     if (value != null)
2867 >     *       map.put(key, value);
2868 >     *     else
2869 >     *       map.remove(key);
2870 >     *   }
2871 >     * }</pre>
2872 >     *
2873 >     * except that the action is performed atomically.  If the
2874 >     * function returns {@code null}, the mapping is removed.  If the
2875 >     * function itself throws an (unchecked) exception, the exception
2876 >     * is rethrown to its caller, and the current mapping is left
2877 >     * unchanged.  Some attempted update operations on this map by
2878 >     * other threads may be blocked while computation is in progress,
2879 >     * so the computation should be short and simple, and must not
2880 >     * attempt to update any other mappings of this Map.
2882       *
2883 <     * except that the action is performed atomically.  Some attempted
2884 <     * update operations on this map by other threads may be blocked
2885 <     * while computation is in progress, so the computation should be
2886 <     * short and simple, and must not attempt to update any other
2887 <     * mappings of this Map.
2883 >     * @param key key with which the specified value is to be associated
2884 >     * @param remappingFunction the function to compute a value
2885 >     * @return the new value associated with the specified key, or null if none
2886 >     * @throws NullPointerException if the specified key or remappingFunction
2887 >     *         is null
2888 >     * @throws IllegalStateException if the computation detectably
2889 >     *         attempts a recursive update to this map that would
2890 >     *         otherwise never complete
2891 >     * @throws RuntimeException or Error if the remappingFunction does so,
2892 >     *         in which case the mapping is unchanged
2893 >     */
2894 >    @SuppressWarnings("unchecked") public V computeIfPresent
2895 >        (K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
2896 >        if (key == null || remappingFunction == null)
2897 >            throw new NullPointerException();
2898 >        return (V)internalCompute(key, true, remappingFunction);
2899 >    }
2900 >
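A usage sketch (the map name counts and its String/Integer element types are illustrative): atomically decrement a per-key count and drop the entry once it reaches zero.

    static void release(ConcurrentHashMapV8<String, Integer> counts, String key) {
        counts.computeIfPresent(key, new ConcurrentHashMapV8.BiFun<String, Integer, Integer>() {
            public Integer apply(String k, Integer v) {
                // Returning null removes the mapping; absent keys are left untouched.
                return (v.intValue() > 1) ? Integer.valueOf(v.intValue() - 1) : null;
            }
        });
    }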
2901 >    /**
2902 >     * Computes a new mapping value given a key and
2903 >     * its current mapped value (or {@code null} if there is no current
2904 >     * mapping). This is equivalent to
2905 >     *  <pre> {@code
2906 >     *   value = remappingFunction.apply(key, map.get(key));
2907 >     *   if (value != null)
2908 >     *     map.put(key, value);
2909 >     *   else
2910 >     *     map.remove(key);
2911 >     * }</pre>
2912 >     *
2913 >     * except that the action is performed atomically.  If the
2914 >     * function returns {@code null}, the mapping is removed.  If the
2915 >     * function itself throws an (unchecked) exception, the exception
2916 >     * is rethrown to its caller, and the current mapping is left
2917 >     * unchanged.  Some attempted update operations on this map by
2918 >     * other threads may be blocked while computation is in progress,
2919 >     * so the computation should be short and simple, and must not
2920 >     * attempt to update any other mappings of this Map. For example,
2921 >     * to either create or append new messages to a value mapping:
2922 >     *
2923 >     * <pre> {@code
2924 >     * Map<Key, String> map = ...;
2925 >     * final String msg = ...;
2926 >     * map.compute(key, new BiFun<Key, String, String>() {
2927 >     *   public String apply(Key k, String v) {
2928 >     *    return (v == null) ? msg : v + msg; }});}</pre>
2929       *
2930       * @param key key with which the specified value is to be associated
2931 <     * @param mappingFunction the function to compute a value
2932 <     * @return the current value associated with
2933 <     *         the specified key, or {@code null} if the computation
2934 <     *         returned {@code null} and the value was not otherwise present.
1169 <     * @throws NullPointerException if the specified key or mappingFunction
1170 <     *         is null,
2931 >     * @param remappingFunction the function to compute a value
2932 >     * @return the new value associated with the specified key, or null if none
2933 >     * @throws NullPointerException if the specified key or remappingFunction
2934 >     *         is null
2935       * @throws IllegalStateException if the computation detectably
2936       *         attempts a recursive update to this map that would
2937 <     *         otherwise never complete.
2938 <     * @throws RuntimeException or Error if the mappingFunction does so,
2939 <     *         in which case the mapping is unchanged.
2940 <     */
2941 <    public V compute(K key, MappingFunction<? super K, ? extends V> mappingFunction) {
2942 <        if (key == null || mappingFunction == null)
2937 >     *         otherwise never complete
2938 >     * @throws RuntimeException or Error if the remappingFunction does so,
2939 >     *         in which case the mapping is unchanged
2940 >     */
2941 >    @SuppressWarnings("unchecked") public V compute
2942 >        (K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
2943 >        if (key == null || remappingFunction == null)
2944 >            throw new NullPointerException();
2945 >        return (V)internalCompute(key, false, remappingFunction);
2946 >    }
2947 >
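A usage sketch (names and element types are illustrative): atomically increment a per-key counter, creating the entry on first use.

    static void increment(ConcurrentHashMapV8<String, Integer> counts, String word) {
        counts.compute(word, new ConcurrentHashMapV8.BiFun<String, Integer, Integer>() {
            public Integer apply(String k, Integer v) {
                // v is null when the key has no current mapping.
                return (v == null) ? Integer.valueOf(1) : Integer.valueOf(v.intValue() + 1);
            }
        });
    }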
2948 >    /**
2949 >     * If the specified key is not already associated
2950 >     * with a value, associates it with the given value.
2951 >     * Otherwise, replaces the value with the results of
2952 >     * the given remapping function. This is equivalent to:
2953 >     *  <pre> {@code
2954 >     *   if (!map.containsKey(key))
2955 >     *     map.put(key, value);
2956 >     *   else {
2957 >     *     newValue = remappingFunction.apply(map.get(key), value);
2958 >     *     if (newValue != null)
2959 >     *       map.put(key, newValue);
2960 >     *     else
2961 >     *       map.remove(key);
2962 >     *   }
2963 >     * }</pre>
2964 >     * except that the action is performed atomically.  If the
2965 >     * function returns {@code null}, the mapping is removed.  If the
2966 >     * function itself throws an (unchecked) exception, the exception
2967 >     * is rethrown to its caller, and the current mapping is left
2968 >     * unchanged.  Some attempted update operations on this map by
2969 >     * other threads may be blocked while computation is in progress,
2970 >     * so the computation should be short and simple, and must not
2971 >     * attempt to update any other mappings of this Map.
2972 >     */
2973 >    @SuppressWarnings("unchecked") public V merge
2974 >        (K key, V value, BiFun<? super V, ? super V, ? extends V> remappingFunction) {
2975 >        if (key == null || value == null || remappingFunction == null)
2976              throw new NullPointerException();
2977 <        return internalCompute(key, mappingFunction, true);
2977 >        return (V)internalMerge(key, value, remappingFunction);
2978      }
2979  
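The same counter as a sketch using merge (names and element types are illustrative); the remapping function runs only when the key is already present, otherwise the supplied value is installed directly.

    static void increment(ConcurrentHashMapV8<String, Integer> counts, String word) {
        counts.merge(word, Integer.valueOf(1),
            new ConcurrentHashMapV8.BiFun<Integer, Integer, Integer>() {
                public Integer apply(Integer old, Integer one) {
                    return Integer.valueOf(old.intValue() + one.intValue());
                }
            });
    }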
2980      /**
# Line 1189 | Line 2986 | public class ConcurrentHashMapV8<K, V>
2986       *         {@code null} if there was no mapping for {@code key}
2987       * @throws NullPointerException if the specified key is null
2988       */
2989 <    @SuppressWarnings("unchecked")
1193 <    public V remove(Object key) {
2989 >    @SuppressWarnings("unchecked") public V remove(Object key) {
2990          if (key == null)
2991              throw new NullPointerException();
2992          return (V)internalReplace(key, null, null);
# Line 1227 | Line 3023 | public class ConcurrentHashMapV8<K, V>
3023       *         or {@code null} if there was no mapping for the key
3024       * @throws NullPointerException if the specified key or value is null
3025       */
3026 <    @SuppressWarnings("unchecked")
1231 <    public V replace(K key, V value) {
3026 >    @SuppressWarnings("unchecked") public V replace(K key, V value) {
3027          if (key == null || value == null)
3028              throw new NullPointerException();
3029          return (V)internalReplace(key, value, null);
# Line 1244 | Line 3039 | public class ConcurrentHashMapV8<K, V>
3039      /**
3040       * Returns a {@link Set} view of the keys contained in this map.
3041       * The set is backed by the map, so changes to the map are
3042 <     * reflected in the set, and vice-versa.  The set supports element
1248 <     * removal, which removes the corresponding mapping from this map,
1249 <     * via the {@code Iterator.remove}, {@code Set.remove},
1250 <     * {@code removeAll}, {@code retainAll}, and {@code clear}
1251 <     * operations.  It does not support the {@code add} or
1252 <     * {@code addAll} operations.
3042 >     * reflected in the set, and vice-versa.
3043       *
3044 <     * <p>The view's {@code iterator} is a "weakly consistent" iterator
1255 <     * that will never throw {@link ConcurrentModificationException},
1256 <     * and guarantees to traverse elements as they existed upon
1257 <     * construction of the iterator, and may (but is not guaranteed to)
1258 <     * reflect any modifications subsequent to construction.
3044 >     * @return the set view
3045       */
3046 <    public Set<K> keySet() {
3047 <        Set<K> ks = keySet;
3048 <        return (ks != null) ? ks : (keySet = new KeySet());
3046 >    public KeySetView<K,V> keySet() {
3047 >        KeySetView<K,V> ks = keySet;
3048 >        return (ks != null) ? ks : (keySet = new KeySetView<K,V>(this, null));
3049 >    }
3050 >
3051 >    /**
3052 >     * Returns a {@link Set} view of the keys in this map, using the
3053 >     * given common mapped value for any additions (i.e., {@link
3054 >     * Collection#add} and {@link Collection#addAll}). This is of
3055 >     * course only appropriate if it is acceptable to use the same
3056 >     * value for all additions from this view.
3057 >     *
3058 >     * @param mappedValue the mapped value to use for any
3059 >     * additions.
3060 >     * @return the set view
3061 >     * @throws NullPointerException if the mappedValue is null
3062 >     */
3063 >    public KeySetView<K,V> keySet(V mappedValue) {
3064 >        if (mappedValue == null)
3065 >            throw new NullPointerException();
3066 >        return new KeySetView<K,V>(this, mappedValue);
3067      }
3068  
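A usage sketch, assuming the returned KeySetView behaves as the Set view described above; Boolean.TRUE is an arbitrary placeholder used as the common mapped value for all additions.

    static boolean markSeen(ConcurrentHashMapV8<String, Boolean> m, String id) {
        Set<String> seen = m.keySet(Boolean.TRUE);
        return seen.add(id);  // records id with the fixed mapped value if not already present
    }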
3069      /**
3070       * Returns a {@link Collection} view of the values contained in this map.
3071       * The collection is backed by the map, so changes to the map are
3072 <     * reflected in the collection, and vice-versa.  The collection
1269 <     * supports element removal, which removes the corresponding
1270 <     * mapping from this map, via the {@code Iterator.remove},
1271 <     * {@code Collection.remove}, {@code removeAll},
1272 <     * {@code retainAll}, and {@code clear} operations.  It does not
1273 <     * support the {@code add} or {@code addAll} operations.
1274 <     *
1275 <     * <p>The view's {@code iterator} is a "weakly consistent" iterator
1276 <     * that will never throw {@link ConcurrentModificationException},
1277 <     * and guarantees to traverse elements as they existed upon
1278 <     * construction of the iterator, and may (but is not guaranteed to)
1279 <     * reflect any modifications subsequent to construction.
3072 >     * reflected in the collection, and vice-versa.
3073       */
3074 <    public Collection<V> values() {
3075 <        Collection<V> vs = values;
3076 <        return (vs != null) ? vs : (values = new Values());
3074 >    public ValuesView<K,V> values() {
3075 >        ValuesView<K,V> vs = values;
3076 >        return (vs != null) ? vs : (values = new ValuesView<K,V>(this));
3077      }
3078  
3079      /**
# Line 1300 | Line 3093 | public class ConcurrentHashMapV8<K, V>
3093       * reflect any modifications subsequent to construction.
3094       */
3095      public Set<Map.Entry<K,V>> entrySet() {
3096 <        Set<Map.Entry<K,V>> es = entrySet;
3097 <        return (es != null) ? es : (entrySet = new EntrySet());
3096 >        EntrySetView<K,V> es = entrySet;
3097 >        return (es != null) ? es : (entrySet = new EntrySetView<K,V>(this));
3098      }
3099  
3100      /**
# Line 1311 | Line 3104 | public class ConcurrentHashMapV8<K, V>
3104       * @see #keySet()
3105       */
3106      public Enumeration<K> keys() {
3107 <        return new KeyIterator();
3107 >        return new KeyIterator<K,V>(this);
3108      }
3109  
3110      /**
# Line 1321 | Line 3114 | public class ConcurrentHashMapV8<K, V>
3114       * @see #values()
3115       */
3116      public Enumeration<V> elements() {
3117 <        return new ValueIterator();
3117 >        return new ValueIterator<K,V>(this);
3118 >    }
3119 >
3120 >    /**
3121 >     * Returns a partitionable iterator of the keys in this map.
3122 >     *
3123 >     * @return a partitionable iterator of the keys in this map
3124 >     */
3125 >    public Spliterator<K> keySpliterator() {
3126 >        return new KeyIterator<K,V>(this);
3127 >    }
3128 >
3129 >    /**
3130 >     * Returns a partitionable iterator of the values in this map.
3131 >     *
3132 >     * @return a partitionable iterator of the values in this map
3133 >     */
3134 >    public Spliterator<V> valueSpliterator() {
3135 >        return new ValueIterator<K,V>(this);
3136 >    }
3137 >
3138 >    /**
3139 >     * Returns a partitionable iterator of the entries in this map.
3140 >     *
3141 >     * @return a partitionable iterator of the entries in this map
3142 >     */
3143 >    public Spliterator<Map.Entry<K,V>> entrySpliterator() {
3144 >        return new EntryIterator<K,V>(this);
3145      }
3146  
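A sketch of partitioned traversal, assuming the Spliterator type declared by this class exposes split() the way KeyIterator implements it; the split-off spliterator covers a non-overlapping portion of the keys and can be handed to another thread.

    static void splitKeys(ConcurrentHashMapV8<String, Integer> map) {
        Spliterator<String> s1 = map.keySpliterator();
        Spliterator<String> s2 = s1.split();  // disjoint portion; may be empty
        // ... traverse s1 and s2 independently ...
    }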
3147      /**
# Line 1332 | Line 3152 | public class ConcurrentHashMapV8<K, V>
3152       * @return the hash code value for this map
3153       */
3154      public int hashCode() {
3155 <        return new HashIterator().mapHashCode();
3155 >        int h = 0;
3156 >        Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3157 >        Object v;
3158 >        while ((v = it.advance()) != null) {
3159 >            h += it.nextKey.hashCode() ^ v.hashCode();
3160 >        }
3161 >        return h;
3162      }
3163  
3164      /**
# Line 1347 | Line 3173 | public class ConcurrentHashMapV8<K, V>
3173       * @return a string representation of this map
3174       */
3175      public String toString() {
3176 <        return new HashIterator().mapToString();
3176 >        Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3177 >        StringBuilder sb = new StringBuilder();
3178 >        sb.append('{');
3179 >        Object v;
3180 >        if ((v = it.advance()) != null) {
3181 >            for (;;) {
3182 >                Object k = it.nextKey;
3183 >                sb.append(k == this ? "(this Map)" : k);
3184 >                sb.append('=');
3185 >                sb.append(v == this ? "(this Map)" : v);
3186 >                if ((v = it.advance()) == null)
3187 >                    break;
3188 >                sb.append(',').append(' ');
3189 >            }
3190 >        }
3191 >        return sb.append('}').toString();
3192      }
3193  
3194      /**
# Line 1361 | Line 3202 | public class ConcurrentHashMapV8<K, V>
3202       * @return {@code true} if the specified object is equal to this map
3203       */
3204      public boolean equals(Object o) {
3205 <        if (o == this)
3206 <            return true;
3207 <        if (!(o instanceof Map))
3208 <            return false;
3209 <        Map<?,?> m = (Map<?,?>) o;
3210 <        try {
3211 <            for (Map.Entry<K,V> e : this.entrySet())
3212 <                if (! e.getValue().equals(m.get(e.getKey())))
3205 >        if (o != this) {
3206 >            if (!(o instanceof Map))
3207 >                return false;
3208 >            Map<?,?> m = (Map<?,?>) o;
3209 >            Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3210 >            Object val;
3211 >            while ((val = it.advance()) != null) {
3212 >                Object v = m.get(it.nextKey);
3213 >                if (v == null || (v != val && !v.equals(val)))
3214                      return false;
3215 +            }
3216              for (Map.Entry<?,?> e : m.entrySet()) {
3217 <                Object k = e.getKey();
3218 <                Object v = e.getValue();
3219 <                if (k == null || v == null || !v.equals(get(k)))
3217 >                Object mk, mv, v;
3218 >                if ((mk = e.getKey()) == null ||
3219 >                    (mv = e.getValue()) == null ||
3220 >                    (v = internalGet(mk)) == null ||
3221 >                    (mv != v && !mv.equals(v)))
3222                      return false;
3223              }
1379            return true;
1380        } catch (ClassCastException unused) {
1381            return false;
1382        } catch (NullPointerException unused) {
1383            return false;
3224          }
3225 +        return true;
3226      }
3227  
3228 <    /**
1388 <     * Custom Entry class used by EntryIterator.next(), that relays
1389 <     * setValue changes to the underlying map.
1390 <     */
1391 <    final class WriteThroughEntry extends AbstractMap.SimpleEntry<K,V> {
1392 <        @SuppressWarnings("unchecked")
1393 <        WriteThroughEntry(Object k, Object v) {
1394 <            super((K)k, (V)v);
1395 <        }
3228 >    /* ---------------- Iterators -------------- */
3229  
3230 <        /**
3231 <         * Sets our entry's value and writes through to the map. The
3232 <         * value to return is somewhat arbitrary here. Since a
3233 <         * WriteThroughEntry does not necessarily track asynchronous
3234 <         * changes, the most recent "previous" value could be
3235 <         * different from what we return (or could even have been
3236 <         * removed in which case the put will re-establish). We do not
3237 <         * and cannot guarantee more.
3238 <         */
3239 <        public V setValue(V value) {
3240 <            if (value == null) throw new NullPointerException();
3241 <            V v = super.setValue(value);
3242 <            ConcurrentHashMapV8.this.put(getKey(), value);
3243 <            return v;
3230 >    @SuppressWarnings("serial") static final class KeyIterator<K,V> extends Traverser<K,V,Object>
3231 >        implements Spliterator<K>, Enumeration<K> {
3232 >        KeyIterator(ConcurrentHashMapV8<K, V> map) { super(map); }
3233 >        KeyIterator(ConcurrentHashMapV8<K, V> map, Traverser<K,V,Object> it) {
3234 >            super(map, it, -1);
3235 >        }
3236 >        public KeyIterator<K,V> split() {
3237 >            if (nextKey != null)
3238 >                throw new IllegalStateException();
3239 >            return new KeyIterator<K,V>(map, this);
3240 >        }
3241 >        @SuppressWarnings("unchecked") public final K next() {
3242 >            if (nextVal == null && advance() == null)
3243 >                throw new NoSuchElementException();
3244 >            Object k = nextKey;
3245 >            nextVal = null;
3246 >            return (K) k;
3247          }
1412    }
1413
1414    final class KeyIterator extends HashIterator
1415        implements Iterator<K>, Enumeration<K> {
1416        @SuppressWarnings("unchecked")
1417        public final K next()        { return (K)super.nextKey(); }
1418        @SuppressWarnings("unchecked")
1419        public final K nextElement() { return (K)super.nextKey(); }
1420    }
1421
1422    final class ValueIterator extends HashIterator
1423        implements Iterator<V>, Enumeration<V> {
1424        @SuppressWarnings("unchecked")
1425        public final V next()        { return (V)super.nextValue(); }
1426        @SuppressWarnings("unchecked")
1427        public final V nextElement() { return (V)super.nextValue(); }
1428    }
3248  
3249 <    final class EntryIterator extends HashIterator
1431 <        implements Iterator<Entry<K,V>> {
1432 <        public final Map.Entry<K,V> next() { return super.nextEntry(); }
3249 >        public final K nextElement() { return next(); }
3250      }
3251  
3252 <    final class KeySet extends AbstractSet<K> {
3253 <        public int size() {
3254 <            return ConcurrentHashMapV8.this.size();
3255 <        }
3256 <        public boolean isEmpty() {
1440 <            return ConcurrentHashMapV8.this.isEmpty();
1441 <        }
1442 <        public void clear() {
1443 <            ConcurrentHashMapV8.this.clear();
1444 <        }
1445 <        public Iterator<K> iterator() {
1446 <            return new KeyIterator();
3252 >    @SuppressWarnings("serial") static final class ValueIterator<K,V> extends Traverser<K,V,Object>
3253 >        implements Spliterator<V>, Enumeration<V> {
3254 >        ValueIterator(ConcurrentHashMapV8<K, V> map) { super(map); }
3255 >        ValueIterator(ConcurrentHashMapV8<K, V> map, Traverser<K,V,Object> it) {
3256 >            super(map, it, -1);
3257          }
3258 <        public boolean contains(Object o) {
3259 <            return ConcurrentHashMapV8.this.containsKey(o);
3258 >        public ValueIterator<K,V> split() {
3259 >            if (nextKey != null)
3260 >                throw new IllegalStateException();
3261 >            return new ValueIterator<K,V>(map, this);
3262          }
3263 <        public boolean remove(Object o) {
3264 <            return ConcurrentHashMapV8.this.remove(o) != null;
3263 >
3264 >        @SuppressWarnings("unchecked") public final V next() {
3265 >            Object v;
3266 >            if ((v = nextVal) == null && (v = advance()) == null)
3267 >                throw new NoSuchElementException();
3268 >            nextVal = null;
3269 >            return (V) v;
3270          }
3271 +
3272 +        public final V nextElement() { return next(); }
3273      }
3274  
3275 <    final class Values extends AbstractCollection<V> {
3276 <        public int size() {
3277 <            return ConcurrentHashMapV8.this.size();
3278 <        }
3279 <        public boolean isEmpty() {
1461 <            return ConcurrentHashMapV8.this.isEmpty();
1462 <        }
1463 <        public void clear() {
1464 <            ConcurrentHashMapV8.this.clear();
3275 >    @SuppressWarnings("serial") static final class EntryIterator<K,V> extends Traverser<K,V,Object>
3276 >        implements Spliterator<Map.Entry<K,V>> {
3277 >        EntryIterator(ConcurrentHashMapV8<K, V> map) { super(map); }
3278 >        EntryIterator(ConcurrentHashMapV8<K, V> map, Traverser<K,V,Object> it) {
3279 >            super(map, it, -1);
3280          }
3281 <        public Iterator<V> iterator() {
3282 <            return new ValueIterator();
3281 >        public EntryIterator<K,V> split() {
3282 >            if (nextKey != null)
3283 >                throw new IllegalStateException();
3284 >            return new EntryIterator<K,V>(map, this);
3285          }
3286 <        public boolean contains(Object o) {
3287 <            return ConcurrentHashMapV8.this.containsValue(o);
3286 >
3287 >        @SuppressWarnings("unchecked") public final Map.Entry<K,V> next() {
3288 >            Object v;
3289 >            if ((v = nextVal) == null && (v = advance()) == null)
3290 >                throw new NoSuchElementException();
3291 >            Object k = nextKey;
3292 >            nextVal = null;
3293 >            return new MapEntry<K,V>((K)k, (V)v, map);
3294          }
3295      }
3296  
3297 <    final class EntrySet extends AbstractSet<Map.Entry<K,V>> {
3298 <        public int size() {
3299 <            return ConcurrentHashMapV8.this.size();
3300 <        }
3301 <        public boolean isEmpty() {
3302 <            return ConcurrentHashMapV8.this.isEmpty();
3303 <        }
3304 <        public void clear() {
3305 <            ConcurrentHashMapV8.this.clear();
3306 <        }
3307 <        public Iterator<Map.Entry<K,V>> iterator() {
1485 <            return new EntryIterator();
3297 >    /**
3298 >     * Exported Entry for iterators
3299 >     */
3300 >    static final class MapEntry<K,V> implements Map.Entry<K, V> {
3301 >        final K key; // non-null
3302 >        V val;       // non-null
3303 >        final ConcurrentHashMapV8<K, V> map;
3304 >        MapEntry(K key, V val, ConcurrentHashMapV8<K, V> map) {
3305 >            this.key = key;
3306 >            this.val = val;
3307 >            this.map = map;
3308          }
3309 <        public boolean contains(Object o) {
3310 <            if (!(o instanceof Map.Entry))
3311 <                return false;
3312 <            Map.Entry<?,?> e = (Map.Entry<?,?>)o;
3313 <            V v = ConcurrentHashMapV8.this.get(e.getKey());
3314 <            return v != null && v.equals(e.getValue());
3309 >        public final K getKey()       { return key; }
3310 >        public final V getValue()     { return val; }
3311 >        public final int hashCode()   { return key.hashCode() ^ val.hashCode(); }
3312 >        public final String toString(){ return key + "=" + val; }
3313 >
3314 >        public final boolean equals(Object o) {
3315 >            Object k, v; Map.Entry<?,?> e;
3316 >            return ((o instanceof Map.Entry) &&
3317 >                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3318 >                    (v = e.getValue()) != null &&
3319 >                    (k == key || k.equals(key)) &&
3320 >                    (v == val || v.equals(val)));
3321          }
3322 <        public boolean remove(Object o) {
3323 <            if (!(o instanceof Map.Entry))
3324 <                return false;
3325 <            Map.Entry<?,?> e = (Map.Entry<?,?>)o;
3326 <            return ConcurrentHashMapV8.this.remove(e.getKey(), e.getValue());
3322 >
3323 >        /**
3324 >         * Sets our entry's value and writes through to the map. The
3325 >         * value to return is somewhat arbitrary here. Since we do not
3326 >         * necessarily track asynchronous changes, the most recent
3327 >         * "previous" value could be different from what we return (or
3328 >         * could even have been removed in which case the put will
3329 >         * re-establish). We do not and cannot guarantee more.
3330 >         */
3331 >        public final V setValue(V value) {
3332 >            if (value == null) throw new NullPointerException();
3333 >            V v = val;
3334 >            val = value;
3335 >            map.put(key, value);
3336 >            return v;
3337          }
3338      }
3339  
3340 +    /**
3341 +     * Returns exportable snapshot entry for the given key and value
3342 +     * when write-through can't or shouldn't be used.
3343 +     */
3344 +    static <K,V> AbstractMap.SimpleEntry<K,V> entryFor(K k, V v) {
3345 +        return new AbstractMap.SimpleEntry<K,V>(k, v);
3346 +    }
3347 +
3348      /* ---------------- Serialization Support -------------- */
3349  
3350      /**
3351 <     * Helper class used in previous version, declared for the sake of
3352 <     * serialization compatibility
3351 >     * Stripped-down version of helper class used in previous version,
3352 >     * declared for the sake of serialization compatibility
3353       */
3354 <    static class Segment<K,V> extends java.util.concurrent.locks.ReentrantLock
1509 <        implements Serializable {
3354 >    static class Segment<K,V> implements Serializable {
3355          private static final long serialVersionUID = 2249069246763182397L;
3356          final float loadFactor;
3357          Segment(float lf) { this.loadFactor = lf; }
# Line 1521 | Line 3366 | public class ConcurrentHashMapV8<K, V>
3366       * for each key-value mapping, followed by a null pair.
3367       * The key-value mappings are emitted in no particular order.
3368       */
3369 <    @SuppressWarnings("unchecked")
3370 <    private void writeObject(java.io.ObjectOutputStream s)
1526 <            throws java.io.IOException {
3369 >    @SuppressWarnings("unchecked") private void writeObject(java.io.ObjectOutputStream s)
3370 >        throws java.io.IOException {
3371          if (segments == null) { // for serialization compatibility
3372              segments = (Segment<K,V>[])
3373                  new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
3374              for (int i = 0; i < segments.length; ++i)
3375 <                segments[i] = new Segment<K,V>(loadFactor);
3375 >                segments[i] = new Segment<K,V>(LOAD_FACTOR);
3376          }
3377          s.defaultWriteObject();
3378 <        new HashIterator().writeEntries(s);
3378 >        Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3379 >        Object v;
3380 >        while ((v = it.advance()) != null) {
3381 >            s.writeObject(it.nextKey);
3382 >            s.writeObject(v);
3383 >        }
3384          s.writeObject(null);
3385          s.writeObject(null);
3386          segments = null; // throw away
# Line 1541 | Line 3390 | public class ConcurrentHashMapV8<K, V>
3390       * Reconstitutes the instance from a stream (that is, deserializes it).
3391       * @param s the stream
3392       */
3393 <    @SuppressWarnings("unchecked")
3394 <    private void readObject(java.io.ObjectInputStream s)
1546 <            throws java.io.IOException, ClassNotFoundException {
3393 >    @SuppressWarnings("unchecked") private void readObject(java.io.ObjectInputStream s)
3394 >        throws java.io.IOException, ClassNotFoundException {
3395          s.defaultReadObject();
1548        // find load factor in a segment, if one exists
1549        if (segments != null && segments.length != 0)
1550            this.loadFactor = segments[0].loadFactor;
1551        else
1552            this.loadFactor = DEFAULT_LOAD_FACTOR;
1553        this.initCap = DEFAULT_CAPACITY;
1554        LongAdder ct = new LongAdder(); // force final field write
1555        UNSAFE.putObjectVolatile(this, counterOffset, ct);
3396          this.segments = null; // unneeded
3397 +        // initialize transient final field
3398 +        UNSAFE.putObjectVolatile(this, counterOffset, new LongAdder());
3399  
3400 <        // Read the keys and values, and put the mappings in the table
3400 >        // Create all nodes, then place in table once size is known
3401 >        long size = 0L;
3402 >        Node p = null;
3403          for (;;) {
3404 <            K key = (K) s.readObject();
3405 <            V value = (V) s.readObject();
3406 <            if (key == null)
3404 >            K k = (K) s.readObject();
3405 >            V v = (V) s.readObject();
3406 >            if (k != null && v != null) {
3407 >                int h = spread(k.hashCode());
3408 >                p = new Node(h, k, v, p);
3409 >                ++size;
3410 >            }
3411 >            else
3412                  break;
3413 <            put(key, value);
3413 >        }
3414 >        if (p != null) {
3415 >            boolean init = false;
3416 >            int n;
3417 >            if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
3418 >                n = MAXIMUM_CAPACITY;
3419 >            else {
3420 >                int sz = (int)size;
3421 >                n = tableSizeFor(sz + (sz >>> 1) + 1);
3422 >            }
3423 >            int sc = sizeCtl;
3424 >            boolean collide = false;
3425 >            if (n > sc &&
3426 >                UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
3427 >                try {
3428 >                    if (table == null) {
3429 >                        init = true;
3430 >                        Node[] tab = new Node[n];
3431 >                        int mask = n - 1;
3432 >                        while (p != null) {
3433 >                            int j = p.hash & mask;
3434 >                            Node next = p.next;
3435 >                            Node q = p.next = tabAt(tab, j);
3436 >                            setTabAt(tab, j, p);
3437 >                            if (!collide && q != null && q.hash == p.hash)
3438 >                                collide = true;
3439 >                            p = next;
3440 >                        }
3441 >                        table = tab;
3442 >                        counter.add(size);
3443 >                        sc = n - (n >>> 2);
3444 >                    }
3445 >                } finally {
3446 >                    sizeCtl = sc;
3447 >                }
3448 >                if (collide) { // rescan and convert to TreeBins
3449 >                    Node[] tab = table;
3450 >                    for (int i = 0; i < tab.length; ++i) {
3451 >                        int c = 0;
3452 >                        for (Node e = tabAt(tab, i); e != null; e = e.next) {
3453 >                            if (++c > TREE_THRESHOLD &&
3454 >                                (e.key instanceof Comparable)) {
3455 >                                replaceWithTreeBin(tab, i, e.key);
3456 >                                break;
3457 >                            }
3458 >                        }
3459 >                    }
3460 >                }
3461 >            }
3462 >            if (!init) { // Can only happen if unsafely published.
3463 >                while (p != null) {
3464 >                    internalPut(p.key, p.val);
3465 >                    p = p.next;
3466 >                }
3467 >            }
3468 >        }
3469 >    }
3470 >
3471 >
3472 >    // -------------------------------------------------------
3473 >
3474 >    // Sams (single-abstract-method interfaces)
3475 >    /** Interface describing a void action of one argument */
3476 >    public interface Action<A> { void apply(A a); }
3477 >    /** Interface describing a void action of two arguments */
3478 >    public interface BiAction<A,B> { void apply(A a, B b); }
3479 >    /** Interface describing a function of one argument */
3480 >    public interface Fun<A,T> { T apply(A a); }
3481 >    /** Interface describing a function of two arguments */
3482 >    public interface BiFun<A,B,T> { T apply(A a, B b); }
3483 >    /** Interface describing a function of no arguments */
3484 >    public interface Generator<T> { T apply(); }
3485 >    /** Interface describing a function mapping its argument to a double */
3486 >    public interface ObjectToDouble<A> { double apply(A a); }
3487 >    /** Interface describing a function mapping its argument to a long */
3488 >    public interface ObjectToLong<A> { long apply(A a); }
3489 >    /** Interface describing a function mapping its argument to an int */
3490 >    public interface ObjectToInt<A> {int apply(A a); }
3491 >    /** Interface describing a function mapping two arguments to a double */
3492 >    public interface ObjectByObjectToDouble<A,B> { double apply(A a, B b); }
3493 >    /** Interface describing a function mapping two arguments to a long */
3494 >    public interface ObjectByObjectToLong<A,B> { long apply(A a, B b); }
3495 >    /** Interface describing a function mapping two arguments to an int */
3496 >    public interface ObjectByObjectToInt<A,B> {int apply(A a, B b); }
3497 >    /** Interface describing a function mapping a double to a double */
3498 >    public interface DoubleToDouble { double apply(double a); }
3499 >    /** Interface describing a function mapping a long to a long */
3500 >    public interface LongToLong { long apply(long a); }
3501 >    /** Interface describing a function mapping an int to an int */
3502 >    public interface IntToInt { int apply(int a); }
3503 >    /** Interface describing a function mapping two doubles to a double */
3504 >    public interface DoubleByDoubleToDouble { double apply(double a, double b); }
3505 >    /** Interface describing a function mapping two longs to a long */
3506 >    public interface LongByLongToLong { long apply(long a, long b); }
3507 >    /** Interface describing a function mapping two ints to an int */
3508 >    public interface IntByIntToInt { int apply(int a, int b); }
3509 >
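These single-abstract-method interfaces are typically supplied as anonymous classes (or as lambdas on Java 8 and later). A small sketch of a BiFun that concatenates two strings:

    static final BiFun<String, String, String> CONCAT =
        new BiFun<String, String, String>() {
            public String apply(String a, String b) { return a + b; }
        };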
3510 >
3511 >    // -------------------------------------------------------
3512 >
3513 >    /**
3514 >     * Performs the given action for each (key, value).
3515 >     *
3516 >     * @param action the action
3517 >     */
3518 >    public void forEach(BiAction<K,V> action) {
3519 >        ForkJoinTasks.forEach
3520 >            (this, action).invoke();
3521 >    }
3522 >
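A usage sketch (the map name and element types are illustrative): print every key/value pair; traversal order is unspecified and the action may run on multiple ForkJoin worker threads.

    static void dump(ConcurrentHashMapV8<String, Integer> map) {
        map.forEach(new ConcurrentHashMapV8.BiAction<String, Integer>() {
            public void apply(String k, Integer v) {
                System.out.println(k + "=" + v);
            }
        });
    }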
3523 >    /**
3524 >     * Performs the given action for each non-null transformation
3525 >     * of each (key, value).
3526 >     *
3527 >     * @param transformer a function returning the transformation
3528 >     * for an element, or null if there is no transformation (in
3529 >     * which case the action is not applied).
3530 >     * @param action the action
3531 >     */
3532 >    public <U> void forEach(BiFun<? super K, ? super V, ? extends U> transformer,
3533 >                            Action<U> action) {
3534 >        ForkJoinTasks.forEach
3535 >            (this, transformer, action).invoke();
3536 >    }
3537 >
3538 >    /**
3539 >     * Returns a non-null result from applying the given search
3540 >     * function on each (key, value), or null if none.  Upon
3541 >     * success, further element processing is suppressed and the
3542 >     * results of any other parallel invocations of the search
3543 >     * function are ignored.
3544 >     *
3545 >     * @param searchFunction a function returning a non-null
3546 >     * result on success, else null
3547 >     * @return a non-null result from applying the given search
3548 >     * function on each (key, value), or null if none
3549 >     */
3550 >    public <U> U search(BiFun<? super K, ? super V, ? extends U> searchFunction) {
3551 >        return ForkJoinTasks.search
3552 >            (this, searchFunction).invoke();
3553 >    }
3554 >
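A usage sketch (names and types are illustrative): return some key whose value exceeds a threshold, or null if there is none.

    static String findLarge(ConcurrentHashMapV8<String, Integer> map, final int threshold) {
        return map.search(new ConcurrentHashMapV8.BiFun<String, Integer, String>() {
            public String apply(String k, Integer v) {
                return (v.intValue() > threshold) ? k : null;  // a non-null result ends the search
            }
        });
    }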
3555 >    /**
3556 >     * Returns the result of accumulating the given transformation
3557 >     * of all (key, value) pairs using the given reducer to
3558 >     * combine values, or null if none.
3559 >     *
3560 >     * @param transformer a function returning the transformation
3561 >     * for an element, or null if there is no transformation (in
3562 >     * which case it is not combined).
3563 >     * @param reducer a commutative associative combining function
3564 >     * @return the result of accumulating the given transformation
3565 >     * of all (key, value) pairs
3566 >     */
3567 >    public <U> U reduce(BiFun<? super K, ? super V, ? extends U> transformer,
3568 >                        BiFun<? super U, ? super U, ? extends U> reducer) {
3569 >        return ForkJoinTasks.reduce
3570 >            (this, transformer, reducer).invoke();
3571 >    }
3572 >
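A usage sketch (names and types are illustrative): sum all values by mapping each (key, value) pair to a Long and combining with addition; the result is null for an empty map.

    static Long sumValues(ConcurrentHashMapV8<String, Integer> map) {
        return map.reduce(
            new ConcurrentHashMapV8.BiFun<String, Integer, Long>() {
                public Long apply(String k, Integer v) { return Long.valueOf(v.longValue()); }
            },
            new ConcurrentHashMapV8.BiFun<Long, Long, Long>() {
                public Long apply(Long a, Long b) { return Long.valueOf(a.longValue() + b.longValue()); }
            });
    }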
3573 >    /**
3574 >     * Returns the result of accumulating the given transformation
3575 >     * of all (key, value) pairs using the given reducer to
3576 >     * combine values, and the given basis as an identity value.
3577 >     *
3578 >     * @param transformer a function returning the transformation
3579 >     * for an element
3580 >     * @param basis the identity (initial default value) for the reduction
3581 >     * @param reducer a commutative associative combining function
3582 >     * @return the result of accumulating the given transformation
3583 >     * of all (key, value) pairs
3584 >     */
3585 >    public double reduceToDouble(ObjectByObjectToDouble<? super K, ? super V> transformer,
3586 >                                 double basis,
3587 >                                 DoubleByDoubleToDouble reducer) {
3588 >        return ForkJoinTasks.reduceToDouble
3589 >            (this, transformer, basis, reducer).invoke();
3590 >    }
3591 >
3592 >    /**
3593 >     * Returns the result of accumulating the given transformation
3594 >     * of all (key, value) pairs using the given reducer to
3595 >     * combine values, and the given basis as an identity value.
3596 >     *
3597 >     * @param transformer a function returning the transformation
3598 >     * for an element
3599 >     * @param basis the identity (initial default value) for the reduction
3600 >     * @param reducer a commutative associative combining function
3601 >     * @return the result of accumulating the given transformation
3602 >     * of all (key, value) pairs
3603 >     */
3604 >    public long reduceToLong(ObjectByObjectToLong<? super K, ? super V> transformer,
3605 >                             long basis,
3606 >                             LongByLongToLong reducer) {
3607 >        return ForkJoinTasks.reduceToLong
3608 >            (this, transformer, basis, reducer).invoke();
3609 >    }
3610 >
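The same sum as a sketch using this primitive-specialized form, which avoids boxing and takes an explicit identity value (the basis, 0L here):

    static long sumValuesPrimitive(ConcurrentHashMapV8<String, Integer> map) {
        return map.reduceToLong(
            new ConcurrentHashMapV8.ObjectByObjectToLong<String, Integer>() {
                public long apply(String k, Integer v) { return v.longValue(); }
            },
            0L,
            new ConcurrentHashMapV8.LongByLongToLong() {
                public long apply(long a, long b) { return a + b; }
            });
    }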
3611 >    /**
3612 >     * Returns the result of accumulating the given transformation
3613 >     * of all (key, value) pairs using the given reducer to
3614 >     * combine values, and the given basis as an identity value.
3615 >     *
3616 >     * @param transformer a function returning the transformation
3617 >     * for an element
3618 >     * @param basis the identity (initial default value) for the reduction
3619 >     * @param reducer a commutative associative combining function
3620 >     * @return the result of accumulating the given transformation
3621 >     * of all (key, value) pairs
3622 >     */
3623 >    public int reduceToInt(ObjectByObjectToInt<? super K, ? super V> transformer,
3624 >                           int basis,
3625 >                           IntByIntToInt reducer) {
3626 >        return ForkJoinTasks.reduceToInt
3627 >            (this, transformer, basis, reducer).invoke();
3628 >    }
3629 >
3630 >    /**
3631 >     * Performs the given action for each key.
3632 >     *
3633 >     * @param action the action
3634 >     */
3635 >    public void forEachKey(Action<K> action) {
3636 >        ForkJoinTasks.forEachKey
3637 >            (this, action).invoke();
3638 >    }
3639 >
3640 >    /**
3641 >     * Performs the given action for each non-null transformation
3642 >     * of each key.
3643 >     *
3644 >     * @param transformer a function returning the transformation
3645 >     * for an element, or null if there is no transformation (in
3646 >     * which case the action is not applied).
3647 >     * @param action the action
3648 >     */
3649 >    public <U> void forEachKey(Fun<? super K, ? extends U> transformer,
3650 >                               Action<U> action) {
3651 >        ForkJoinTasks.forEachKey
3652 >            (this, transformer, action).invoke();
3653 >    }
3654 >
3655 >    /**
3656 >     * Returns a non-null result from applying the given search
3657 >     * function on each key, or null if none. Upon success,
3658 >     * further element processing is suppressed and the results of
3659 >     * any other parallel invocations of the search function are
3660 >     * ignored.
3661 >     *
3662 >     * @param searchFunction a function returning a non-null
3663 >     * result on success, else null
3664 >     * @return a non-null result from applying the given search
3665 >     * function on each key, or null if none
3666 >     */
3667 >    public <U> U searchKeys(Fun<? super K, ? extends U> searchFunction) {
3668 >        return ForkJoinTasks.searchKeys
3669 >            (this, searchFunction).invoke();
3670 >    }
3671 >
3672 >    /**
3673 >     * Returns the result of accumulating all keys using the given
3674 >     * reducer to combine values, or null if none.
3675 >     *
3676 >     * @param reducer a commutative associative combining function
3677 >     * @return the result of accumulating all keys using the given
3678 >     * reducer to combine values, or null if none
3679 >     */
3680 >    public K reduceKeys(BiFun<? super K, ? super K, ? extends K> reducer) {
3681 >        return ForkJoinTasks.reduceKeys
3682 >            (this, reducer).invoke();
3683 >    }
3684 >
3685 >    /**
3686 >     * Returns the result of accumulating the given transformation
3687 >     * of all keys using the given reducer to combine values, or
3688 >     * null if none.
3689 >     *
3690 >     * @param transformer a function returning the transformation
3691 >     * for an element, or null if there is no transformation (in
3692 >     * which case it is not combined).
3693 >     * @param reducer a commutative associative combining function
3694 >     * @return the result of accumulating the given transformation
3695 >     * of all keys
3696 >     */
3697 >    public <U> U reduceKeys(Fun<? super K, ? extends U> transformer,
3698 >                            BiFun<? super U, ? super U, ? extends U> reducer) {
3699 >        return ForkJoinTasks.reduceKeys
3700 >            (this, transformer, reducer).invoke();
3701 >    }
3702 >
3703 >    /**
3704 >     * Returns the result of accumulating the given transformation
3705 >     * of all keys using the given reducer to combine values, and
3706 >     * the given basis as an identity value.
3707 >     *
3708 >     * @param transformer a function returning the transformation
3709 >     * for an element
3710 >     * @param basis the identity (initial default value) for the reduction
3711 >     * @param reducer a commutative associative combining function
3712 >     * @return  the result of accumulating the given transformation
3713 >     * of all keys
3714 >     */
3715 >    public double reduceKeysToDouble(ObjectToDouble<? super K> transformer,
3716 >                                     double basis,
3717 >                                     DoubleByDoubleToDouble reducer) {
3718 >        return ForkJoinTasks.reduceKeysToDouble
3719 >            (this, transformer, basis, reducer).invoke();
3720 >    }
3721 >
3722 >    /**
3723 >     * Returns the result of accumulating the given transformation
3724 >     * of all keys using the given reducer to combine values, and
3725 >     * the given basis as an identity value.
3726 >     *
3727 >     * @param transformer a function returning the transformation
3728 >     * for an element
3729 >     * @param basis the identity (initial default value) for the reduction
3730 >     * @param reducer a commutative associative combining function
3731 >     * @return the result of accumulating the given transformation
3732 >     * of all keys
3733 >     */
3734 >    public long reduceKeysToLong(ObjectToLong<? super K> transformer,
3735 >                                 long basis,
3736 >                                 LongByLongToLong reducer) {
3737 >        return ForkJoinTasks.reduceKeysToLong
3738 >            (this, transformer, basis, reducer).invoke();
3739 >    }
3740 >
3741 >    /**
3742 >     * Returns the result of accumulating the given transformation
3743 >     * of all keys using the given reducer to combine values, and
3744 >     * the given basis as an identity value.
3745 >     *
3746 >     * @param transformer a function returning the transformation
3747 >     * for an element
3748 >     * @param basis the identity (initial default value) for the reduction
3749 >     * @param reducer a commutative associative combining function
3750 >     * @return the result of accumulating the given transformation
3751 >     * of all keys
3752 >     */
3753 >    public int reduceKeysToInt(ObjectToInt<? super K> transformer,
3754 >                               int basis,
3755 >                               IntByIntToInt reducer) {
3756 >        return ForkJoinTasks.reduceKeysToInt
3757 >            (this, transformer, basis, reducer).invoke();
3758 >    }
3759 >
3760 >    /**
3761 >     * Performs the given action for each value.
3762 >     *
3763 >     * @param action the action
3764 >     */
3765 >    public void forEachValue(Action<V> action) {
3766 >        ForkJoinTasks.forEachValue
3767 >            (this, action).invoke();
3768 >    }
3769 >
3770 >    /**
3771 >     * Performs the given action for each non-null transformation
3772 >     * of each value.
3773 >     *
3774 >     * @param transformer a function returning the transformation
3775 >     * for an element, or null if there is no transformation (in
3776 >     * which case the action is not applied).
3777 >     */
3778 >    public <U> void forEachValue(Fun<? super V, ? extends U> transformer,
3779 >                                 Action<U> action) {
3780 >        ForkJoinTasks.forEachValue
3781 >            (this, transformer, action).invoke();
3782 >    }
3783 >
3784 >    /**
3785 >     * Returns a non-null result from applying the given search
3786 >     * function on each value, or null if none.  Upon success,
3787 >     * further element processing is suppressed and the results of
3788 >     * any other parallel invocations of the search function are
3789 >     * ignored.
3790 >     *
3791 >     * @param searchFunction a function returning a non-null
3792 >     * result on success, else null
3793 >     * @return a non-null result from applying the given search
3794 >     * function on each value, or null if none
3795 >     *
3796 >     */
3797 >    public <U> U searchValues(Fun<? super V, ? extends U> searchFunction) {
3798 >        return ForkJoinTasks.searchValues
3799 >            (this, searchFunction).invoke();
3800 >    }
3801 >
3802 >    /**
3803 >     * Returns the result of accumulating all values using the
3804 >     * given reducer to combine values, or null if none.
3805 >     *
3806 >     * @param reducer a commutative associative combining function
3807 >     * @return  the result of accumulating all values
3808 >     */
3809 >    public V reduceValues(BiFun<? super V, ? super V, ? extends V> reducer) {
3810 >        return ForkJoinTasks.reduceValues
3811 >            (this, reducer).invoke();
3812 >    }
3813 >
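A usage sketch (names and types are illustrative): find the maximum value, or null if the map is empty.

    static Integer maxValue(ConcurrentHashMapV8<String, Integer> map) {
        return map.reduceValues(new ConcurrentHashMapV8.BiFun<Integer, Integer, Integer>() {
            public Integer apply(Integer a, Integer b) {
                return (a.intValue() >= b.intValue()) ? a : b;
            }
        });
    }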
3814 >    /**
3815 >     * Returns the result of accumulating the given transformation
3816 >     * of all values using the given reducer to combine values, or
3817 >     * null if none.
3818 >     *
3819 >     * @param transformer a function returning the transformation
3820 >     * for an element, or null if there is no transformation (in
3821 >     * which case it is not combined).
3822 >     * @param reducer a commutative associative combining function
3823 >     * @return the result of accumulating the given transformation
3824 >     * of all values
3825 >     */
3826 >    public <U> U reduceValues(Fun<? super V, ? extends U> transformer,
3827 >                              BiFun<? super U, ? super U, ? extends U> reducer) {
3828 >        return ForkJoinTasks.reduceValues
3829 >            (this, transformer, reducer).invoke();
3830 >    }
3831 >
3832 >    /**
3833 >     * Returns the result of accumulating the given transformation
3834 >     * of all values using the given reducer to combine values,
3835 >     * and the given basis as an identity value.
3836 >     *
3837 >     * @param transformer a function returning the transformation
3838 >     * for an element
3839 >     * @param basis the identity (initial default value) for the reduction
3840 >     * @param reducer a commutative associative combining function
3841 >     * @return the result of accumulating the given transformation
3842 >     * of all values
3843 >     */
3844 >    public double reduceValuesToDouble(ObjectToDouble<? super V> transformer,
3845 >                                       double basis,
3846 >                                       DoubleByDoubleToDouble reducer) {
3847 >        return ForkJoinTasks.reduceValuesToDouble
3848 >            (this, transformer, basis, reducer).invoke();
3849 >    }
3850 >
3851 >    /**
3852 >     * Returns the result of accumulating the given transformation
3853 >     * of all values using the given reducer to combine values,
3854 >     * and the given basis as an identity value.
3855 >     *
3856 >     * @param transformer a function returning the transformation
3857 >     * for an element
3858 >     * @param basis the identity (initial default value) for the reduction
3859 >     * @param reducer a commutative associative combining function
3860 >     * @return the result of accumulating the given transformation
3861 >     * of all values
3862 >     */
3863 >    public long reduceValuesToLong(ObjectToLong<? super V> transformer,
3864 >                                   long basis,
3865 >                                   LongByLongToLong reducer) {
3866 >        return ForkJoinTasks.reduceValuesToLong
3867 >            (this, transformer, basis, reducer).invoke();
3868 >    }
3869 >
3870 >    /**
3871 >     * Returns the result of accumulating the given transformation
3872 >     * of all values using the given reducer to combine values,
3873 >     * and the given basis as an identity value.
3874 >     *
3875 >     * @param transformer a function returning the transformation
3876 >     * for an element
3877 >     * @param basis the identity (initial default value) for the reduction
3878 >     * @param reducer a commutative associative combining function
3879 >     * @return the result of accumulating the given transformation
3880 >     * of all values
3881 >     */
3882 >    public int reduceValuesToInt(ObjectToInt<? super V> transformer,
3883 >                                 int basis,
3884 >                                 IntByIntToInt reducer) {
3885 >        return ForkJoinTasks.reduceValuesToInt
3886 >            (this, transformer, basis, reducer).invoke();
3887 >    }
3888 >
3889 >    /**
3890 >     * Performs the given action for each entry.
3891 >     *
3892 >     * @param action the action
3893 >     */
3894 >    public void forEachEntry(Action<Map.Entry<K,V>> action) {
3895 >        ForkJoinTasks.forEachEntry
3896 >            (this, action).invoke();
3897 >    }
3898 >
3899 >    /**
3900 >     * Performs the given action for each non-null transformation
3901 >     * of each entry.
3902 >     *
3903 >     * @param transformer a function returning the transformation
3904 >     * for an element, or null if there is no transformation (in
3905 >     * which case the action is not applied).
3906 >     * @param action the action
3907 >     */
3908 >    public <U> void forEachEntry(Fun<Map.Entry<K,V>, ? extends U> transformer,
3909 >                                 Action<U> action) {
3910 >        ForkJoinTasks.forEachEntry
3911 >            (this, transformer, action).invoke();
3912 >    }
3913 >
3914 >    /**
3915 >     * Returns a non-null result from applying the given search
3916 >     * function on each entry, or null if none.  Upon success,
3917 >     * further element processing is suppressed and the results of
3918 >     * any other parallel invocations of the search function are
3919 >     * ignored.
3920 >     *
3921 >     * @param searchFunction a function returning a non-null
3922 >     * result on success, else null
3923 >     * @return a non-null result from applying the given search
3924 >     * function on each entry, or null if none
3925 >     */
3926 >    public <U> U searchEntries(Fun<Map.Entry<K,V>, ? extends U> searchFunction) {
3927 >        return ForkJoinTasks.searchEntries
3928 >            (this, searchFunction).invoke();
3929 >    }
3930 >
3931 >    /**
3932 >     * Returns the result of accumulating all entries using the
3933 >     * given reducer to combine values, or null if none.
3934 >     *
3935 >     * @param reducer a commutative associative combining function
3936 >     * @return the result of accumulating all entries
3937 >     */
3938 >    public Map.Entry<K,V> reduceEntries(BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
3939 >        return ForkJoinTasks.reduceEntries
3940 >            (this, reducer).invoke();
3941 >    }
3942 >
3943 >    /**
3944 >     * Returns the result of accumulating the given transformation
3945 >     * of all entries using the given reducer to combine values,
3946 >     * or null if none.
3947 >     *
3948 >     * @param transformer a function returning the transformation
3949 >     * for an element, or null if there is no transformation (in
3950 >     * which case it is not combined).
3951 >     * @param reducer a commutative associative combining function
3952 >     * @return the result of accumulating the given transformation
3953 >     * of all entries
3954 >     */
3955 >    public <U> U reduceEntries(Fun<Map.Entry<K,V>, ? extends U> transformer,
3956 >                               BiFun<? super U, ? super U, ? extends U> reducer) {
3957 >        return ForkJoinTasks.reduceEntries
3958 >            (this, transformer, reducer).invoke();
3959 >    }
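    // A sketch of the transforming entry reduction; the helper name is
    // hypothetical.  Each entry is rendered as "key=value" and the reducer
    // keeps the lexicographically greatest rendering (maximum is commutative
    // and associative); the result is null for an empty map.
    static String exampleMaxEntryRendering(ConcurrentHashMapV8<String,Integer> map) {
        return map.reduceEntries(
            new Fun<Map.Entry<String,Integer>, String>() {   // transformer: entry -> "key=value"
                public String apply(Map.Entry<String,Integer> e) {
                    return e.getKey() + "=" + e.getValue();
                }
            },
            new BiFun<String,String,String>() {              // reducer: lexicographic maximum
                public String apply(String a, String b) {
                    return (a.compareTo(b) >= 0) ? a : b;
                }
            });
    }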
3960 >
3961 >    /**
3962 >     * Returns the result of accumulating the given transformation
3963 >     * of all entries using the given reducer to combine values,
3964 >     * and the given basis as an identity value.
3965 >     *
3966 >     * @param transformer a function returning the transformation
3967 >     * for an element
3968 >     * @param basis the identity (initial default value) for the reduction
3969 >     * @param reducer a commutative associative combining function
3970 >     * @return the result of accumulating the given transformation
3971 >     * of all entries
3972 >     */
3973 >    public double reduceEntriesToDouble(ObjectToDouble<Map.Entry<K,V>> transformer,
3974 >                                        double basis,
3975 >                                        DoubleByDoubleToDouble reducer) {
3976 >        return ForkJoinTasks.reduceEntriesToDouble
3977 >            (this, transformer, basis, reducer).invoke();
3978 >    }
3979 >
3980 >    /**
3981 >     * Returns the result of accumulating the given transformation
3982 >     * of all entries using the given reducer to combine values,
3983 >     * and the given basis as an identity value.
3984 >     *
3985 >     * @param transformer a function returning the transformation
3986 >     * for an element
3987 >     * @param basis the identity (initial default value) for the reduction
3988 >     * @param reducer a commutative associative combining function
3989 >     * @return  the result of accumulating the given transformation
3990 >     * of all entries
3991 >     */
3992 >    public long reduceEntriesToLong(ObjectToLong<Map.Entry<K,V>> transformer,
3993 >                                    long basis,
3994 >                                    LongByLongToLong reducer) {
3995 >        return ForkJoinTasks.reduceEntriesToLong
3996 >            (this, transformer, basis, reducer).invoke();
3997 >    }
3998 >
3999 >    /**
4000 >     * Returns the result of accumulating the given transformation
4001 >     * of all entries using the given reducer to combine values,
4002 >     * and the given basis as an identity value.
4003 >     *
4004 >     * @param transformer a function returning the transformation
4005 >     * for an element
4006 >     * @param basis the identity (initial default value) for the reduction
4007 >     * @param reducer a commutative associative combining function
4008 >     * @return the result of accumulating the given transformation
4009 >     * of all entries
4010 >     */
4011 >    public int reduceEntriesToInt(ObjectToInt<Map.Entry<K,V>> transformer,
4012 >                                  int basis,
4013 >                                  IntByIntToInt reducer) {
4014 >        return ForkJoinTasks.reduceEntriesToInt
4015 >            (this, transformer, basis, reducer).invoke();
4016 >    }
4017 >
4018 >    /* ---------------- Views -------------- */
4019 >
4020 >    /**
4021 >     * Base class for views.
4022 >     */
4023 >    abstract static class CHMView<K, V> {
4024 >        final ConcurrentHashMapV8<K, V> map;
4025 >        CHMView(ConcurrentHashMapV8<K, V> map)  { this.map = map; }
4026 >
4027 >        /**
4028 >         * Returns the map backing this view.
4029 >         *
4030 >         * @return the map backing this view
4031 >         */
4032 >        public ConcurrentHashMapV8<K,V> getMap() { return map; }
4033 >
4034 >        public final int size()                 { return map.size(); }
4035 >        public final boolean isEmpty()          { return map.isEmpty(); }
4036 >        public final void clear()               { map.clear(); }
4037 >
4038 >        // implementations below rely on concrete classes supplying these
4039 >        public abstract Iterator<?> iterator();
4040 >        public abstract boolean contains(Object o);
4041 >        public abstract boolean remove(Object o);
4042 >
4043 >        private static final String oomeMsg = "Required array size too large";
4044 >
4045 >        public final Object[] toArray() {
4046 >            long sz = map.mappingCount();
4047 >            if (sz > (long)(MAX_ARRAY_SIZE))
4048 >                throw new OutOfMemoryError(oomeMsg);
4049 >            int n = (int)sz;
4050 >            Object[] r = new Object[n];
4051 >            int i = 0;
4052 >            Iterator<?> it = iterator();
4053 >            while (it.hasNext()) {
4054 >                if (i == n) {
4055 >                    if (n >= MAX_ARRAY_SIZE)
4056 >                        throw new OutOfMemoryError(oomeMsg);
4057 >                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
4058 >                        n = MAX_ARRAY_SIZE;
4059 >                    else
4060 >                        n += (n >>> 1) + 1;
4061 >                    r = Arrays.copyOf(r, n);
4062 >                }
4063 >                r[i++] = it.next();
4064 >            }
4065 >            return (i == n) ? r : Arrays.copyOf(r, i);
4066 >        }
4067 >
4068 >        @SuppressWarnings("unchecked") public final <T> T[] toArray(T[] a) {
4069 >            long sz = map.mappingCount();
4070 >            if (sz > (long)(MAX_ARRAY_SIZE))
4071 >                throw new OutOfMemoryError(oomeMsg);
4072 >            int m = (int)sz;
4073 >            T[] r = (a.length >= m) ? a :
4074 >                (T[])java.lang.reflect.Array
4075 >                .newInstance(a.getClass().getComponentType(), m);
4076 >            int n = r.length;
4077 >            int i = 0;
4078 >            Iterator<?> it = iterator();
4079 >            while (it.hasNext()) {
4080 >                if (i == n) {
4081 >                    if (n >= MAX_ARRAY_SIZE)
4082 >                        throw new OutOfMemoryError(oomeMsg);
4083 >                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
4084 >                        n = MAX_ARRAY_SIZE;
4085 >                    else
4086 >                        n += (n >>> 1) + 1;
4087 >                    r = Arrays.copyOf(r, n);
4088 >                }
4089 >                r[i++] = (T)it.next();
4090 >            }
4091 >            if (a == r && i < n) {
4092 >                r[i] = null; // null-terminate
4093 >                return r;
4094 >            }
4095 >            return (i == n) ? r : Arrays.copyOf(r, i);
4096 >        }
4097 >
4098 >        public final int hashCode() {
4099 >            int h = 0;
4100 >            for (Iterator<?> it = iterator(); it.hasNext();)
4101 >                h += it.next().hashCode();
4102 >            return h;
4103 >        }
4104 >
4105 >        public final String toString() {
4106 >            StringBuilder sb = new StringBuilder();
4107 >            sb.append('[');
4108 >            Iterator<?> it = iterator();
4109 >            if (it.hasNext()) {
4110 >                for (;;) {
4111 >                    Object e = it.next();
4112 >                    sb.append(e == this ? "(this Collection)" : e);
4113 >                    if (!it.hasNext())
4114 >                        break;
4115 >                    sb.append(',').append(' ');
4116 >                }
4117 >            }
4118 >            return sb.append(']').toString();
4119 >        }
4120 >
4121 >        public final boolean containsAll(Collection<?> c) {
4122 >            if (c != this) {
4123 >                for (Iterator<?> it = c.iterator(); it.hasNext();) {
4124 >                    Object e = it.next();
4125 >                    if (e == null || !contains(e))
4126 >                        return false;
4127 >                }
4128 >            }
4129 >            return true;
4130 >        }
4131 >
4132 >        public final boolean removeAll(Collection<?> c) {
4133 >            boolean modified = false;
4134 >            for (Iterator<?> it = iterator(); it.hasNext();) {
4135 >                if (c.contains(it.next())) {
4136 >                    it.remove();
4137 >                    modified = true;
4138 >                }
4139 >            }
4140 >            return modified;
4141 >        }
4142 >
4143 >        public final boolean retainAll(Collection<?> c) {
4144 >            boolean modified = false;
4145 >            for (Iterator<?> it = iterator(); it.hasNext();) {
4146 >                if (!c.contains(it.next())) {
4147 >                    it.remove();
4148 >                    modified = true;
4149 >                }
4150 >            }
4151 >            return modified;
4152 >        }
4153 >
4154 >    }
4155 >
4156 >    /**
4157 >     * A view of a ConcurrentHashMapV8 as a {@link Set} of keys, in
4158 >     * which additions may optionally be enabled by mapping to a
4159 >     * common value.  This class cannot be directly instantiated. See
4160 >     * {@link #keySet}, {@link #keySet(Object)}, {@link #newKeySet()},
4161 >     * {@link #newKeySet(int)}.
4162 >     */
4163 >    public static class KeySetView<K,V> extends CHMView<K,V> implements Set<K>, java.io.Serializable {
4164 >        private static final long serialVersionUID = 7249069246763182397L;
4165 >        private final V value;
4166 >        KeySetView(ConcurrentHashMapV8<K, V> map, V value) {  // non-public
4167 >            super(map);
4168 >            this.value = value;
4169 >        }
4170 >
4171 >        /**
4172 >         * Returns the default mapped value for additions,
4173 >         * or {@code null} if additions are not supported.
4174 >         *
4175 >         * @return the default mapped value for additions, or {@code null}
4176 >         * if not supported.
4177 >         */
4178 >        public V getMappedValue() { return value; }
4179 >
4180 >        // implement Set API
4181 >
4182 >        public boolean contains(Object o) { return map.containsKey(o); }
4183 >        public boolean remove(Object o)   { return map.remove(o) != null; }
4184 >
4185 >        /**
4186 >         * Returns a "weakly consistent" iterator that will never
4187 >         * throw {@link ConcurrentModificationException}, and
4188 >         * guarantees to traverse elements as they existed upon
4189 >         * construction of the iterator, and may (but is not
4190 >         * guaranteed to) reflect any modifications subsequent to
4191 >         * construction.
4192 >         *
4193 >         * @return an iterator over the keys of this map
4194 >         */
4195 >        public Iterator<K> iterator()     { return new KeyIterator<K,V>(map); }
4196 >        public boolean add(K e) {
4197 >            V v;
4198 >            if ((v = value) == null)
4199 >                throw new UnsupportedOperationException();
4200 >            if (e == null)
4201 >                throw new NullPointerException();
4202 >            return map.internalPutIfAbsent(e, v) == null;
4203 >        }
4204 >        public boolean addAll(Collection<? extends K> c) {
4205 >            boolean added = false;
4206 >            V v;
4207 >            if ((v = value) == null)
4208 >                throw new UnsupportedOperationException();
4209 >            for (K e : c) {
4210 >                if (e == null)
4211 >                    throw new NullPointerException();
4212 >                if (map.internalPutIfAbsent(e, v) == null)
4213 >                    added = true;
4214 >            }
4215 >            return added;
4216 >        }
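        // A sketch of an add-enabled key view, assuming a Boolean-valued map
        // and the keySet(V) factory referenced in the class javadoc above; the
        // helper name is hypothetical.  Each added key is mapped to
        // Boolean.TRUE in the backing map if it is not already present.
        static boolean exampleAddKeys(ConcurrentHashMapV8<String,Boolean> map) {
            KeySetView<String,Boolean> keys = map.keySet(Boolean.TRUE);
            boolean changed = keys.add("alpha");              // "alpha" -> TRUE if absent
            changed |= keys.addAll(Arrays.asList("beta", "gamma"));
            return changed;                                   // true if any key was newly added
        }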
4217 >        public boolean equals(Object o) {
4218 >            Set<?> c;
4219 >            return ((o instanceof Set) &&
4220 >                    ((c = (Set<?>)o) == this ||
4221 >                     (containsAll(c) && c.containsAll(this))));
4222 >        }
4223 >
4224 >        /**
4225 >         * Performs the given action for each key.
4226 >         *
4227 >         * @param action the action
4228 >         */
4229 >        public void forEach(Action<K> action) {
4230 >            ForkJoinTasks.forEachKey
4231 >                (map, action).invoke();
4232 >        }
4233 >
4234 >        /**
4235 >         * Performs the given action for each non-null transformation
4236 >         * of each key.
4237 >         *
4238 >         * @param transformer a function returning the transformation
4239 >         * for an element, or null of there is no transformation (in
4240 >         * which case the action is not applied).
4241 >         * @param action the action
4242 >         */
4243 >        public <U> void forEach(Fun<? super K, ? extends U> transformer,
4244 >                                Action<U> action) {
4245 >            ForkJoinTasks.forEachKey
4246 >                (map, transformer, action).invoke();
4247 >        }
4248 >
4249 >        /**
4250 >         * Returns a non-null result from applying the given search
4251 >         * function on each key, or null if none. Upon success,
4252 >         * further element processing is suppressed and the results of
4253 >         * any other parallel invocations of the search function are
4254 >         * ignored.
4255 >         *
4256 >         * @param searchFunction a function returning a non-null
4257 >         * result on success, else null
4258 >         * @return a non-null result from applying the given search
4259 >         * function on each key, or null if none
4260 >         */
4261 >        public <U> U search(Fun<? super K, ? extends U> searchFunction) {
4262 >            return ForkJoinTasks.searchKeys
4263 >                (map, searchFunction).invoke();
4264 >        }
4265 >
4266 >        /**
4267 >         * Returns the result of accumulating all keys using the given
4268 >         * reducer to combine values, or null if none.
4269 >         *
4270 >         * @param reducer a commutative associative combining function
4271 >         * @return the result of accumulating all keys using the given
4272 >         * reducer to combine values, or null if none
4273 >         */
4274 >        public K reduce(BiFun<? super K, ? super K, ? extends K> reducer) {
4275 >            return ForkJoinTasks.reduceKeys
4276 >                (map, reducer).invoke();
4277 >        }
4278 >
4279 >        /**
4280 >         * Returns the result of accumulating the given transformation
4281 >         * of all keys using the given reducer to combine values, and
4282 >         * the given basis as an identity value.
4283 >         *
4284 >         * @param transformer a function returning the transformation
4285 >         * for an element
4286 >         * @param basis the identity (initial default value) for the reduction
4287 >         * @param reducer a commutative associative combining function
4288 >         * @return  the result of accumulating the given transformation
4289 >         * of all keys
4290 >         */
4291 >        public double reduceToDouble(ObjectToDouble<? super K> transformer,
4292 >                                     double basis,
4293 >                                     DoubleByDoubleToDouble reducer) {
4294 >            return ForkJoinTasks.reduceKeysToDouble
4295 >                (map, transformer, basis, reducer).invoke();
4296 >        }
4297 >
4298 >
4299 >        /**
4300 >         * Returns the result of accumulating the given transformation
4301 >         * of all keys using the given reducer to combine values, and
4302 >         * the given basis as an identity value.
4303 >         *
4304 >         * @param transformer a function returning the transformation
4305 >         * for an element
4306 >         * @param basis the identity (initial default value) for the reduction
4307 >         * @param reducer a commutative associative combining function
4308 >         * @return the result of accumulating the given transformation
4309 >         * of all keys
4310 >         */
4311 >        public long reduceToLong(ObjectToLong<? super K> transformer,
4312 >                                 long basis,
4313 >                                 LongByLongToLong reducer) {
4314 >            return ForkJoinTasks.reduceKeysToLong
4315 >                (map, transformer, basis, reducer).invoke();
4316 >        }
4317 >
4318 >        /**
4319 >         * Returns the result of accumulating the given transformation
4320 >         * of all keys using the given reducer to combine values, and
4321 >         * the given basis as an identity value.
4322 >         *
4323 >         * @param transformer a function returning the transformation
4324 >         * for an element
4325 >         * @param basis the identity (initial default value) for the reduction
4326 >         * @param reducer a commutative associative combining function
4327 >         * @return the result of accumulating the given transformation
4328 >         * of all keys
4329 >         */
4330 >        public int reduceToInt(ObjectToInt<? super K> transformer,
4331 >                               int basis,
4332 >                               IntByIntToInt reducer) {
4333 >            return ForkJoinTasks.reduceKeysToInt
4334 >                (map, transformer, basis, reducer).invoke();
4335 >        }
4336 >
4337 >    }
4338 >
4339 >    /**
4340 >     * A view of a ConcurrentHashMapV8 as a {@link Collection} of
4341 >     * values, in which additions are disabled. This class cannot be
4342 >     * directly instantiated. See {@link #values}.
4343 >     *
4344 >     * <p>The view's {@code iterator} is a "weakly consistent" iterator
4345 >     * that will never throw {@link ConcurrentModificationException},
4346 >     * and guarantees to traverse elements as they existed upon
4347 >     * construction of the iterator, and may (but is not guaranteed to)
4348 >     * reflect any modifications subsequent to construction.
4349 >     */
4350 >    public static final class ValuesView<K,V> extends CHMView<K,V>
4351 >        implements Collection<V> {
4352 >        ValuesView(ConcurrentHashMapV8<K, V> map)   { super(map); }
4353 >        public final boolean contains(Object o) { return map.containsValue(o); }
4354 >        public final boolean remove(Object o) {
4355 >            if (o != null) {
4356 >                Iterator<V> it = new ValueIterator<K,V>(map);
4357 >                while (it.hasNext()) {
4358 >                    if (o.equals(it.next())) {
4359 >                        it.remove();
4360 >                        return true;
4361 >                    }
4362 >                }
4363 >            }
4364 >            return false;
4365 >        }
4366 >
4367 >        /**
4368 >         * Returns a "weakly consistent" iterator that will never
4369 >         * throw {@link ConcurrentModificationException}, and
4370 >         * guarantees to traverse elements as they existed upon
4371 >         * construction of the iterator, and may (but is not
4372 >         * guaranteed to) reflect any modifications subsequent to
4373 >         * construction.
4374 >         *
4375 >         * @return an iterator over the values of this map
4376 >         */
4377 >        public final Iterator<V> iterator() {
4378 >            return new ValueIterator<K,V>(map);
4379 >        }
4380 >        public final boolean add(V e) {
4381 >            throw new UnsupportedOperationException();
4382 >        }
4383 >        public final boolean addAll(Collection<? extends V> c) {
4384 >            throw new UnsupportedOperationException();
4385 >        }
4386 >
4387 >        /**
4388 >         * Performs the given action for each value.
4389 >         *
4390 >         * @param action the action
4391 >         */
4392 >        public void forEach(Action<V> action) {
4393 >            ForkJoinTasks.forEachValue
4394 >                (map, action).invoke();
4395 >        }
4396 >
4397 >        /**
4398 >         * Performs the given action for each non-null transformation
4399 >         * of each value.
4400 >         *
4401 >         * @param transformer a function returning the transformation
4402 >         * for an element, or null if there is no transformation (in
4403 >         * which case the action is not applied).
         * @param action the action
4404 >         */
4405 >        public <U> void forEach(Fun<? super V, ? extends U> transformer,
4406 >                                     Action<U> action) {
4407 >            ForkJoinTasks.forEachValue
4408 >                (map, transformer, action).invoke();
4409 >        }
4410 >
4411 >        /**
4412 >         * Returns a non-null result from applying the given search
4413 >         * function on each value, or null if none.  Upon success,
4414 >         * further element processing is suppressed and the results of
4415 >         * any other parallel invocations of the search function are
4416 >         * ignored.
4417 >         *
4418 >         * @param searchFunction a function returning a non-null
4419 >         * result on success, else null
4420 >         * @return a non-null result from applying the given search
4421 >         * function on each value, or null if none
4422 >         *
4423 >         */
4424 >        public <U> U search(Fun<? super V, ? extends U> searchFunction) {
4425 >            return ForkJoinTasks.searchValues
4426 >                (map, searchFunction).invoke();
4427 >        }
4428 >
4429 >        /**
4430 >         * Returns the result of accumulating all values using the
4431 >         * given reducer to combine values, or null if none.
4432 >         *
4433 >         * @param reducer a commutative associative combining function
4434 >         * @return  the result of accumulating all values
4435 >         */
4436 >        public V reduce(BiFun<? super V, ? super V, ? extends V> reducer) {
4437 >            return ForkJoinTasks.reduceValues
4438 >                (map, reducer).invoke();
4439 >        }
4440 >
4441 >        /**
4442 >         * Returns the result of accumulating the given transformation
4443 >         * of all values using the given reducer to combine values, or
4444 >         * null if none.
4445 >         *
4446 >         * @param transformer a function returning the transformation
4447 >         * for an element, or null if there is no transformation (in
4448 >         * which case it is not combined).
4449 >         * @param reducer a commutative associative combining function
4450 >         * @return the result of accumulating the given transformation
4451 >         * of all values
4452 >         */
4453 >        public <U> U reduce(Fun<? super V, ? extends U> transformer,
4454 >                            BiFun<? super U, ? super U, ? extends U> reducer) {
4455 >            return ForkJoinTasks.reduceValues
4456 >                (map, transformer, reducer).invoke();
4457 >        }
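        // A sketch of the transforming reduction on the value view, assuming
        // String values; the helper name is hypothetical.  Each value is
        // mapped to its length and the reducer keeps the maximum; the result
        // is null when the map is empty.
        static Integer exampleMaxValueLength(ValuesView<?,String> values) {
            return values.reduce(
                new Fun<String,Integer>() {                   // transformer: value -> length
                    public Integer apply(String v) { return v.length(); }
                },
                new BiFun<Integer,Integer,Integer>() {        // reducer: maximum of two lengths
                    public Integer apply(Integer a, Integer b) { return (a >= b) ? a : b; }
                });
        }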
4458 >
4459 >        /**
4460 >         * Returns the result of accumulating the given transformation
4461 >         * of all values using the given reducer to combine values,
4462 >         * and the given basis as an identity value.
4463 >         *
4464 >         * @param transformer a function returning the transformation
4465 >         * for an element
4466 >         * @param basis the identity (initial default value) for the reduction
4467 >         * @param reducer a commutative associative combining function
4468 >         * @return the result of accumulating the given transformation
4469 >         * of all values
4470 >         */
4471 >        public double reduceToDouble(ObjectToDouble<? super V> transformer,
4472 >                                     double basis,
4473 >                                     DoubleByDoubleToDouble reducer) {
4474 >            return ForkJoinTasks.reduceValuesToDouble
4475 >                (map, transformer, basis, reducer).invoke();
4476 >        }
4477 >
4478 >        /**
4479 >         * Returns the result of accumulating the given transformation
4480 >         * of all values using the given reducer to combine values,
4481 >         * and the given basis as an identity value.
4482 >         *
4483 >         * @param transformer a function returning the transformation
4484 >         * for an element
4485 >         * @param basis the identity (initial default value) for the reduction
4486 >         * @param reducer a commutative associative combining function
4487 >         * @return the result of accumulating the given transformation
4488 >         * of all values
4489 >         */
4490 >        public long reduceToLong(ObjectToLong<? super V> transformer,
4491 >                                 long basis,
4492 >                                 LongByLongToLong reducer) {
4493 >            return ForkJoinTasks.reduceValuesToLong
4494 >                (map, transformer, basis, reducer).invoke();
4495 >        }
4496 >
4497 >        /**
4498 >         * Returns the result of accumulating the given transformation
4499 >         * of all values using the given reducer to combine values,
4500 >         * and the given basis as an identity value.
4501 >         *
4502 >         * @param transformer a function returning the transformation
4503 >         * for an element
4504 >         * @param basis the identity (initial default value) for the reduction
4505 >         * @param reducer a commutative associative combining function
4506 >         * @return the result of accumulating the given transformation
4507 >         * of all values
4508 >         */
4509 >        public int reduceToInt(ObjectToInt<? super V> transformer,
4510 >                               int basis,
4511 >                               IntByIntToInt reducer) {
4512 >            return ForkJoinTasks.reduceValuesToInt
4513 >                (map, transformer, basis, reducer).invoke();
4514 >        }
4515 >
4516 >    }
4517 >
4518 >    /**
4519 >     * A view of a ConcurrentHashMapV8 as a {@link Set} of (key, value)
4520 >     * entries.  This class cannot be directly instantiated. See
4521 >     * {@link #entrySet}.
4522 >     */
4523 >    public static final class EntrySetView<K,V> extends CHMView<K,V>
4524 >        implements Set<Map.Entry<K,V>> {
4525 >        EntrySetView(ConcurrentHashMapV8<K, V> map) { super(map); }
4526 >        public final boolean contains(Object o) {
4527 >            Object k, v, r; Map.Entry<?,?> e;
4528 >            return ((o instanceof Map.Entry) &&
4529 >                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4530 >                    (r = map.get(k)) != null &&
4531 >                    (v = e.getValue()) != null &&
4532 >                    (v == r || v.equals(r)));
4533 >        }
4534 >        public final boolean remove(Object o) {
4535 >            Object k, v; Map.Entry<?,?> e;
4536 >            return ((o instanceof Map.Entry) &&
4537 >                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4538 >                    (v = e.getValue()) != null &&
4539 >                    map.remove(k, v));
4540 >        }
4541 >
4542 >        /**
4543 >         * Returns a "weakly consistent" iterator that will never
4544 >         * throw {@link ConcurrentModificationException}, and
4545 >         * guarantees to traverse elements as they existed upon
4546 >         * construction of the iterator, and may (but is not
4547 >         * guaranteed to) reflect any modifications subsequent to
4548 >         * construction.
4549 >         *
4550 >         * @return an iterator over the entries of this map
4551 >         */
4552 >        public final Iterator<Map.Entry<K,V>> iterator() {
4553 >            return new EntryIterator<K,V>(map);
4554 >        }
4555 >
4556 >        public final boolean add(Entry<K,V> e) {
4557 >            K key = e.getKey();
4558 >            V value = e.getValue();
4559 >            if (key == null || value == null)
4560 >                throw new NullPointerException();
4561 >            return map.internalPut(key, value) == null;
4562 >        }
4563 >        public final boolean addAll(Collection<? extends Entry<K,V>> c) {
4564 >            boolean added = false;
4565 >            for (Entry<K,V> e : c) {
4566 >                if (add(e))
4567 >                    added = true;
4568 >            }
4569 >            return added;
4570 >        }
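        // A sketch of adding through the entry view; the helper name is
        // hypothetical, and AbstractMap.SimpleEntry is used only as a
        // convenient Map.Entry implementation.  add writes the mapping into
        // the backing map and reports whether the key was previously absent.
        static boolean examplePopulate(EntrySetView<String,Integer> entries) {
            boolean changed = entries.add(
                new AbstractMap.SimpleEntry<String,Integer>("answer", 42));
            changed |= entries.addAll(Arrays.asList(
                new AbstractMap.SimpleEntry<String,Integer>("min", 0),
                new AbstractMap.SimpleEntry<String,Integer>("max", 100)));
            return changed;
        }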
4571 >        public boolean equals(Object o) {
4572 >            Set<?> c;
4573 >            return ((o instanceof Set) &&
4574 >                    ((c = (Set<?>)o) == this ||
4575 >                     (containsAll(c) && c.containsAll(this))));
4576 >        }
4577 >
4578 >        /**
4579 >         * Performs the given action for each entry.
4580 >         *
4581 >         * @param action the action
4582 >         */
4583 >        public void forEach(Action<Map.Entry<K,V>> action) {
4584 >            ForkJoinTasks.forEachEntry
4585 >                (map, action).invoke();
4586 >        }
4587 >
4588 >        /**
4589 >         * Performs the given action for each non-null transformation
4590 >         * of each entry.
4591 >         *
4592 >         * @param transformer a function returning the transformation
4593 >         * for an element, or null if there is no transformation (in
4594 >         * which case the action is not applied).
4595 >         * @param action the action
4596 >         */
4597 >        public <U> void forEach(Fun<Map.Entry<K,V>, ? extends U> transformer,
4598 >                                Action<U> action) {
4599 >            ForkJoinTasks.forEachEntry
4600 >                (map, transformer, action).invoke();
4601 >        }
4602 >
4603 >        /**
4604 >         * Returns a non-null result from applying the given search
4605 >         * function on each entry, or null if none.  Upon success,
4606 >         * further element processing is suppressed and the results of
4607 >         * any other parallel invocations of the search function are
4608 >         * ignored.
4609 >         *
4610 >         * @param searchFunction a function returning a non-null
4611 >         * result on success, else null
4612 >         * @return a non-null result from applying the given search
4613 >         * function on each entry, or null if none
4614 >         */
4615 >        public <U> U search(Fun<Map.Entry<K,V>, ? extends U> searchFunction) {
4616 >            return ForkJoinTasks.searchEntries
4617 >                (map, searchFunction).invoke();
4618 >        }
4619 >
4620 >        /**
4621 >         * Returns the result of accumulating all entries using the
4622 >         * given reducer to combine values, or null if none.
4623 >         *
4624 >         * @param reducer a commutative associative combining function
4625 >         * @return the result of accumulating all entries
4626 >         */
4627 >        public Map.Entry<K,V> reduce(BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4628 >            return ForkJoinTasks.reduceEntries
4629 >                (map, reducer).invoke();
4630 >        }
4631 >
4632 >        /**
4633 >         * Returns the result of accumulating the given transformation
4634 >         * of all entries using the given reducer to combine values,
4635 >         * or null if none.
4636 >         *
4637 >         * @param transformer a function returning the transformation
4638 >         * for an element, or null if there is no transformation (in
4639 >         * which case it is not combined).
4640 >         * @param reducer a commutative associative combining function
4641 >         * @return the result of accumulating the given transformation
4642 >         * of all entries
4643 >         */
4644 >        public <U> U reduce(Fun<Map.Entry<K,V>, ? extends U> transformer,
4645 >                            BiFun<? super U, ? super U, ? extends U> reducer) {
4646 >            return ForkJoinTasks.reduceEntries
4647 >                (map, transformer, reducer).invoke();
4648 >        }
4649 >
4650 >        /**
4651 >         * Returns the result of accumulating the given transformation
4652 >         * of all entries using the given reducer to combine values,
4653 >         * and the given basis as an identity value.
4654 >         *
4655 >         * @param transformer a function returning the transformation
4656 >         * for an element
4657 >         * @param basis the identity (initial default value) for the reduction
4658 >         * @param reducer a commutative associative combining function
4659 >         * @return the result of accumulating the given transformation
4660 >         * of all entries
4661 >         */
4662 >        public double reduceToDouble(ObjectToDouble<Map.Entry<K,V>> transformer,
4663 >                                     double basis,
4664 >                                     DoubleByDoubleToDouble reducer) {
4665 >            return ForkJoinTasks.reduceEntriesToDouble
4666 >                (map, transformer, basis, reducer).invoke();
4667 >        }
4668 >
4669 >        /**
4670 >         * Returns the result of accumulating the given transformation
4671 >         * of all entries using the given reducer to combine values,
4672 >         * and the given basis as an identity value.
4673 >         *
4674 >         * @param transformer a function returning the transformation
4675 >         * for an element
4676 >         * @param basis the identity (initial default value) for the reduction
4677 >         * @param reducer a commutative associative combining function
4678 >         * @return  the result of accumulating the given transformation
4679 >         * of all entries
4680 >         */
4681 >        public long reduceToLong(ObjectToLong<Map.Entry<K,V>> transformer,
4682 >                                 long basis,
4683 >                                 LongByLongToLong reducer) {
4684 >            return ForkJoinTasks.reduceEntriesToLong
4685 >                (map, transformer, basis, reducer).invoke();
4686 >        }
4687 >
4688 >        /**
4689 >         * Returns the result of accumulating the given transformation
4690 >         * of all entries using the given reducer to combine values,
4691 >         * and the given basis as an identity value.
4692 >         *
4693 >         * @param transformer a function returning the transformation
4694 >         * for an element
4695 >         * @param basis the identity (initial default value) for the reduction
4696 >         * @param reducer a commutative associative combining function
4697 >         * @return the result of accumulating the given transformation
4698 >         * of all entries
4699 >         */
4700 >        public int reduceToInt(ObjectToInt<Map.Entry<K,V>> transformer,
4701 >                               int basis,
4702 >                               IntByIntToInt reducer) {
4703 >            return ForkJoinTasks.reduceEntriesToInt
4704 >                (map, transformer, basis, reducer).invoke();
4705 >        }
4706 >
4707 >    }
4708 >
4709 >    // ---------------------------------------------------------------------
4710 >
4711 >    /**
4712 >     * Predefined tasks for performing bulk parallel operations on
4713 >     * ConcurrentHashMapV8s. These tasks follow the forms and rules used
4714 >     * for bulk operations. Each method has the same name, but returns
4715 >     * a task rather than invoking it. These methods may be useful in
4716 >     * custom applications such as submitting a task without waiting
4717 >     * for completion, using a custom pool, or combining with other
4718 >     * tasks.
4719 >     */
4720 >    public static class ForkJoinTasks {
4721 >        private ForkJoinTasks() {}
4722 >
4723 >        /**
4724 >         * Returns a task that when invoked, performs the given
4725 >         * action for each (key, value) pair.
4726 >         *
4727 >         * @param map the map
4728 >         * @param action the action
4729 >         * @return the task
4730 >         */
4731 >        public static <K,V> ForkJoinTask<Void> forEach
4732 >            (ConcurrentHashMapV8<K,V> map,
4733 >             BiAction<K,V> action) {
4734 >            if (action == null) throw new NullPointerException();
4735 >            return new ForEachMappingTask<K,V>(map, null, -1, action);
4736 >        }
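        // A sketch of the "custom pool" use case mentioned in the class
        // javadoc, assuming the JDK 7 java.util.concurrent.ForkJoinPool; the
        // helper name is hypothetical.  The factory only builds the task;
        // running it in a caller-supplied pool is up to the caller.
        static void examplePrintAllInPool(ConcurrentHashMapV8<String,Integer> map,
                                          java.util.concurrent.ForkJoinPool pool) {
            ForkJoinTask<Void> task = forEach(map, new BiAction<String,Integer>() {
                public void apply(String k, Integer v) {      // action applied to each (key, value) pair
                    System.out.println(k + "=" + v);
                }
            });
            pool.invoke(task);                                // runs in the given pool, waits for completion
        }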
4737 >
4738 >        /**
4739 >         * Returns a task that when invoked, performs the given
4740 >         * action for each non-null transformation of each (key, value) pair.
4741 >         *
4742 >         * @param map the map
4743 >         * @param transformer a function returning the transformation
4744 >         * for an element, or null if there is no transformation (in
4745 >         * which case the action is not applied)
4746 >         * @param action the action
4747 >         * @return the task
4748 >         */
4749 >        public static <K,V,U> ForkJoinTask<Void> forEach
4750 >            (ConcurrentHashMapV8<K,V> map,
4751 >             BiFun<? super K, ? super V, ? extends U> transformer,
4752 >             Action<U> action) {
4753 >            if (transformer == null || action == null)
4754 >                throw new NullPointerException();
4755 >            return new ForEachTransformedMappingTask<K,V,U>
4756 >                (map, null, -1, transformer, action);
4757 >        }
4758 >
4759 >        /**
4760 >         * Returns a task that when invoked, returns a non-null result
4761 >         * from applying the given search function on each (key,
4762 >         * value), or null if none. Upon success, further element
4763 >         * processing is suppressed and the results of any other
4764 >         * parallel invocations of the search function are ignored.
4765 >         *
4766 >         * @param map the map
4767 >         * @param searchFunction a function returning a non-null
4768 >         * result on success, else null
4769 >         * @return the task
4770 >         */
4771 >        public static <K,V,U> ForkJoinTask<U> search
4772 >            (ConcurrentHashMapV8<K,V> map,
4773 >             BiFun<? super K, ? super V, ? extends U> searchFunction) {
4774 >            if (searchFunction == null) throw new NullPointerException();
4775 >            return new SearchMappingsTask<K,V,U>
4776 >                (map, null, -1, searchFunction,
4777 >                 new AtomicReference<U>());
4778 >        }
4779 >
4780 >        /**
4781 >         * Returns a task that when invoked, returns the result of
4782 >         * accumulating the given transformation of all (key, value) pairs
4783 >         * using the given reducer to combine values, or null if none.
4784 >         *
4785 >         * @param map the map
4786 >         * @param transformer a function returning the transformation
4787 >         * for an element, or null if there is no transformation (in
4788 >         * which case it is not combined).
4789 >         * @param reducer a commutative associative combining function
4790 >         * @return the task
4791 >         */
4792 >        public static <K,V,U> ForkJoinTask<U> reduce
4793 >            (ConcurrentHashMapV8<K,V> map,
4794 >             BiFun<? super K, ? super V, ? extends U> transformer,
4795 >             BiFun<? super U, ? super U, ? extends U> reducer) {
4796 >            if (transformer == null || reducer == null)
4797 >                throw new NullPointerException();
4798 >            return new MapReduceMappingsTask<K,V,U>
4799 >                (map, null, -1, null, transformer, reducer);
4800 >        }
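        // A sketch of the "submit without waiting" use case, assuming Long
        // values and the JDK 7 java.util.concurrent.ForkJoinPool; the helper
        // name is hypothetical.  The reduction runs asynchronously in the
        // given pool while the caller could do other work, and join() then
        // returns the accumulated sum (or null if the map is empty).
        static Long exampleSumValuesAsync(ConcurrentHashMapV8<String,Long> map,
                                          java.util.concurrent.ForkJoinPool pool) {
            ForkJoinTask<Long> task = reduce(map,
                new BiFun<String,Long,Long>() {               // transformer: (key, value) -> value
                    public Long apply(String k, Long v) { return v; }
                },
                new BiFun<Long,Long,Long>() {                 // reducer: sum of transformed values
                    public Long apply(Long a, Long b) { return a + b; }
                });
            pool.submit(task);                                // starts the task without blocking
            // ... the caller could do unrelated work here ...
            return task.join();                               // waits for and returns the result
        }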
4801 >
4802 >        /**
4803 >         * Returns a task that when invoked, returns the result of
4804 >         * accumulating the given transformation of all (key, value) pairs
4805 >         * using the given reducer to combine values, and the given
4806 >         * basis as an identity value.
4807 >         *
4808 >         * @param map the map
4809 >         * @param transformer a function returning the transformation
4810 >         * for an element
4811 >         * @param basis the identity (initial default value) for the reduction
4812 >         * @param reducer a commutative associative combining function
4813 >         * @return the task
4814 >         */
4815 >        public static <K,V> ForkJoinTask<Double> reduceToDouble
4816 >            (ConcurrentHashMapV8<K,V> map,
4817 >             ObjectByObjectToDouble<? super K, ? super V> transformer,
4818 >             double basis,
4819 >             DoubleByDoubleToDouble reducer) {
4820 >            if (transformer == null || reducer == null)
4821 >                throw new NullPointerException();
4822 >            return new MapReduceMappingsToDoubleTask<K,V>
4823 >                (map, null, -1, null, transformer, basis, reducer);
4824 >        }
4825 >
4826 >        /**
4827 >         * Returns a task that when invoked, returns the result of
4828 >         * accumulating the given transformation of all (key, value) pairs
4829 >         * using the given reducer to combine values, and the given
4830 >         * basis as an identity value.
4831 >         *
4832 >         * @param map the map
4833 >         * @param transformer a function returning the transformation
4834 >         * for an element
4835 >         * @param basis the identity (initial default value) for the reduction
4836 >         * @param reducer a commutative associative combining function
4837 >         * @return the task
4838 >         */
4839 >        public static <K,V> ForkJoinTask<Long> reduceToLong
4840 >            (ConcurrentHashMapV8<K,V> map,
4841 >             ObjectByObjectToLong<? super K, ? super V> transformer,
4842 >             long basis,
4843 >             LongByLongToLong reducer) {
4844 >            if (transformer == null || reducer == null)
4845 >                throw new NullPointerException();
4846 >            return new MapReduceMappingsToLongTask<K,V>
4847 >                (map, null, -1, null, transformer, basis, reducer);
4848 >        }
4849 >
4850 >        /**
4851 >         * Returns a task that when invoked, returns the result of
4852 >         * accumulating the given transformation of all (key, value) pairs
4853 >         * using the given reducer to combine values, and the given
4854 >         * basis as an identity value.
4855 >         *
4856 >         * @param transformer a function returning the transformation
4857 >         * for an element
4858 >         * @param basis the identity (initial default value) for the reduction
4859 >         * @param reducer a commutative associative combining function
4860 >         * @return the task
4861 >         */
4862 >        public static <K,V> ForkJoinTask<Integer> reduceToInt
4863 >            (ConcurrentHashMapV8<K,V> map,
4864 >             ObjectByObjectToInt<? super K, ? super V> transformer,
4865 >             int basis,
4866 >             IntByIntToInt reducer) {
4867 >            if (transformer == null || reducer == null)
4868 >                throw new NullPointerException();
4869 >            return new MapReduceMappingsToIntTask<K,V>
4870 >                (map, null, -1, null, transformer, basis, reducer);
4871 >        }
4872 >
4873 >        /**
4874 >         * Returns a task that when invoked, performs the given action
4875 >         * for each key.
4876 >         *
4877 >         * @param map the map
4878 >         * @param action the action
4879 >         * @return the task
4880 >         */
4881 >        public static <K,V> ForkJoinTask<Void> forEachKey
4882 >            (ConcurrentHashMapV8<K,V> map,
4883 >             Action<K> action) {
4884 >            if (action == null) throw new NullPointerException();
4885 >            return new ForEachKeyTask<K,V>(map, null, -1, action);
4886 >        }
4887 >
4888 >        /**
4889 >         * Returns a task that when invoked, performs the given action
4890 >         * for each non-null transformation of each key.
4891 >         *
4892 >         * @param map the map
4893 >         * @param transformer a function returning the transformation
4894 >         * for an element, or null if there is no transformation (in
4895 >         * which case the action is not applied)
4896 >         * @param action the action
4897 >         * @return the task
4898 >         */
4899 >        public static <K,V,U> ForkJoinTask<Void> forEachKey
4900 >            (ConcurrentHashMapV8<K,V> map,
4901 >             Fun<? super K, ? extends U> transformer,
4902 >             Action<U> action) {
4903 >            if (transformer == null || action == null)
4904 >                throw new NullPointerException();
4905 >            return new ForEachTransformedKeyTask<K,V,U>
4906 >                (map, null, -1, transformer, action);
4907 >        }
4908 >
4909 >        /**
4910 >         * Returns a task that when invoked, returns a non-null result
4911 >         * from applying the given search function on each key, or
4912 >         * null if none.  Upon success, further element processing is
4913 >         * suppressed and the results of any other parallel
4914 >         * invocations of the search function are ignored.
4915 >         *
4916 >         * @param map the map
4917 >         * @param searchFunction a function returning a non-null
4918 >         * result on success, else null
4919 >         * @return the task
4920 >         */
4921 >        public static <K,V,U> ForkJoinTask<U> searchKeys
4922 >            (ConcurrentHashMapV8<K,V> map,
4923 >             Fun<? super K, ? extends U> searchFunction) {
4924 >            if (searchFunction == null) throw new NullPointerException();
4925 >            return new SearchKeysTask<K,V,U>
4926 >                (map, null, -1, searchFunction,
4927 >                 new AtomicReference<U>());
4928 >        }
4929 >
4930 >        /**
4931 >         * Returns a task that when invoked, returns the result of
4932 >         * accumulating all keys using the given reducer to combine
4933 >         * values, or null if none.
4934 >         *
4935 >         * @param map the map
4936 >         * @param reducer a commutative associative combining function
4937 >         * @return the task
4938 >         */
4939 >        public static <K,V> ForkJoinTask<K> reduceKeys
4940 >            (ConcurrentHashMapV8<K,V> map,
4941 >             BiFun<? super K, ? super K, ? extends K> reducer) {
4942 >            if (reducer == null) throw new NullPointerException();
4943 >            return new ReduceKeysTask<K,V>
4944 >                (map, null, -1, null, reducer);
4945 >        }
4946 >
4947 >        /**
4948 >         * Returns a task that when invoked, returns the result of
4949 >         * accumulating the given transformation of all keys using the given
4950 >         * reducer to combine values, or null if none.
4951 >         *
4952 >         * @param map the map
4953 >         * @param transformer a function returning the transformation
4954 >         * for an element, or null if there is no transformation (in
4955 >         * which case it is not combined).
4956 >         * @param reducer a commutative associative combining function
4957 >         * @return the task
4958 >         */
4959 >        public static <K,V,U> ForkJoinTask<U> reduceKeys
4960 >            (ConcurrentHashMapV8<K,V> map,
4961 >             Fun<? super K, ? extends U> transformer,
4962 >             BiFun<? super U, ? super U, ? extends U> reducer) {
4963 >            if (transformer == null || reducer == null)
4964 >                throw new NullPointerException();
4965 >            return new MapReduceKeysTask<K,V,U>
4966 >                (map, null, -1, null, transformer, reducer);
4967 >        }
4968 >
4969 >        /**
4970 >         * Returns a task that when invoked, returns the result of
4971 >         * accumulating the given transformation of all keys using the given
4972 >         * reducer to combine values, and the given basis as an
4973 >         * identity value.
4974 >         *
4975 >         * @param map the map
4976 >         * @param transformer a function returning the transformation
4977 >         * for an element
4978 >         * @param basis the identity (initial default value) for the reduction
4979 >         * @param reducer a commutative associative combining function
4980 >         * @return the task
4981 >         */
4982 >        public static <K,V> ForkJoinTask<Double> reduceKeysToDouble
4983 >            (ConcurrentHashMapV8<K,V> map,
4984 >             ObjectToDouble<? super K> transformer,
4985 >             double basis,
4986 >             DoubleByDoubleToDouble reducer) {
4987 >            if (transformer == null || reducer == null)
4988 >                throw new NullPointerException();
4989 >            return new MapReduceKeysToDoubleTask<K,V>
4990 >                (map, null, -1, null, transformer, basis, reducer);
4991 >        }
4992 >
4993 >        /**
4994 >         * Returns a task that when invoked, returns the result of
4995 >         * accumulating the given transformation of all keys using the given
4996 >         * reducer to combine values, and the given basis as an
4997 >         * identity value.
4998 >         *
4999 >         * @param map the map
5000 >         * @param transformer a function returning the transformation
5001 >         * for an element
5002 >         * @param basis the identity (initial default value) for the reduction
5003 >         * @param reducer a commutative associative combining function
5004 >         * @return the task
5005 >         */
5006 >        public static <K,V> ForkJoinTask<Long> reduceKeysToLong
5007 >            (ConcurrentHashMapV8<K,V> map,
5008 >             ObjectToLong<? super K> transformer,
5009 >             long basis,
5010 >             LongByLongToLong reducer) {
5011 >            if (transformer == null || reducer == null)
5012 >                throw new NullPointerException();
5013 >            return new MapReduceKeysToLongTask<K,V>
5014 >                (map, null, -1, null, transformer, basis, reducer);
5015 >        }
5016 >
5017 >        /**
5018 >         * Returns a task that when invoked, returns the result of
5019 >         * accumulating the given transformation of all keys using the given
5020 >         * reducer to combine values, and the given basis as an
5021 >         * identity value.
5022 >         *
5023 >         * @param map the map
5024 >         * @param transformer a function returning the transformation
5025 >         * for an element
5026 >         * @param basis the identity (initial default value) for the reduction
5027 >         * @param reducer a commutative associative combining function
5028 >         * @return the task
5029 >         */
5030 >        public static <K,V> ForkJoinTask<Integer> reduceKeysToInt
5031 >            (ConcurrentHashMapV8<K,V> map,
5032 >             ObjectToInt<? super K> transformer,
5033 >             int basis,
5034 >             IntByIntToInt reducer) {
5035 >            if (transformer == null || reducer == null)
5036 >                throw new NullPointerException();
5037 >            return new MapReduceKeysToIntTask<K,V>
5038 >                (map, null, -1, null, transformer, basis, reducer);
5039 >        }
5040 >
5041 >        /**
5042 >         * Returns a task that when invoked, performs the given action
5043 >         * for each value.
5044 >         *
5045 >         * @param map the map
5046 >         * @param action the action
         * @return the task
5047 >         */
5048 >        public static <K,V> ForkJoinTask<Void> forEachValue
5049 >            (ConcurrentHashMapV8<K,V> map,
5050 >             Action<V> action) {
5051 >            if (action == null) throw new NullPointerException();
5052 >            return new ForEachValueTask<K,V>(map, null, -1, action);
5053 >        }
5054 >
5055 >        /**
5056 >         * Returns a task that when invoked, performs the given action
5057 >         * for each non-null transformation of each value.
5058 >         *
5059 >         * @param map the map
5060 >         * @param transformer a function returning the transformation
5061 >         * for an element, or null if there is no transformation (in
5062 >         * which case the action is not applied)
5063 >         * @param action the action
         * @return the task
5064 >         */
5065 >        public static <K,V,U> ForkJoinTask<Void> forEachValue
5066 >            (ConcurrentHashMapV8<K,V> map,
5067 >             Fun<? super V, ? extends U> transformer,
5068 >             Action<U> action) {
5069 >            if (transformer == null || action == null)
5070 >                throw new NullPointerException();
5071 >            return new ForEachTransformedValueTask<K,V,U>
5072 >                (map, null, -1, transformer, action);
5073 >        }
5074 >
5075 >        /**
5076 >         * Returns a task that when invoked, returns a non-null result
5077 >         * from applying the given search function on each value, or
5078 >         * null if none.  Upon success, further element processing is
5079 >         * suppressed and the results of any other parallel
5080 >         * invocations of the search function are ignored.
5081 >         *
5082 >         * @param map the map
5083 >         * @param searchFunction a function returning a non-null
5084 >         * result on success, else null
5085 >         * @return the task
5086 >         */
5087 >        public static <K,V,U> ForkJoinTask<U> searchValues
5088 >            (ConcurrentHashMapV8<K,V> map,
5089 >             Fun<? super V, ? extends U> searchFunction) {
5090 >            if (searchFunction == null) throw new NullPointerException();
5091 >            return new SearchValuesTask<K,V,U>
5092 >                (map, null, -1, searchFunction,
5093 >                 new AtomicReference<U>());
5094 >        }
5095 >
5096 >        /**
5097 >         * Returns a task that when invoked, returns the result of
5098 >         * accumulating all values using the given reducer to combine
5099 >         * values, or null if none.
5100 >         *
5101 >         * @param map the map
5102 >         * @param reducer a commutative associative combining function
5103 >         * @return the task
5104 >         */
5105 >        public static <K,V> ForkJoinTask<V> reduceValues
5106 >            (ConcurrentHashMapV8<K,V> map,
5107 >             BiFun<? super V, ? super V, ? extends V> reducer) {
5108 >            if (reducer == null) throw new NullPointerException();
5109 >            return new ReduceValuesTask<K,V>
5110 >                (map, null, -1, null, reducer);
5111 >        }
5112 >
5113 >        /**
5114 >         * Returns a task that when invoked, returns the result of
5115 >         * accumulating the given transformation of all values using the
5116 >         * given reducer to combine values, or null if none.
5117 >         *
5118 >         * @param map the map
5119 >         * @param transformer a function returning the transformation
5120 >         * for an element, or null if there is no transformation (in
5121 >         * which case it is not combined).
5122 >         * @param reducer a commutative associative combining function
5123 >         * @return the task
5124 >         */
5125 >        public static <K,V,U> ForkJoinTask<U> reduceValues
5126 >            (ConcurrentHashMapV8<K,V> map,
5127 >             Fun<? super V, ? extends U> transformer,
5128 >             BiFun<? super U, ? super U, ? extends U> reducer) {
5129 >            if (transformer == null || reducer == null)
5130 >                throw new NullPointerException();
5131 >            return new MapReduceValuesTask<K,V,U>
5132 >                (map, null, -1, null, transformer, reducer);
5133 >        }
5134 >
5135 >        /**
5136 >         * Returns a task that when invoked, returns the result of
5137 >         * accumulating the given transformation of all values using the
5138 >         * given reducer to combine values, and the given basis as an
5139 >         * identity value.
5140 >         *
5141 >         * @param map the map
5142 >         * @param transformer a function returning the transformation
5143 >         * for an element
5144 >         * @param basis the identity (initial default value) for the reduction
5145 >         * @param reducer a commutative associative combining function
5146 >         * @return the task
5147 >         */
5148 >        public static <K,V> ForkJoinTask<Double> reduceValuesToDouble
5149 >            (ConcurrentHashMapV8<K,V> map,
5150 >             ObjectToDouble<? super V> transformer,
5151 >             double basis,
5152 >             DoubleByDoubleToDouble reducer) {
5153 >            if (transformer == null || reducer == null)
5154 >                throw new NullPointerException();
5155 >            return new MapReduceValuesToDoubleTask<K,V>
5156 >                (map, null, -1, null, transformer, basis, reducer);
5157 >        }
5158 >
5159 >        /**
5160 >         * Returns a task that when invoked, returns the result of
5161 >         * accumulating the given transformation of all values using the
5162 >         * given reducer to combine values, and the given basis as an
5163 >         * identity value.
5164 >         *
5165 >         * @param map the map
5166 >         * @param transformer a function returning the transformation
5167 >         * for an element
5168 >         * @param basis the identity (initial default value) for the reduction
5169 >         * @param reducer a commutative associative combining function
5170 >         * @return the task
5171 >         */
5172 >        public static <K,V> ForkJoinTask<Long> reduceValuesToLong
5173 >            (ConcurrentHashMapV8<K,V> map,
5174 >             ObjectToLong<? super V> transformer,
5175 >             long basis,
5176 >             LongByLongToLong reducer) {
5177 >            if (transformer == null || reducer == null)
5178 >                throw new NullPointerException();
5179 >            return new MapReduceValuesToLongTask<K,V>
5180 >                (map, null, -1, null, transformer, basis, reducer);
5181 >        }
5182 >
5183 >        /**
5184 >         * Returns a task that when invoked, returns the result of
5185 >         * accumulating the given transformation of all values using the
5186 >         * given reducer to combine values, and the given basis as an
5187 >         * identity value.
5188 >         *
5189 >         * @param map the map
5190 >         * @param transformer a function returning the transformation
5191 >         * for an element
5192 >         * @param basis the identity (initial default value) for the reduction
5193 >         * @param reducer a commutative associative combining function
5194 >         * @return the task
5195 >         */
5196 >        public static <K,V> ForkJoinTask<Integer> reduceValuesToInt
5197 >            (ConcurrentHashMapV8<K,V> map,
5198 >             ObjectToInt<? super V> transformer,
5199 >             int basis,
5200 >             IntByIntToInt reducer) {
5201 >            if (transformer == null || reducer == null)
5202 >                throw new NullPointerException();
5203 >            return new MapReduceValuesToIntTask<K,V>
5204 >                (map, null, -1, null, transformer, basis, reducer);
5205 >        }
5206 >
5207 >        /**
5208 >         * Returns a task that when invoked, performs the given action
5209 >         * for each entry.
5210 >         *
5211 >         * @param map the map
5212 >         * @param action the action
5213 >         */
5214 >        public static <K,V> ForkJoinTask<Void> forEachEntry
5215 >            (ConcurrentHashMapV8<K,V> map,
5216 >             Action<Map.Entry<K,V>> action) {
5217 >            if (action == null) throw new NullPointerException();
5218 >            return new ForEachEntryTask<K,V>(map, null, -1, action);
5219 >        }
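
        // Editorial usage sketch, not part of the original revision: applies an
        // action to every entry in parallel; invoke() returns only after all
        // forked subtasks have completed. Names and types are illustrative
        // assumptions only.
        static void examplePrintEntries(ConcurrentHashMapV8<String, Long> map) {
            forEachEntry(
                map,
                new Action<Map.Entry<String, Long>>() {
                    public void apply(Map.Entry<String, Long> e) {
                        System.out.println(e.getKey() + "=" + e.getValue());
                    }
                }).invoke();
        }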
5220 >
5221 >        /**
5222 >         * Returns a task that when invoked, performs the given action
5223 >         * for each non-null transformation of each entry.
5224 >         *
5225 >         * @param map the map
5226 >         * @param transformer a function returning the transformation
5227 >         * for an element, or null if there is no transformation (in
5228 >         * which case the action is not applied)
5229 >         * @param action the action
5230 >         */
5231 >        public static <K,V,U> ForkJoinTask<Void> forEachEntry
5232 >            (ConcurrentHashMapV8<K,V> map,
5233 >             Fun<Map.Entry<K,V>, ? extends U> transformer,
5234 >             Action<U> action) {
5235 >            if (transformer == null || action == null)
5236 >                throw new NullPointerException();
5237 >            return new ForEachTransformedEntryTask<K,V,U>
5238 >                (map, null, -1, transformer, action);
5239 >        }
5240 >
5241 >        /**
5242 >         * Returns a task that when invoked, returns a non-null result
5243 >         * from applying the given search function on each entry, or
5244 >         * null if none.  Upon success, further element processing is
5245 >         * suppressed and the results of any other parallel
5246 >         * invocations of the search function are ignored.
5247 >         *
5248 >         * @param map the map
5249 >         * @param searchFunction a function returning a non-null
5250 >         * result on success, else null
5251 >         * @return the task
5252 >         */
5253 >        public static <K,V,U> ForkJoinTask<U> searchEntries
5254 >            (ConcurrentHashMapV8<K,V> map,
5255 >             Fun<Map.Entry<K,V>, ? extends U> searchFunction) {
5256 >            if (searchFunction == null) throw new NullPointerException();
5257 >            return new SearchEntriesTask<K,V,U>
5258 >                (map, null, -1, searchFunction,
5259 >                 new AtomicReference<U>());
5260 >        }
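
        // Editorial usage sketch, not part of the original revision: searches for
        // any key whose mapped value is negative; once one probe succeeds, the
        // remaining parallel probes stop and their results are ignored. Names
        // and types are illustrative assumptions only.
        static String exampleAnyKeyWithNegativeValue(ConcurrentHashMapV8<String, Long> map) {
            return searchEntries(
                map,
                new Fun<Map.Entry<String, Long>, String>() {
                    public String apply(Map.Entry<String, Long> e) {
                        return (e.getValue() < 0L) ? e.getKey() : null;
                    }
                }).invoke(); // null if no entry matches
        }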
5261 >
5262 >        /**
5263 >         * Returns a task that when invoked, returns the result of
5264 >         * accumulating all entries using the given reducer to combine
5265 >         * values, or null if none.
5266 >         *
5267 >         * @param map the map
5268 >         * @param reducer a commutative associative combining function
5269 >         * @return the task
5270 >         */
5271 >        public static <K,V> ForkJoinTask<Map.Entry<K,V>> reduceEntries
5272 >            (ConcurrentHashMapV8<K,V> map,
5273 >             BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
5274 >            if (reducer == null) throw new NullPointerException();
5275 >            return new ReduceEntriesTask<K,V>
5276 >                (map, null, -1, null, reducer);
5277 >        }
5278 >
5279 >        /**
5280 >         * Returns a task that when invoked, returns the result of
5281 >         * accumulating the given transformation of all entries using the
5282 >         * given reducer to combine values, or null if none.
5283 >         *
5284 >         * @param map the map
5285 >         * @param transformer a function returning the transformation
5286 >         * for an element, or null if there is no transformation (in
5287 >         * which case it is not combined).
5288 >         * @param reducer a commutative associative combining function
5289 >         * @return the task
5290 >         */
5291 >        public static <K,V,U> ForkJoinTask<U> reduceEntries
5292 >            (ConcurrentHashMapV8<K,V> map,
5293 >             Fun<Map.Entry<K,V>, ? extends U> transformer,
5294 >             BiFun<? super U, ? super U, ? extends U> reducer) {
5295 >            if (transformer == null || reducer == null)
5296 >                throw new NullPointerException();
5297 >            return new MapReduceEntriesTask<K,V,U>
5298 >                (map, null, -1, null, transformer, reducer);
5299 >        }
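
        // Editorial usage sketch, not part of the original revision: transforms
        // each entry to a "key=value" string and reduces with min(), which is
        // commutative and associative as the contract requires. Names and types
        // are illustrative assumptions only.
        static String exampleSmallestEntryString(ConcurrentHashMapV8<String, Long> map) {
            return reduceEntries(
                map,
                new Fun<Map.Entry<String, Long>, String>() {
                    public String apply(Map.Entry<String, Long> e) {
                        return e.getKey() + "=" + e.getValue();
                    }
                },
                new BiFun<String, String, String>() {
                    public String apply(String a, String b) {
                        return (a.compareTo(b) <= 0) ? a : b;
                    }
                }).invoke(); // null when the map is empty
        }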
5300 >
5301 >        /**
5302 >         * Returns a task that when invoked, returns the result of
5303 >         * accumulating the given transformation of all entries using the
5304 >         * given reducer to combine values, and the given basis as an
5305 >         * identity value.
5306 >         *
5307 >         * @param map the map
5308 >         * @param transformer a function returning the transformation
5309 >         * for an element
5310 >         * @param basis the identity (initial default value) for the reduction
5311 >         * @param reducer a commutative associative combining function
5312 >         * @return the task
5313 >         */
5314 >        public static <K,V> ForkJoinTask<Double> reduceEntriesToDouble
5315 >            (ConcurrentHashMapV8<K,V> map,
5316 >             ObjectToDouble<Map.Entry<K,V>> transformer,
5317 >             double basis,
5318 >             DoubleByDoubleToDouble reducer) {
5319 >            if (transformer == null || reducer == null)
5320 >                throw new NullPointerException();
5321 >            return new MapReduceEntriesToDoubleTask<K,V>
5322 >                (map, null, -1, null, transformer, basis, reducer);
5323 >        }
5324 >
5325 >        /**
5326 >         * Returns a task that when invoked, returns the result of
5327 >         * accumulating the given transformation of all entries using the
5328 >         * given reducer to combine values, and the given basis as an
5329 >         * identity value.
5330 >         *
5331 >         * @param map the map
5332 >         * @param transformer a function returning the transformation
5333 >         * for an element
5334 >         * @param basis the identity (initial default value) for the reduction
5335 >         * @param reducer a commutative associative combining function
5336 >         * @return the task
5337 >         */
5338 >        public static <K,V> ForkJoinTask<Long> reduceEntriesToLong
5339 >            (ConcurrentHashMapV8<K,V> map,
5340 >             ObjectToLong<Map.Entry<K,V>> transformer,
5341 >             long basis,
5342 >             LongByLongToLong reducer) {
5343 >            if (transformer == null || reducer == null)
5344 >                throw new NullPointerException();
5345 >            return new MapReduceEntriesToLongTask<K,V>
5346 >                (map, null, -1, null, transformer, basis, reducer);
5347 >        }
5348 >
5349 >        /**
5350 >         * Returns a task that when invoked, returns the result of
5351 >         * accumulating the given transformation of all entries using the
5352 >         * given reducer to combine values, and the given basis as an
5353 >         * identity value.
5354 >         *
5355 >         * @param map the map
5356 >         * @param transformer a function returning the transformation
5357 >         * for an element
5358 >         * @param basis the identity (initial default value) for the reduction
5359 >         * @param reducer a commutative associative combining function
5360 >         * @return the task
5361 >         */
5362 >        public static <K,V> ForkJoinTask<Integer> reduceEntriesToInt
5363 >            (ConcurrentHashMapV8<K,V> map,
5364 >             ObjectToInt<Map.Entry<K,V>> transformer,
5365 >             int basis,
5366 >             IntByIntToInt reducer) {
5367 >            if (transformer == null || reducer == null)
5368 >                throw new NullPointerException();
5369 >            return new MapReduceEntriesToIntTask<K,V>
5370 >                (map, null, -1, null, transformer, basis, reducer);
5371 >        }
5372 >    }
5373 >
5374 >    // -------------------------------------------------------
5375 >
5376 >    /*
5377 >     * Task classes. Coded in a regular but ugly format/style to
5378 >     * simplify checks that each variant differs in the right way from
5379 >     * others.
5380 >     */
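
    /*
     * Editorial note, not part of the original revision: as this editor reads
     * it, each compute() below follows the same three-step shape. First, while
     * preSplit() reports remaining batch capacity (> 0), a right-hand subtask
     * covering part of the table is forked. Second, the task traverses its own
     * portion via advance()/nextKey, applying its action, search function, or
     * reduction locally. Third, plain forEach tasks simply call
     * propagateCompletion(); search tasks short-circuit by CASing a shared
     * AtomicReference and completing the root; reduction tasks walk the
     * CountedCompleter chain (firstComplete()/nextComplete()), folding each
     * finished "rights" subtask's result into its parent.
     */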
5381 >
5382 >    @SuppressWarnings("serial") static final class ForEachKeyTask<K,V>
5383 >        extends Traverser<K,V,Void> {
5384 >        final Action<K> action;
5385 >        ForEachKeyTask
5386 >            (ConcurrentHashMapV8<K,V> m, Traverser<K,V,?> p, int b,
5387 >             Action<K> action) {
5388 >            super(m, p, b);
5389 >            this.action = action;
5390 >        }
5391 >        @SuppressWarnings("unchecked") public final void compute() {
5392 >            final Action<K> action;
5393 >            if ((action = this.action) == null)
5394 >                throw new NullPointerException();
5395 >            for (int b; (b = preSplit()) > 0;)
5396 >                new ForEachKeyTask<K,V>(map, this, b, action).fork();
5397 >            while (advance() != null)
5398 >                action.apply((K)nextKey);
5399 >            propagateCompletion();
5400 >        }
5401 >    }
5402 >
5403 >    @SuppressWarnings("serial") static final class ForEachValueTask<K,V>
5404 >        extends Traverser<K,V,Void> {
5405 >        final Action<V> action;
5406 >        ForEachValueTask
5407 >            (ConcurrentHashMapV8<K,V> m, Traverser<K,V,?> p, int b,
5408 >             Action<V> action) {
5409 >            super(m, p, b);
5410 >            this.action = action;
5411 >        }
5412 >        @SuppressWarnings("unchecked") public final void compute() {
5413 >            final Action<V> action;
5414 >            if ((action = this.action) == null)
5415 >                throw new NullPointerException();
5416 >            for (int b; (b = preSplit()) > 0;)
5417 >                new ForEachValueTask<K,V>(map, this, b, action).fork();
5418 >            Object v;
5419 >            while ((v = advance()) != null)
5420 >                action.apply((V)v);
5421 >            propagateCompletion();
5422 >        }
5423 >    }
5424 >
5425 >    @SuppressWarnings("serial") static final class ForEachEntryTask<K,V>
5426 >        extends Traverser<K,V,Void> {
5427 >        final Action<Entry<K,V>> action;
5428 >        ForEachEntryTask
5429 >            (ConcurrentHashMapV8<K,V> m, Traverser<K,V,?> p, int b,
5430 >             Action<Entry<K,V>> action) {
5431 >            super(m, p, b);
5432 >            this.action = action;
5433 >        }
5434 >        @SuppressWarnings("unchecked") public final void compute() {
5435 >            final Action<Entry<K,V>> action;
5436 >            if ((action = this.action) == null)
5437 >                throw new NullPointerException();
5438 >            for (int b; (b = preSplit()) > 0;)
5439 >                new ForEachEntryTask<K,V>(map, this, b, action).fork();
5440 >            Object v;
5441 >            while ((v = advance()) != null)
5442 >                action.apply(entryFor((K)nextKey, (V)v));
5443 >            propagateCompletion();
5444 >        }
5445 >    }
5446 >
5447 >    @SuppressWarnings("serial") static final class ForEachMappingTask<K,V>
5448 >        extends Traverser<K,V,Void> {
5449 >        final BiAction<K,V> action;
5450 >        ForEachMappingTask
5451 >            (ConcurrentHashMapV8<K,V> m, Traverser<K,V,?> p, int b,
5452 >             BiAction<K,V> action) {
5453 >            super(m, p, b);
5454 >            this.action = action;
5455 >        }
5456 >        @SuppressWarnings("unchecked") public final void compute() {
5457 >            final BiAction<K,V> action;
5458 >            if ((action = this.action) == null)
5459 >                throw new NullPointerException();
5460 >            for (int b; (b = preSplit()) > 0;)
5461 >                new ForEachMappingTask<K,V>(map, this, b, action).fork();
5462 >            Object v;
5463 >            while ((v = advance()) != null)
5464 >                action.apply((K)nextKey, (V)v);
5465 >            propagateCompletion();
5466 >        }
5467 >    }
5468 >
5469 >    @SuppressWarnings("serial") static final class ForEachTransformedKeyTask<K,V,U>
5470 >        extends Traverser<K,V,Void> {
5471 >        final Fun<? super K, ? extends U> transformer;
5472 >        final Action<U> action;
5473 >        ForEachTransformedKeyTask
5474 >            (ConcurrentHashMapV8<K,V> m, Traverser<K,V,?> p, int b,
5475 >             Fun<? super K, ? extends U> transformer, Action<U> action) {
5476 >            super(m, p, b);
5477 >            this.transformer = transformer; this.action = action;
5478 >        }
5479 >        @SuppressWarnings("unchecked") public final void compute() {
5480 >            final Fun<? super K, ? extends U> transformer;
5481 >            final Action<U> action;
5482 >            if ((transformer = this.transformer) == null ||
5483 >                (action = this.action) == null)
5484 >                throw new NullPointerException();
5485 >            for (int b; (b = preSplit()) > 0;)
5486 >                new ForEachTransformedKeyTask<K,V,U>
5487 >                     (map, this, b, transformer, action).fork();
5488 >            U u;
5489 >            while (advance() != null) {
5490 >                if ((u = transformer.apply((K)nextKey)) != null)
5491 >                    action.apply(u);
5492 >            }
5493 >            propagateCompletion();
5494 >        }
5495 >    }
5496 >
5497 >    @SuppressWarnings("serial") static final class ForEachTransformedValueTask<K,V,U>
5498 >        extends Traverser<K,V,Void> {
5499 >        final Fun<? super V, ? extends U> transformer;
5500 >        final Action<U> action;
5501 >        ForEachTransformedValueTask
5502 >            (ConcurrentHashMapV8<K,V> m, Traverser<K,V,?> p, int b,
5503 >             Fun<? super V, ? extends U> transformer, Action<U> action) {
5504 >            super(m, p, b);
5505 >            this.transformer = transformer; this.action = action;
5506 >        }
5507 >        @SuppressWarnings("unchecked") public final void compute() {
5508 >            final Fun<? super V, ? extends U> transformer;
5509 >            final Action<U> action;
5510 >            if ((transformer = this.transformer) == null ||
5511 >                (action = this.action) == null)
5512 >                throw new NullPointerException();
5513 >            for (int b; (b = preSplit()) > 0;)
5514 >                new ForEachTransformedValueTask<K,V,U>
5515 >                    (map, this, b, transformer, action).fork();
5516 >            Object v; U u;
5517 >            while ((v = advance()) != null) {
5518 >                if ((u = transformer.apply((V)v)) != null)
5519 >                    action.apply(u);
5520 >            }
5521 >            propagateCompletion();
5522 >        }
5523 >    }
5524 >
5525 >    @SuppressWarnings("serial") static final class ForEachTransformedEntryTask<K,V,U>
5526 >        extends Traverser<K,V,Void> {
5527 >        final Fun<Map.Entry<K,V>, ? extends U> transformer;
5528 >        final Action<U> action;
5529 >        ForEachTransformedEntryTask
5530 >            (ConcurrentHashMapV8<K,V> m, Traverser<K,V,?> p, int b,
5531 >             Fun<Map.Entry<K,V>, ? extends U> transformer, Action<U> action) {
5532 >            super(m, p, b);
5533 >            this.transformer = transformer; this.action = action;
5534 >        }
5535 >        @SuppressWarnings("unchecked") public final void compute() {
5536 >            final Fun<Map.Entry<K,V>, ? extends U> transformer;
5537 >            final Action<U> action;
5538 >            if ((transformer = this.transformer) == null ||
5539 >                (action = this.action) == null)
5540 >                throw new NullPointerException();
5541 >            for (int b; (b = preSplit()) > 0;)
5542 >                new ForEachTransformedEntryTask<K,V,U>
5543 >                    (map, this, b, transformer, action).fork();
5544 >            Object v; U u;
5545 >            while ((v = advance()) != null) {
5546 >                if ((u = transformer.apply(entryFor((K)nextKey, (V)v))) != null)
5547 >                    action.apply(u);
5548 >            }
5549 >            propagateCompletion();
5550 >        }
5551 >    }
5552 >
5553 >    @SuppressWarnings("serial") static final class ForEachTransformedMappingTask<K,V,U>
5554 >        extends Traverser<K,V,Void> {
5555 >        final BiFun<? super K, ? super V, ? extends U> transformer;
5556 >        final Action<U> action;
5557 >        ForEachTransformedMappingTask
5558 >            (ConcurrentHashMapV8<K,V> m, Traverser<K,V,?> p, int b,
5559 >             BiFun<? super K, ? super V, ? extends U> transformer,
5560 >             Action<U> action) {
5561 >            super(m, p, b);
5562 >            this.transformer = transformer; this.action = action;
5563 >        }
5564 >        @SuppressWarnings("unchecked") public final void compute() {
5565 >            final BiFun<? super K, ? super V, ? extends U> transformer;
5566 >            final Action<U> action;
5567 >            if ((transformer = this.transformer) == null ||
5568 >                (action = this.action) == null)
5569 >                throw new NullPointerException();
5570 >            for (int b; (b = preSplit()) > 0;)
5571 >                new ForEachTransformedMappingTask<K,V,U>
5572 >                    (map, this, b, transformer, action).fork();
5573 >            Object v; U u;
5574 >            while ((v = advance()) != null) {
5575 >                if ((u = transformer.apply((K)nextKey, (V)v)) != null)
5576 >                    action.apply(u);
5577 >            }
5578 >            propagateCompletion();
5579 >        }
5580 >    }
5581 >
5582 >    @SuppressWarnings("serial") static final class SearchKeysTask<K,V,U>
5583 >        extends Traverser<K,V,U> {
5584 >        final Fun<? super K, ? extends U> searchFunction;
5585 >        final AtomicReference<U> result;
5586 >        SearchKeysTask
5587 >            (ConcurrentHashMapV8<K,V> m, Traverser<K,V,?> p, int b,
5588 >             Fun<? super K, ? extends U> searchFunction,
5589 >             AtomicReference<U> result) {
5590 >            super(m, p, b);
5591 >            this.searchFunction = searchFunction; this.result = result;
5592 >        }
5593 >        public final U getRawResult() { return result.get(); }
5594 >        @SuppressWarnings("unchecked") public final void compute() {
5595 >            final Fun<? super K, ? extends U> searchFunction;
5596 >            final AtomicReference<U> result;
5597 >            if ((searchFunction = this.searchFunction) == null ||
5598 >                (result = this.result) == null)
5599 >                throw new NullPointerException();
5600 >            for (int b;;) {
5601 >                if (result.get() != null)
5602 >                    return;
5603 >                if ((b = preSplit()) <= 0)
5604 >                    break;
5605 >                new SearchKeysTask<K,V,U>
5606 >                    (map, this, b, searchFunction, result).fork();
5607 >            }
5608 >            while (result.get() == null) {
5609 >                U u;
5610 >                if (advance() == null) {
5611 >                    propagateCompletion();
5612 >                    break;
5613 >                }
5614 >                if ((u = searchFunction.apply((K)nextKey)) != null) {
5615 >                    if (result.compareAndSet(null, u))
5616 >                        quietlyCompleteRoot();
5617 >                    break;
5618 >                }
5619 >            }
5620 >        }
5621 >    }
5622 >
5623 >    @SuppressWarnings("serial") static final class SearchValuesTask<K,V,U>
5624 >        extends Traverser<K,V,U> {
5625 >        final Fun<? super V, ? extends U> searchFunction;
5626 >        final AtomicReference<U> result;
5627 >        SearchValuesTask
5628 >            (ConcurrentHashMapV8<K,V> m, Traverser<K,V,?> p, int b,
5629 >             Fun<? super V, ? extends U> searchFunction,
5630 >             AtomicReference<U> result) {
5631 >            super(m, p, b);
5632 >            this.searchFunction = searchFunction; this.result = result;
5633 >        }
5634 >        public final U getRawResult() { return result.get(); }
5635 >        @SuppressWarnings("unchecked") public final void compute() {
5636 >            final Fun<? super V, ? extends U> searchFunction;
5637 >            final AtomicReference<U> result;
5638 >            if ((searchFunction = this.searchFunction) == null ||
5639 >                (result = this.result) == null)
5640 >                throw new NullPointerException();
5641 >            for (int b;;) {
5642 >                if (result.get() != null)
5643 >                    return;
5644 >                if ((b = preSplit()) <= 0)
5645 >                    break;
5646 >                new SearchValuesTask<K,V,U>
5647 >                    (map, this, b, searchFunction, result).fork();
5648 >            }
5649 >            while (result.get() == null) {
5650 >                Object v; U u;
5651 >                if ((v = advance()) == null) {
5652 >                    propagateCompletion();
5653 >                    break;
5654 >                }
5655 >                if ((u = searchFunction.apply((V)v)) != null) {
5656 >                    if (result.compareAndSet(null, u))
5657 >                        quietlyCompleteRoot();
5658 >                    break;
5659 >                }
5660 >            }
5661 >        }
5662 >    }
5663 >
5664 >    @SuppressWarnings("serial") static final class SearchEntriesTask<K,V,U>
5665 >        extends Traverser<K,V,U> {
5666 >        final Fun<Entry<K,V>, ? extends U> searchFunction;
5667 >        final AtomicReference<U> result;
5668 >        SearchEntriesTask
5669 >            (ConcurrentHashMapV8<K,V> m, Traverser<K,V,?> p, int b,
5670 >             Fun<Entry<K,V>, ? extends U> searchFunction,
5671 >             AtomicReference<U> result) {
5672 >            super(m, p, b);
5673 >            this.searchFunction = searchFunction; this.result = result;
5674 >        }
5675 >        public final U getRawResult() { return result.get(); }
5676 >        @SuppressWarnings("unchecked") public final void compute() {
5677 >            final Fun<Entry<K,V>, ? extends U> searchFunction;
5678 >            final AtomicReference<U> result;
5679 >            if ((searchFunction = this.searchFunction) == null ||
5680 >                (result = this.result) == null)
5681 >                throw new NullPointerException();
5682 >            for (int b;;) {
5683 >                if (result.get() != null)
5684 >                    return;
5685 >                if ((b = preSplit()) <= 0)
5686 >                    break;
5687 >                new SearchEntriesTask<K,V,U>
5688 >                    (map, this, b, searchFunction, result).fork();
5689 >            }
5690 >            while (result.get() == null) {
5691 >                Object v; U u;
5692 >                if ((v = advance()) == null) {
5693 >                    propagateCompletion();
5694 >                    break;
5695 >                }
5696 >                if ((u = searchFunction.apply(entryFor((K)nextKey, (V)v))) != null) {
5697 >                    if (result.compareAndSet(null, u))
5698 >                        quietlyCompleteRoot();
5699 >                    return;
5700 >                }
5701 >            }
5702 >        }
5703 >    }
5704 >
5705 >    @SuppressWarnings("serial") static final class SearchMappingsTask<K,V,U>
5706 >        extends Traverser<K,V,U> {
5707 >        final BiFun<? super K, ? super V, ? extends U> searchFunction;
5708 >        final AtomicReference<U> result;
5709 >        SearchMappingsTask
5710 >            (ConcurrentHashMapV8<K,V> m, Traverser<K,V,?> p, int b,
5711 >             BiFun<? super K, ? super V, ? extends U> searchFunction,
5712 >             AtomicReference<U> result) {
5713 >            super(m, p, b);
5714 >            this.searchFunction = searchFunction; this.result = result;
5715 >        }
5716 >        public final U getRawResult() { return result.get(); }
5717 >        @SuppressWarnings("unchecked") public final void compute() {
5718 >            final BiFun<? super K, ? super V, ? extends U> searchFunction;
5719 >            final AtomicReference<U> result;
5720 >            if ((searchFunction = this.searchFunction) == null ||
5721 >                (result = this.result) == null)
5722 >                throw new NullPointerException();
5723 >            for (int b;;) {
5724 >                if (result.get() != null)
5725 >                    return;
5726 >                if ((b = preSplit()) <= 0)
5727 >                    break;
5728 >                new SearchMappingsTask<K,V,U>
5729 >                    (map, this, b, searchFunction, result).fork();
5730 >            }
5731 >            while (result.get() == null) {
5732 >                Object v; U u;
5733 >                if ((v = advance()) == null) {
5734 >                    propagateCompletion();
5735 >                    break;
5736 >                }
5737 >                if ((u = searchFunction.apply((K)nextKey, (V)v)) != null) {
5738 >                    if (result.compareAndSet(null, u))
5739 >                        quietlyCompleteRoot();
5740 >                    break;
5741 >                }
5742 >            }
5743 >        }
5744 >    }
5745 >
5746 >    @SuppressWarnings("serial") static final class ReduceKeysTask<K,V>
5747 >        extends Traverser<K,V,K> {
5748 >        final BiFun<? super K, ? super K, ? extends K> reducer;
5749 >        K result;
5750 >        ReduceKeysTask<K,V> rights, nextRight;
5751 >        ReduceKeysTask
5752 >            (ConcurrentHashMapV8<K,V> m, Traverser<K,V,?> p, int b,
5753 >             ReduceKeysTask<K,V> nextRight,
5754 >             BiFun<? super K, ? super K, ? extends K> reducer) {
5755 >            super(m, p, b); this.nextRight = nextRight;
5756 >            this.reducer = reducer;
5757 >        }
5758 >        public final K getRawResult() { return result; }
5759 >        @SuppressWarnings("unchecked") public final void compute() {
5760 >            final BiFun<? super K, ? super K, ? extends K> reducer =
5761 >                this.reducer;
5762 >            if (reducer == null)
5763 >                throw new NullPointerException();
5764 >            for (int b; (b = preSplit()) > 0;)
5765 >                (rights = new ReduceKeysTask<K,V>
5766 >                 (map, this, b, rights, reducer)).fork();
5767 >            K r = null;
5768 >            while (advance() != null) {
5769 >                K u = (K)nextKey;
5770 >                r = (r == null) ? u : reducer.apply(r, u);
5771 >            }
5772 >            result = r;
5773 >            CountedCompleter<?> c;
5774 >            for (c = firstComplete(); c != null; c = c.nextComplete()) {
5775 >                ReduceKeysTask<K,V>
5776 >                    t = (ReduceKeysTask<K,V>)c,
5777 >                    s = t.rights;
5778 >                while (s != null) {
5779 >                    K tr, sr;
5780 >                    if ((sr = s.result) != null)
5781 >                        t.result = (((tr = t.result) == null) ? sr :
5782 >                                    reducer.apply(tr, sr));
5783 >                    s = t.rights = s.nextRight;
5784 >                }
5785 >            }
5786 >        }
5787 >    }
5788 >
5789 >    @SuppressWarnings("serial") static final class ReduceValuesTask<K,V>
5790 >        extends Traverser<K,V,V> {
5791 >        final BiFun<? super V, ? super V, ? extends V> reducer;
5792 >        V result;
5793 >        ReduceValuesTask<K,V> rights, nextRight;
5794 >        ReduceValuesTask
5795 >            (ConcurrentHashMapV8<K,V> m, Traverser<K,V,?> p, int b,
5796 >             ReduceValuesTask<K,V> nextRight,
5797 >             BiFun<? super V, ? super V, ? extends V> reducer) {
5798 >            super(m, p, b); this.nextRight = nextRight;
5799 >            this.reducer = reducer;
5800 >        }
5801 >        public final V getRawResult() { return result; }
5802 >        @SuppressWarnings("unchecked") public final void compute() {
5803 >            final BiFun<? super V, ? super V, ? extends V> reducer =
5804 >                this.reducer;
5805 >            if (reducer == null)
5806 >                throw new NullPointerException();
5807 >            for (int b; (b = preSplit()) > 0;)
5808 >                (rights = new ReduceValuesTask<K,V>
5809 >                 (map, this, b, rights, reducer)).fork();
5810 >            V r = null;
5811 >            Object v;
5812 >            while ((v = advance()) != null) {
5813 >                V u = (V)v;
5814 >                r = (r == null) ? u : reducer.apply(r, u);
5815 >            }
5816 >            result = r;
5817 >            CountedCompleter<?> c;
5818 >            for (c = firstComplete(); c != null; c = c.nextComplete()) {
5819 >                ReduceValuesTask<K,V>
5820 >                    t = (ReduceValuesTask<K,V>)c,
5821 >                    s = t.rights;
5822 >                while (s != null) {
5823 >                    V tr, sr;
5824 >                    if ((sr = s.result) != null)
5825 >                        t.result = (((tr = t.result) == null) ? sr :
5826 >                                    reducer.apply(tr, sr));
5827 >                    s = t.rights = s.nextRight;
5828 >                }
5829 >            }
5830 >        }
5831 >    }
5832 >
5833 >    @SuppressWarnings("serial") static final class ReduceEntriesTask<K,V>
5834 >        extends Traverser<K,V,Map.Entry<K,V>> {
5835 >        final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
5836 >        Map.Entry<K,V> result;
5837 >        ReduceEntriesTask<K,V> rights, nextRight;
5838 >        ReduceEntriesTask
5839 >            (ConcurrentHashMapV8<K,V> m, Traverser<K,V,?> p, int b,
5840 >             ReduceEntriesTask<K,V> nextRight,
5841 >             BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
5842 >            super(m, p, b); this.nextRight = nextRight;
5843 >            this.reducer = reducer;
5844 >        }
5845 >        public final Map.Entry<K,V> getRawResult() { return result; }
5846 >        @SuppressWarnings("unchecked") public final void compute() {
5847 >            final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer =
5848 >                this.reducer;
5849 >            if (reducer == null)
5850 >                throw new NullPointerException();
5851 >            for (int b; (b = preSplit()) > 0;)
5852 >                (rights = new ReduceEntriesTask<K,V>
5853 >                 (map, this, b, rights, reducer)).fork();
5854 >            Map.Entry<K,V> r = null;
5855 >            Object v;
5856 >            while ((v = advance()) != null) {
5857 >                Map.Entry<K,V> u = entryFor((K)nextKey, (V)v);
5858 >                r = (r == null) ? u : reducer.apply(r, u);
5859 >            }
5860 >            result = r;
5861 >            CountedCompleter<?> c;
5862 >            for (c = firstComplete(); c != null; c = c.nextComplete()) {
5863 >                ReduceEntriesTask<K,V>
5864 >                    t = (ReduceEntriesTask<K,V>)c,
5865 >                    s = t.rights;
5866 >                while (s != null) {
5867 >                    Map.Entry<K,V> tr, sr;
5868 >                    if ((sr = s.result) != null)
5869 >                        t.result = (((tr = t.result) == null) ? sr :
5870 >                                    reducer.apply(tr, sr));
5871 >                    s = t.rights = s.nextRight;
5872 >                }
5873 >            }
5874 >        }
5875 >    }
5876 >
5877 >    @SuppressWarnings("serial") static final class MapReduceKeysTask<K,V,U>
5878 >        extends Traverser<K,V,U> {
5879 >        final Fun<? super K, ? extends U> transformer;
5880 >        final BiFun<? super U, ? super U, ? extends U> reducer;
5881 >        U result;
5882 >        MapReduceKeysTask<K,V,U> rights, nextRight;
5883 >        MapReduceKeysTask
5884 >            (ConcurrentHashMapV8<K,V> m, Traverser<K,V,?> p, int b,
5885 >             MapReduceKeysTask<K,V,U> nextRight,
5886 >             Fun<? super K, ? extends U> transformer,
5887 >             BiFun<? super U, ? super U, ? extends U> reducer) {
5888 >            super(m, p, b); this.nextRight = nextRight;
5889 >            this.transformer = transformer;
5890 >            this.reducer = reducer;
5891 >        }
5892 >        public final U getRawResult() { return result; }
5893 >        @SuppressWarnings("unchecked") public final void compute() {
5894 >            final Fun<? super K, ? extends U> transformer =
5895 >                this.transformer;
5896 >            final BiFun<? super U, ? super U, ? extends U> reducer =
5897 >                this.reducer;
5898 >            if (transformer == null || reducer == null)
5899 >                throw new NullPointerException();
5900 >            for (int b; (b = preSplit()) > 0;)
5901 >                (rights = new MapReduceKeysTask<K,V,U>
5902 >                 (map, this, b, rights, transformer, reducer)).fork();
5903 >            U r = null, u;
5904 >            while (advance() != null) {
5905 >                if ((u = transformer.apply((K)nextKey)) != null)
5906 >                    r = (r == null) ? u : reducer.apply(r, u);
5907 >            }
5908 >            result = r;
5909 >            CountedCompleter<?> c;
5910 >            for (c = firstComplete(); c != null; c = c.nextComplete()) {
5911 >                MapReduceKeysTask<K,V,U>
5912 >                    t = (MapReduceKeysTask<K,V,U>)c,
5913 >                    s = t.rights;
5914 >                while (s != null) {
5915 >                    U tr, sr;
5916 >                    if ((sr = s.result) != null)
5917 >                        t.result = (((tr = t.result) == null) ? sr :
5918 >                                    reducer.apply(tr, sr));
5919 >                    s = t.rights = s.nextRight;
5920 >                }
5921 >            }
5922 >        }
5923 >    }
5924 >
5925 >    @SuppressWarnings("serial") static final class MapReduceValuesTask<K,V,U>
5926 >        extends Traverser<K,V,U> {
5927 >        final Fun<? super V, ? extends U> transformer;
5928 >        final BiFun<? super U, ? super U, ? extends U> reducer;
5929 >        U result;
5930 >        MapReduceValuesTask<K,V,U> rights, nextRight;
5931 >        MapReduceValuesTask
5932 >            (ConcurrentHashMapV8<K,V> m, Traverser<K,V,?> p, int b,
5933 >             MapReduceValuesTask<K,V,U> nextRight,
5934 >             Fun<? super V, ? extends U> transformer,
5935 >             BiFun<? super U, ? super U, ? extends U> reducer) {
5936 >            super(m, p, b); this.nextRight = nextRight;
5937 >            this.transformer = transformer;
5938 >            this.reducer = reducer;
5939 >        }
5940 >        public final U getRawResult() { return result; }
5941 >        @SuppressWarnings("unchecked") public final void compute() {
5942 >            final Fun<? super V, ? extends U> transformer =
5943 >                this.transformer;
5944 >            final BiFun<? super U, ? super U, ? extends U> reducer =
5945 >                this.reducer;
5946 >            if (transformer == null || reducer == null)
5947 >                throw new NullPointerException();
5948 >            for (int b; (b = preSplit()) > 0;)
5949 >                (rights = new MapReduceValuesTask<K,V,U>
5950 >                 (map, this, b, rights, transformer, reducer)).fork();
5951 >            U r = null, u;
5952 >            Object v;
5953 >            while ((v = advance()) != null) {
5954 >                if ((u = transformer.apply((V)v)) != null)
5955 >                    r = (r == null) ? u : reducer.apply(r, u);
5956 >            }
5957 >            result = r;
5958 >            CountedCompleter<?> c;
5959 >            for (c = firstComplete(); c != null; c = c.nextComplete()) {
5960 >                MapReduceValuesTask<K,V,U>
5961 >                    t = (MapReduceValuesTask<K,V,U>)c,
5962 >                    s = t.rights;
5963 >                while (s != null) {
5964 >                    U tr, sr;
5965 >                    if ((sr = s.result) != null)
5966 >                        t.result = (((tr = t.result) == null) ? sr :
5967 >                                    reducer.apply(tr, sr));
5968 >                    s = t.rights = s.nextRight;
5969 >                }
5970 >            }
5971 >        }
5972 >    }
5973 >
5974 >    @SuppressWarnings("serial") static final class MapReduceEntriesTask<K,V,U>
5975 >        extends Traverser<K,V,U> {
5976 >        final Fun<Map.Entry<K,V>, ? extends U> transformer;
5977 >        final BiFun<? super U, ? super U, ? extends U> reducer;
5978 >        U result;
5979 >        MapReduceEntriesTask<K,V,U> rights, nextRight;
5980 >        MapReduceEntriesTask
5981 >            (ConcurrentHashMapV8<K,V> m, Traverser<K,V,?> p, int b,
5982 >             MapReduceEntriesTask<K,V,U> nextRight,
5983 >             Fun<Map.Entry<K,V>, ? extends U> transformer,
5984 >             BiFun<? super U, ? super U, ? extends U> reducer) {
5985 >            super(m, p, b); this.nextRight = nextRight;
5986 >            this.transformer = transformer;
5987 >            this.reducer = reducer;
5988 >        }
5989 >        public final U getRawResult() { return result; }
5990 >        @SuppressWarnings("unchecked") public final void compute() {
5991 >            final Fun<Map.Entry<K,V>, ? extends U> transformer =
5992 >                this.transformer;
5993 >            final BiFun<? super U, ? super U, ? extends U> reducer =
5994 >                this.reducer;
5995 >            if (transformer == null || reducer == null)
5996 >                throw new NullPointerException();
5997 >            for (int b; (b = preSplit()) > 0;)
5998 >                (rights = new MapReduceEntriesTask<K,V,U>
5999 >                 (map, this, b, rights, transformer, reducer)).fork();
6000 >            U r = null, u;
6001 >            Object v;
6002 >            while ((v = advance()) != null) {
6003 >                if ((u = transformer.apply(entryFor((K)nextKey, (V)v))) != null)
6004 >                    r = (r == null) ? u : reducer.apply(r, u);
6005 >            }
6006 >            result = r;
6007 >            CountedCompleter<?> c;
6008 >            for (c = firstComplete(); c != null; c = c.nextComplete()) {
6009 >                MapReduceEntriesTask<K,V,U>
6010 >                    t = (MapReduceEntriesTask<K,V,U>)c,
6011 >                    s = t.rights;
6012 >                while (s != null) {
6013 >                    U tr, sr;
6014 >                    if ((sr = s.result) != null)
6015 >                        t.result = (((tr = t.result) == null) ? sr :
6016 >                                    reducer.apply(tr, sr));
6017 >                    s = t.rights = s.nextRight;
6018 >                }
6019 >            }
6020 >        }
6021 >    }
6022 >
6023 >    @SuppressWarnings("serial") static final class MapReduceMappingsTask<K,V,U>
6024 >        extends Traverser<K,V,U> {
6025 >        final BiFun<? super K, ? super V, ? extends U> transformer;
6026 >        final BiFun<? super U, ? super U, ? extends U> reducer;
6027 >        U result;
6028 >        MapReduceMappingsTask<K,V,U> rights, nextRight;
6029 >        MapReduceMappingsTask
6030 >            (ConcurrentHashMapV8<K,V> m, Traverser<K,V,?> p, int b,
6031 >             MapReduceMappingsTask<K,V,U> nextRight,
6032 >             BiFun<? super K, ? super V, ? extends U> transformer,
6033 >             BiFun<? super U, ? super U, ? extends U> reducer) {
6034 >            super(m, p, b); this.nextRight = nextRight;
6035 >            this.transformer = transformer;
6036 >            this.reducer = reducer;
6037 >        }
6038 >        public final U getRawResult() { return result; }
6039 >        @SuppressWarnings("unchecked") public final void compute() {
6040 >            final BiFun<? super K, ? super V, ? extends U> transformer =
6041 >                this.transformer;
6042 >            final BiFun<? super U, ? super U, ? extends U> reducer =
6043 >                this.reducer;
6044 >            if (transformer == null || reducer == null)
6045 >                throw new NullPointerException();
6046 >            for (int b; (b = preSplit()) > 0;)
6047 >                (rights = new MapReduceMappingsTask<K,V,U>
6048 >                 (map, this, b, rights, transformer, reducer)).fork();
6049 >            U r = null, u;
6050 >            Object v;
6051 >            while ((v = advance()) != null) {
6052 >                if ((u = transformer.apply((K)nextKey, (V)v)) != null)
6053 >                    r = (r == null) ? u : reducer.apply(r, u);
6054 >            }
6055 >            result = r;
6056 >            CountedCompleter<?> c;
6057 >            for (c = firstComplete(); c != null; c = c.nextComplete()) {
6058 >                MapReduceMappingsTask<K,V,U>
6059 >                    t = (MapReduceMappingsTask<K,V,U>)c,
6060 >                    s = t.rights;
6061 >                while (s != null) {
6062 >                    U tr, sr;
6063 >                    if ((sr = s.result) != null)
6064 >                        t.result = (((tr = t.result) == null) ? sr :
6065 >                                    reducer.apply(tr, sr));
6066 >                    s = t.rights = s.nextRight;
6067 >                }
6068 >            }
6069 >        }
6070 >    }
6071 >
6072 >    @SuppressWarnings("serial") static final class MapReduceKeysToDoubleTask<K,V>
6073 >        extends Traverser<K,V,Double> {
6074 >        final ObjectToDouble<? super K> transformer;
6075 >        final DoubleByDoubleToDouble reducer;
6076 >        final double basis;
6077 >        double result;
6078 >        MapReduceKeysToDoubleTask<K,V> rights, nextRight;
6079 >        MapReduceKeysToDoubleTask
6080 >            (ConcurrentHashMapV8<K,V> m, Traverser<K,V,?> p, int b,
6081 >             MapReduceKeysToDoubleTask<K,V> nextRight,
6082 >             ObjectToDouble<? super K> transformer,
6083 >             double basis,
6084 >             DoubleByDoubleToDouble reducer) {
6085 >            super(m, p, b); this.nextRight = nextRight;
6086 >            this.transformer = transformer;
6087 >            this.basis = basis; this.reducer = reducer;
6088 >        }
6089 >        public final Double getRawResult() { return result; }
6090 >        @SuppressWarnings("unchecked") public final void compute() {
6091 >            final ObjectToDouble<? super K> transformer =
6092 >                this.transformer;
6093 >            final DoubleByDoubleToDouble reducer = this.reducer;
6094 >            if (transformer == null || reducer == null)
6095 >                throw new NullPointerException();
6096 >            double r = this.basis;
6097 >            for (int b; (b = preSplit()) > 0;)
6098 >                (rights = new MapReduceKeysToDoubleTask<K,V>
6099 >                 (map, this, b, rights, transformer, r, reducer)).fork();
6100 >            while (advance() != null)
6101 >                r = reducer.apply(r, transformer.apply((K)nextKey));
6102 >            result = r;
6103 >            CountedCompleter<?> c;
6104 >            for (c = firstComplete(); c != null; c = c.nextComplete()) {
6105 >                MapReduceKeysToDoubleTask<K,V>
6106 >                    t = (MapReduceKeysToDoubleTask<K,V>)c,
6107 >                    s = t.rights;
6108 >                while (s != null) {
6109 >                    t.result = reducer.apply(t.result, s.result);
6110 >                    s = t.rights = s.nextRight;
6111 >                }
6112 >            }
6113 >        }
6114 >    }
6115 >
6116 >    @SuppressWarnings("serial") static final class MapReduceValuesToDoubleTask<K,V>
6117 >        extends Traverser<K,V,Double> {
6118 >        final ObjectToDouble<? super V> transformer;
6119 >        final DoubleByDoubleToDouble reducer;
6120 >        final double basis;
6121 >        double result;
6122 >        MapReduceValuesToDoubleTask<K,V> rights, nextRight;
6123 >        MapReduceValuesToDoubleTask
6124 >            (ConcurrentHashMapV8<K,V> m, Traverser<K,V,?> p, int b,
6125 >             MapReduceValuesToDoubleTask<K,V> nextRight,
6126 >             ObjectToDouble<? super V> transformer,
6127 >             double basis,
6128 >             DoubleByDoubleToDouble reducer) {
6129 >            super(m, p, b); this.nextRight = nextRight;
6130 >            this.transformer = transformer;
6131 >            this.basis = basis; this.reducer = reducer;
6132 >        }
6133 >        public final Double getRawResult() { return result; }
6134 >        @SuppressWarnings("unchecked") public final void compute() {
6135 >            final ObjectToDouble<? super V> transformer =
6136 >                this.transformer;
6137 >            final DoubleByDoubleToDouble reducer = this.reducer;
6138 >            if (transformer == null || reducer == null)
6139 >                throw new NullPointerException();
6140 >            double r = this.basis;
6141 >            for (int b; (b = preSplit()) > 0;)
6142 >                (rights = new MapReduceValuesToDoubleTask<K,V>
6143 >                 (map, this, b, rights, transformer, r, reducer)).fork();
6144 >            Object v;
6145 >            while ((v = advance()) != null)
6146 >                r = reducer.apply(r, transformer.apply((V)v));
6147 >            result = r;
6148 >            CountedCompleter<?> c;
6149 >            for (c = firstComplete(); c != null; c = c.nextComplete()) {
6150 >                MapReduceValuesToDoubleTask<K,V>
6151 >                    t = (MapReduceValuesToDoubleTask<K,V>)c,
6152 >                    s = t.rights;
6153 >                while (s != null) {
6154 >                    t.result = reducer.apply(t.result, s.result);
6155 >                    s = t.rights = s.nextRight;
6156 >                }
6157 >            }
6158 >        }
6159 >    }
6160 >
6161 >    @SuppressWarnings("serial") static final class MapReduceEntriesToDoubleTask<K,V>
6162 >        extends Traverser<K,V,Double> {
6163 >        final ObjectToDouble<Map.Entry<K,V>> transformer;
6164 >        final DoubleByDoubleToDouble reducer;
6165 >        final double basis;
6166 >        double result;
6167 >        MapReduceEntriesToDoubleTask<K,V> rights, nextRight;
6168 >        MapReduceEntriesToDoubleTask
6169 >            (ConcurrentHashMapV8<K,V> m, Traverser<K,V,?> p, int b,
6170 >             MapReduceEntriesToDoubleTask<K,V> nextRight,
6171 >             ObjectToDouble<Map.Entry<K,V>> transformer,
6172 >             double basis,
6173 >             DoubleByDoubleToDouble reducer) {
6174 >            super(m, p, b); this.nextRight = nextRight;
6175 >            this.transformer = transformer;
6176 >            this.basis = basis; this.reducer = reducer;
6177 >        }
6178 >        public final Double getRawResult() { return result; }
6179 >        @SuppressWarnings("unchecked") public final void compute() {
6180 >            final ObjectToDouble<Map.Entry<K,V>> transformer =
6181 >                this.transformer;
6182 >            final DoubleByDoubleToDouble reducer = this.reducer;
6183 >            if (transformer == null || reducer == null)
6184 >                throw new NullPointerException();
6185 >            double r = this.basis;
6186 >            for (int b; (b = preSplit()) > 0;)
6187 >                (rights = new MapReduceEntriesToDoubleTask<K,V>
6188 >                 (map, this, b, rights, transformer, r, reducer)).fork();
6189 >            Object v;
6190 >            while ((v = advance()) != null)
6191 >                r = reducer.apply(r, transformer.apply(entryFor((K)nextKey, (V)v)));
6192 >            result = r;
6193 >            CountedCompleter<?> c;
6194 >            for (c = firstComplete(); c != null; c = c.nextComplete()) {
6195 >                MapReduceEntriesToDoubleTask<K,V>
6196 >                    t = (MapReduceEntriesToDoubleTask<K,V>)c,
6197 >                    s = t.rights;
6198 >                while (s != null) {
6199 >                    t.result = reducer.apply(t.result, s.result);
6200 >                    s = t.rights = s.nextRight;
6201 >                }
6202 >            }
6203 >        }
6204 >    }
6205 >
6206 >    @SuppressWarnings("serial") static final class MapReduceMappingsToDoubleTask<K,V>
6207 >        extends Traverser<K,V,Double> {
6208 >        final ObjectByObjectToDouble<? super K, ? super V> transformer;
6209 >        final DoubleByDoubleToDouble reducer;
6210 >        final double basis;
6211 >        double result;
6212 >        MapReduceMappingsToDoubleTask<K,V> rights, nextRight;
6213 >        MapReduceMappingsToDoubleTask
6214 >            (ConcurrentHashMapV8<K,V> m, Traverser<K,V,?> p, int b,
6215 >             MapReduceMappingsToDoubleTask<K,V> nextRight,
6216 >             ObjectByObjectToDouble<? super K, ? super V> transformer,
6217 >             double basis,
6218 >             DoubleByDoubleToDouble reducer) {
6219 >            super(m, p, b); this.nextRight = nextRight;
6220 >            this.transformer = transformer;
6221 >            this.basis = basis; this.reducer = reducer;
6222 >        }
6223 >        public final Double getRawResult() { return result; }
6224 >        @SuppressWarnings("unchecked") public final void compute() {
6225 >            final ObjectByObjectToDouble<? super K, ? super V> transformer =
6226 >                this.transformer;
6227 >            final DoubleByDoubleToDouble reducer = this.reducer;
6228 >            if (transformer == null || reducer == null)
6229 >                throw new NullPointerException();
6230 >            double r = this.basis;
6231 >            for (int b; (b = preSplit()) > 0;)
6232 >                (rights = new MapReduceMappingsToDoubleTask<K,V>
6233 >                 (map, this, b, rights, transformer, r, reducer)).fork();
6234 >            Object v;
6235 >            while ((v = advance()) != null)
6236 >                r = reducer.apply(r, transformer.apply((K)nextKey, (V)v));
6237 >            result = r;
6238 >            CountedCompleter<?> c;
6239 >            for (c = firstComplete(); c != null; c = c.nextComplete()) {
6240 >                MapReduceMappingsToDoubleTask<K,V>
6241 >                    t = (MapReduceMappingsToDoubleTask<K,V>)c,
6242 >                    s = t.rights;
6243 >                while (s != null) {
6244 >                    t.result = reducer.apply(t.result, s.result);
6245 >                    s = t.rights = s.nextRight;
6246 >                }
6247 >            }
6248 >        }
6249 >    }
6250 >
6251 >    @SuppressWarnings("serial") static final class MapReduceKeysToLongTask<K,V>
6252 >        extends Traverser<K,V,Long> {
6253 >        final ObjectToLong<? super K> transformer;
6254 >        final LongByLongToLong reducer;
6255 >        final long basis;
6256 >        long result;
6257 >        MapReduceKeysToLongTask<K,V> rights, nextRight;
6258 >        MapReduceKeysToLongTask
6259 >            (ConcurrentHashMapV8<K,V> m, Traverser<K,V,?> p, int b,
6260 >             MapReduceKeysToLongTask<K,V> nextRight,
6261 >             ObjectToLong<? super K> transformer,
6262 >             long basis,
6263 >             LongByLongToLong reducer) {
6264 >            super(m, p, b); this.nextRight = nextRight;
6265 >            this.transformer = transformer;
6266 >            this.basis = basis; this.reducer = reducer;
6267 >        }
6268 >        public final Long getRawResult() { return result; }
6269 >        @SuppressWarnings("unchecked") public final void compute() {
6270 >            final ObjectToLong<? super K> transformer =
6271 >                this.transformer;
6272 >            final LongByLongToLong reducer = this.reducer;
6273 >            if (transformer == null || reducer == null)
6274 >                throw new NullPointerException();
6275 >            long r = this.basis;
6276 >            for (int b; (b = preSplit()) > 0;)
6277 >                (rights = new MapReduceKeysToLongTask<K,V>
6278 >                 (map, this, b, rights, transformer, r, reducer)).fork();
6279 >            while (advance() != null)
6280 >                r = reducer.apply(r, transformer.apply((K)nextKey));
6281 >            result = r;
6282 >            CountedCompleter<?> c;
6283 >            for (c = firstComplete(); c != null; c = c.nextComplete()) {
6284 >                MapReduceKeysToLongTask<K,V>
6285 >                    t = (MapReduceKeysToLongTask<K,V>)c,
6286 >                    s = t.rights;
6287 >                while (s != null) {
6288 >                    t.result = reducer.apply(t.result, s.result);
6289 >                    s = t.rights = s.nextRight;
6290 >                }
6291 >            }
6292 >        }
6293 >    }
6294 >
6295 >    @SuppressWarnings("serial") static final class MapReduceValuesToLongTask<K,V>
6296 >        extends Traverser<K,V,Long> {
6297 >        final ObjectToLong<? super V> transformer;
6298 >        final LongByLongToLong reducer;
6299 >        final long basis;
6300 >        long result;
6301 >        MapReduceValuesToLongTask<K,V> rights, nextRight;
6302 >        MapReduceValuesToLongTask
6303 >            (ConcurrentHashMapV8<K,V> m, Traverser<K,V,?> p, int b,
6304 >             MapReduceValuesToLongTask<K,V> nextRight,
6305 >             ObjectToLong<? super V> transformer,
6306 >             long basis,
6307 >             LongByLongToLong reducer) {
6308 >            super(m, p, b); this.nextRight = nextRight;
6309 >            this.transformer = transformer;
6310 >            this.basis = basis; this.reducer = reducer;
6311 >        }
6312 >        public final Long getRawResult() { return result; }
6313 >        @SuppressWarnings("unchecked") public final void compute() {
6314 >            final ObjectToLong<? super V> transformer =
6315 >                this.transformer;
6316 >            final LongByLongToLong reducer = this.reducer;
6317 >            if (transformer == null || reducer == null)
6318 >                throw new NullPointerException();
6319 >            long r = this.basis;
6320 >            for (int b; (b = preSplit()) > 0;)
6321 >                (rights = new MapReduceValuesToLongTask<K,V>
6322 >                 (map, this, b, rights, transformer, r, reducer)).fork();
6323 >            Object v;
6324 >            while ((v = advance()) != null)
6325 >                r = reducer.apply(r, transformer.apply((V)v));
6326 >            result = r;
6327 >            CountedCompleter<?> c;
6328 >            for (c = firstComplete(); c != null; c = c.nextComplete()) {
6329 >                MapReduceValuesToLongTask<K,V>
6330 >                    t = (MapReduceValuesToLongTask<K,V>)c,
6331 >                    s = t.rights;
6332 >                while (s != null) {
6333 >                    t.result = reducer.apply(t.result, s.result);
6334 >                    s = t.rights = s.nextRight;
6335 >                }
6336 >            }
6337 >        }
6338 >    }
6339 >
6340 >    @SuppressWarnings("serial") static final class MapReduceEntriesToLongTask<K,V>
6341 >        extends Traverser<K,V,Long> {
6342 >        final ObjectToLong<Map.Entry<K,V>> transformer;
6343 >        final LongByLongToLong reducer;
6344 >        final long basis;
6345 >        long result;
6346 >        MapReduceEntriesToLongTask<K,V> rights, nextRight;
6347 >        MapReduceEntriesToLongTask
6348 >            (ConcurrentHashMapV8<K,V> m, Traverser<K,V,?> p, int b,
6349 >             MapReduceEntriesToLongTask<K,V> nextRight,
6350 >             ObjectToLong<Map.Entry<K,V>> transformer,
6351 >             long basis,
6352 >             LongByLongToLong reducer) {
6353 >            super(m, p, b); this.nextRight = nextRight;
6354 >            this.transformer = transformer;
6355 >            this.basis = basis; this.reducer = reducer;
6356 >        }
6357 >        public final Long getRawResult() { return result; }
6358 >        @SuppressWarnings("unchecked") public final void compute() {
6359 >            final ObjectToLong<Map.Entry<K,V>> transformer =
6360 >                this.transformer;
6361 >            final LongByLongToLong reducer = this.reducer;
6362 >            if (transformer == null || reducer == null)
6363 >                throw new NullPointerException();
6364 >            long r = this.basis;
6365 >            for (int b; (b = preSplit()) > 0;)
6366 >                (rights = new MapReduceEntriesToLongTask<K,V>
6367 >                 (map, this, b, rights, transformer, r, reducer)).fork();
6368 >            Object v;
6369 >            while ((v = advance()) != null)
6370 >                r = reducer.apply(r, transformer.apply(entryFor((K)nextKey, (V)v)));
6371 >            result = r;
6372 >            CountedCompleter<?> c;
6373 >            for (c = firstComplete(); c != null; c = c.nextComplete()) {
6374 >                MapReduceEntriesToLongTask<K,V>
6375 >                    t = (MapReduceEntriesToLongTask<K,V>)c,
6376 >                    s = t.rights;
6377 >                while (s != null) {
6378 >                    t.result = reducer.apply(t.result, s.result);
6379 >                    s = t.rights = s.nextRight;
6380 >                }
6381 >            }
6382 >        }
6383 >    }
6384 >
6385 >    @SuppressWarnings("serial") static final class MapReduceMappingsToLongTask<K,V>
6386 >        extends Traverser<K,V,Long> {
6387 >        final ObjectByObjectToLong<? super K, ? super V> transformer;
6388 >        final LongByLongToLong reducer;
6389 >        final long basis;
6390 >        long result;
6391 >        MapReduceMappingsToLongTask<K,V> rights, nextRight;
6392 >        MapReduceMappingsToLongTask
6393 >            (ConcurrentHashMapV8<K,V> m, Traverser<K,V,?> p, int b,
6394 >             MapReduceMappingsToLongTask<K,V> nextRight,
6395 >             ObjectByObjectToLong<? super K, ? super V> transformer,
6396 >             long basis,
6397 >             LongByLongToLong reducer) {
6398 >            super(m, p, b); this.nextRight = nextRight;
6399 >            this.transformer = transformer;
6400 >            this.basis = basis; this.reducer = reducer;
6401 >        }
6402 >        public final Long getRawResult() { return result; }
6403 >        @SuppressWarnings("unchecked") public final void compute() {
6404 >            final ObjectByObjectToLong<? super K, ? super V> transformer =
6405 >                this.transformer;
6406 >            final LongByLongToLong reducer = this.reducer;
6407 >            if (transformer == null || reducer == null)
6408 >                throw new NullPointerException();
6409 >            long r = this.basis;
6410 >            for (int b; (b = preSplit()) > 0;)
6411 >                (rights = new MapReduceMappingsToLongTask<K,V>
6412 >                 (map, this, b, rights, transformer, r, reducer)).fork();
6413 >            Object v;
6414 >            while ((v = advance()) != null)
6415 >                r = reducer.apply(r, transformer.apply((K)nextKey, (V)v));
6416 >            result = r;
6417 >            CountedCompleter<?> c;
6418 >            for (c = firstComplete(); c != null; c = c.nextComplete()) {
6419 >                MapReduceMappingsToLongTask<K,V>
6420 >                    t = (MapReduceMappingsToLongTask<K,V>)c,
6421 >                    s = t.rights;
6422 >                while (s != null) {
6423 >                    t.result = reducer.apply(t.result, s.result);
6424 >                    s = t.rights = s.nextRight;
6425 >                }
6426 >            }
6427 >        }
6428 >    }
6429 >
6430 >    @SuppressWarnings("serial") static final class MapReduceKeysToIntTask<K,V>
6431 >        extends Traverser<K,V,Integer> {
6432 >        final ObjectToInt<? super K> transformer;
6433 >        final IntByIntToInt reducer;
6434 >        final int basis;
6435 >        int result;
6436 >        MapReduceKeysToIntTask<K,V> rights, nextRight;
6437 >        MapReduceKeysToIntTask
6438 >            (ConcurrentHashMapV8<K,V> m, Traverser<K,V,?> p, int b,
6439 >             MapReduceKeysToIntTask<K,V> nextRight,
6440 >             ObjectToInt<? super K> transformer,
6441 >             int basis,
6442 >             IntByIntToInt reducer) {
6443 >            super(m, p, b); this.nextRight = nextRight;
6444 >            this.transformer = transformer;
6445 >            this.basis = basis; this.reducer = reducer;
6446 >        }
6447 >        public final Integer getRawResult() { return result; }
6448 >        @SuppressWarnings("unchecked") public final void compute() {
6449 >            final ObjectToInt<? super K> transformer =
6450 >                this.transformer;
6451 >            final IntByIntToInt reducer = this.reducer;
6452 >            if (transformer == null || reducer == null)
6453 >                throw new NullPointerException();
6454 >            int r = this.basis;
6455 >            for (int b; (b = preSplit()) > 0;)
6456 >                (rights = new MapReduceKeysToIntTask<K,V>
6457 >                 (map, this, b, rights, transformer, r, reducer)).fork();
6458 >            while (advance() != null)
6459 >                r = reducer.apply(r, transformer.apply((K)nextKey));
6460 >            result = r;
6461 >            CountedCompleter<?> c;
6462 >            for (c = firstComplete(); c != null; c = c.nextComplete()) {
6463 >                MapReduceKeysToIntTask<K,V>
6464 >                    t = (MapReduceKeysToIntTask<K,V>)c,
6465 >                    s = t.rights;
6466 >                while (s != null) {
6467 >                    t.result = reducer.apply(t.result, s.result);
6468 >                    s = t.rights = s.nextRight;
6469 >                }
6470 >            }
6471 >        }
6472 >    }
6473 >
6474 >    @SuppressWarnings("serial") static final class MapReduceValuesToIntTask<K,V>
6475 >        extends Traverser<K,V,Integer> {
6476 >        final ObjectToInt<? super V> transformer;
6477 >        final IntByIntToInt reducer;
6478 >        final int basis;
6479 >        int result;
6480 >        MapReduceValuesToIntTask<K,V> rights, nextRight;
6481 >        MapReduceValuesToIntTask
6482 >            (ConcurrentHashMapV8<K,V> m, Traverser<K,V,?> p, int b,
6483 >             MapReduceValuesToIntTask<K,V> nextRight,
6484 >             ObjectToInt<? super V> transformer,
6485 >             int basis,
6486 >             IntByIntToInt reducer) {
6487 >            super(m, p, b); this.nextRight = nextRight;
6488 >            this.transformer = transformer;
6489 >            this.basis = basis; this.reducer = reducer;
6490 >        }
6491 >        public final Integer getRawResult() { return result; }
6492 >        @SuppressWarnings("unchecked") public final void compute() {
6493 >            final ObjectToInt<? super V> transformer =
6494 >                this.transformer;
6495 >            final IntByIntToInt reducer = this.reducer;
6496 >            if (transformer == null || reducer == null)
6497 >                throw new NullPointerException();
6498 >            int r = this.basis;
6499 >            for (int b; (b = preSplit()) > 0;)
6500 >                (rights = new MapReduceValuesToIntTask<K,V>
6501 >                 (map, this, b, rights, transformer, r, reducer)).fork();
6502 >            Object v;
6503 >            while ((v = advance()) != null)
6504 >                r = reducer.apply(r, transformer.apply((V)v));
6505 >            result = r;
6506 >            CountedCompleter<?> c;
6507 >            for (c = firstComplete(); c != null; c = c.nextComplete()) {
6508 >                MapReduceValuesToIntTask<K,V>
6509 >                    t = (MapReduceValuesToIntTask<K,V>)c,
6510 >                    s = t.rights;
6511 >                while (s != null) {
6512 >                    t.result = reducer.apply(t.result, s.result);
6513 >                    s = t.rights = s.nextRight;
6514 >                }
6515 >            }
6516 >        }
6517 >    }
6518 >
6519 >    @SuppressWarnings("serial") static final class MapReduceEntriesToIntTask<K,V>
6520 >        extends Traverser<K,V,Integer> {
6521 >        final ObjectToInt<Map.Entry<K,V>> transformer;
6522 >        final IntByIntToInt reducer;
6523 >        final int basis;
6524 >        int result;
6525 >        MapReduceEntriesToIntTask<K,V> rights, nextRight;
6526 >        MapReduceEntriesToIntTask
6527 >            (ConcurrentHashMapV8<K,V> m, Traverser<K,V,?> p, int b,
6528 >             MapReduceEntriesToIntTask<K,V> nextRight,
6529 >             ObjectToInt<Map.Entry<K,V>> transformer,
6530 >             int basis,
6531 >             IntByIntToInt reducer) {
6532 >            super(m, p, b); this.nextRight = nextRight;
6533 >            this.transformer = transformer;
6534 >            this.basis = basis; this.reducer = reducer;
6535 >        }
6536 >        public final Integer getRawResult() { return result; }
6537 >        @SuppressWarnings("unchecked") public final void compute() {
6538 >            final ObjectToInt<Map.Entry<K,V>> transformer =
6539 >                this.transformer;
6540 >            final IntByIntToInt reducer = this.reducer;
6541 >            if (transformer == null || reducer == null)
6542 >                throw new NullPointerException();
6543 >            int r = this.basis;
6544 >            for (int b; (b = preSplit()) > 0;)
6545 >                (rights = new MapReduceEntriesToIntTask<K,V>
6546 >                 (map, this, b, rights, transformer, r, reducer)).fork();
6547 >            Object v;
6548 >            while ((v = advance()) != null)
6549 >                r = reducer.apply(r, transformer.apply(entryFor((K)nextKey, (V)v)));
6550 >            result = r;
6551 >            CountedCompleter<?> c;
6552 >            for (c = firstComplete(); c != null; c = c.nextComplete()) {
6553 >                MapReduceEntriesToIntTask<K,V>
6554 >                    t = (MapReduceEntriesToIntTask<K,V>)c,
6555 >                    s = t.rights;
6556 >                while (s != null) {
6557 >                    t.result = reducer.apply(t.result, s.result);
6558 >                    s = t.rights = s.nextRight;
6559 >                }
6560 >            }
6561 >        }
6562 >    }
6563 >
6564 >    @SuppressWarnings("serial") static final class MapReduceMappingsToIntTask<K,V>
6565 >        extends Traverser<K,V,Integer> {
6566 >        final ObjectByObjectToInt<? super K, ? super V> transformer;
6567 >        final IntByIntToInt reducer;
6568 >        final int basis;
6569 >        int result;
6570 >        MapReduceMappingsToIntTask<K,V> rights, nextRight;
6571 >        MapReduceMappingsToIntTask
6572 >            (ConcurrentHashMapV8<K,V> m, Traverser<K,V,?> p, int b,
6573 >             MapReduceMappingsToIntTask<K,V> nextRight,
6574 >             ObjectByObjectToInt<? super K, ? super V> transformer,
6575 >             int basis,
6576 >             IntByIntToInt reducer) {
6577 >            super(m, p, b); this.nextRight = nextRight;
6578 >            this.transformer = transformer;
6579 >            this.basis = basis; this.reducer = reducer;
6580 >        }
6581 >        public final Integer getRawResult() { return result; }
6582 >        @SuppressWarnings("unchecked") public final void compute() {
6583 >            final ObjectByObjectToInt<? super K, ? super V> transformer =
6584 >                this.transformer;
6585 >            final IntByIntToInt reducer = this.reducer;
6586 >            if (transformer == null || reducer == null)
6587 >                throw new NullPointerException();
6588 >            int r = this.basis;
6589 >            for (int b; (b = preSplit()) > 0;)
6590 >                (rights = new MapReduceMappingsToIntTask<K,V>
6591 >                 (map, this, b, rights, transformer, r, reducer)).fork();
6592 >            Object v;
6593 >            while ((v = advance()) != null)
6594 >                r = reducer.apply(r, transformer.apply((K)nextKey, (V)v));
6595 >            result = r;
6596 >            CountedCompleter<?> c;
6597 >            for (c = firstComplete(); c != null; c = c.nextComplete()) {
6598 >                MapReduceMappingsToIntTask<K,V>
6599 >                    t = (MapReduceMappingsToIntTask<K,V>)c,
6600 >                    s = t.rights;
6601 >                while (s != null) {
6602 >                    t.result = reducer.apply(t.result, s.result);
6603 >                    s = t.rights = s.nextRight;
6604 >                }
6605 >            }
6606          }
6607      }
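All of the MapReduce*ToLong and MapReduce*ToInt tasks added above share one shape: preSplit() forks a chain of right-hand subtasks linked through the rights/nextRight fields, advance() drives a purely local scalar reduction, and the firstComplete()/nextComplete() loop folds each finished right-hand result back into its parent without ever blocking in join(). As a rough, self-contained illustration of that pattern (not taken from this file, and using the plain JDK 8 java.util.concurrent.CountedCompleter rather than the jsr166e backport), the sketch below sums a long[] the same way; the class name SumTask and the 1024-element leaf threshold are invented for the example.

    import java.util.concurrent.CountedCompleter;
    import java.util.concurrent.ForkJoinPool;

    // Illustrative only: right-spine splitting with a rights/nextRight chain,
    // local reduction, then result merging via firstComplete()/nextComplete().
    class SumTask extends CountedCompleter<Long> {
        final long[] a; final int lo, hi;
        long result;
        SumTask rights, nextRight;          // forked right-hand subtasks
        SumTask(CountedCompleter<?> p, SumTask nextRight,
                long[] a, int lo, int hi) {
            super(p); this.nextRight = nextRight;
            this.a = a; this.lo = lo; this.hi = hi;
        }
        public Long getRawResult() { return result; }
        public void compute() {
            int l = lo, h = hi;
            while (h - l > 1024) {          // split off the right half, keep the left
                int mid = (l + h) >>> 1;
                addToPendingCount(1);
                (rights = new SumTask(this, rights, a, mid, h)).fork();
                h = mid;
            }
            long r = 0L;
            for (int i = l; i < h; ++i)     // local reduction, no blocking
                r += a[i];
            result = r;
            // Fold completed right-hand results into each finished parent.
            for (CountedCompleter<?> c = firstComplete(); c != null; c = c.nextComplete()) {
                SumTask t = (SumTask)c;
                for (SumTask s = t.rights; s != null; s = t.rights = s.nextRight)
                    t.result += s.result;
            }
        }
    }

Usage would be along the lines of ForkJoinPool.commonPool().invoke(new SumTask(null, null, data, 0, data.length)). The important property, mirrored from the tasks above, is that merging happens in whichever worker completes last, so no thread parks waiting on a child.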
6608  
6609      // Unsafe mechanics
6610      private static final sun.misc.Unsafe UNSAFE;
6611      private static final long counterOffset;
6612 <    private static final long resizingOffset;
6612 >    private static final long sizeCtlOffset;
6613      private static final long ABASE;
6614      private static final int ASHIFT;
6615  
# Line 1579 | Line 6620 | public class ConcurrentHashMapV8<K, V>
6620              Class<?> k = ConcurrentHashMapV8.class;
6621              counterOffset = UNSAFE.objectFieldOffset
6622                  (k.getDeclaredField("counter"));
6623 <            resizingOffset = UNSAFE.objectFieldOffset
6624 <                (k.getDeclaredField("resizing"));
6623 >            sizeCtlOffset = UNSAFE.objectFieldOffset
6624 >                (k.getDeclaredField("sizeCtl"));
6625              Class<?> sc = Node[].class;
6626              ABASE = UNSAFE.arrayBaseOffset(sc);
6627              ss = UNSAFE.arrayIndexScale(sc);
# Line 1619 | Line 6660 | public class ConcurrentHashMapV8<K, V>
6660              }
6661          }
6662      }
1622
6663   }
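The static initializer above caches raw field offsets (counter, and now sizeCtl in place of the old resizing offset) plus the Node[] array base offset and index scale so that hot paths can perform CAS operations directly through sun.misc.Unsafe. Purely as an illustration of that idiom, and not code from this file, a stripped-down version of the field-offset setup might look like the following; the class name CasDemo is invented, the reflective "theUnsafe" lookup is one common way to obtain the instance outside the JDK and is an assumption of this sketch, and it targets the JDK 8-era Unsafe API.

    import java.lang.reflect.Field;

    // Illustrative only: resolve a field's offset once, then CAS through it,
    // analogous to the sizeCtlOffset setup in the static initializer above.
    class CasDemo {
        private volatile int sizeCtl;              // control field to be CASed
        private static final sun.misc.Unsafe U;
        private static final long SIZECTL;
        static {
            try {
                Field f = sun.misc.Unsafe.class.getDeclaredField("theUnsafe");
                f.setAccessible(true);             // assumed fallback for non-JDK code
                U = (sun.misc.Unsafe) f.get(null);
                SIZECTL = U.objectFieldOffset(
                    CasDemo.class.getDeclaredField("sizeCtl"));
            } catch (Exception e) {
                throw new Error(e);
            }
        }
        boolean casSizeCtl(int expect, int update) {
            return U.compareAndSwapInt(this, SIZECTL, expect, update);
        }
    }

Caching the offsets in static finals keeps the reflective lookups out of the hot path, which is the same reason the file computes its counter, sizeCtl, ABASE, and ASHIFT constants once in its static block.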

Diff Legend

  Removed lines (shown with the old line number only, no marker)
+ Added lines
< Changed lines (old revision)
> Changed lines (new revision)