root/jsr166/jsr166/src/main/java/util/concurrent/ConcurrentHashMap.java

Comparing jsr166/src/main/java/util/concurrent/ConcurrentHashMap.java (file contents):
Revision 1.110 by jsr166, Wed Apr 27 14:06:30 2011 UTC vs.
Revision 1.275 by jsr166, Wed Sep 9 02:46:48 2015 UTC

# Line 5 | Line 5
5   */
6  
7   package java.util.concurrent;
8 < import java.util.concurrent.locks.*;
9 < import java.util.*;
8 >
9 > import java.io.ObjectStreamField;
10   import java.io.Serializable;
11 < import java.io.IOException;
12 < import java.io.ObjectInputStream;
13 < import java.io.ObjectOutputStream;
11 > import java.lang.reflect.ParameterizedType;
12 > import java.lang.reflect.Type;
13 > import java.util.AbstractMap;
14 > import java.util.Arrays;
15 > import java.util.Collection;
16 > import java.util.Enumeration;
17 > import java.util.HashMap;
18 > import java.util.Hashtable;
19 > import java.util.Iterator;
20 > import java.util.Map;
21 > import java.util.NoSuchElementException;
22 > import java.util.Set;
23 > import java.util.Spliterator;
24 > import java.util.concurrent.atomic.AtomicReference;
25 > import java.util.concurrent.locks.LockSupport;
26 > import java.util.concurrent.locks.ReentrantLock;
27 > import java.util.function.BiConsumer;
28 > import java.util.function.BiFunction;
29 > import java.util.function.Consumer;
30 > import java.util.function.DoubleBinaryOperator;
31 > import java.util.function.Function;
32 > import java.util.function.IntBinaryOperator;
33 > import java.util.function.LongBinaryOperator;
34 > import java.util.function.Predicate;
35 > import java.util.function.ToDoubleBiFunction;
36 > import java.util.function.ToDoubleFunction;
37 > import java.util.function.ToIntBiFunction;
38 > import java.util.function.ToIntFunction;
39 > import java.util.function.ToLongBiFunction;
40 > import java.util.function.ToLongFunction;
41 > import java.util.stream.Stream;
42  
43   /**
44   * A hash table supporting full concurrency of retrievals and
45 < * adjustable expected concurrency for updates. This class obeys the
45 > * high expected concurrency for updates. This class obeys the
46   * same functional specification as {@link java.util.Hashtable}, and
47   * includes versions of methods corresponding to each method of
48 < * <tt>Hashtable</tt>. However, even though all operations are
48 > * {@code Hashtable}. However, even though all operations are
49   * thread-safe, retrieval operations do <em>not</em> entail locking,
50   * and there is <em>not</em> any support for locking the entire table
51   * in a way that prevents all access.  This class is fully
52 < * interoperable with <tt>Hashtable</tt> in programs that rely on its
52 > * interoperable with {@code Hashtable} in programs that rely on its
53   * thread safety but not on its synchronization details.
54   *
55 < * <p> Retrieval operations (including <tt>get</tt>) generally do not
56 < * block, so may overlap with update operations (including
57 < * <tt>put</tt> and <tt>remove</tt>). Retrievals reflect the results
58 < * of the most recently <em>completed</em> update operations holding
59 < * upon their onset.  For aggregate operations such as <tt>putAll</tt>
60 < * and <tt>clear</tt>, concurrent retrievals may reflect insertion or
61 < * removal of only some entries.  Similarly, Iterators and
62 < * Enumerations return elements reflecting the state of the hash table
63 < * at some point at or since the creation of the iterator/enumeration.
64 < * They do <em>not</em> throw {@link ConcurrentModificationException}.
55 > * <p>Retrieval operations (including {@code get}) generally do not
56 > * block, so may overlap with update operations (including {@code put}
57 > * and {@code remove}). Retrievals reflect the results of the most
58 > * recently <em>completed</em> update operations holding upon their
59 > * onset. (More formally, an update operation for a given key bears a
60 > * <em>happens-before</em> relation with any (non-null) retrieval for
61 > * that key reporting the updated value.)  For aggregate operations
62 > * such as {@code putAll} and {@code clear}, concurrent retrievals may
63 > * reflect insertion or removal of only some entries.  Similarly,
64 > * Iterators, Spliterators and Enumerations return elements reflecting the
65 > * state of the hash table at some point at or since the creation of the
66 > * iterator/enumeration.  They do <em>not</em> throw {@link
67 > * java.util.ConcurrentModificationException ConcurrentModificationException}.
68   * However, iterators are designed to be used by only one thread at a time.
69 + * Bear in mind that the results of aggregate status methods including
70 + * {@code size}, {@code isEmpty}, and {@code containsValue} are typically
71 + * useful only when a map is not undergoing concurrent updates in other threads.
72 + * Otherwise the results of these methods reflect transient states
73 + * that may be adequate for monitoring or estimation purposes, but not
74 + * for program control.
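A concrete illustration of the per-key happens-before guarantee described above; this is a hedged sketch with a hypothetical Payload class (not part of this file), showing that a reader that observes a value published via put also observes all writes made before that put:

    import java.util.concurrent.ConcurrentHashMap;

    class HappensBeforeSketch {
        static final class Payload { int x; }        // fields deliberately non-final, non-volatile
        static final ConcurrentHashMap<String, Payload> map = new ConcurrentHashMap<>();

        static void writer() {
            Payload p = new Payload();
            p.x = 42;                                // happens-before the put below
            map.put("k", p);
        }

        static void reader() {
            Payload p = map.get("k");                // non-null only after a completed put
            if (p != null)
                assert p.x == 42;                    // visible via the put/get happens-before edge
        }
    }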
75 + *
76 + * <p>The table is dynamically expanded when there are too many
77 + * collisions (i.e., keys that have distinct hash codes but fall into
78 + * the same slot modulo the table size), with the expected average
79 + * effect of maintaining roughly two bins per mapping (corresponding
80 + * to a 0.75 load factor threshold for resizing). There may be much
81 + * variance around this average as mappings are added and removed, but
82 + * overall, this maintains a commonly accepted time/space tradeoff for
83 + * hash tables.  However, resizing this or any other kind of hash
84 + * table may be a relatively slow operation. When possible, it is a
85 + * good idea to provide a size estimate as an optional {@code
86 + * initialCapacity} constructor argument. An additional optional
87 + * {@code loadFactor} constructor argument provides a further means of
88 + * customizing initial table capacity by specifying the table density
89 + * to be used in calculating the amount of space to allocate for the
90 + * given number of elements.  Also, for compatibility with previous
91 + * versions of this class, constructors may optionally specify an
92 + * expected {@code concurrencyLevel} as an additional hint for
93 + * internal sizing.  Note that using many keys with exactly the same
94 + * {@code hashCode()} is a sure way to slow down performance of any
95 + * hash table. To ameliorate impact, when keys are {@link Comparable},
96 + * this class may use comparison order among keys to help break ties.
97 + *
98 + * <p>A {@link Set} projection of a ConcurrentHashMap may be created
99 + * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
100 + * (using {@link #keySet(Object)}) when only keys are of interest, and the
101 + * mapped values are (perhaps transiently) not used or all take the
102 + * same mapping value.
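A brief sketch (hypothetical names) of the two Set projections mentioned above:

    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    class KeySetSketch {
        public static void main(String[] args) {
            // Standalone concurrent set backed by a ConcurrentHashMap<E,Boolean>:
            Set<String> names = ConcurrentHashMap.newKeySet();
            names.add("alice");

            // View of an existing map in which additions map every key to a fixed value:
            ConcurrentHashMap<String, Boolean> flags = new ConcurrentHashMap<>();
            Set<String> view = flags.keySet(Boolean.TRUE);
            view.add("bob");                         // same as flags.put("bob", Boolean.TRUE)
            System.out.println(flags.get("bob"));    // prints: true
        }
    }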
103   *
104 < * <p> The allowed concurrency among update operations is guided by
105 < * the optional <tt>concurrencyLevel</tt> constructor argument
106 < * (default <tt>16</tt>), which is used as a hint for internal sizing.  The
107 < * table is internally partitioned to try to permit the indicated
108 < * number of concurrent updates without contention. Because placement
109 < * in hash tables is essentially random, the actual concurrency will
45 < * vary.  Ideally, you should choose a value to accommodate as many
46 < * threads as will ever concurrently modify the table. Using a
47 < * significantly higher value than you need can waste space and time,
48 < * and a significantly lower value can lead to thread contention. But
49 < * overestimates and underestimates within an order of magnitude do
50 < * not usually have much noticeable impact. A value of one is
51 < * appropriate when it is known that only one thread will modify and
52 < * all others will only read. Also, resizing this or any other kind of
53 < * hash table is a relatively slow operation, so, when possible, it is
54 < * a good idea to provide estimates of expected table sizes in
55 < * constructors.
104 > * <p>A ConcurrentHashMap can be used as a scalable frequency map (a
105 > * form of histogram or multiset) by using {@link
106 > * java.util.concurrent.atomic.LongAdder} values and initializing via
107 > * {@link #computeIfAbsent computeIfAbsent}. For example, to add a count
108 > * to a {@code ConcurrentHashMap<String,LongAdder> freqs}, you can use
109 > * {@code freqs.computeIfAbsent(key, k -> new LongAdder()).increment();}
110   *
111   * <p>This class and its views and iterators implement all of the
112   * <em>optional</em> methods of the {@link Map} and {@link Iterator}
113   * interfaces.
114   *
115 < * <p> Like {@link Hashtable} but unlike {@link HashMap}, this class
116 < * does <em>not</em> allow <tt>null</tt> to be used as a key or value.
115 > * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
116 > * does <em>not</em> allow {@code null} to be used as a key or value.
117 > *
118 > * <p>ConcurrentHashMaps support a set of sequential and parallel bulk
119 > * operations that, unlike most {@link Stream} methods, are designed
120 > * to be safely, and often sensibly, applied even with maps that are
121 > * being concurrently updated by other threads; for example, when
122 > * computing a snapshot summary of the values in a shared registry.
123 > * There are three kinds of operation, each with four forms, accepting
124 > * functions with Keys, Values, Entries, and (Key, Value) arguments
125 > * and/or return values. Because the elements of a ConcurrentHashMap
126 > * are not ordered in any particular way, and may be processed in
127 > * different orders in different parallel executions, the correctness
128 > * of supplied functions should not depend on any ordering, or on any
129 > * other objects or values that may transiently change while
130 > * computation is in progress; and except for forEach actions, should
131 > * ideally be side-effect-free. Bulk operations on {@link java.util.Map.Entry}
132 > * objects do not support method {@code setValue}.
133 > *
134 > * <ul>
135 > * <li> forEach: Perform a given action on each element.
136 > * A variant form applies a given transformation on each element
137 > * before performing the action.</li>
138 > *
139 > * <li> search: Return the first available non-null result of
140 > * applying a given function on each element; skipping further
141 > * search when a result is found.</li>
142 > *
143 > * <li> reduce: Accumulate each element.  The supplied reduction
144 > * function cannot rely on ordering (more formally, it should be
145 > * both associative and commutative).  There are five variants:
146 > *
147 > * <ul>
148 > *
149 > * <li> Plain reductions. (There is not a form of this method for
150 > * (key, value) function arguments since there is no corresponding
151 > * return type.)</li>
152 > *
153 > * <li> Mapped reductions that accumulate the results of a given
154 > * function applied to each element.</li>
155 > *
156 > * <li> Reductions to scalar doubles, longs, and ints, using a
157 > * given basis value.</li>
158 > *
159 > * </ul>
160 > * </li>
161 > * </ul>
162 > *
163 > * <p>These bulk operations accept a {@code parallelismThreshold}
164 > * argument. Methods proceed sequentially if the current map size is
165 > * estimated to be less than the given threshold. Using a value of
166 > * {@code Long.MAX_VALUE} suppresses all parallelism.  Using a value
167 > * of {@code 1} results in maximal parallelism by partitioning into
168 > * enough subtasks to fully utilize the {@link
169 > * ForkJoinPool#commonPool()} that is used for all parallel
170 > * computations. Normally, you would initially choose one of these
171 > * extreme values, and then measure performance of using in-between
172 > * values that trade off overhead versus throughput.
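A hedged sketch of choosing the threshold, using two of the bulk methods the class provides (map contents are assumed, not shown):

    import java.util.concurrent.ConcurrentHashMap;

    class ThresholdSketch {
        static void demo(ConcurrentHashMap<String, Long> map) {
            // Threshold Long.MAX_VALUE: always sequential, regardless of map size.
            map.forEach(Long.MAX_VALUE, (k, v) -> System.out.println(k + "=" + v));

            // Threshold 1: maximal parallelism in the ForkJoinPool common pool.
            String hit = map.searchKeys(1L, k -> k.startsWith("x") ? k : null);
            System.out.println("first key starting with x (if any): " + hit);
        }
    }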
173 > *
174 > * <p>The concurrency properties of bulk operations follow
175 > * from those of ConcurrentHashMap: Any non-null result returned
176 > * from {@code get(key)} and related access methods bears a
177 > * happens-before relation with the associated insertion or
178 > * update.  The result of any bulk operation reflects the
179 > * composition of these per-element relations (but is not
180 > * necessarily atomic with respect to the map as a whole unless it
181 > * is somehow known to be quiescent).  Conversely, because keys
182 > * and values in the map are never null, null serves as a reliable
183 > * atomic indicator of the current lack of any result.  To
184 > * maintain this property, null serves as an implicit basis for
185 > * all non-scalar reduction operations. For the double, long, and
186 > * int versions, the basis should be one that, when combined with
187 > * any other value, returns that other value (more formally, it
188 > * should be the identity element for the reduction). Most common
189 > * reductions have these properties; for example, computing a sum
190 > * with basis 0 or a minimum with basis MAX_VALUE.
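For instance, under the identity-basis rule above, a sum uses basis 0 and a minimum uses Long.MAX_VALUE; a hedged fragment, with `map` a ConcurrentHashMap<String, Long> as in the earlier sketch:

    long sum = map.reduceValuesToLong(1L, Long::longValue, 0L, Long::sum);
    long min = map.reduceValuesToLong(1L, Long::longValue, Long.MAX_VALUE, Math::min);
    // An empty map yields the basis itself: 0 for the sum, Long.MAX_VALUE for the min.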
191 > *
192 > * <p>Search and transformation functions provided as arguments
193 > * should similarly return null to indicate the lack of any result
194 > * (in which case it is not used). In the case of mapped
195 > * reductions, this also enables transformations to serve as
196 > * filters, returning null (or, in the case of primitive
197 > * specializations, the identity basis) if the element should not
198 > * be combined. You can create compound transformations and
199 > * filterings by composing them yourself under this "null means
200 > * there is nothing there now" rule before using them in search or
201 > * reduce operations.
202 > *
203 > * <p>Methods accepting and/or returning Entry arguments maintain
204 > * key-value associations. They may be useful for example when
205 > * finding the key for the greatest value. Note that "plain" Entry
206 > * arguments can be supplied using {@code new
207 > * AbstractMap.SimpleEntry(k,v)}.
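A hedged fragment showing an Entry-based reduction, per the paragraph above, that finds the key of the greatest value (assumes Integer values and an imported java.util.Map; returns null for an empty map):

    ConcurrentHashMap<String, Integer> scores = new ConcurrentHashMap<>();
    Map.Entry<String, Integer> best =
        scores.reduceEntries(1L, (e1, e2) -> e1.getValue() >= e2.getValue() ? e1 : e2);
    String keyOfMax = (best == null) ? null : best.getKey();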
208 > *
209 > * <p>Bulk operations may complete abruptly, throwing an
210 > * exception encountered in the application of a supplied
211 > * function. Bear in mind when handling such exceptions that other
212 > * concurrently executing functions could also have thrown
213 > * exceptions, or would have done so if the first exception had
214 > * not occurred.
215 > *
216 > * <p>Speedups for parallel compared to sequential forms are common
217 > * but not guaranteed.  Parallel operations involving brief functions
218 > * on small maps may execute more slowly than sequential forms if the
219 > * underlying work to parallelize the computation is more expensive
220 > * than the computation itself.  Similarly, parallelization may not
221 > * lead to much actual parallelism if all processors are busy
222 > * performing unrelated tasks.
223 > *
224 > * <p>All arguments to all task methods must be non-null.
225   *
226   * <p>This class is a member of the
227   * <a href="{@docRoot}/../technotes/guides/collections/index.html">
# Line 70 | Line 232 | import java.io.ObjectOutputStream;
232   * @param <K> the type of keys maintained by this map
233   * @param <V> the type of mapped values
234   */
235 < public class ConcurrentHashMap<K, V> extends AbstractMap<K, V>
236 <        implements ConcurrentMap<K, V>, Serializable {
235 > public class ConcurrentHashMap<K,V> extends AbstractMap<K,V>
236 >    implements ConcurrentMap<K,V>, Serializable {
237      private static final long serialVersionUID = 7249069246763182397L;
238  
239      /*
240 <     * The basic strategy is to subdivide the table among Segments,
241 <     * each of which itself is a concurrently readable hash table.  To
242 <     * reduce footprint, all but one segments are constructed only
243 <     * when first needed (see ensureSegment). To maintain visibility
244 <     * in the presence of lazy construction, accesses to segments as
245 <     * well as elements of segment's table must use volatile access,
246 <     * which is done via Unsafe within methods segmentAt etc
247 <     * below. These provide the functionality of AtomicReferenceArrays
248 <     * but reduce the levels of indirection. Additionally,
249 <     * volatile-writes of table elements and entry "next" fields
250 <     * within locked operations use the cheaper "lazySet" forms of
251 <     * writes (via putOrderedObject) because these writes are always
252 <     * followed by lock releases that maintain sequential consistency
253 <     * of table updates.
254 <     *
255 <     * Historical note: The previous version of this class relied
256 <     * heavily on "final" fields, which avoided some volatile reads at
257 <     * the expense of a large initial footprint.  Some remnants of
258 <     * that design (including forced construction of segment 0) exist
259 <     * to ensure serialization compatibility.
240 >     * Overview:
241 >     *
242 >     * The primary design goal of this hash table is to maintain
243 >     * concurrent readability (typically method get(), but also
244 >     * iterators and related methods) while minimizing update
245 >     * contention. Secondary goals are to keep space consumption about
246 >     * the same or better than java.util.HashMap, and to support high
247 >     * initial insertion rates on an empty table by many threads.
248 >     *
249 >     * This map usually acts as a binned (bucketed) hash table.  Each
250 >     * key-value mapping is held in a Node.  Most nodes are instances
251 >     * of the basic Node class with hash, key, value, and next
252 >     * fields. However, various subclasses exist: TreeNodes are
253 >     * arranged in balanced trees, not lists.  TreeBins hold the roots
254 >     * of sets of TreeNodes. ForwardingNodes are placed at the heads
255 >     * of bins during resizing. ReservationNodes are used as
256 >     * placeholders while establishing values in computeIfAbsent and
257 >     * related methods.  The types TreeBin, ForwardingNode, and
258 >     * ReservationNode do not hold normal user keys, values, or
259 >     * hashes, and are readily distinguishable during search etc
260 >     * because they have negative hash fields and null key and value
261 >     * fields. (These special nodes are either uncommon or transient,
262 >     * so the impact of carrying around some unused fields is
263 >     * insignificant.)
264 >     *
265 >     * The table is lazily initialized to a power-of-two size upon the
266 >     * first insertion.  Each bin in the table normally contains a
267 >     * list of Nodes (most often, the list has only zero or one Node).
268 >     * Table accesses require volatile/atomic reads, writes, and
269 >     * CASes.  Because there is no other way to arrange this without
270 >     * adding further indirections, we use intrinsics
271 >     * (sun.misc.Unsafe) operations.
272 >     *
273 >     * We use the top (sign) bit of Node hash fields for control
274 >     * purposes -- it is available anyway because of addressing
275 >     * constraints.  Nodes with negative hash fields are specially
276 >     * handled or ignored in map methods.
277 >     *
278 >     * Insertion (via put or its variants) of the first node in an
279 >     * empty bin is performed by just CASing it to the bin.  This is
280 >     * by far the most common case for put operations under most
281 >     * key/hash distributions.  Other update operations (insert,
282 >     * delete, and replace) require locks.  We do not want to waste
283 >     * the space required to associate a distinct lock object with
284 >     * each bin, so instead use the first node of a bin list itself as
285 >     * a lock. Locking support for these locks relies on builtin
286 >     * "synchronized" monitors.
287 >     *
288 >     * Using the first node of a list as a lock does not by itself
289 >     * suffice though: When a node is locked, any update must first
290 >     * validate that it is still the first node after locking it, and
291 >     * retry if not. Because new nodes are always appended to lists,
292 >     * once a node is first in a bin, it remains first until deleted
293 >     * or the bin becomes invalidated (upon resizing).
294 >     *
295 >     * The main disadvantage of per-bin locks is that other update
296 >     * operations on other nodes in a bin list protected by the same
297 >     * lock can stall, for example when user equals() or mapping
298 >     * functions take a long time.  However, statistically, under
299 >     * random hash codes, this is not a common problem.  Ideally, the
300 >     * frequency of nodes in bins follows a Poisson distribution
301 >     * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
302 >     * parameter of about 0.5 on average, given the resizing threshold
303 >     * of 0.75, although with a large variance because of resizing
304 >     * granularity. Ignoring variance, the expected occurrences of
305 >     * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
306 >     * first values are:
307 >     *
308 >     * 0:    0.60653066
309 >     * 1:    0.30326533
310 >     * 2:    0.07581633
311 >     * 3:    0.01263606
312 >     * 4:    0.00157952
313 >     * 5:    0.00015795
314 >     * 6:    0.00001316
315 >     * 7:    0.00000094
316 >     * 8:    0.00000006
317 >     * more: less than 1 in ten million
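The figures above follow directly from the stated Poisson formula; a small sketch that reproduces them:

    // P(k) = exp(-0.5) * 0.5^k / k!  -- probability a bin holds k nodes at threshold
    static double binSizeProbability(int k) {
        double p = Math.exp(-0.5);
        for (int i = 1; i <= k; ++i)
            p *= 0.5 / i;                // accumulate 0.5^k / k! incrementally
        return p;                        // e.g. binSizeProbability(8) ~= 6e-8
    }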
318 >     *
319 >     * Lock contention probability for two threads accessing distinct
320 >     * elements is roughly 1 / (8 * #elements) under random hashes.
321 >     *
322 >     * Actual hash code distributions encountered in practice
323 >     * sometimes deviate significantly from uniform randomness.  This
324 >     * includes the case when N > (1<<30), so some keys MUST collide.
325 >     * Similarly for dumb or hostile usages in which multiple keys are
326 >     * designed to have identical hash codes or ones that differ only
327 >     * in masked-out high bits. So we use a secondary strategy that
328 >     * applies when the number of nodes in a bin exceeds a
329 >     * threshold. These TreeBins use a balanced tree to hold nodes (a
330 >     * specialized form of red-black trees), bounding search time to
331 >     * O(log N).  Each search step in a TreeBin is at least twice as
332 >     * slow as in a regular list, but given that N cannot exceed
333 >     * (1<<64) (before running out of addresses) this bounds search
334 >     * steps, lock hold times, etc, to reasonable constants (roughly
335 >     * 100 nodes inspected per operation worst case) so long as keys
336 >     * are Comparable (which is very common -- String, Long, etc).
337 >     * TreeBin nodes (TreeNodes) also maintain the same "next"
338 >     * traversal pointers as regular nodes, so can be traversed in
339 >     * iterators in the same way.
340 >     *
341 >     * The table is resized when occupancy exceeds a percentage
342 >     * threshold (nominally, 0.75, but see below).  Any thread
343 >     * noticing an overfull bin may assist in resizing after the
344 >     * initiating thread allocates and sets up the replacement array.
345 >     * However, rather than stalling, these other threads may proceed
346 >     * with insertions etc.  The use of TreeBins shields us from the
347 >     * worst case effects of overfilling while resizes are in
348 >     * progress.  Resizing proceeds by transferring bins, one by one,
349 >     * from the table to the next table. However, threads claim small
350 >     * blocks of indices to transfer (via field transferIndex) before
351 >     * doing so, reducing contention.  A generation stamp in field
352 >     * sizeCtl ensures that resizings do not overlap. Because we are
353 >     * using power-of-two expansion, the elements from each bin must
354 >     * either stay at same index, or move with a power of two
355 >     * offset. We eliminate unnecessary node creation by catching
356 >     * cases where old nodes can be reused because their next fields
357 >     * won't change.  On average, only about one-sixth of them need
358 >     * cloning when a table doubles. The nodes they replace will be
359 >     * garbage collectable as soon as they are no longer referenced by
360 >     * any reader thread that may be in the midst of concurrently
361 >     * traversing table.  Upon transfer, the old table bin contains
362 >     * only a special forwarding node (with hash field "MOVED") that
363 >     * contains the next table as its key. On encountering a
364 >     * forwarding node, access and update operations restart, using
365 >     * the new table.
366 >     *
367 >     * Each bin transfer requires its bin lock, which can stall
368 >     * waiting for locks while resizing. However, because other
369 >     * threads can join in and help resize rather than contend for
370 >     * locks, average aggregate waits become shorter as resizing
371 >     * progresses.  The transfer operation must also ensure that all
372 >     * accessible bins in both the old and new table are usable by any
373 >     * traversal.  This is arranged in part by proceeding from the
374 >     * last bin (table.length - 1) up towards the first.  Upon seeing
375 >     * a forwarding node, traversals (see class Traverser) arrange to
376 >     * move to the new table without revisiting nodes.  To ensure that
377 >     * no intervening nodes are skipped even when moved out of order,
378 >     * a stack (see class TableStack) is created on first encounter of
379 >     * a forwarding node during a traversal, to maintain its place if
380 >     * later processing the current table. The need for these
381 >     * save/restore mechanics is relatively rare, but when one
382 >     * forwarding node is encountered, typically many more will be.
383 >     * So Traversers use a simple caching scheme to avoid creating so
384 >     * many new TableStack nodes. (Thanks to Peter Levart for
385 >     * suggesting use of a stack here.)
386 >     *
387 >     * The traversal scheme also applies to partial traversals of
388 >     * ranges of bins (via an alternate Traverser constructor)
389 >     * to support partitioned aggregate operations.  Also, read-only
390 >     * operations give up if ever forwarded to a null table, which
391 >     * provides support for shutdown-style clearing, which is also not
392 >     * currently implemented.
393 >     *
394 >     * Lazy table initialization minimizes footprint until first use,
395 >     * and also avoids resizings when the first operation is from a
396 >     * putAll, constructor with map argument, or deserialization.
397 >     * These cases attempt to override the initial capacity settings,
398 >     * but harmlessly fail to take effect in cases of races.
399 >     *
400 >     * The element count is maintained using a specialization of
401 >     * LongAdder. We need to incorporate a specialization rather than
402 >     * just use a LongAdder in order to access implicit
403 >     * contention-sensing that leads to creation of multiple
404 >     * CounterCells.  The counter mechanics avoid contention on
405 >     * updates but can encounter cache thrashing if read too
406 >     * frequently during concurrent access. To avoid reading so often,
407 >     * resizing under contention is attempted only upon adding to a
408 >     * bin already holding two or more nodes. Under uniform hash
409 >     * distributions, the probability of this occurring at threshold
410 >     * is around 13%, meaning that only about 1 in 8 puts check
411 >     * threshold (and after resizing, many fewer do so).
412 >     *
413 >     * TreeBins use a special form of comparison for search and
414 >     * related operations (which is the main reason we cannot use
415 >     * existing collections such as TreeMaps). TreeBins contain
416 >     * Comparable elements, but may contain others, as well as
417 >     * elements that are Comparable but not necessarily Comparable for
418 >     * the same T, so we cannot invoke compareTo among them. To handle
419 >     * this, the tree is ordered primarily by hash value, then by
420 >     * Comparable.compareTo order if applicable.  On lookup at a node,
421 >     * if elements are not comparable or compare as 0 then both left
422 >     * and right children may need to be searched in the case of tied
423 >     * hash values. (This corresponds to the full list search that
424 >     * would be necessary if all elements were non-Comparable and had
425 >     * tied hashes.) On insertion, to keep a total ordering (or as
426 >     * close as is required here) across rebalancings, we compare
427 >     * classes and identityHashCodes as tie-breakers. The red-black
428 >     * balancing code is updated from pre-jdk-collections
429 >     * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
430 >     * based in turn on Cormen, Leiserson, and Rivest "Introduction to
431 >     * Algorithms" (CLR).
432 >     *
433 >     * TreeBins also require an additional locking mechanism.  While
434 >     * list traversal is always possible by readers even during
435 >     * updates, tree traversal is not, mainly because of tree-rotations
436 >     * that may change the root node and/or its linkages.  TreeBins
437 >     * include a simple read-write lock mechanism parasitic on the
438 >     * main bin-synchronization strategy: Structural adjustments
439 >     * associated with an insertion or removal are already bin-locked
440 >     * (and so cannot conflict with other writers) but must wait for
441 >     * ongoing readers to finish. Since there can be only one such
442 >     * waiter, we use a simple scheme using a single "waiter" field to
443 >     * block writers.  However, readers need never block.  If the root
444 >     * lock is held, they proceed along the slow traversal path (via
445 >     * next-pointers) until the lock becomes available or the list is
446 >     * exhausted, whichever comes first. These cases are not fast, but
447 >     * maximize aggregate expected throughput.
448 >     *
449 >     * Maintaining API and serialization compatibility with previous
450 >     * versions of this class introduces several oddities. Mainly: We
451 >     * leave untouched but unused constructor arguments referring to
452 >     * concurrencyLevel. We accept a loadFactor constructor argument,
453 >     * but apply it only to initial table capacity (which is the only
454 >     * time that we can guarantee to honor it.) We also declare an
455 >     * unused "Segment" class that is instantiated in minimal form
456 >     * only when serializing.
457 >     *
458 >     * Also, solely for compatibility with previous versions of this
459 >     * class, it extends AbstractMap, even though all of its methods
460 >     * are overridden, so it is just useless baggage.
461 >     *
462 >     * This file is organized to make things a little easier to follow
463 >     * while reading than they might otherwise: First the main static
464 >     * declarations and utilities, then fields, then main public
465 >     * methods (with a few factorings of multiple public methods into
466 >     * internal ones), then sizing methods, trees, traversers, and
467 >     * bulk operations.
468       */
469  
470      /* ---------------- Constants -------------- */
471  
472      /**
473 <     * The default initial capacity for this table,
474 <     * used when not otherwise specified in a constructor.
473 >     * The largest possible table capacity.  This value must be
474 >     * exactly 1<<30 to stay within Java array allocation and indexing
475 >     * bounds for power of two table sizes, and is further required
476 >     * because the top two bits of 32bit hash fields are used for
477 >     * control purposes.
478       */
479 <    static final int DEFAULT_INITIAL_CAPACITY = 16;
479 >    private static final int MAXIMUM_CAPACITY = 1 << 30;
480  
481      /**
482 <     * The default load factor for this table, used when not
483 <     * otherwise specified in a constructor.
482 >     * The default initial table capacity.  Must be a power of 2
483 >     * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
484       */
485 <    static final float DEFAULT_LOAD_FACTOR = 0.75f;
485 >    private static final int DEFAULT_CAPACITY = 16;
486  
487      /**
488 <     * The default concurrency level for this table, used when not
489 <     * otherwise specified in a constructor.
488 >     * The largest possible (non-power of two) array size.
489 >     * Needed by toArray and related methods.
490       */
491 <    static final int DEFAULT_CONCURRENCY_LEVEL = 16;
491 >    static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
492  
493      /**
494 <     * The maximum capacity, used if a higher value is implicitly
495 <     * specified by either of the constructors with arguments.  MUST
123 <     * be a power of two <= 1<<30 to ensure that entries are indexable
124 <     * using ints.
494 >     * The default concurrency level for this table. Unused but
495 >     * defined for compatibility with previous versions of this class.
496       */
497 <    static final int MAXIMUM_CAPACITY = 1 << 30;
497 >    private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
498  
499      /**
500 <     * The minimum capacity for per-segment tables.  Must be a power
501 <     * of two, at least two to avoid immediate resizing on next use
502 <     * after lazy construction.
500 >     * The load factor for this table. Overrides of this value in
501 >     * constructors affect only the initial table capacity.  The
502 >     * actual floating point value isn't normally used -- it is
503 >     * simpler to use expressions such as {@code n - (n >>> 2)} for
504 >     * the associated resizing threshold.
505       */
506 <    static final int MIN_SEGMENT_TABLE_CAPACITY = 2;
506 >    private static final float LOAD_FACTOR = 0.75f;
507  
508      /**
509 <     * The maximum number of segments to allow; used to bound
510 <     * constructor arguments. Must be power of two less than 1 << 24.
509 >     * The bin count threshold for using a tree rather than list for a
510 >     * bin.  Bins are converted to trees when adding an element to a
511 >     * bin with at least this many nodes. The value must be greater
512 >     * than 2, and should be at least 8 to mesh with assumptions in
513 >     * tree removal about conversion back to plain bins upon
514 >     * shrinkage.
515       */
516 <    static final int MAX_SEGMENTS = 1 << 16; // slightly conservative
516 >    static final int TREEIFY_THRESHOLD = 8;
517  
518      /**
519 <     * Number of unsynchronized retries in size and containsValue
520 <     * methods before resorting to locking. This is used to avoid
521 <     * unbounded retries if tables undergo continuous modification
145 <     * which would make it impossible to obtain an accurate result.
519 >     * The bin count threshold for untreeifying a (split) bin during a
520 >     * resize operation. Should be less than TREEIFY_THRESHOLD, and at
521 >     * most 6 to mesh with shrinkage detection under removal.
522       */
523 <    static final int RETRIES_BEFORE_LOCK = 2;
148 <
149 <    /* ---------------- Fields -------------- */
523 >    static final int UNTREEIFY_THRESHOLD = 6;
524  
525      /**
526 <     * Mask value for indexing into segments. The upper bits of a
527 <     * key's hash code are used to choose the segment.
526 >     * The smallest table capacity for which bins may be treeified.
527 >     * (Otherwise the table is resized if too many nodes in a bin.)
528 >     * The value should be at least 4 * TREEIFY_THRESHOLD to avoid
529 >     * conflicts between resizing and treeification thresholds.
530       */
531 <    final int segmentMask;
531 >    static final int MIN_TREEIFY_CAPACITY = 64;
532  
533      /**
534 <     * Shift value for indexing within segments.
534 >     * Minimum number of rebinnings per transfer step. Ranges are
535 >     * subdivided to allow multiple resizer threads.  This value
536 >     * serves as a lower bound to avoid resizers encountering
537 >     * excessive memory contention.  The value should be at least
538 >     * DEFAULT_CAPACITY.
539       */
540 <    final int segmentShift;
540 >    private static final int MIN_TRANSFER_STRIDE = 16;
541  
542      /**
543 <     * The segments, each of which is a specialized hash table.
543 >     * The number of bits used for generation stamp in sizeCtl.
544 >     * Must be at least 6 for 32bit arrays.
545       */
546 <    final Segment<K,V>[] segments;
546 >    private static int RESIZE_STAMP_BITS = 16;
547  
548 <    transient Set<K> keySet;
549 <    transient Set<Map.Entry<K,V>> entrySet;
550 <    transient Collection<V> values;
548 >    /**
549 >     * The maximum number of threads that can help resize.
550 >     * Must fit in 32 - RESIZE_STAMP_BITS bits.
551 >     */
552 >    private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;
553  
554      /**
555 <     * ConcurrentHashMap list entry. Note that this is never exported
556 <     * out as a user-visible Map.Entry.
555 >     * The bit shift for recording size stamp in sizeCtl.
556 >     */
557 >    private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;
558 >
559 >    /*
560 >     * Encodings for Node hash fields. See above for explanation.
561 >     */
562 >    static final int MOVED     = -1; // hash for forwarding nodes
563 >    static final int TREEBIN   = -2; // hash for roots of trees
564 >    static final int RESERVED  = -3; // hash for transient reservations
565 >    static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
566 >
567 >    /** Number of CPUS, to place bounds on some sizings */
568 >    static final int NCPU = Runtime.getRuntime().availableProcessors();
569 >
570 >    /** For serialization compatibility. */
571 >    private static final ObjectStreamField[] serialPersistentFields = {
572 >        new ObjectStreamField("segments", Segment[].class),
573 >        new ObjectStreamField("segmentMask", Integer.TYPE),
574 >        new ObjectStreamField("segmentShift", Integer.TYPE)
575 >    };
576 >
577 >    /* ---------------- Nodes -------------- */
578 >
579 >    /**
580 >     * Key-value entry.  This class is never exported out as a
581 >     * user-mutable Map.Entry (i.e., one supporting setValue; see
582 >     * MapEntry below), but can be used for read-only traversals used
583 >     * in bulk tasks.  Subclasses of Node with a negative hash field
584 >     * are special, and contain null keys and values (but are never
585 >     * exported).  Otherwise, keys and vals are never null.
586       */
587 <    static final class HashEntry<K,V> {
587 >    static class Node<K,V> implements Map.Entry<K,V> {
588          final int hash;
589          final K key;
590 <        volatile V value;
591 <        volatile HashEntry<K,V> next;
590 >        volatile V val;
591 >        volatile Node<K,V> next;
592  
593 <        HashEntry(int hash, K key, V value, HashEntry<K,V> next) {
593 >        Node(int hash, K key, V val, Node<K,V> next) {
594              this.hash = hash;
595              this.key = key;
596 <            this.value = value;
596 >            this.val = val;
597              this.next = next;
598          }
599  
600 +        public final K getKey()     { return key; }
601 +        public final V getValue()   { return val; }
602 +        public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
603 +        public final String toString() {
604 +            return Helpers.mapEntryToString(key, val);
605 +        }
606 +        public final V setValue(V value) {
607 +            throw new UnsupportedOperationException();
608 +        }
609 +
610 +        public final boolean equals(Object o) {
611 +            Object k, v, u; Map.Entry<?,?> e;
612 +            return ((o instanceof Map.Entry) &&
613 +                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
614 +                    (v = e.getValue()) != null &&
615 +                    (k == key || k.equals(key)) &&
616 +                    (v == (u = val) || v.equals(u)));
617 +        }
618 +
619          /**
620 <         * Sets next field with volatile write semantics.  (See above
190 <         * about use of putOrderedObject.)
620 >         * Virtualized support for map.get(); overridden in subclasses.
621           */
622 <        final void setNext(HashEntry<K,V> n) {
623 <            UNSAFE.putOrderedObject(this, nextOffset, n);
622 >        Node<K,V> find(int h, Object k) {
623 >            Node<K,V> e = this;
624 >            if (k != null) {
625 >                do {
626 >                    K ek;
627 >                    if (e.hash == h &&
628 >                        ((ek = e.key) == k || (ek != null && k.equals(ek))))
629 >                        return e;
630 >                } while ((e = e.next) != null);
631 >            }
632 >            return null;
633          }
634 +    }
635  
636 <        // Unsafe mechanics
637 <        static final sun.misc.Unsafe UNSAFE;
638 <        static final long nextOffset;
639 <        static {
640 <            try {
641 <                UNSAFE = sun.misc.Unsafe.getUnsafe();
642 <                Class k = HashEntry.class;
643 <                nextOffset = UNSAFE.objectFieldOffset
644 <                    (k.getDeclaredField("next"));
645 <            } catch (Exception e) {
646 <                throw new Error(e);
636 >    /* ---------------- Static utilities -------------- */
637 >
638 >    /**
639 >     * Spreads (XORs) higher bits of hash to lower and also forces top
640 >     * bit to 0. Because the table uses power-of-two masking, sets of
641 >     * hashes that vary only in bits above the current mask will
642 >     * always collide. (Among known examples are sets of Float keys
643 >     * holding consecutive whole numbers in small tables.)  So we
644 >     * apply a transform that spreads the impact of higher bits
645 >     * downward. There is a tradeoff between speed, utility, and
646 >     * quality of bit-spreading. Because many common sets of hashes
647 >     * are already reasonably distributed (so don't benefit from
648 >     * spreading), and because we use trees to handle large sets of
649 >     * collisions in bins, we just XOR some shifted bits in the
650 >     * cheapest possible way to reduce systematic lossage, as well as
651 >     * to incorporate impact of the highest bits that would otherwise
652 >     * never be used in index calculations because of table bounds.
653 >     */
654 >    static final int spread(int h) {
655 >        return (h ^ (h >>> 16)) & HASH_BITS;
656 >    }
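For orientation, the spread hash feeds the power-of-two index computation used by get and the update methods below; a fragment assuming a non-null table `tab`:

    int h = spread(key.hashCode());
    Node<K,V> first = tabAt(tab, (tab.length - 1) & h);   // head of the bin holding the key, if any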
657 >
658 >    /**
659 >     * Returns a power of two table size for the given desired capacity.
660 >     * See Hacker's Delight, sec 3.2
661 >     */
662 >    private static final int tableSizeFor(int c) {
663 >        int n = c - 1;
664 >        n |= n >>> 1;
665 >        n |= n >>> 2;
666 >        n |= n >>> 4;
667 >        n |= n >>> 8;
668 >        n |= n >>> 16;
669 >        return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
670 >    }
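A few hand-checked sample values for tableSizeFor, assuming the definition above:

    assert tableSizeFor(1) == 1;
    assert tableSizeFor(16) == 16;                // already a power of two
    assert tableSizeFor(17) == 32;                // rounded up to the next power of two
    assert tableSizeFor(1_000_000) == (1 << 20);  // 1,048,576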
671 >
672 >    /**
673 >     * Returns x's Class if it is of the form "class C implements
674 >     * Comparable<C>", else null.
675 >     */
676 >    static Class<?> comparableClassFor(Object x) {
677 >        if (x instanceof Comparable) {
678 >            Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
679 >            if ((c = x.getClass()) == String.class) // bypass checks
680 >                return c;
681 >            if ((ts = c.getGenericInterfaces()) != null) {
682 >                for (int i = 0; i < ts.length; ++i) {
683 >                    if (((t = ts[i]) instanceof ParameterizedType) &&
684 >                        ((p = (ParameterizedType)t).getRawType() ==
685 >                         Comparable.class) &&
686 >                        (as = p.getActualTypeArguments()) != null &&
687 >                        as.length == 1 && as[0] == c) // type arg is c
688 >                        return c;
689 >                }
690              }
691          }
692 +        return null;
693      }
694  
695      /**
696 <     * Gets the ith element of given table (if nonnull) with volatile
697 <     * read semantics. Note: This is manually integrated into a few
698 <     * performance-sensitive methods to reduce call overhead.
696 >     * Returns k.compareTo(x) if x matches kc (k's screened comparable
697 >     * class), else 0.
698 >     */
699 >    @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
700 >    static int compareComparables(Class<?> kc, Object k, Object x) {
701 >        return (x == null || x.getClass() != kc ? 0 :
702 >                ((Comparable)k).compareTo(x));
703 >    }
704 >
705 >    /* ---------------- Table element access -------------- */
706 >
707 >    /*
708 >     * Volatile access methods are used for table elements as well as
709 >     * elements of in-progress next table while resizing.  All uses of
710 >     * the tab arguments must be null checked by callers.  All callers
711 >     * also paranoically precheck that tab's length is not zero (or an
712 >     * equivalent check), thus ensuring that any index argument taking
713 >     * the form of a hash value anded with (length - 1) is a valid
714 >     * index.  Note that, to be correct wrt arbitrary concurrency
715 >     * errors by users, these checks must operate on local variables,
716 >     * which accounts for some odd-looking inline assignments below.
717 >     * Note that calls to setTabAt always occur within locked regions,
718 >     * and so in principle require only release ordering, not
719 >     * full volatile semantics, but are currently coded as volatile
720 >     * writes to be conservative.
721       */
722 +
723      @SuppressWarnings("unchecked")
724 <    static final <K,V> HashEntry<K,V> entryAt(HashEntry<K,V>[] tab, int i) {
725 <        return (tab == null) ? null :
219 <            (HashEntry<K,V>) UNSAFE.getObjectVolatile
220 <            (tab, ((long)i << TSHIFT) + TBASE);
724 >    static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
725 >        return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
726      }
727  
728 +    static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
729 +                                        Node<K,V> c, Node<K,V> v) {
730 +        return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
731 +    }
732 +
733 +    static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
734 +        U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v);
735 +    }
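A simplified, hedged sketch (not the actual putVal code) of how these accessors realize the lock-free first-node insertion described in the overview comment:

    Node<K,V>[] tab = table;
    int i = (tab.length - 1) & spread(key.hashCode());
    if (tabAt(tab, i) == null &&
        casTabAt(tab, i, null, new Node<K,V>(spread(key.hashCode()), key, value, null)))
        return;   // empty bin: node installed without locking
    // otherwise synchronize on the first node of the bin and append/replace under that lock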
736 +
737 +    /* ---------------- Fields -------------- */
738 +
739 +    /**
740 +     * The array of bins. Lazily initialized upon first insertion.
741 +     * Size is always a power of two. Accessed directly by iterators.
742 +     */
743 +    transient volatile Node<K,V>[] table;
744 +
745 +    /**
746 +     * The next table to use; non-null only while resizing.
747 +     */
748 +    private transient volatile Node<K,V>[] nextTable;
749 +
750 +    /**
751 +     * Base counter value, used mainly when there is no contention,
752 +     * but also as a fallback during table initialization
753 +     * races. Updated via CAS.
754 +     */
755 +    private transient volatile long baseCount;
756 +
757 +    /**
758 +     * Table initialization and resizing control.  When negative, the
759 +     * table is being initialized or resized: -1 for initialization,
760 +     * else -(1 + the number of active resizing threads).  Otherwise,
761 +     * when table is null, holds the initial table size to use upon
762 +     * creation, or 0 for default. After initialization, holds the
763 +     * next element count value upon which to resize the table.
764 +     */
765 +    private transient volatile int sizeCtl;
766 +
767 +    /**
768 +     * The next table index (plus one) to split while resizing.
769 +     */
770 +    private transient volatile int transferIndex;
771 +
772 +    /**
773 +     * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
774 +     */
775 +    private transient volatile int cellsBusy;
776 +
777      /**
778 <     * Sets the ith element of given table, with volatile write
225 <     * semantics. (See above about use of putOrderedObject.)
778 >     * Table of counter cells. When non-null, size is a power of 2.
779       */
780 <    static final <K,V> void setEntryAt(HashEntry<K,V>[] tab, int i,
781 <                                       HashEntry<K,V> e) {
782 <        UNSAFE.putOrderedObject(tab, ((long)i << TSHIFT) + TBASE, e);
780 >    private transient volatile CounterCell[] counterCells;
781 >
782 >    // views
783 >    private transient KeySetView<K,V> keySet;
784 >    private transient ValuesView<K,V> values;
785 >    private transient EntrySetView<K,V> entrySet;
786 >
787 >
788 >    /* ---------------- Public operations -------------- */
789 >
790 >    /**
791 >     * Creates a new, empty map with the default initial table size (16).
792 >     */
793 >    public ConcurrentHashMap() {
794      }
795  
796      /**
797 <     * Applies a supplemental hash function to a given hashCode, which
798 <     * defends against poor quality hash functions.  This is critical
799 <     * because ConcurrentHashMap uses power-of-two length hash tables,
800 <     * that otherwise encounter collisions for hashCodes that do not
801 <     * differ in lower or upper bits.
797 >     * Creates a new, empty map with an initial table size
798 >     * accommodating the specified number of elements without the need
799 >     * to dynamically resize.
800 >     *
801 >     * @param initialCapacity The implementation performs internal
802 >     * sizing to accommodate this many elements.
803 >     * @throws IllegalArgumentException if the initial capacity of
804 >     * elements is negative
805       */
806 <    private static int hash(int h) {
807 <        // Spread bits to regularize both segment and index locations,
808 <        // using variant of single-word Wang/Jenkins hash.
809 <        h += (h <<  15) ^ 0xffffcd7d;
810 <        h ^= (h >>> 10);
811 <        h += (h <<   3);
812 <        h ^= (h >>>  6);
246 <        h += (h <<   2) + (h << 14);
247 <        return h ^ (h >>> 16);
806 >    public ConcurrentHashMap(int initialCapacity) {
807 >        if (initialCapacity < 0)
808 >            throw new IllegalArgumentException();
809 >        int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
810 >                   MAXIMUM_CAPACITY :
811 >                   tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
812 >        this.sizeCtl = cap;
813      }
814  
815      /**
816 <     * Segments are specialized versions of hash tables.  This
817 <     * subclasses from ReentrantLock opportunistically, just to
818 <     * simplify some locking and avoid separate construction.
816 >     * Creates a new map with the same mappings as the given map.
817 >     *
818 >     * @param m the map
819       */
820 <    static final class Segment<K,V> extends ReentrantLock implements Serializable {
821 <        /*
822 <         * Segments maintain a table of entry lists that are always
823 <         * kept in a consistent state, so can be read (via volatile
259 <         * reads of segments and tables) without locking.  This
260 <         * requires replicating nodes when necessary during table
261 <         * resizing, so the old lists can be traversed by readers
262 <         * still using old version of table.
263 <         *
264 <         * This class defines only mutative methods requiring locking.
265 <         * Except as noted, the methods of this class perform the
266 <         * per-segment versions of ConcurrentHashMap methods.  (Other
267 <         * methods are integrated directly into ConcurrentHashMap
268 <         * methods.) These mutative methods use a form of controlled
269 <         * spinning on contention via methods scanAndLock and
270 <         * scanAndLockForPut. These intersperse tryLocks with
271 <         * traversals to locate nodes.  The main benefit is to absorb
272 <         * cache misses (which are very common for hash tables) while
273 <         * obtaining locks so that traversal is faster once
274 <         * acquired. We do not actually use the found nodes since they
275 <         * must be re-acquired under lock anyway to ensure sequential
276 <         * consistency of updates (and in any case may be undetectably
277 <         * stale), but they will normally be much faster to re-locate.
278 <         * Also, scanAndLockForPut speculatively creates a fresh node
279 <         * to use in put if no node is found.
280 <         */
820 >    public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
821 >        this.sizeCtl = DEFAULT_CAPACITY;
822 >        putAll(m);
823 >    }
824  
825 <        private static final long serialVersionUID = 2249069246763182397L;
825 >    /**
826 >     * Creates a new, empty map with an initial table size based on
827 >     * the given number of elements ({@code initialCapacity}) and
828 >     * initial table density ({@code loadFactor}).
829 >     *
830 >     * @param initialCapacity the initial capacity. The implementation
831 >     * performs internal sizing to accommodate this many elements,
832 >     * given the specified load factor.
833 >     * @param loadFactor the load factor (table density) for
834 >     * establishing the initial table size
835 >     * @throws IllegalArgumentException if the initial capacity of
836 >     * elements is negative or the load factor is nonpositive
837 >     *
838 >     * @since 1.6
839 >     */
840 >    public ConcurrentHashMap(int initialCapacity, float loadFactor) {
841 >        this(initialCapacity, loadFactor, 1);
842 >    }
843  
844 <        /**
845 <         * The maximum number of times to tryLock in a prescan before
846 <         * possibly blocking on acquire in preparation for a locked
847 <         * segment operation. On multiprocessors, using a bounded
848 <         * number of retries maintains cache acquired while locating
849 <         * nodes.
850 <         */
851 <        static final int MAX_SCAN_RETRIES =
852 <            Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1;
844 >    /**
845 >     * Creates a new, empty map with an initial table size based on
846 >     * the given number of elements ({@code initialCapacity}), table
847 >     * density ({@code loadFactor}), and number of concurrently
848 >     * updating threads ({@code concurrencyLevel}).
849 >     *
850 >     * @param initialCapacity the initial capacity. The implementation
851 >     * performs internal sizing to accommodate this many elements,
852 >     * given the specified load factor.
853 >     * @param loadFactor the load factor (table density) for
854 >     * establishing the initial table size
855 >     * @param concurrencyLevel the estimated number of concurrently
856 >     * updating threads. The implementation may use this value as
857 >     * a sizing hint.
858 >     * @throws IllegalArgumentException if the initial capacity is
859 >     * negative or the load factor or concurrencyLevel are
860 >     * nonpositive
861 >     */
862 >    public ConcurrentHashMap(int initialCapacity,
863 >                             float loadFactor, int concurrencyLevel) {
864 >        if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
865 >            throw new IllegalArgumentException();
866 >        if (initialCapacity < concurrencyLevel)   // Use at least as many bins
867 >            initialCapacity = concurrencyLevel;   // as estimated threads
868 >        long size = (long)(1.0 + (long)initialCapacity / loadFactor);
869 >        int cap = (size >= (long)MAXIMUM_CAPACITY) ?
870 >            MAXIMUM_CAPACITY : tableSizeFor((int)size);
871 >        this.sizeCtl = cap;
872 >    }
873  
874 <        /**
295 <         * The per-segment table. Elements are accessed via
296 <         * entryAt/setEntryAt providing volatile semantics.
297 <         */
298 <        transient volatile HashEntry<K,V>[] table;
874 >    // Original (since JDK1.2) Map methods
875  
876 <        /**
877 <         * The number of elements. Accessed only either within locks
878 <         * or among other volatile reads that maintain visibility.
879 <         */
880 <        transient int count;
876 >    /**
877 >     * {@inheritDoc}
878 >     */
879 >    public int size() {
880 >        long n = sumCount();
881 >        return ((n < 0L) ? 0 :
882 >                (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
883 >                (int)n);
884 >    }
885  
886 <        /**
887 <         * The total number of mutative operations in this segment.
888 <         * Even though this may overflow 32 bits, it provides
889 <         * sufficient accuracy for stability checks in CHM isEmpty()
890 <         * and size() methods.  Accessed only either within locks or
891 <         * among other volatile reads that maintain visibility.
312 <         */
313 <        transient int modCount;
886 >    /**
887 >     * {@inheritDoc}
888 >     */
889 >    public boolean isEmpty() {
890 >        return sumCount() <= 0L; // ignore transient negative values
891 >    }
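    /*
     * Usage sketch (illustrative): both methods derive from sumCount(), so
     * under concurrent updates the results are a snapshot, not a fixed count.
     *
     *   ConcurrentHashMap<String,Integer> map = new ConcurrentHashMap<>();
     *   map.put("a", 1);
     *   int n = map.size();            // 1
     *   boolean empty = map.isEmpty(); // false
     */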
892  
893 <        /**
894 <         * The table is rehashed when its size exceeds this threshold.
895 <         * (The value of this field is always <tt>(int)(capacity *
896 <         * loadFactor)</tt>.)
897 <         */
898 <        transient int threshold;
893 >    /**
894 >     * Returns the value to which the specified key is mapped,
895 >     * or {@code null} if this map contains no mapping for the key.
896 >     *
897 >     * <p>More formally, if this map contains a mapping from a key
898 >     * {@code k} to a value {@code v} such that {@code key.equals(k)},
899 >     * then this method returns {@code v}; otherwise it returns
900 >     * {@code null}.  (There can be at most one such mapping.)
901 >     *
902 >     * @throws NullPointerException if the specified key is null
903 >     */
904 >    public V get(Object key) {
905 >        Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
906 >        int h = spread(key.hashCode());
907 >        if ((tab = table) != null && (n = tab.length) > 0 &&
908 >            (e = tabAt(tab, (n - 1) & h)) != null) {
909 >            if ((eh = e.hash) == h) {
910 >                if ((ek = e.key) == key || (ek != null && key.equals(ek)))
911 >                    return e.val;
912 >            }
913 >            else if (eh < 0)
914 >                return (p = e.find(h, key)) != null ? p.val : null;
915 >            while ((e = e.next) != null) {
916 >                if (e.hash == h &&
917 >                    ((ek = e.key) == key || (ek != null && key.equals(ek))))
918 >                    return e.val;
919 >            }
920 >        }
921 >        return null;
922 >    }
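    /*
     * Usage sketch (illustrative):
     *
     *   ConcurrentHashMap<String,Integer> map = new ConcurrentHashMap<>();
     *   map.put("answer", 42);
     *   Integer v = map.get("answer");   // 42
     *   Integer w = map.get("missing");  // null, no mapping
     *   // map.get(null) throws NullPointerException, as documented above
     */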
923  
924 <        /**
925 <         * The load factor for the hash table.  Even though this value
926 <         * is same for all segments, it is replicated to avoid needing
927 <         * links to outer object.
928 <         * @serial
929 <         */
930 <        final float loadFactor;
924 >    /**
925 >     * Tests if the specified object is a key in this table.
926 >     *
927 >     * @param  key possible key
928 >     * @return {@code true} if and only if the specified object
929 >     *         is a key in this table, as determined by the
930 >     *         {@code equals} method; {@code false} otherwise
931 >     * @throws NullPointerException if the specified key is null
932 >     */
933 >    public boolean containsKey(Object key) {
934 >        return get(key) != null;
935 >    }
936  
937 <        Segment(float lf, int threshold, HashEntry<K,V>[] tab) {
938 <            this.loadFactor = lf;
939 <            this.threshold = threshold;
940 <            this.table = tab;
937 >    /**
938 >     * Returns {@code true} if this map maps one or more keys to the
939 >     * specified value. Note: This method may require a full traversal
940 >     * of the map, and is much slower than method {@code containsKey}.
941 >     *
942 >     * @param value value whose presence in this map is to be tested
943 >     * @return {@code true} if this map maps one or more keys to the
944 >     *         specified value
945 >     * @throws NullPointerException if the specified value is null
946 >     */
947 >    public boolean containsValue(Object value) {
948 >        if (value == null)
949 >            throw new NullPointerException();
950 >        Node<K,V>[] t;
951 >        if ((t = table) != null) {
952 >            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
953 >            for (Node<K,V> p; (p = it.advance()) != null; ) {
954 >                V v;
955 >                if ((v = p.val) == value || (v != null && value.equals(v)))
956 >                    return true;
957 >            }
958          }
959 +        return false;
960 +    }
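    /*
     * Usage sketch (illustrative): containsKey is a single hashed lookup,
     * while containsValue walks every bin with a Traverser.
     *
     *   ConcurrentHashMap<String,Integer> map = new ConcurrentHashMap<>();
     *   map.put("a", 1);
     *   boolean k = map.containsKey("a");   // true, probes one bin
     *   boolean v = map.containsValue(1);   // true, but may scan the whole table
     */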
961  
962 <        final V put(K key, int hash, V value, boolean onlyIfAbsent) {
963 <            HashEntry<K,V> node = tryLock() ? null :
964 <                scanAndLockForPut(key, hash, value);
965 <            V oldValue;
966 <            try {
967 <                HashEntry<K,V>[] tab = table;
968 <                int index = (tab.length - 1) & hash;
969 <                HashEntry<K,V> first = entryAt(tab, index);
970 <                for (HashEntry<K,V> e = first;;) {
971 <                    if (e != null) {
972 <                        K k;
973 <                        if ((k = e.key) == key ||
974 <                            (e.hash == hash && key.equals(k))) {
975 <                            oldValue = e.value;
976 <                            if (!onlyIfAbsent) {
977 <                                e.value = value;
978 <                                ++modCount;
962 >    /**
963 >     * Maps the specified key to the specified value in this table.
964 >     * Neither the key nor the value can be null.
965 >     *
966 >     * <p>The value can be retrieved by calling the {@code get} method
967 >     * with a key that is equal to the original key.
968 >     *
969 >     * @param key key with which the specified value is to be associated
970 >     * @param value value to be associated with the specified key
971 >     * @return the previous value associated with {@code key}, or
972 >     *         {@code null} if there was no mapping for {@code key}
973 >     * @throws NullPointerException if the specified key or value is null
974 >     */
975 >    public V put(K key, V value) {
976 >        return putVal(key, value, false);
977 >    }
978 >
979 >    /** Implementation for put and putIfAbsent */
980 >    final V putVal(K key, V value, boolean onlyIfAbsent) {
981 >        if (key == null || value == null) throw new NullPointerException();
982 >        int hash = spread(key.hashCode());
983 >        int binCount = 0;
984 >        for (Node<K,V>[] tab = table;;) {
985 >            Node<K,V> f; int n, i, fh;
986 >            if (tab == null || (n = tab.length) == 0)
987 >                tab = initTable();
988 >            else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
989 >                if (casTabAt(tab, i, null,
990 >                             new Node<K,V>(hash, key, value, null)))
991 >                    break;                   // no lock when adding to empty bin
992 >            }
993 >            else if ((fh = f.hash) == MOVED)
994 >                tab = helpTransfer(tab, f);
995 >            else {
996 >                V oldVal = null;
997 >                synchronized (f) {
998 >                    if (tabAt(tab, i) == f) {
999 >                        if (fh >= 0) {
1000 >                            binCount = 1;
1001 >                            for (Node<K,V> e = f;; ++binCount) {
1002 >                                K ek;
1003 >                                if (e.hash == hash &&
1004 >                                    ((ek = e.key) == key ||
1005 >                                     (ek != null && key.equals(ek)))) {
1006 >                                    oldVal = e.val;
1007 >                                    if (!onlyIfAbsent)
1008 >                                        e.val = value;
1009 >                                    break;
1010 >                                }
1011 >                                Node<K,V> pred = e;
1012 >                                if ((e = e.next) == null) {
1013 >                                    pred.next = new Node<K,V>(hash, key,
1014 >                                                              value, null);
1015 >                                    break;
1016 >                                }
1017                              }
354                            break;
1018                          }
1019 <                        e = e.next;
1020 <                    }
1021 <                    else {
1022 <                        if (node != null)
1023 <                            node.setNext(first);
1024 <                        else
1025 <                            node = new HashEntry<K,V>(hash, key, value, first);
1026 <                        int c = count + 1;
1027 <                        if (c > threshold && tab.length < MAXIMUM_CAPACITY)
1028 <                            rehash(node);
1029 <                        else
1030 <                            setEntryAt(tab, index, node);
368 <                        ++modCount;
369 <                        count = c;
370 <                        oldValue = null;
371 <                        break;
372 <                    }
373 <                }
374 <            } finally {
375 <                unlock();
376 <            }
377 <            return oldValue;
378 <        }
379 <
380 <        /**
381 <         * Doubles size of table and repacks entries, also adding the
382 <         * given node to new table
383 <         */
384 <        @SuppressWarnings("unchecked")
385 <        private void rehash(HashEntry<K,V> node) {
386 <            /*
387 <             * Reclassify nodes in each list to new table.  Because we
388 <             * are using power-of-two expansion, the elements from
389 <             * each bin must either stay at same index, or move with a
390 <             * power of two offset. We eliminate unnecessary node
391 <             * creation by catching cases where old nodes can be
392 <             * reused because their next fields won't change.
393 <             * Statistically, at the default threshold, only about
394 <             * one-sixth of them need cloning when a table
395 <             * doubles. The nodes they replace will be garbage
396 <             * collectable as soon as they are no longer referenced by
397 <             * any reader thread that may be in the midst of
398 <             * concurrently traversing table. Entry accesses use plain
399 <             * array indexing because they are followed by volatile
400 <             * table write.
401 <             */
402 <            HashEntry<K,V>[] oldTable = table;
403 <            int oldCapacity = oldTable.length;
404 <            int newCapacity = oldCapacity << 1;
405 <            threshold = (int)(newCapacity * loadFactor);
406 <            HashEntry<K,V>[] newTable =
407 <                (HashEntry<K,V>[]) new HashEntry[newCapacity];
408 <            int sizeMask = newCapacity - 1;
409 <            for (int i = 0; i < oldCapacity ; i++) {
410 <                HashEntry<K,V> e = oldTable[i];
411 <                if (e != null) {
412 <                    HashEntry<K,V> next = e.next;
413 <                    int idx = e.hash & sizeMask;
414 <                    if (next == null)   //  Single node on list
415 <                        newTable[idx] = e;
416 <                    else { // Reuse consecutive sequence at same slot
417 <                        HashEntry<K,V> lastRun = e;
418 <                        int lastIdx = idx;
419 <                        for (HashEntry<K,V> last = next;
420 <                             last != null;
421 <                             last = last.next) {
422 <                            int k = last.hash & sizeMask;
423 <                            if (k != lastIdx) {
424 <                                lastIdx = k;
425 <                                lastRun = last;
426 <                            }
427 <                        }
428 <                        newTable[lastIdx] = lastRun;
429 <                        // Clone remaining nodes
430 <                        for (HashEntry<K,V> p = e; p != lastRun; p = p.next) {
431 <                            V v = p.value;
432 <                            int h = p.hash;
433 <                            int k = h & sizeMask;
434 <                            HashEntry<K,V> n = newTable[k];
435 <                            newTable[k] = new HashEntry<K,V>(h, p.key, v, n);
436 <                        }
437 <                    }
438 <                }
439 <            }
440 <            int nodeIndex = node.hash & sizeMask; // add the new node
441 <            node.setNext(newTable[nodeIndex]);
442 <            newTable[nodeIndex] = node;
443 <            table = newTable;
444 <        }
445 <
446 <        /**
447 <         * Scans for a node containing given key while trying to
448 <         * acquire lock, creating and returning one if not found. Upon
449 <         * return, guarantees that lock is held. Unlike in most
450 <         * methods, calls to method equals are not screened: Since
451 <         * traversal speed doesn't matter, we might as well help warm
452 <         * up the associated code and accesses as well.
453 <         *
454 <         * @return a new node if key not found, else null
455 <         */
456 <        private HashEntry<K,V> scanAndLockForPut(K key, int hash, V value) {
457 <            HashEntry<K,V> first = entryForHash(this, hash);
458 <            HashEntry<K,V> e = first;
459 <            HashEntry<K,V> node = null;
460 <            int retries = -1; // negative while locating node
461 <            while (!tryLock()) {
462 <                HashEntry<K,V> f; // to recheck first below
463 <                if (retries < 0) {
464 <                    if (e == null) {
465 <                        if (node == null) // speculatively create node
466 <                            node = new HashEntry<K,V>(hash, key, value, null);
467 <                        retries = 0;
1019 >                        else if (f instanceof TreeBin) {
1020 >                            Node<K,V> p;
1021 >                            binCount = 2;
1022 >                            if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
1023 >                                                           value)) != null) {
1024 >                                oldVal = p.val;
1025 >                                if (!onlyIfAbsent)
1026 >                                    p.val = value;
1027 >                            }
1028 >                        }
1029 >                        else if (f instanceof ReservationNode)
1030 >                            throw new IllegalStateException("Recursive update");
1031                      }
469                    else if (key.equals(e.key))
470                        retries = 0;
471                    else
472                        e = e.next;
473                }
474                else if (++retries > MAX_SCAN_RETRIES) {
475                    lock();
476                    break;
477                }
478                else if ((retries & 1) == 0 &&
479                         (f = entryForHash(this, hash)) != first) {
480                    e = first = f; // re-traverse if entry changed
481                    retries = -1;
482                }
483            }
484            return node;
485        }
486
487        /**
488         * Scans for a node containing the given key while trying to
489         * acquire lock for a remove or replace operation. Upon
490         * return, guarantees that lock is held.  Note that we must
491         * lock even if the key is not found, to ensure sequential
492         * consistency of updates.
493         */
494        private void scanAndLock(Object key, int hash) {
495            // similar to but simpler than scanAndLockForPut
496            HashEntry<K,V> first = entryForHash(this, hash);
497            HashEntry<K,V> e = first;
498            int retries = -1;
499            while (!tryLock()) {
500                HashEntry<K,V> f;
501                if (retries < 0) {
502                    if (e == null || key.equals(e.key))
503                        retries = 0;
504                    else
505                        e = e.next;
1032                  }
1033 <                else if (++retries > MAX_SCAN_RETRIES) {
1034 <                    lock();
1033 >                if (binCount != 0) {
1034 >                    if (binCount >= TREEIFY_THRESHOLD)
1035 >                        treeifyBin(tab, i);
1036 >                    if (oldVal != null)
1037 >                        return oldVal;
1038                      break;
1039                  }
511                else if ((retries & 1) == 0 &&
512                         (f = entryForHash(this, hash)) != first) {
513                    e = first = f;
514                    retries = -1;
515                }
1040              }
1041          }
1042 +        addCount(1L, binCount);
1043 +        return null;
1044 +    }
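    /*
     * Usage sketch (illustrative): both public entry points funnel into
     * putVal; only the onlyIfAbsent flag differs.
     *
     *   ConcurrentHashMap<String,Integer> map = new ConcurrentHashMap<>();
     *   map.put("k", 1);          // putVal("k", 1, false) -> null (no prior mapping)
     *   map.put("k", 2);          // -> 1, value overwritten
     *   map.putIfAbsent("k", 3);  // putVal("k", 3, true)  -> 2, value unchanged
     */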
1045  
1046 <        /**
1047 <         * Remove; match on key only if value null, else match both.
1048 <         */
1049 <        final V remove(Object key, int hash, Object value) {
1050 <            if (!tryLock())
1051 <                scanAndLock(key, hash);
1052 <            V oldValue = null;
1053 <            try {
1054 <                HashEntry<K,V>[] tab = table;
1055 <                int index = (tab.length - 1) & hash;
1056 <                HashEntry<K,V> e = entryAt(tab, index);
1057 <                HashEntry<K,V> pred = null;
1058 <                while (e != null) {
1059 <                    K k;
1060 <                    HashEntry<K,V> next = e.next;
1061 <                    if ((k = e.key) == key ||
1062 <                        (e.hash == hash && key.equals(k))) {
1063 <                        V v = e.value;
1064 <                        if (value == null || value == v || value.equals(v)) {
1065 <                            if (pred == null)
1066 <                                setEntryAt(tab, index, next);
1067 <                            else
1068 <                                pred.setNext(next);
1069 <                            ++modCount;
1070 <                            --count;
1071 <                            oldValue = v;
1046 >    /**
1047 >     * Copies all of the mappings from the specified map to this one.
1048 >     * These mappings replace any mappings that this map had for any of the
1049 >     * keys currently in the specified map.
1050 >     *
1051 >     * @param m mappings to be stored in this map
1052 >     */
1053 >    public void putAll(Map<? extends K, ? extends V> m) {
1054 >        tryPresize(m.size());
1055 >        for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
1056 >            putVal(e.getKey(), e.getValue(), false);
1057 >    }
1058 >
1059 >    /**
1060 >     * Removes the key (and its corresponding value) from this map.
1061 >     * This method does nothing if the key is not in the map.
1062 >     *
1063 >     * @param  key the key that needs to be removed
1064 >     * @return the previous value associated with {@code key}, or
1065 >     *         {@code null} if there was no mapping for {@code key}
1066 >     * @throws NullPointerException if the specified key is null
1067 >     */
1068 >    public V remove(Object key) {
1069 >        return replaceNode(key, null, null);
1070 >    }
1071 >
1072 >    /**
1073 >     * Implementation for the four public remove/replace methods:
1074 >     * Replaces node value with v, conditional upon match of cv if
1075 >     * non-null.  If resulting value is null, delete.
1076 >     */
1077 >    final V replaceNode(Object key, V value, Object cv) {
1078 >        int hash = spread(key.hashCode());
1079 >        for (Node<K,V>[] tab = table;;) {
1080 >            Node<K,V> f; int n, i, fh;
1081 >            if (tab == null || (n = tab.length) == 0 ||
1082 >                (f = tabAt(tab, i = (n - 1) & hash)) == null)
1083 >                break;
1084 >            else if ((fh = f.hash) == MOVED)
1085 >                tab = helpTransfer(tab, f);
1086 >            else {
1087 >                V oldVal = null;
1088 >                boolean validated = false;
1089 >                synchronized (f) {
1090 >                    if (tabAt(tab, i) == f) {
1091 >                        if (fh >= 0) {
1092 >                            validated = true;
1093 >                            for (Node<K,V> e = f, pred = null;;) {
1094 >                                K ek;
1095 >                                if (e.hash == hash &&
1096 >                                    ((ek = e.key) == key ||
1097 >                                     (ek != null && key.equals(ek)))) {
1098 >                                    V ev = e.val;
1099 >                                    if (cv == null || cv == ev ||
1100 >                                        (ev != null && cv.equals(ev))) {
1101 >                                        oldVal = ev;
1102 >                                        if (value != null)
1103 >                                            e.val = value;
1104 >                                        else if (pred != null)
1105 >                                            pred.next = e.next;
1106 >                                        else
1107 >                                            setTabAt(tab, i, e.next);
1108 >                                    }
1109 >                                    break;
1110 >                                }
1111 >                                pred = e;
1112 >                                if ((e = e.next) == null)
1113 >                                    break;
1114 >                            }
1115                          }
1116 <                        break;
1116 >                        else if (f instanceof TreeBin) {
1117 >                            validated = true;
1118 >                            TreeBin<K,V> t = (TreeBin<K,V>)f;
1119 >                            TreeNode<K,V> r, p;
1120 >                            if ((r = t.root) != null &&
1121 >                                (p = r.findTreeNode(hash, key, null)) != null) {
1122 >                                V pv = p.val;
1123 >                                if (cv == null || cv == pv ||
1124 >                                    (pv != null && cv.equals(pv))) {
1125 >                                    oldVal = pv;
1126 >                                    if (value != null)
1127 >                                        p.val = value;
1128 >                                    else if (t.removeTreeNode(p))
1129 >                                        setTabAt(tab, i, untreeify(t.first));
1130 >                                }
1131 >                            }
1132 >                        }
1133 >                        else if (f instanceof ReservationNode)
1134 >                            throw new IllegalStateException("Recursive update");
1135                      }
548                    pred = e;
549                    e = next;
1136                  }
1137 <            } finally {
1138 <                unlock();
1139 <            }
1140 <            return oldValue;
1141 <        }
556 <
557 <        final boolean replace(K key, int hash, V oldValue, V newValue) {
558 <            if (!tryLock())
559 <                scanAndLock(key, hash);
560 <            boolean replaced = false;
561 <            try {
562 <                HashEntry<K,V> e;
563 <                for (e = entryForHash(this, hash); e != null; e = e.next) {
564 <                    K k;
565 <                    if ((k = e.key) == key ||
566 <                        (e.hash == hash && key.equals(k))) {
567 <                        if (oldValue.equals(e.value)) {
568 <                            e.value = newValue;
569 <                            ++modCount;
570 <                            replaced = true;
571 <                        }
572 <                        break;
1137 >                if (validated) {
1138 >                    if (oldVal != null) {
1139 >                        if (value == null)
1140 >                            addCount(-1L, -1);
1141 >                        return oldVal;
1142                      }
1143 +                    break;
1144                  }
575            } finally {
576                unlock();
1145              }
578            return replaced;
1146          }
1147 +        return null;
1148 +    }
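    /*
     * Usage sketch (illustrative): the four public callers map onto
     * replaceNode(key, value, cv) as follows.
     *
     *   map.remove("k");         // replaceNode("k", null, null) - unconditional delete
     *   map.remove("k", 1);      // replaceNode("k", null, 1)    - delete only if value is 1
     *   map.replace("k", 2);     // replaceNode("k", 2, null)    - set only if a mapping exists
     *   map.replace("k", 2, 3);  // replaceNode("k", 3, 2)       - set only if current value is 2
     */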
1149  
1150 <        final V replace(K key, int hash, V value) {
1151 <            if (!tryLock())
1152 <                scanAndLock(key, hash);
1153 <            V oldValue = null;
1154 <            try {
1155 <                HashEntry<K,V> e;
1156 <                for (e = entryForHash(this, hash); e != null; e = e.next) {
1157 <                    K k;
1158 <                    if ((k = e.key) == key ||
1159 <                        (e.hash == hash && key.equals(k))) {
1160 <                        oldValue = e.value;
1161 <                        e.value = value;
1162 <                        ++modCount;
1163 <                        break;
1150 >    /**
1151 >     * Removes all of the mappings from this map.
1152 >     */
1153 >    public void clear() {
1154 >        long delta = 0L; // negative number of deletions
1155 >        int i = 0;
1156 >        Node<K,V>[] tab = table;
1157 >        while (tab != null && i < tab.length) {
1158 >            int fh;
1159 >            Node<K,V> f = tabAt(tab, i);
1160 >            if (f == null)
1161 >                ++i;
1162 >            else if ((fh = f.hash) == MOVED) {
1163 >                tab = helpTransfer(tab, f);
1164 >                i = 0; // restart
1165 >            }
1166 >            else {
1167 >                synchronized (f) {
1168 >                    if (tabAt(tab, i) == f) {
1169 >                        Node<K,V> p = (fh >= 0 ? f :
1170 >                                       (f instanceof TreeBin) ?
1171 >                                       ((TreeBin<K,V>)f).first : null);
1172 >                        while (p != null) {
1173 >                            --delta;
1174 >                            p = p.next;
1175 >                        }
1176 >                        setTabAt(tab, i++, null);
1177                      }
1178                  }
597            } finally {
598                unlock();
1179              }
600            return oldValue;
1180          }
1181 +        if (delta != 0L)
1182 +            addCount(delta, -1);
1183 +    }
1184  
1185 <        final void clear() {
1186 <            lock();
1187 <            try {
1188 <                HashEntry<K,V>[] tab = table;
1189 <                for (int i = 0; i < tab.length ; i++)
1190 <                    setEntryAt(tab, i, null);
1191 <                ++modCount;
1192 <                count = 0;
1193 <            } finally {
1194 <                unlock();
1195 <            }
1196 <        }
1185 >    /**
1186 >     * Returns a {@link Set} view of the keys contained in this map.
1187 >     * The set is backed by the map, so changes to the map are
1188 >     * reflected in the set, and vice-versa. The set supports element
1189 >     * removal, which removes the corresponding mapping from this map,
1190 >     * via the {@code Iterator.remove}, {@code Set.remove},
1191 >     * {@code removeAll}, {@code retainAll}, and {@code clear}
1192 >     * operations.  It does not support the {@code add} or
1193 >     * {@code addAll} operations.
1194 >     *
1195 >     * <p>The view's iterators and spliterators are
1196 >     * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1197 >     *
1198 >     * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
1199 >     * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
1200 >     *
1201 >     * @return the set view
1202 >     */
1203 >    public KeySetView<K,V> keySet() {
1204 >        KeySetView<K,V> ks;
1205 >        return (ks = keySet) != null ? ks : (keySet = new KeySetView<K,V>(this, null));
1206      }
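    /*
     * Usage sketch (illustrative): the returned view writes through to the map.
     *
     *   ConcurrentHashMap<String,Integer> map = new ConcurrentHashMap<>();
     *   map.put("a", 1);
     *   map.put("b", 2);
     *   map.keySet().remove("a");            // also removes a=1 from the map
     *   boolean has = map.containsKey("a");  // false
     *   // map.keySet().add("c") is not supported on this view (see above)
     */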
1207  
1208 <    // Accessing segments
1208 >    /**
1209 >     * Returns a {@link Collection} view of the values contained in this map.
1210 >     * The collection is backed by the map, so changes to the map are
1211 >     * reflected in the collection, and vice-versa.  The collection
1212 >     * supports element removal, which removes the corresponding
1213 >     * mapping from this map, via the {@code Iterator.remove},
1214 >     * {@code Collection.remove}, {@code removeAll},
1215 >     * {@code retainAll}, and {@code clear} operations.  It does not
1216 >     * support the {@code add} or {@code addAll} operations.
1217 >     *
1218 >     * <p>The view's iterators and spliterators are
1219 >     * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1220 >     *
1221 >     * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT}
1222 >     * and {@link Spliterator#NONNULL}.
1223 >     *
1224 >     * @return the collection view
1225 >     */
1226 >    public Collection<V> values() {
1227 >        ValuesView<K,V> vs;
1228 >        return (vs = values) != null ? vs : (values = new ValuesView<K,V>(this));
1229 >    }
1230 >
1231 >    /**
1232 >     * Returns a {@link Set} view of the mappings contained in this map.
1233 >     * The set is backed by the map, so changes to the map are
1234 >     * reflected in the set, and vice-versa.  The set supports element
1235 >     * removal, which removes the corresponding mapping from the map,
1236 >     * via the {@code Iterator.remove}, {@code Set.remove},
1237 >     * {@code removeAll}, {@code retainAll}, and {@code clear}
1238 >     * operations.
1239 >     *
1240 >     * <p>The view's iterators and spliterators are
1241 >     * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1242 >     *
1243 >     * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
1244 >     * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
1245 >     *
1246 >     * @return the set view
1247 >     */
1248 >    public Set<Map.Entry<K,V>> entrySet() {
1249 >        EntrySetView<K,V> es;
1250 >        return (es = entrySet) != null ? es : (entrySet = new EntrySetView<K,V>(this));
1251 >    }
1252  
1253      /**
1254 <     * Gets the jth element of given segment array (if nonnull) with
1255 <     * volatile element access semantics via Unsafe. (The null check
1256 <     * can trigger harmlessly only during deserialization.) Note:
1257 <     * because each element of segments array is set only once (using
1258 <     * fully ordered writes), some performance-sensitive methods rely
625 <     * on this method only as a recheck upon null reads.
1254 >     * Returns the hash code value for this {@link Map}, i.e.,
1255 >     * the sum of, for each key-value pair in the map,
1256 >     * {@code key.hashCode() ^ value.hashCode()}.
1257 >     *
1258 >     * @return the hash code value for this map
1259       */
1260 <    @SuppressWarnings("unchecked")
1261 <    static final <K,V> Segment<K,V> segmentAt(Segment<K,V>[] ss, int j) {
1262 <        long u = (j << SSHIFT) + SBASE;
1263 <        return ss == null ? null :
1264 <            (Segment<K,V>) UNSAFE.getObjectVolatile(ss, u);
1260 >    public int hashCode() {
1261 >        int h = 0;
1262 >        Node<K,V>[] t;
1263 >        if ((t = table) != null) {
1264 >            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1265 >            for (Node<K,V> p; (p = it.advance()) != null; )
1266 >                h += p.key.hashCode() ^ p.val.hashCode();
1267 >        }
1268 >        return h;
1269      }
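    /*
     * Worked example (illustrative): for a map holding {"a"=1, "b"=2},
     * hashCode() is ("a".hashCode() ^ Integer.valueOf(1).hashCode())
     *             + ("b".hashCode() ^ Integer.valueOf(2).hashCode()),
     * matching the general Map.hashCode() contract.
     */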
1270  
1271      /**
1272 <     * Returns the segment for the given index, creating it and
1273 <     * recording in segment table (via CAS) if not already present.
1272 >     * Returns a string representation of this map.  The string
1273 >     * representation consists of a list of key-value mappings (in no
1274 >     * particular order) enclosed in braces ("{@code {}}").  Adjacent
1275 >     * mappings are separated by the characters {@code ", "} (comma
1276 >     * and space).  Each key-value mapping is rendered as the key
1277 >     * followed by an equals sign ("{@code =}") followed by the
1278 >     * associated value.
1279       *
1280 <     * @param k the index
639 <     * @return the segment
1280 >     * @return a string representation of this map
1281       */
1282 <    @SuppressWarnings("unchecked")
1283 <    private Segment<K,V> ensureSegment(int k) {
1284 <        final Segment<K,V>[] ss = this.segments;
1285 <        long u = (k << SSHIFT) + SBASE; // raw offset
1286 <        Segment<K,V> seg;
1287 <        if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u)) == null) {
1288 <            Segment<K,V> proto = ss[0]; // use segment 0 as prototype
1289 <            int cap = proto.table.length;
1290 <            float lf = proto.loadFactor;
1291 <            int threshold = (int)(cap * lf);
1292 <            HashEntry<K,V>[] tab = (HashEntry<K,V>[])new HashEntry[cap];
1293 <            if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
1294 <                == null) { // recheck
1295 <                Segment<K,V> s = new Segment<K,V>(lf, threshold, tab);
1296 <                while ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
1297 <                       == null) {
1298 <                    if (UNSAFE.compareAndSwapObject(ss, u, null, seg = s))
658 <                        break;
659 <                }
1282 >    public String toString() {
1283 >        Node<K,V>[] t;
1284 >        int f = (t = table) == null ? 0 : t.length;
1285 >        Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1286 >        StringBuilder sb = new StringBuilder();
1287 >        sb.append('{');
1288 >        Node<K,V> p;
1289 >        if ((p = it.advance()) != null) {
1290 >            for (;;) {
1291 >                K k = p.key;
1292 >                V v = p.val;
1293 >                sb.append(k == this ? "(this Map)" : k);
1294 >                sb.append('=');
1295 >                sb.append(v == this ? "(this Map)" : v);
1296 >                if ((p = it.advance()) == null)
1297 >                    break;
1298 >                sb.append(',').append(' ');
1299              }
1300          }
1301 <        return seg;
1301 >        return sb.append('}').toString();
1302      }
1303  
665    // Hash-based segment and entry accesses
666
1304      /**
1305 <     * Gets the segment for the given hash code.
1305 >     * Compares the specified object with this map for equality.
1306 >     * Returns {@code true} if the given object is a map with the same
1307 >     * mappings as this map.  This operation may return misleading
1308 >     * results if either map is concurrently modified during execution
1309 >     * of this method.
1310 >     *
1311 >     * @param o object to be compared for equality with this map
1312 >     * @return {@code true} if the specified object is equal to this map
1313       */
1314 <    @SuppressWarnings("unchecked")
1315 <    private Segment<K,V> segmentForHash(int h) {
1316 <        long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
1317 <        return (Segment<K,V>) UNSAFE.getObjectVolatile(segments, u);
1314 >    public boolean equals(Object o) {
1315 >        if (o != this) {
1316 >            if (!(o instanceof Map))
1317 >                return false;
1318 >            Map<?,?> m = (Map<?,?>) o;
1319 >            Node<K,V>[] t;
1320 >            int f = (t = table) == null ? 0 : t.length;
1321 >            Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1322 >            for (Node<K,V> p; (p = it.advance()) != null; ) {
1323 >                V val = p.val;
1324 >                Object v = m.get(p.key);
1325 >                if (v == null || (v != val && !v.equals(val)))
1326 >                    return false;
1327 >            }
1328 >            for (Map.Entry<?,?> e : m.entrySet()) {
1329 >                Object mk, mv, v;
1330 >                if ((mk = e.getKey()) == null ||
1331 >                    (mv = e.getValue()) == null ||
1332 >                    (v = get(mk)) == null ||
1333 >                    (mv != v && !mv.equals(v)))
1334 >                    return false;
1335 >            }
1336 >        }
1337 >        return true;
1338      }
1339  
1340      /**
1341 <     * Gets the table entry for the given segment and hash code.
1341 >     * Stripped-down version of helper class used in previous version,
1342 >     * declared for the sake of serialization compatibility
1343       */
1344 <    @SuppressWarnings("unchecked")
1345 <    static final <K,V> HashEntry<K,V> entryForHash(Segment<K,V> seg, int h) {
1346 <        HashEntry<K,V>[] tab;
1347 <        return (seg == null || (tab = seg.table) == null) ? null :
683 <            (HashEntry<K,V>) UNSAFE.getObjectVolatile
684 <            (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
1344 >    static class Segment<K,V> extends ReentrantLock implements Serializable {
1345 >        private static final long serialVersionUID = 2249069246763182397L;
1346 >        final float loadFactor;
1347 >        Segment(float lf) { this.loadFactor = lf; }
1348      }
1349  
687    /* ---------------- Public operations -------------- */
688
1350      /**
1351 <     * Creates a new, empty map with the specified initial
1352 <     * capacity, load factor and concurrency level.
1353 <     *
1354 <     * @param initialCapacity the initial capacity. The implementation
1355 <     * performs internal sizing to accommodate this many elements.
1356 <     * @param loadFactor  the load factor threshold, used to control resizing.
1357 <     * Resizing may be performed when the average number of elements per
1358 <     * bin exceeds this threshold.
698 <     * @param concurrencyLevel the estimated number of concurrently
699 <     * updating threads. The implementation performs internal sizing
700 <     * to try to accommodate this many threads.
701 <     * @throws IllegalArgumentException if the initial capacity is
702 <     * negative or the load factor or concurrencyLevel are
703 <     * nonpositive.
1351 >     * Saves the state of the {@code ConcurrentHashMap} instance to a
1352 >     * stream (i.e., serializes it).
1353 >     * @param s the stream
1354 >     * @throws java.io.IOException if an I/O error occurs
1355 >     * @serialData
1356 >     * the key (Object) and value (Object)
1357 >     * for each key-value mapping, followed by a null pair.
1358 >     * The key-value mappings are emitted in no particular order.
1359       */
1360 <    @SuppressWarnings("unchecked")
1361 <    public ConcurrentHashMap(int initialCapacity,
1362 <                             float loadFactor, int concurrencyLevel) {
1363 <        if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0)
709 <            throw new IllegalArgumentException();
710 <        if (concurrencyLevel > MAX_SEGMENTS)
711 <            concurrencyLevel = MAX_SEGMENTS;
712 <        // Find power-of-two sizes best matching arguments
1360 >    private void writeObject(java.io.ObjectOutputStream s)
1361 >        throws java.io.IOException {
1362 >        // For serialization compatibility
1363 >        // Emulate segment calculation from previous version of this class
1364          int sshift = 0;
1365          int ssize = 1;
1366 <        while (ssize < concurrencyLevel) {
1366 >        while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
1367              ++sshift;
1368              ssize <<= 1;
1369          }
1370 <        this.segmentShift = 32 - sshift;
1371 <        this.segmentMask = ssize - 1;
1372 <        if (initialCapacity > MAXIMUM_CAPACITY)
1373 <            initialCapacity = MAXIMUM_CAPACITY;
1374 <        int c = initialCapacity / ssize;
1375 <        if (c * ssize < initialCapacity)
1376 <            ++c;
1377 <        int cap = MIN_SEGMENT_TABLE_CAPACITY;
1378 <        while (cap < c)
1379 <            cap <<= 1;
1380 <        // create segments and segments[0]
1381 <        Segment<K,V> s0 =
1382 <            new Segment<K,V>(loadFactor, (int)(cap * loadFactor),
1383 <                             (HashEntry<K,V>[])new HashEntry[cap]);
1384 <        Segment<K,V>[] ss = (Segment<K,V>[])new Segment[ssize];
1385 <        UNSAFE.putOrderedObject(ss, SBASE, s0); // ordered write of segments[0]
1386 <        this.segments = ss;
1370 >        int segmentShift = 32 - sshift;
1371 >        int segmentMask = ssize - 1;
1372 >        @SuppressWarnings("unchecked")
1373 >        Segment<K,V>[] segments = (Segment<K,V>[])
1374 >            new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
1375 >        for (int i = 0; i < segments.length; ++i)
1376 >            segments[i] = new Segment<K,V>(LOAD_FACTOR);
1377 >        java.io.ObjectOutputStream.PutField streamFields = s.putFields();
1378 >        streamFields.put("segments", segments);
1379 >        streamFields.put("segmentShift", segmentShift);
1380 >        streamFields.put("segmentMask", segmentMask);
1381 >        s.writeFields();
1382 >
1383 >        Node<K,V>[] t;
1384 >        if ((t = table) != null) {
1385 >            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1386 >            for (Node<K,V> p; (p = it.advance()) != null; ) {
1387 >                s.writeObject(p.key);
1388 >                s.writeObject(p.val);
1389 >            }
1390 >        }
1391 >        s.writeObject(null);
1392 >        s.writeObject(null);
1393 >        segments = null; // throw away
1394      }
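    /*
     * Usage sketch (illustrative): the serial form above (legacy segments
     * header, then key/value pairs terminated by a null pair) is produced and
     * consumed entirely by writeObject/readObject, so a round trip is plain
     * Java serialization.
     *
     *   ConcurrentHashMap<String,Integer> map = new ConcurrentHashMap<>();
     *   map.put("a", 1);
     *   ByteArrayOutputStream bos = new ByteArrayOutputStream();
     *   try (ObjectOutputStream out = new ObjectOutputStream(bos)) {
     *       out.writeObject(map);
     *   }
     *   try (ObjectInputStream in = new ObjectInputStream(
     *            new ByteArrayInputStream(bos.toByteArray()))) {
     *       Object copy = in.readObject();   // a ConcurrentHashMap holding a=1
     *   }
     */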
1395  
1396      /**
1397 <     * Creates a new, empty map with the specified initial capacity
1398 <     * and load factor and with the default concurrencyLevel (16).
1399 <     *
1400 <     * @param initialCapacity The implementation performs internal
1401 <     * sizing to accommodate this many elements.
1402 <     * @param loadFactor  the load factor threshold, used to control resizing.
1403 <     * Resizing may be performed when the average number of elements per
1404 <     * bin exceeds this threshold.
1405 <     * @throws IllegalArgumentException if the initial capacity of
1406 <     * elements is negative or the load factor is nonpositive
1397 >     * Reconstitutes the instance from a stream (that is, deserializes it).
1398 >     * @param s the stream
1399 >     * @throws ClassNotFoundException if the class of a serialized object
1400 >     *         could not be found
1401 >     * @throws java.io.IOException if an I/O error occurs
1402 >     */
1403 >    private void readObject(java.io.ObjectInputStream s)
1404 >        throws java.io.IOException, ClassNotFoundException {
1405 >        /*
1406 >         * To improve performance in typical cases, we create nodes
1407 >         * while reading, then place in table once size is known.
1408 >         * However, we must also validate uniqueness and deal with
1409 >         * overpopulated bins while doing so, which requires
1410 >         * specialized versions of putVal mechanics.
1411 >         */
1412 >        sizeCtl = -1; // force exclusion for table construction
1413 >        s.defaultReadObject();
1414 >        long size = 0L;
1415 >        Node<K,V> p = null;
1416 >        for (;;) {
1417 >            @SuppressWarnings("unchecked")
1418 >            K k = (K) s.readObject();
1419 >            @SuppressWarnings("unchecked")
1420 >            V v = (V) s.readObject();
1421 >            if (k != null && v != null) {
1422 >                p = new Node<K,V>(spread(k.hashCode()), k, v, p);
1423 >                ++size;
1424 >            }
1425 >            else
1426 >                break;
1427 >        }
1428 >        if (size == 0L)
1429 >            sizeCtl = 0;
1430 >        else {
1431 >            int n;
1432 >            if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
1433 >                n = MAXIMUM_CAPACITY;
1434 >            else {
1435 >                int sz = (int)size;
1436 >                n = tableSizeFor(sz + (sz >>> 1) + 1);
1437 >            }
1438 >            @SuppressWarnings("unchecked")
1439 >            Node<K,V>[] tab = (Node<K,V>[])new Node<?,?>[n];
1440 >            int mask = n - 1;
1441 >            long added = 0L;
1442 >            while (p != null) {
1443 >                boolean insertAtFront;
1444 >                Node<K,V> next = p.next, first;
1445 >                int h = p.hash, j = h & mask;
1446 >                if ((first = tabAt(tab, j)) == null)
1447 >                    insertAtFront = true;
1448 >                else {
1449 >                    K k = p.key;
1450 >                    if (first.hash < 0) {
1451 >                        TreeBin<K,V> t = (TreeBin<K,V>)first;
1452 >                        if (t.putTreeVal(h, k, p.val) == null)
1453 >                            ++added;
1454 >                        insertAtFront = false;
1455 >                    }
1456 >                    else {
1457 >                        int binCount = 0;
1458 >                        insertAtFront = true;
1459 >                        Node<K,V> q; K qk;
1460 >                        for (q = first; q != null; q = q.next) {
1461 >                            if (q.hash == h &&
1462 >                                ((qk = q.key) == k ||
1463 >                                 (qk != null && k.equals(qk)))) {
1464 >                                insertAtFront = false;
1465 >                                break;
1466 >                            }
1467 >                            ++binCount;
1468 >                        }
1469 >                        if (insertAtFront && binCount >= TREEIFY_THRESHOLD) {
1470 >                            insertAtFront = false;
1471 >                            ++added;
1472 >                            p.next = first;
1473 >                            TreeNode<K,V> hd = null, tl = null;
1474 >                            for (q = p; q != null; q = q.next) {
1475 >                                TreeNode<K,V> t = new TreeNode<K,V>
1476 >                                    (q.hash, q.key, q.val, null, null);
1477 >                                if ((t.prev = tl) == null)
1478 >                                    hd = t;
1479 >                                else
1480 >                                    tl.next = t;
1481 >                                tl = t;
1482 >                            }
1483 >                            setTabAt(tab, j, new TreeBin<K,V>(hd));
1484 >                        }
1485 >                    }
1486 >                }
1487 >                if (insertAtFront) {
1488 >                    ++added;
1489 >                    p.next = first;
1490 >                    setTabAt(tab, j, p);
1491 >                }
1492 >                p = next;
1493 >            }
1494 >            table = tab;
1495 >            sizeCtl = n - (n >>> 2);
1496 >            baseCount = added;
1497 >        }
1498 >    }
1499 >
1500 >    // ConcurrentMap methods
1501 >
1502 >    /**
1503 >     * {@inheritDoc}
1504       *
1505 <     * @since 1.6
1505 >     * @return the previous value associated with the specified key,
1506 >     *         or {@code null} if there was no mapping for the key
1507 >     * @throws NullPointerException if the specified key or value is null
1508       */
1509 <    public ConcurrentHashMap(int initialCapacity, float loadFactor) {
1510 <        this(initialCapacity, loadFactor, DEFAULT_CONCURRENCY_LEVEL);
1509 >    public V putIfAbsent(K key, V value) {
1510 >        return putVal(key, value, true);
1511      }
1512  
1513      /**
1514 <     * Creates a new, empty map with the specified initial capacity,
758 <     * and with default load factor (0.75) and concurrencyLevel (16).
1514 >     * {@inheritDoc}
1515       *
1516 <     * @param initialCapacity the initial capacity. The implementation
761 <     * performs internal sizing to accommodate this many elements.
762 <     * @throws IllegalArgumentException if the initial capacity of
763 <     * elements is negative.
1516 >     * @throws NullPointerException if the specified key is null
1517       */
1518 <    public ConcurrentHashMap(int initialCapacity) {
1519 <        this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
1518 >    public boolean remove(Object key, Object value) {
1519 >        if (key == null)
1520 >            throw new NullPointerException();
1521 >        return value != null && replaceNode(key, null, value) != null;
1522      }
1523  
1524      /**
1525 <     * Creates a new, empty map with a default initial capacity (16),
1526 <     * load factor (0.75) and concurrencyLevel (16).
1525 >     * {@inheritDoc}
1526 >     *
1527 >     * @throws NullPointerException if any of the arguments are null
1528       */
1529 <    public ConcurrentHashMap() {
1530 <        this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
1529 >    public boolean replace(K key, V oldValue, V newValue) {
1530 >        if (key == null || oldValue == null || newValue == null)
1531 >            throw new NullPointerException();
1532 >        return replaceNode(key, newValue, oldValue) != null;
1533      }
1534  
1535      /**
1536 <     * Creates a new map with the same mappings as the given map.
779 <     * The map is created with a capacity of 1.5 times the number
780 <     * of mappings in the given map or 16 (whichever is greater),
781 <     * and a default load factor (0.75) and concurrencyLevel (16).
1536 >     * {@inheritDoc}
1537       *
1538 <     * @param m the map
1538 >     * @return the previous value associated with the specified key,
1539 >     *         or {@code null} if there was no mapping for the key
1540 >     * @throws NullPointerException if the specified key or value is null
1541       */
1542 <    public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
1543 <        this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1,
1544 <                      DEFAULT_INITIAL_CAPACITY),
1545 <             DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
789 <        putAll(m);
1542 >    public V replace(K key, V value) {
1543 >        if (key == null || value == null)
1544 >            throw new NullPointerException();
1545 >        return replaceNode(key, value, null);
1546      }
1547  
1548 +    // Overrides of JDK8+ Map extension method defaults
1549 +
1550      /**
1551 <     * Returns <tt>true</tt> if this map contains no key-value mappings.
1551 >     * Returns the value to which the specified key is mapped, or the
1552 >     * given default value if this map contains no mapping for the
1553 >     * key.
1554       *
1555 <     * @return <tt>true</tt> if this map contains no key-value mappings
1555 >     * @param key the key whose associated value is to be returned
1556 >     * @param defaultValue the value to return if this map contains
1557 >     * no mapping for the given key
1558 >     * @return the mapping for the key, if present; else the default value
1559 >     * @throws NullPointerException if the specified key is null
1560       */
1561 <    public boolean isEmpty() {
1562 <        /*
1563 <         * Sum per-segment modCounts to avoid mis-reporting when
1564 <         * elements are concurrently added and removed in one segment
1565 <         * while checking another, in which case the table was never
1566 <         * actually empty at any point. (The sum ensures accuracy up
1567 <         * through at least 1<<31 per-segment modifications before
1568 <         * recheck.)  Methods size() and containsValue() use similar
1569 <         * constructions for stability checks.
1570 <         */
1571 <        long sum = 0L;
1572 <        final Segment<K,V>[] segments = this.segments;
809 <        for (int j = 0; j < segments.length; ++j) {
810 <            Segment<K,V> seg = segmentAt(segments, j);
811 <            if (seg != null) {
812 <                if (seg.count != 0)
813 <                    return false;
814 <                sum += seg.modCount;
1561 >    public V getOrDefault(Object key, V defaultValue) {
1562 >        V v;
1563 >        return (v = get(key)) == null ? defaultValue : v;
1564 >    }
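    /*
     * Usage sketch (illustrative):
     *
     *   ConcurrentHashMap<String,Integer> map = new ConcurrentHashMap<>();
     *   map.put("hits", 3);
     *   int a = map.getOrDefault("hits", 0);    // 3
     *   int b = map.getOrDefault("misses", 0);  // 0, default returned
     */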
1565 >
1566 >    public void forEach(BiConsumer<? super K, ? super V> action) {
1567 >        if (action == null) throw new NullPointerException();
1568 >        Node<K,V>[] t;
1569 >        if ((t = table) != null) {
1570 >            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1571 >            for (Node<K,V> p; (p = it.advance()) != null; ) {
1572 >                action.accept(p.key, p.val);
1573              }
1574          }
1575 <        if (sum != 0L) { // recheck unless no modifications
1576 <            for (int j = 0; j < segments.length; ++j) {
1577 <                Segment<K,V> seg = segmentAt(segments, j);
1578 <                if (seg != null) {
1579 <                    if (seg.count != 0)
1580 <                        return false;
1581 <                    sum -= seg.modCount;
1575 >    }
1576 >
1577 >    public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
1578 >        if (function == null) throw new NullPointerException();
1579 >        Node<K,V>[] t;
1580 >        if ((t = table) != null) {
1581 >            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1582 >            for (Node<K,V> p; (p = it.advance()) != null; ) {
1583 >                V oldValue = p.val;
1584 >                for (K key = p.key;;) {
1585 >                    V newValue = function.apply(key, oldValue);
1586 >                    if (newValue == null)
1587 >                        throw new NullPointerException();
1588 >                    if (replaceNode(key, newValue, oldValue) != null ||
1589 >                        (oldValue = get(key)) == null)
1590 >                        break;
1591                  }
1592              }
826            if (sum != 0L)
827                return false;
1593          }
829        return true;
1594      }
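    /*
     * Usage sketch (illustrative): forEach visits each live mapping once;
     * replaceAll retries each key through replaceNode until the function's
     * result is installed or the mapping disappears.
     *
     *   ConcurrentHashMap<String,Integer> map = new ConcurrentHashMap<>();
     *   map.put("a", 1);
     *   map.put("b", 2);
     *   map.forEach((k, v) -> System.out.println(k + "=" + v));
     *   map.replaceAll((k, v) -> v * 10);   // now {a=10, b=20}
     */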
1595  
1596      /**
1597 <     * Returns the number of key-value mappings in this map.  If the
1598 <     * map contains more than <tt>Integer.MAX_VALUE</tt> elements, returns
1599 <     * <tt>Integer.MAX_VALUE</tt>.
1597 >     * Helper method for EntrySet.removeIf
1598 >     */
1599 >    boolean removeEntryIf(Predicate<? super Entry<K,V>> function) {
1600 >        if (function == null) throw new NullPointerException();
1601 >        Node<K,V>[] t;
1602 >        boolean removed = false;
1603 >        if ((t = table) != null) {
1604 >            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1605 >            for (Node<K,V> p; (p = it.advance()) != null; ) {
1606 >                K k = p.key;
1607 >                V v = p.val;
1608 >                Map.Entry<K,V> e = new AbstractMap.SimpleImmutableEntry<>(k, v);
1609 >                if (function.test(e) && replaceNode(k, null, v) != null)
1610 >                    removed = true;
1611 >            }
1612 >        }
1613 >        return removed;
1614 >    }
1615 >
1616 >    /**
1617 >     * Helper method for Values.removeIf
1618 >     */
1619 >    boolean removeValueIf(Predicate<? super V> function) {
1620 >        if (function == null) throw new NullPointerException();
1621 >        Node<K,V>[] t;
1622 >        boolean removed = false;
1623 >        if ((t = table) != null) {
1624 >            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1625 >            for (Node<K,V> p; (p = it.advance()) != null; ) {
1626 >                K k = p.key;
1627 >                V v = p.val;
1628 >                if (function.test(v) && replaceNode(k, null, v) != null)
1629 >                    removed = true;
1630 >            }
1631 >        }
1632 >        return removed;
1633 >    }
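    /*
     * Usage sketch (illustrative): these helpers back removeIf on the
     * corresponding views, removing an entry only when it still holds the
     * value that was tested.
     *
     *   ConcurrentHashMap<String,Integer> map = new ConcurrentHashMap<>();
     *   map.put("a", 1);
     *   map.put("b", 2);
     *   map.values().removeIf(v -> v > 1);                     // drops b=2
     *   map.entrySet().removeIf(e -> e.getKey().equals("a"));  // drops a=1
     */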
1634 >
1635 >    /**
1636 >     * If the specified key is not already associated with a value,
1637 >     * attempts to compute its value using the given mapping function
1638 >     * and enters it into this map unless {@code null}.  The entire
1639 >     * method invocation is performed atomically, so the function is
1640 >     * applied at most once per key.  Some attempted update operations
1641 >     * on this map by other threads may be blocked while computation
1642 >     * is in progress, so the computation should be short and simple,
1643 >     * and must not attempt to update any other mappings of this map.
1644       *
1645 <     * @return the number of key-value mappings in this map
1645 >     * @param key key with which the specified value is to be associated
1646 >     * @param mappingFunction the function to compute a value
1647 >     * @return the current (existing or computed) value associated with
1648 >     *         the specified key, or null if the computed value is null
1649 >     * @throws NullPointerException if the specified key or mappingFunction
1650 >     *         is null
1651 >     * @throws IllegalStateException if the computation detectably
1652 >     *         attempts a recursive update to this map that would
1653 >     *         otherwise never complete
1654 >     * @throws RuntimeException or Error if the mappingFunction does so,
1655 >     *         in which case the mapping is left unestablished
1656       */
1657 <    public int size() {
1658 <        // Try a few times to get accurate count. On failure due to
1659 <        // continuous async changes in table, resort to locking.
1660 <        final Segment<K,V>[] segments = this.segments;
1661 <        int size;
1662 <        boolean overflow; // true if size overflows 32 bits
1663 <        long sum;         // sum of modCounts
1664 <        long last = 0L;   // previous sum
1665 <        int retries = -1; // first iteration isn't retry
1666 <        try {
1667 <            for (;;) {
1668 <                if (retries++ == RETRIES_BEFORE_LOCK) {
1669 <                    for (int j = 0; j < segments.length; ++j)
1670 <                        ensureSegment(j).lock(); // force creation
1671 <                }
1672 <                sum = 0L;
1673 <                size = 0;
1674 <                overflow = false;
1675 <                for (int j = 0; j < segments.length; ++j) {
1676 <                    Segment<K,V> seg = segmentAt(segments, j);
1677 <                    if (seg != null) {
1678 <                        sum += seg.modCount;
861 <                        int c = seg.count;
862 <                        if (c < 0 || (size += c) < 0)
863 <                            overflow = true;
1657 >    public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
1658 >        if (key == null || mappingFunction == null)
1659 >            throw new NullPointerException();
1660 >        int h = spread(key.hashCode());
1661 >        V val = null;
1662 >        int binCount = 0;
1663 >        for (Node<K,V>[] tab = table;;) {
1664 >            Node<K,V> f; int n, i, fh;
1665 >            if (tab == null || (n = tab.length) == 0)
1666 >                tab = initTable();
1667 >            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1668 >                Node<K,V> r = new ReservationNode<K,V>();
1669 >                synchronized (r) {
1670 >                    if (casTabAt(tab, i, null, r)) {
1671 >                        binCount = 1;
1672 >                        Node<K,V> node = null;
1673 >                        try {
1674 >                            if ((val = mappingFunction.apply(key)) != null)
1675 >                                node = new Node<K,V>(h, key, val, null);
1676 >                        } finally {
1677 >                            setTabAt(tab, i, node);
1678 >                        }
1679                      }
1680                  }
1681 <                if (sum == last)
1681 >                if (binCount != 0)
1682                      break;
868                last = sum;
1683              }
1684 <        } finally {
1685 <            if (retries > RETRIES_BEFORE_LOCK) {
1686 <                for (int j = 0; j < segments.length; ++j)
1687 <                    segmentAt(segments, j).unlock();
1684 >            else if ((fh = f.hash) == MOVED)
1685 >                tab = helpTransfer(tab, f);
1686 >            else {
1687 >                boolean added = false;
1688 >                synchronized (f) {
1689 >                    if (tabAt(tab, i) == f) {
1690 >                        if (fh >= 0) {
1691 >                            binCount = 1;
1692 >                            for (Node<K,V> e = f;; ++binCount) {
1693 >                                K ek;
1694 >                                if (e.hash == h &&
1695 >                                    ((ek = e.key) == key ||
1696 >                                     (ek != null && key.equals(ek)))) {
1697 >                                    val = e.val;
1698 >                                    break;
1699 >                                }
1700 >                                Node<K,V> pred = e;
1701 >                                if ((e = e.next) == null) {
1702 >                                    if ((val = mappingFunction.apply(key)) != null) {
1703 >                                        if (pred.next != null)
1704 >                                            throw new IllegalStateException("Recursive update");
1705 >                                        added = true;
1706 >                                        pred.next = new Node<K,V>(h, key, val, null);
1707 >                                    }
1708 >                                    break;
1709 >                                }
1710 >                            }
1711 >                        }
1712 >                        else if (f instanceof TreeBin) {
1713 >                            binCount = 2;
1714 >                            TreeBin<K,V> t = (TreeBin<K,V>)f;
1715 >                            TreeNode<K,V> r, p;
1716 >                            if ((r = t.root) != null &&
1717 >                                (p = r.findTreeNode(h, key, null)) != null)
1718 >                                val = p.val;
1719 >                            else if ((val = mappingFunction.apply(key)) != null) {
1720 >                                added = true;
1721 >                                t.putTreeVal(h, key, val);
1722 >                            }
1723 >                        }
1724 >                        else if (f instanceof ReservationNode)
1725 >                            throw new IllegalStateException("Recursive update");
1726 >                    }
1727 >                }
1728 >                if (binCount != 0) {
1729 >                    if (binCount >= TREEIFY_THRESHOLD)
1730 >                        treeifyBin(tab, i);
1731 >                    if (!added)
1732 >                        return val;
1733 >                    break;
1734 >                }
1735              }
1736          }
1737 <        return overflow ? Integer.MAX_VALUE : size;
1737 >        if (val != null)
1738 >            addCount(1L, binCount);
1739 >        return val;
1740      }
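A minimal computeIfAbsent sketch (illustrative only; the registry, topic, and user names are invented): each key's queue is created at most once even under contention, so callers never clobber each other's containers.

    import java.util.Queue;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentLinkedQueue;

    public class ComputeIfAbsentDemo {
        static final ConcurrentHashMap<String, Queue<String>> subscribers =
            new ConcurrentHashMap<>();

        static void subscribe(String topic, String user) {
            // The mapping function runs at most once per key; it should be short
            // and must not touch other mappings of this map.
            subscribers.computeIfAbsent(topic, t -> new ConcurrentLinkedQueue<>())
                       .add(user);
        }

        public static void main(String[] args) {
            subscribe("jsr166", "alice");
            subscribe("jsr166", "bob");
            System.out.println(subscribers.get("jsr166")); // [alice, bob]
        }
    }
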
1741  
1742      /**
1743 <     * Returns the value to which the specified key is mapped,
1744 <     * or {@code null} if this map contains no mapping for the key.
1743 >     * If the value for the specified key is present, attempts to
1744 >     * compute a new mapping given the key and its current mapped
1745 >     * value.  The entire method invocation is performed atomically.
1746 >     * Some attempted update operations on this map by other threads
1747 >     * may be blocked while computation is in progress, so the
1748 >     * computation should be short and simple, and must not attempt to
1749 >     * update any other mappings of this map.
1750       *
1751 <     * <p>More formally, if this map contains a mapping from a key
1752 <     * {@code k} to a value {@code v} such that {@code key.equals(k)},
1753 <     * then this method returns {@code v}; otherwise it returns
1754 <     * {@code null}.  (There can be at most one such mapping.)
1755 <     *
1756 <     * @throws NullPointerException if the specified key is null
1751 >     * @param key key with which a value may be associated
1752 >     * @param remappingFunction the function to compute a value
1753 >     * @return the new value associated with the specified key, or null if none
1754 >     * @throws NullPointerException if the specified key or remappingFunction
1755 >     *         is null
1756 >     * @throws IllegalStateException if the computation detectably
1757 >     *         attempts a recursive update to this map that would
1758 >     *         otherwise never complete
1759 >     * @throws RuntimeException or Error if the remappingFunction does so,
1760 >     *         in which case the mapping is unchanged
1761       */
1762 <    public V get(Object key) {
1763 <        Segment<K,V> s; // manually integrate access methods to reduce overhead
1764 <        HashEntry<K,V>[] tab;
1765 <        int h = hash(key.hashCode());
1766 <        long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
1767 <        if ((s = (Segment<K,V>)UNSAFE.getObjectVolatile(segments, u)) != null &&
1768 <            (tab = s.table) != null) {
1769 <            for (HashEntry<K,V> e = (HashEntry<K,V>) UNSAFE.getObjectVolatile
1770 <                     (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
1771 <                 e != null; e = e.next) {
1772 <                K k;
1773 <                if ((k = e.key) == key || (e.hash == h && key.equals(k)))
1774 <                    return e.value;
1762 >    public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
1763 >        if (key == null || remappingFunction == null)
1764 >            throw new NullPointerException();
1765 >        int h = spread(key.hashCode());
1766 >        V val = null;
1767 >        int delta = 0;
1768 >        int binCount = 0;
1769 >        for (Node<K,V>[] tab = table;;) {
1770 >            Node<K,V> f; int n, i, fh;
1771 >            if (tab == null || (n = tab.length) == 0)
1772 >                tab = initTable();
1773 >            else if ((f = tabAt(tab, i = (n - 1) & h)) == null)
1774 >                break;
1775 >            else if ((fh = f.hash) == MOVED)
1776 >                tab = helpTransfer(tab, f);
1777 >            else {
1778 >                synchronized (f) {
1779 >                    if (tabAt(tab, i) == f) {
1780 >                        if (fh >= 0) {
1781 >                            binCount = 1;
1782 >                            for (Node<K,V> e = f, pred = null;; ++binCount) {
1783 >                                K ek;
1784 >                                if (e.hash == h &&
1785 >                                    ((ek = e.key) == key ||
1786 >                                     (ek != null && key.equals(ek)))) {
1787 >                                    val = remappingFunction.apply(key, e.val);
1788 >                                    if (val != null)
1789 >                                        e.val = val;
1790 >                                    else {
1791 >                                        delta = -1;
1792 >                                        Node<K,V> en = e.next;
1793 >                                        if (pred != null)
1794 >                                            pred.next = en;
1795 >                                        else
1796 >                                            setTabAt(tab, i, en);
1797 >                                    }
1798 >                                    break;
1799 >                                }
1800 >                                pred = e;
1801 >                                if ((e = e.next) == null)
1802 >                                    break;
1803 >                            }
1804 >                        }
1805 >                        else if (f instanceof TreeBin) {
1806 >                            binCount = 2;
1807 >                            TreeBin<K,V> t = (TreeBin<K,V>)f;
1808 >                            TreeNode<K,V> r, p;
1809 >                            if ((r = t.root) != null &&
1810 >                                (p = r.findTreeNode(h, key, null)) != null) {
1811 >                                val = remappingFunction.apply(key, p.val);
1812 >                                if (val != null)
1813 >                                    p.val = val;
1814 >                                else {
1815 >                                    delta = -1;
1816 >                                    if (t.removeTreeNode(p))
1817 >                                        setTabAt(tab, i, untreeify(t.first));
1818 >                                }
1819 >                            }
1820 >                        }
1821 >                        else if (f instanceof ReservationNode)
1822 >                            throw new IllegalStateException("Recursive update");
1823 >                    }
1824 >                }
1825 >                if (binCount != 0)
1826 >                    break;
1827              }
1828          }
1829 <        return null;
1829 >        if (delta != 0)
1830 >            addCount((long)delta, binCount);
1831 >        return val;
1832      }
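A computeIfPresent sketch (illustrative only; the names are invented): atomically decrement a reference count and remove the entry when it reaches zero, since returning null from the function removes the mapping.

    import java.util.concurrent.ConcurrentHashMap;

    public class ComputeIfPresentDemo {
        static final ConcurrentHashMap<String, Integer> refCounts = new ConcurrentHashMap<>();

        static void release(String key) {
            // Absent keys are left untouched; returning null removes the entry.
            refCounts.computeIfPresent(key, (k, v) -> (v > 1) ? v - 1 : null);
        }

        public static void main(String[] args) {
            refCounts.put("conn", 2);
            release("conn");
            System.out.println(refCounts.get("conn"));         // 1
            release("conn");
            System.out.println(refCounts.containsKey("conn")); // false
        }
    }
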
1833  
1834      /**
1835 <     * Tests if the specified object is a key in this table.
1835 >     * Attempts to compute a mapping for the specified key and its
1836 >     * current mapped value (or {@code null} if there is no current
1837 >     * mapping). The entire method invocation is performed atomically.
1838 >     * Some attempted update operations on this map by other threads
1839 >     * may be blocked while computation is in progress, so the
1840 >     * computation should be short and simple, and must not attempt to
1841 >     * update any other mappings of this Map.
1842       *
1843 <     * @param  key   possible key
1844 <     * @return <tt>true</tt> if and only if the specified object
1845 <     *         is a key in this table, as determined by the
1846 <     *         <tt>equals</tt> method; <tt>false</tt> otherwise.
1847 <     * @throws NullPointerException if the specified key is null
1848 <     */
1849 <    @SuppressWarnings("unchecked")
1850 <    public boolean containsKey(Object key) {
1851 <        Segment<K,V> s; // same as get() except no need for volatile value read
1852 <        HashEntry<K,V>[] tab;
1853 <        int h = hash(key.hashCode());
1854 <        long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
1855 <        if ((s = (Segment<K,V>)UNSAFE.getObjectVolatile(segments, u)) != null &&
1856 <            (tab = s.table) != null) {
1857 <            for (HashEntry<K,V> e = (HashEntry<K,V>) UNSAFE.getObjectVolatile
1858 <                     (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
1859 <                 e != null; e = e.next) {
1860 <                K k;
1861 <                if ((k = e.key) == key || (e.hash == h && key.equals(k)))
1862 <                    return true;
1843 >     * @param key key with which the specified value is to be associated
1844 >     * @param remappingFunction the function to compute a value
1845 >     * @return the new value associated with the specified key, or null if none
1846 >     * @throws NullPointerException if the specified key or remappingFunction
1847 >     *         is null
1848 >     * @throws IllegalStateException if the computation detectably
1849 >     *         attempts a recursive update to this map that would
1850 >     *         otherwise never complete
1851 >     * @throws RuntimeException or Error if the remappingFunction does so,
1852 >     *         in which case the mapping is unchanged
1853 >     */
1854 >    public V compute(K key,
1855 >                     BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
1856 >        if (key == null || remappingFunction == null)
1857 >            throw new NullPointerException();
1858 >        int h = spread(key.hashCode());
1859 >        V val = null;
1860 >        int delta = 0;
1861 >        int binCount = 0;
1862 >        for (Node<K,V>[] tab = table;;) {
1863 >            Node<K,V> f; int n, i, fh;
1864 >            if (tab == null || (n = tab.length) == 0)
1865 >                tab = initTable();
1866 >            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1867 >                Node<K,V> r = new ReservationNode<K,V>();
1868 >                synchronized (r) {
1869 >                    if (casTabAt(tab, i, null, r)) {
1870 >                        binCount = 1;
1871 >                        Node<K,V> node = null;
1872 >                        try {
1873 >                            if ((val = remappingFunction.apply(key, null)) != null) {
1874 >                                delta = 1;
1875 >                                node = new Node<K,V>(h, key, val, null);
1876 >                            }
1877 >                        } finally {
1878 >                            setTabAt(tab, i, node);
1879 >                        }
1880 >                    }
1881 >                }
1882 >                if (binCount != 0)
1883 >                    break;
1884 >            }
1885 >            else if ((fh = f.hash) == MOVED)
1886 >                tab = helpTransfer(tab, f);
1887 >            else {
1888 >                synchronized (f) {
1889 >                    if (tabAt(tab, i) == f) {
1890 >                        if (fh >= 0) {
1891 >                            binCount = 1;
1892 >                            for (Node<K,V> e = f, pred = null;; ++binCount) {
1893 >                                K ek;
1894 >                                if (e.hash == h &&
1895 >                                    ((ek = e.key) == key ||
1896 >                                     (ek != null && key.equals(ek)))) {
1897 >                                    val = remappingFunction.apply(key, e.val);
1898 >                                    if (val != null)
1899 >                                        e.val = val;
1900 >                                    else {
1901 >                                        delta = -1;
1902 >                                        Node<K,V> en = e.next;
1903 >                                        if (pred != null)
1904 >                                            pred.next = en;
1905 >                                        else
1906 >                                            setTabAt(tab, i, en);
1907 >                                    }
1908 >                                    break;
1909 >                                }
1910 >                                pred = e;
1911 >                                if ((e = e.next) == null) {
1912 >                                    val = remappingFunction.apply(key, null);
1913 >                                    if (val != null) {
1914 >                                        if (pred.next != null)
1915 >                                            throw new IllegalStateException("Recursive update");
1916 >                                        delta = 1;
1917 >                                        pred.next =
1918 >                                            new Node<K,V>(h, key, val, null);
1919 >                                    }
1920 >                                    break;
1921 >                                }
1922 >                            }
1923 >                        }
1924 >                        else if (f instanceof TreeBin) {
1925 >                            binCount = 1;
1926 >                            TreeBin<K,V> t = (TreeBin<K,V>)f;
1927 >                            TreeNode<K,V> r, p;
1928 >                            if ((r = t.root) != null)
1929 >                                p = r.findTreeNode(h, key, null);
1930 >                            else
1931 >                                p = null;
1932 >                            V pv = (p == null) ? null : p.val;
1933 >                            val = remappingFunction.apply(key, pv);
1934 >                            if (val != null) {
1935 >                                if (p != null)
1936 >                                    p.val = val;
1937 >                                else {
1938 >                                    delta = 1;
1939 >                                    t.putTreeVal(h, key, val);
1940 >                                }
1941 >                            }
1942 >                            else if (p != null) {
1943 >                                delta = -1;
1944 >                                if (t.removeTreeNode(p))
1945 >                                    setTabAt(tab, i, untreeify(t.first));
1946 >                            }
1947 >                        }
1948 >                        else if (f instanceof ReservationNode)
1949 >                            throw new IllegalStateException("Recursive update");
1950 >                    }
1951 >                }
1952 >                if (binCount != 0) {
1953 >                    if (binCount >= TREEIFY_THRESHOLD)
1954 >                        treeifyBin(tab, i);
1955 >                    break;
1956 >                }
1957              }
1958          }
1959 <        return false;
1959 >        if (delta != 0)
1960 >            addCount((long)delta, binCount);
1961 >        return val;
1962      }
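A compute sketch (illustrative only): unlike computeIfAbsent and computeIfPresent, the function is always invoked, with null standing in for a missing value, so one call covers both insertion and update atomically.

    import java.util.concurrent.ConcurrentHashMap;

    public class ComputeDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Long> hits = new ConcurrentHashMap<>();

            // One atomic call covers both "insert 1" and "increment existing".
            hits.compute("home", (k, v) -> (v == null) ? 1L : v + 1L);
            hits.compute("home", (k, v) -> (v == null) ? 1L : v + 1L);

            System.out.println(hits.get("home")); // 2
        }
    }
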
1963  
1964      /**
1965 <     * Returns <tt>true</tt> if this map maps one or more keys to the
1966 <     * specified value. Note: This method requires a full internal
1967 <     * traversal of the hash table, and so is much slower than
1968 <     * method <tt>containsKey</tt>.
1965 >     * If the specified key is not already associated with a
1966 >     * (non-null) value, associates it with the given value.
1967 >     * Otherwise, replaces the value with the results of the given
1968 >     * remapping function, or removes if {@code null}. The entire
1969 >     * method invocation is performed atomically.  Some attempted
1970 >     * update operations on this map by other threads may be blocked
1971 >     * while computation is in progress, so the computation should be
1972 >     * short and simple, and must not attempt to update any other
1973 >     * mappings of this Map.
1974       *
1975 <     * @param value value whose presence in this map is to be tested
1976 <     * @return <tt>true</tt> if this map maps one or more keys to the
1977 <     *         specified value
1978 <     * @throws NullPointerException if the specified value is null
1975 >     * @param key key with which the specified value is to be associated
1976 >     * @param value the value to use if absent
1977 >     * @param remappingFunction the function to recompute a value if present
1978 >     * @return the new value associated with the specified key, or null if none
1979 >     * @throws NullPointerException if the specified key or the
1980 >     *         remappingFunction is null
1981 >     * @throws RuntimeException or Error if the remappingFunction does so,
1982 >     *         in which case the mapping is unchanged
1983       */
1984 <    public boolean containsValue(Object value) {
1985 <        // Same idea as size()
949 <        if (value == null)
1984 >    public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
1985 >        if (key == null || value == null || remappingFunction == null)
1986              throw new NullPointerException();
1987 <        final Segment<K,V>[] segments = this.segments;
1988 <        boolean found = false;
1989 <        long last = 0L;   // previous sum
1990 <        int retries = -1;
1991 <        try {
1992 <            outer: for (;;) {
1993 <                if (retries++ == RETRIES_BEFORE_LOCK) {
1994 <                    for (int j = 0; j < segments.length; ++j)
1995 <                        ensureSegment(j).lock(); // force creation
1996 <                }
1997 <                long sum = 0L;
1998 <                for (int j = 0; j < segments.length; ++j) {
1999 <                    HashEntry<K,V>[] tab;
2000 <                    Segment<K,V> seg = segmentAt(segments, j);
2001 <                    if (seg != null && (tab = seg.table) != null) {
2002 <                        for (int i = 0 ; i < tab.length; i++) {
2003 <                            HashEntry<K,V> e;
2004 <                            for (e = entryAt(tab, i); e != null; e = e.next) {
2005 <                                V v = e.value;
2006 <                                if (v != null && value.equals(v)) {
2007 <                                    found = true;
2008 <                                    break outer;
1987 >        int h = spread(key.hashCode());
1988 >        V val = null;
1989 >        int delta = 0;
1990 >        int binCount = 0;
1991 >        for (Node<K,V>[] tab = table;;) {
1992 >            Node<K,V> f; int n, i, fh;
1993 >            if (tab == null || (n = tab.length) == 0)
1994 >                tab = initTable();
1995 >            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1996 >                if (casTabAt(tab, i, null, new Node<K,V>(h, key, value, null))) {
1997 >                    delta = 1;
1998 >                    val = value;
1999 >                    break;
2000 >                }
2001 >            }
2002 >            else if ((fh = f.hash) == MOVED)
2003 >                tab = helpTransfer(tab, f);
2004 >            else {
2005 >                synchronized (f) {
2006 >                    if (tabAt(tab, i) == f) {
2007 >                        if (fh >= 0) {
2008 >                            binCount = 1;
2009 >                            for (Node<K,V> e = f, pred = null;; ++binCount) {
2010 >                                K ek;
2011 >                                if (e.hash == h &&
2012 >                                    ((ek = e.key) == key ||
2013 >                                     (ek != null && key.equals(ek)))) {
2014 >                                    val = remappingFunction.apply(e.val, value);
2015 >                                    if (val != null)
2016 >                                        e.val = val;
2017 >                                    else {
2018 >                                        delta = -1;
2019 >                                        Node<K,V> en = e.next;
2020 >                                        if (pred != null)
2021 >                                            pred.next = en;
2022 >                                        else
2023 >                                            setTabAt(tab, i, en);
2024 >                                    }
2025 >                                    break;
2026 >                                }
2027 >                                pred = e;
2028 >                                if ((e = e.next) == null) {
2029 >                                    delta = 1;
2030 >                                    val = value;
2031 >                                    pred.next =
2032 >                                        new Node<K,V>(h, key, val, null);
2033 >                                    break;
2034                                  }
2035                              }
2036                          }
2037 <                        sum += seg.modCount;
2037 >                        else if (f instanceof TreeBin) {
2038 >                            binCount = 2;
2039 >                            TreeBin<K,V> t = (TreeBin<K,V>)f;
2040 >                            TreeNode<K,V> r = t.root;
2041 >                            TreeNode<K,V> p = (r == null) ? null :
2042 >                                r.findTreeNode(h, key, null);
2043 >                            val = (p == null) ? value :
2044 >                                remappingFunction.apply(p.val, value);
2045 >                            if (val != null) {
2046 >                                if (p != null)
2047 >                                    p.val = val;
2048 >                                else {
2049 >                                    delta = 1;
2050 >                                    t.putTreeVal(h, key, val);
2051 >                                }
2052 >                            }
2053 >                            else if (p != null) {
2054 >                                delta = -1;
2055 >                                if (t.removeTreeNode(p))
2056 >                                    setTabAt(tab, i, untreeify(t.first));
2057 >                            }
2058 >                        }
2059 >                        else if (f instanceof ReservationNode)
2060 >                            throw new IllegalStateException("Recursive update");
2061                      }
2062                  }
2063 <                if (retries > 0 && sum == last)
2063 >                if (binCount != 0) {
2064 >                    if (binCount >= TREEIFY_THRESHOLD)
2065 >                        treeifyBin(tab, i);
2066                      break;
2067 <                last = sum;
982 <            }
983 <        } finally {
984 <            if (retries > RETRIES_BEFORE_LOCK) {
985 <                for (int j = 0; j < segments.length; ++j)
986 <                    segmentAt(segments, j).unlock();
2067 >                }
2068              }
2069          }
2070 <        return found;
2070 >        if (delta != 0)
2071 >            addCount((long)delta, binCount);
2072 >        return val;
2073      }
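A merge sketch (illustrative only) building a word-count histogram: for an absent key the given value is installed, otherwise the remapping function combines the old and new values.

    import java.util.concurrent.ConcurrentHashMap;

    public class MergeDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Integer> counts = new ConcurrentHashMap<>();
            for (String w : new String[] {"put", "get", "put"})
                counts.merge(w, 1, Integer::sum); // insert 1, or add 1 to the existing count

            System.out.println(counts.get("put")); // 2
            System.out.println(counts.get("get")); // 1
        }
    }
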
2074  
2075 +    // Hashtable legacy methods
2076 +
2077      /**
2078 <     * Legacy method testing if some key maps into the specified value
2079 <     * in this table.  This method is identical in functionality to
2080 <     * {@link #containsValue}, and exists solely to ensure
2078 >     * Tests if some key maps into the specified value in this table.
2079 >     *
2080 >     * <p>Note that this method is identical in functionality to
2081 >     * {@link #containsValue(Object)}, and exists solely to ensure
2082       * full compatibility with class {@link java.util.Hashtable},
2083 <     * which supported this method prior to introduction of the
2084 <     * Java Collections framework.
2083 >     * which supported this method prior to introduction of the Java
2084 >     * Collections Framework.
2085       *
2086       * @param  value a value to search for
2087 <     * @return <tt>true</tt> if and only if some key maps to the
2088 <     *         <tt>value</tt> argument in this table as
2089 <     *         determined by the <tt>equals</tt> method;
2090 <     *         <tt>false</tt> otherwise
2087 >     * @return {@code true} if and only if some key maps to the
2088 >     *         {@code value} argument in this table as
2089 >     *         determined by the {@code equals} method;
2090 >     *         {@code false} otherwise
2091       * @throws NullPointerException if the specified value is null
2092       */
2093      public boolean contains(Object value) {
# Line 1009 | Line 2095 | public class ConcurrentHashMap<K, V> ext
2095      }
2096  
2097      /**
2098 <     * Maps the specified key to the specified value in this table.
1013 <     * Neither the key nor the value can be null.
1014 <     *
1015 <     * <p> The value can be retrieved by calling the <tt>get</tt> method
1016 <     * with a key that is equal to the original key.
2098 >     * Returns an enumeration of the keys in this table.
2099       *
2100 <     * @param key key with which the specified value is to be associated
2101 <     * @param value value to be associated with the specified key
1020 <     * @return the previous value associated with <tt>key</tt>, or
1021 <     *         <tt>null</tt> if there was no mapping for <tt>key</tt>
1022 <     * @throws NullPointerException if the specified key or value is null
2100 >     * @return an enumeration of the keys in this table
2101 >     * @see #keySet()
2102       */
2103 <    @SuppressWarnings("unchecked")
2104 <    public V put(K key, V value) {
2105 <        Segment<K,V> s;
2106 <        if (value == null)
1028 <            throw new NullPointerException();
1029 <        int hash = hash(key.hashCode());
1030 <        int j = (hash >>> segmentShift) & segmentMask;
1031 <        if ((s = (Segment<K,V>)UNSAFE.getObject          // nonvolatile; recheck
1032 <             (segments, (j << SSHIFT) + SBASE)) == null) //  in ensureSegment
1033 <            s = ensureSegment(j);
1034 <        return s.put(key, hash, value, false);
2103 >    public Enumeration<K> keys() {
2104 >        Node<K,V>[] t;
2105 >        int f = (t = table) == null ? 0 : t.length;
2106 >        return new KeyIterator<K,V>(t, f, 0, f, this);
2107      }
2108  
2109      /**
2110 <     * {@inheritDoc}
2110 >     * Returns an enumeration of the values in this table.
2111       *
2112 <     * @return the previous value associated with the specified key,
2113 <     *         or <tt>null</tt> if there was no mapping for the key
1042 <     * @throws NullPointerException if the specified key or value is null
2112 >     * @return an enumeration of the values in this table
2113 >     * @see #values()
2114       */
2115 <    @SuppressWarnings("unchecked")
2116 <    public V putIfAbsent(K key, V value) {
2117 <        Segment<K,V> s;
2118 <        if (value == null)
1048 <            throw new NullPointerException();
1049 <        int hash = hash(key.hashCode());
1050 <        int j = (hash >>> segmentShift) & segmentMask;
1051 <        if ((s = (Segment<K,V>)UNSAFE.getObject
1052 <             (segments, (j << SSHIFT) + SBASE)) == null)
1053 <            s = ensureSegment(j);
1054 <        return s.put(key, hash, value, true);
2115 >    public Enumeration<V> elements() {
2116 >        Node<K,V>[] t;
2117 >        int f = (t = table) == null ? 0 : t.length;
2118 >        return new ValueIterator<K,V>(t, f, 0, f, this);
2119      }
2120  
2121 +    // ConcurrentHashMap-only methods
2122 +
2123      /**
2124 <     * Copies all of the mappings from the specified map to this one.
2125 <     * These mappings replace any mappings that this map had for any of the
2126 <     * keys currently in the specified map.
2124 >     * Returns the number of mappings. This method should be used
2125 >     * instead of {@link #size} because a ConcurrentHashMap may
2126 >     * contain more mappings than can be represented as an int. The
2127 >     * value returned is an estimate; the actual count may differ if
2128 >     * there are concurrent insertions or removals.
2129       *
2130 <     * @param m mappings to be stored in this map
2130 >     * @return the number of mappings
2131 >     * @since 1.8
2132       */
2133 <    public void putAll(Map<? extends K, ? extends V> m) {
2134 <        for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
2135 <            put(e.getKey(), e.getValue());
2133 >    public long mappingCount() {
2134 >        long n = sumCount();
2135 >        return (n < 0L) ? 0L : n; // ignore transient negative values
2136      }
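A mappingCount sketch (illustrative only): the result is a long and, like size(), is only an estimate while other threads are updating the map.

    import java.util.concurrent.ConcurrentHashMap;

    public class MappingCountDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<Integer, Integer> map = new ConcurrentHashMap<>();
            for (int i = 0; i < 1000; i++)
                map.put(i, i);

            long n = map.mappingCount(); // preferred over size(): not capped at Integer.MAX_VALUE
            System.out.println(n);       // 1000
        }
    }
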
2137  
2138      /**
2139 <     * Removes the key (and its corresponding value) from this map.
2140 <     * This method does nothing if the key is not in the map.
2139 >     * Creates a new {@link Set} backed by a ConcurrentHashMap
2140 >     * from the given type to {@code Boolean.TRUE}.
2141       *
2142 <     * @param  key the key that needs to be removed
2143 <     * @return the previous value associated with <tt>key</tt>, or
2144 <     *         <tt>null</tt> if there was no mapping for <tt>key</tt>
1076 <     * @throws NullPointerException if the specified key is null
2142 >     * @param <K> the element type of the returned set
2143 >     * @return the new set
2144 >     * @since 1.8
2145       */
2146 <    public V remove(Object key) {
2147 <        int hash = hash(key.hashCode());
2148 <        Segment<K,V> s = segmentForHash(hash);
1081 <        return s == null ? null : s.remove(key, hash, null);
2146 >    public static <K> KeySetView<K,Boolean> newKeySet() {
2147 >        return new KeySetView<K,Boolean>
2148 >            (new ConcurrentHashMap<K,Boolean>(), Boolean.TRUE);
2149      }
2150  
2151      /**
2152 <     * {@inheritDoc}
2152 >     * Creates a new {@link Set} backed by a ConcurrentHashMap
2153 >     * from the given type to {@code Boolean.TRUE}.
2154       *
2155 <     * @throws NullPointerException if the specified key is null
2155 >     * @param initialCapacity The implementation performs internal
2156 >     * sizing to accommodate this many elements.
2157 >     * @param <K> the element type of the returned set
2158 >     * @return the new set
2159 >     * @throws IllegalArgumentException if the initial capacity of
2160 >     * elements is negative
2161 >     * @since 1.8
2162       */
2163 <    public boolean remove(Object key, Object value) {
2164 <        int hash = hash(key.hashCode());
2165 <        Segment<K,V> s;
1092 <        return value != null && (s = segmentForHash(hash)) != null &&
1093 <            s.remove(key, hash, value) != null;
2163 >    public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
2164 >        return new KeySetView<K,Boolean>
2165 >            (new ConcurrentHashMap<K,Boolean>(initialCapacity), Boolean.TRUE);
2166      }
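A newKeySet sketch (illustrative only): the returned Set is backed by a ConcurrentHashMap mapping every element to Boolean.TRUE, and the int overload merely presizes that backing map.

    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    public class NewKeySetDemo {
        public static void main(String[] args) {
            Set<String> seen = ConcurrentHashMap.newKeySet(64); // presized backing table
            System.out.println(seen.add("a"));      // true: newly added
            System.out.println(seen.add("a"));      // false: already present
            System.out.println(seen.contains("a")); // true
        }
    }
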
2167  
2168      /**
2169 <     * {@inheritDoc}
2169 >     * Returns a {@link Set} view of the keys in this map, using the
2170 >     * given common mapped value for any additions (i.e., {@link
2171 >     * Collection#add} and {@link Collection#addAll(Collection)}).
2172 >     * This is of course only appropriate if it is acceptable to use
2173 >     * the same value for all additions from this view.
2174       *
2175 <     * @throws NullPointerException if any of the arguments are null
2175 >     * @param mappedValue the mapped value to use for any additions
2176 >     * @return the set view
2177 >     * @throws NullPointerException if the mappedValue is null
2178       */
2179 <    public boolean replace(K key, V oldValue, V newValue) {
2180 <        int hash = hash(key.hashCode());
1103 <        if (oldValue == null || newValue == null)
2179 >    public KeySetView<K,V> keySet(V mappedValue) {
2180 >        if (mappedValue == null)
2181              throw new NullPointerException();
2182 <        Segment<K,V> s = segmentForHash(hash);
1106 <        return s != null && s.replace(key, hash, oldValue, newValue);
2182 >        return new KeySetView<K,V>(this, mappedValue);
2183      }
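A keySet(mappedValue) sketch (illustrative only): additions through the view insert the common mapped value into the underlying map.

    import java.util.concurrent.ConcurrentHashMap;

    public class KeySetViewDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
            ConcurrentHashMap.KeySetView<String, Integer> keys = map.keySet(0);
            keys.add("x");                    // inserts the mapping x -> 0
            keys.add("y");                    // inserts the mapping y -> 0
            System.out.println(map.get("x")); // 0
            System.out.println(map.get("y")); // 0
        }
    }
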
2184  
2185 +    /* ---------------- Special Nodes -------------- */
2186 +
2187      /**
2188 <     * {@inheritDoc}
1111 <     *
1112 <     * @return the previous value associated with the specified key,
1113 <     *         or <tt>null</tt> if there was no mapping for the key
1114 <     * @throws NullPointerException if the specified key or value is null
2188 >     * A node inserted at head of bins during transfer operations.
2189       */
2190 <    public V replace(K key, V value) {
2191 <        int hash = hash(key.hashCode());
2192 <        if (value == null)
2193 <            throw new NullPointerException();
2194 <        Segment<K,V> s = segmentForHash(hash);
2195 <        return s == null ? null : s.replace(key, hash, value);
2190 >    static final class ForwardingNode<K,V> extends Node<K,V> {
2191 >        final Node<K,V>[] nextTable;
2192 >        ForwardingNode(Node<K,V>[] tab) {
2193 >            super(MOVED, null, null, null);
2194 >            this.nextTable = tab;
2195 >        }
2196 >
2197 >        Node<K,V> find(int h, Object k) {
2198 >            // loop to avoid arbitrarily deep recursion on forwarding nodes
2199 >            outer: for (Node<K,V>[] tab = nextTable;;) {
2200 >                Node<K,V> e; int n;
2201 >                if (k == null || tab == null || (n = tab.length) == 0 ||
2202 >                    (e = tabAt(tab, (n - 1) & h)) == null)
2203 >                    return null;
2204 >                for (;;) {
2205 >                    int eh; K ek;
2206 >                    if ((eh = e.hash) == h &&
2207 >                        ((ek = e.key) == k || (ek != null && k.equals(ek))))
2208 >                        return e;
2209 >                    if (eh < 0) {
2210 >                        if (e instanceof ForwardingNode) {
2211 >                            tab = ((ForwardingNode<K,V>)e).nextTable;
2212 >                            continue outer;
2213 >                        }
2214 >                        else
2215 >                            return e.find(h, k);
2216 >                    }
2217 >                    if ((e = e.next) == null)
2218 >                        return null;
2219 >                }
2220 >            }
2221 >        }
2222      }
2223  
2224      /**
2225 <     * Removes all of the mappings from this map.
2225 >     * A place-holder node used in computeIfAbsent and compute
2226       */
2227 <    public void clear() {
2228 <        final Segment<K,V>[] segments = this.segments;
2229 <        for (int j = 0; j < segments.length; ++j) {
2230 <            Segment<K,V> s = segmentAt(segments, j);
2231 <            if (s != null)
2232 <                s.clear();
2227 >    static final class ReservationNode<K,V> extends Node<K,V> {
2228 >        ReservationNode() {
2229 >            super(RESERVED, null, null, null);
2230 >        }
2231 >
2232 >        Node<K,V> find(int h, Object k) {
2233 >            return null;
2234          }
2235      }
2236  
2237 +    /* ---------------- Table Initialization and Resizing -------------- */
2238 +
2239      /**
2240 <     * Returns a {@link Set} view of the keys contained in this map.
2241 <     * The set is backed by the map, so changes to the map are
2242 <     * reflected in the set, and vice-versa.  The set supports element
2243 <     * removal, which removes the corresponding mapping from this map,
2244 <     * via the <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
1142 <     * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
1143 <     * operations.  It does not support the <tt>add</tt> or
1144 <     * <tt>addAll</tt> operations.
1145 <     *
1146 <     * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
1147 <     * that will never throw {@link ConcurrentModificationException},
1148 <     * and guarantees to traverse elements as they existed upon
1149 <     * construction of the iterator, and may (but is not guaranteed to)
1150 <     * reflect any modifications subsequent to construction.
1151 <     */
1152 <    public Set<K> keySet() {
1153 <        Set<K> ks = keySet;
1154 <        return (ks != null) ? ks : (keySet = new KeySet());
2240 >     * Returns the stamp bits for resizing a table of size n.
2241 >     * Must be negative when shifted left by RESIZE_STAMP_SHIFT.
2242 >     */
2243 >    static final int resizeStamp(int n) {
2244 >        return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
2245      }
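A worked example of the stamp (illustrative only, assuming RESIZE_STAMP_BITS is 16 and RESIZE_STAMP_SHIFT is 32 - RESIZE_STAMP_BITS, as defined elsewhere in this file):

    public class ResizeStampDemo {
        // Assumed values of the constants defined elsewhere in ConcurrentHashMap.
        static final int RESIZE_STAMP_BITS = 16;
        static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;

        static int resizeStamp(int n) {
            return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
        }

        public static void main(String[] args) {
            int stamp = resizeStamp(16);                    // 27 | 0x8000 = 0x801b
            System.out.println(Integer.toHexString(stamp)); // 801b
            // Shifted into the high half of sizeCtl the value is negative,
            // which is what marks the table as "resizing".
            System.out.println((stamp << RESIZE_STAMP_SHIFT) < 0); // true
        }
    }
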
2246  
2247      /**
2248 <     * Returns a {@link Collection} view of the values contained in this map.
1159 <     * The collection is backed by the map, so changes to the map are
1160 <     * reflected in the collection, and vice-versa.  The collection
1161 <     * supports element removal, which removes the corresponding
1162 <     * mapping from this map, via the <tt>Iterator.remove</tt>,
1163 <     * <tt>Collection.remove</tt>, <tt>removeAll</tt>,
1164 <     * <tt>retainAll</tt>, and <tt>clear</tt> operations.  It does not
1165 <     * support the <tt>add</tt> or <tt>addAll</tt> operations.
1166 <     *
1167 <     * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
1168 <     * that will never throw {@link ConcurrentModificationException},
1169 <     * and guarantees to traverse elements as they existed upon
1170 <     * construction of the iterator, and may (but is not guaranteed to)
1171 <     * reflect any modifications subsequent to construction.
2248 >     * Initializes table, using the size recorded in sizeCtl.
2249       */
2250 <    public Collection<V> values() {
2251 <        Collection<V> vs = values;
2252 <        return (vs != null) ? vs : (values = new Values());
2250 >    private final Node<K,V>[] initTable() {
2251 >        Node<K,V>[] tab; int sc;
2252 >        while ((tab = table) == null || tab.length == 0) {
2253 >            if ((sc = sizeCtl) < 0)
2254 >                Thread.yield(); // lost initialization race; just spin
2255 >            else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
2256 >                try {
2257 >                    if ((tab = table) == null || tab.length == 0) {
2258 >                        int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
2259 >                        @SuppressWarnings("unchecked")
2260 >                        Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
2261 >                        table = tab = nt;
2262 >                        sc = n - (n >>> 2);
2263 >                    }
2264 >                } finally {
2265 >                    sizeCtl = sc;
2266 >                }
2267 >                break;
2268 >            }
2269 >        }
2270 >        return tab;
2271      }
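The sc = n - (n >>> 2) stored back into sizeCtl is the next resize threshold, i.e. 0.75 * n. A tiny check (illustrative only, assuming n is the default capacity of 16):

    public class InitThresholdDemo {
        public static void main(String[] args) {
            int n = 16;               // assumed DEFAULT_CAPACITY
            int sc = n - (n >>> 2);   // the expression initTable stores in sizeCtl
            System.out.println(sc);   // 12: resize once three quarters of n is reached
        }
    }
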
2272  
2273      /**
2274 <     * Returns a {@link Set} view of the mappings contained in this map.
2275 <     * The set is backed by the map, so changes to the map are
2276 <     * reflected in the set, and vice-versa.  The set supports element
2277 <     * removal, which removes the corresponding mapping from the map,
2278 <     * via the <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
2279 <     * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
2280 <     * operations.  It does not support the <tt>add</tt> or
2281 <     * <tt>addAll</tt> operations.
1187 <     *
1188 <     * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
1189 <     * that will never throw {@link ConcurrentModificationException},
1190 <     * and guarantees to traverse elements as they existed upon
1191 <     * construction of the iterator, and may (but is not guaranteed to)
1192 <     * reflect any modifications subsequent to construction.
2274 >     * Adds to count, and if table is too small and not already
2275 >     * resizing, initiates transfer. If already resizing, helps
2276 >     * perform transfer if work is available.  Rechecks occupancy
2277 >     * after a transfer to see if another resize is already needed
2278 >     * because resizings are lagging additions.
2279 >     *
2280 >     * @param x the count to add
2281 >     * @param check if <0, don't check resize, if <= 1 only check if uncontended
2282       */
2283 <    public Set<Map.Entry<K,V>> entrySet() {
2284 <        Set<Map.Entry<K,V>> es = entrySet;
2285 <        return (es != null) ? es : (entrySet = new EntrySet());
2283 >    private final void addCount(long x, int check) {
2284 >        CounterCell[] as; long b, s;
2285 >        if ((as = counterCells) != null ||
2286 >            !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
2287 >            CounterCell a; long v; int m;
2288 >            boolean uncontended = true;
2289 >            if (as == null || (m = as.length - 1) < 0 ||
2290 >                (a = as[ThreadLocalRandom.getProbe() & m]) == null ||
2291 >                !(uncontended =
2292 >                  U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
2293 >                fullAddCount(x, uncontended);
2294 >                return;
2295 >            }
2296 >            if (check <= 1)
2297 >                return;
2298 >            s = sumCount();
2299 >        }
2300 >        if (check >= 0) {
2301 >            Node<K,V>[] tab, nt; int n, sc;
2302 >            while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
2303 >                   (n = tab.length) < MAXIMUM_CAPACITY) {
2304 >                int rs = resizeStamp(n);
2305 >                if (sc < 0) {
2306 >                    if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
2307 >                        sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
2308 >                        transferIndex <= 0)
2309 >                        break;
2310 >                    if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
2311 >                        transfer(tab, nt);
2312 >                }
2313 >                else if (U.compareAndSwapInt(this, SIZECTL, sc,
2314 >                                             (rs << RESIZE_STAMP_SHIFT) + 2))
2315 >                    transfer(tab, null);
2316 >                s = sumCount();
2317 >            }
2318 >        }
2319      }
2320  
2321      /**
2322 <     * Returns an enumeration of the keys in this table.
1201 <     *
1202 <     * @return an enumeration of the keys in this table
1203 <     * @see #keySet()
2322 >     * Helps transfer if a resize is in progress.
2323       */
2324 <    public Enumeration<K> keys() {
2325 <        return new KeyIterator();
2324 >    final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
2325 >        Node<K,V>[] nextTab; int sc;
2326 >        if (tab != null && (f instanceof ForwardingNode) &&
2327 >            (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
2328 >            int rs = resizeStamp(tab.length);
2329 >            while (nextTab == nextTable && table == tab &&
2330 >                   (sc = sizeCtl) < 0) {
2331 >                if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
2332 >                    sc == rs + MAX_RESIZERS || transferIndex <= 0)
2333 >                    break;
2334 >                if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) {
2335 >                    transfer(tab, nextTab);
2336 >                    break;
2337 >                }
2338 >            }
2339 >            return nextTab;
2340 >        }
2341 >        return table;
2342      }
2343  
2344      /**
2345 <     * Returns an enumeration of the values in this table.
2345 >     * Tries to presize table to accommodate the given number of elements.
2346       *
2347 <     * @return an enumeration of the values in this table
1213 <     * @see #values()
2347 >     * @param size number of elements (doesn't need to be perfectly accurate)
2348       */
2349 <    public Enumeration<V> elements() {
2350 <        return new ValueIterator();
2349 >    private final void tryPresize(int size) {
2350 >        int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
2351 >            tableSizeFor(size + (size >>> 1) + 1);
2352 >        int sc;
2353 >        while ((sc = sizeCtl) >= 0) {
2354 >            Node<K,V>[] tab = table; int n;
2355 >            if (tab == null || (n = tab.length) == 0) {
2356 >                n = (sc > c) ? sc : c;
2357 >                if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
2358 >                    try {
2359 >                        if (table == tab) {
2360 >                            @SuppressWarnings("unchecked")
2361 >                            Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
2362 >                            table = nt;
2363 >                            sc = n - (n >>> 2);
2364 >                        }
2365 >                    } finally {
2366 >                        sizeCtl = sc;
2367 >                    }
2368 >                }
2369 >            }
2370 >            else if (c <= sc || n >= MAXIMUM_CAPACITY)
2371 >                break;
2372 >            else if (tab == table) {
2373 >                int rs = resizeStamp(n);
2374 >                if (U.compareAndSwapInt(this, SIZECTL, sc,
2375 >                                        (rs << RESIZE_STAMP_SHIFT) + 2))
2376 >                    transfer(tab, null);
2377 >            }
2378 >        }
2379      }
2380  
2381 <    /* ---------------- Iterator Support -------------- */
2381 >    /**
2382 >     * Moves and/or copies the nodes in each bin to new table. See
2383 >     * above for explanation.
2384 >     */
2385 >    private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
2386 >        int n = tab.length, stride;
2387 >        if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
2388 >            stride = MIN_TRANSFER_STRIDE; // subdivide range
2389 >        if (nextTab == null) {            // initiating
2390 >            try {
2391 >                @SuppressWarnings("unchecked")
2392 >                Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];
2393 >                nextTab = nt;
2394 >            } catch (Throwable ex) {      // try to cope with OOME
2395 >                sizeCtl = Integer.MAX_VALUE;
2396 >                return;
2397 >            }
2398 >            nextTable = nextTab;
2399 >            transferIndex = n;
2400 >        }
2401 >        int nextn = nextTab.length;
2402 >        ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
2403 >        boolean advance = true;
2404 >        boolean finishing = false; // to ensure sweep before committing nextTab
2405 >        for (int i = 0, bound = 0;;) {
2406 >            Node<K,V> f; int fh;
2407 >            while (advance) {
2408 >                int nextIndex, nextBound;
2409 >                if (--i >= bound || finishing)
2410 >                    advance = false;
2411 >                else if ((nextIndex = transferIndex) <= 0) {
2412 >                    i = -1;
2413 >                    advance = false;
2414 >                }
2415 >                else if (U.compareAndSwapInt
2416 >                         (this, TRANSFERINDEX, nextIndex,
2417 >                          nextBound = (nextIndex > stride ?
2418 >                                       nextIndex - stride : 0))) {
2419 >                    bound = nextBound;
2420 >                    i = nextIndex - 1;
2421 >                    advance = false;
2422 >                }
2423 >            }
2424 >            if (i < 0 || i >= n || i + n >= nextn) {
2425 >                int sc;
2426 >                if (finishing) {
2427 >                    nextTable = null;
2428 >                    table = nextTab;
2429 >                    sizeCtl = (n << 1) - (n >>> 1);
2430 >                    return;
2431 >                }
2432 >                if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
2433 >                    if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
2434 >                        return;
2435 >                    finishing = advance = true;
2436 >                    i = n; // recheck before commit
2437 >                }
2438 >            }
2439 >            else if ((f = tabAt(tab, i)) == null)
2440 >                advance = casTabAt(tab, i, null, fwd);
2441 >            else if ((fh = f.hash) == MOVED)
2442 >                advance = true; // already processed
2443 >            else {
2444 >                synchronized (f) {
2445 >                    if (tabAt(tab, i) == f) {
2446 >                        Node<K,V> ln, hn;
2447 >                        if (fh >= 0) {
2448 >                            int runBit = fh & n;
2449 >                            Node<K,V> lastRun = f;
2450 >                            for (Node<K,V> p = f.next; p != null; p = p.next) {
2451 >                                int b = p.hash & n;
2452 >                                if (b != runBit) {
2453 >                                    runBit = b;
2454 >                                    lastRun = p;
2455 >                                }
2456 >                            }
2457 >                            if (runBit == 0) {
2458 >                                ln = lastRun;
2459 >                                hn = null;
2460 >                            }
2461 >                            else {
2462 >                                hn = lastRun;
2463 >                                ln = null;
2464 >                            }
2465 >                            for (Node<K,V> p = f; p != lastRun; p = p.next) {
2466 >                                int ph = p.hash; K pk = p.key; V pv = p.val;
2467 >                                if ((ph & n) == 0)
2468 >                                    ln = new Node<K,V>(ph, pk, pv, ln);
2469 >                                else
2470 >                                    hn = new Node<K,V>(ph, pk, pv, hn);
2471 >                            }
2472 >                            setTabAt(nextTab, i, ln);
2473 >                            setTabAt(nextTab, i + n, hn);
2474 >                            setTabAt(tab, i, fwd);
2475 >                            advance = true;
2476 >                        }
2477 >                        else if (f instanceof TreeBin) {
2478 >                            TreeBin<K,V> t = (TreeBin<K,V>)f;
2479 >                            TreeNode<K,V> lo = null, loTail = null;
2480 >                            TreeNode<K,V> hi = null, hiTail = null;
2481 >                            int lc = 0, hc = 0;
2482 >                            for (Node<K,V> e = t.first; e != null; e = e.next) {
2483 >                                int h = e.hash;
2484 >                                TreeNode<K,V> p = new TreeNode<K,V>
2485 >                                    (h, e.key, e.val, null, null);
2486 >                                if ((h & n) == 0) {
2487 >                                    if ((p.prev = loTail) == null)
2488 >                                        lo = p;
2489 >                                    else
2490 >                                        loTail.next = p;
2491 >                                    loTail = p;
2492 >                                    ++lc;
2493 >                                }
2494 >                                else {
2495 >                                    if ((p.prev = hiTail) == null)
2496 >                                        hi = p;
2497 >                                    else
2498 >                                        hiTail.next = p;
2499 >                                    hiTail = p;
2500 >                                    ++hc;
2501 >                                }
2502 >                            }
2503 >                            ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
2504 >                                (hc != 0) ? new TreeBin<K,V>(lo) : t;
2505 >                            hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
2506 >                                (lc != 0) ? new TreeBin<K,V>(hi) : t;
2507 >                            setTabAt(nextTab, i, ln);
2508 >                            setTabAt(nextTab, i + n, hn);
2509 >                            setTabAt(tab, i, fwd);
2510 >                            advance = true;
2511 >                        }
2512 >                    }
2513 >                }
2514 >            }
2515 >        }
2516 >    }
2517  
2518 <    abstract class HashIterator {
1222 <        int nextSegmentIndex;
1223 <        int nextTableIndex;
1224 <        HashEntry<K,V>[] currentTable;
1225 <        HashEntry<K, V> nextEntry;
1226 <        HashEntry<K, V> lastReturned;
2518 >    /* ---------------- Counter support -------------- */
2519  
2520 <        HashIterator() {
2521 <            nextSegmentIndex = segments.length - 1;
2522 <            nextTableIndex = -1;
2523 <            advance();
2520 >    /**
2521 >     * A padded cell for distributing counts.  Adapted from LongAdder
2522 >     * and Striped64.  See their internal docs for explanation.
2523 >     */
2524 >    @sun.misc.Contended static final class CounterCell {
2525 >        volatile long value;
2526 >        CounterCell(long x) { value = x; }
2527 >    }
2528 >
2529 >    final long sumCount() {
2530 >        CounterCell[] as = counterCells; CounterCell a;
2531 >        long sum = baseCount;
2532 >        if (as != null) {
2533 >            for (int i = 0; i < as.length; ++i) {
2534 >                if ((a = as[i]) != null)
2535 >                    sum += a.value;
2536 >            }
2537 >        }
2538 >        return sum;
2539 >    }
2540 >
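    // Worked example (illustrative, not part of the original source): the map's
    // element count is baseCount plus the sum of all counter cells. With
    // baseCount == 5 and two cells holding 3 and 4, sumCount() returns 12.
    // Under concurrent updates the result is only a snapshot estimate; size()
    // and mappingCount() are derived from this value.
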
2541 >    // See LongAdder version for explanation
2542 >    private final void fullAddCount(long x, boolean wasUncontended) {
2543 >        int h;
2544 >        if ((h = ThreadLocalRandom.getProbe()) == 0) {
2545 >            ThreadLocalRandom.localInit();      // force initialization
2546 >            h = ThreadLocalRandom.getProbe();
2547 >            wasUncontended = true;
2548 >        }
2549 >        boolean collide = false;                // True if last slot nonempty
2550 >        for (;;) {
2551 >            CounterCell[] as; CounterCell a; int n; long v;
2552 >            if ((as = counterCells) != null && (n = as.length) > 0) {
2553 >                if ((a = as[(n - 1) & h]) == null) {
2554 >                    if (cellsBusy == 0) {            // Try to attach new Cell
2555 >                        CounterCell r = new CounterCell(x); // Optimistic create
2556 >                        if (cellsBusy == 0 &&
2557 >                            U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
2558 >                            boolean created = false;
2559 >                            try {               // Recheck under lock
2560 >                                CounterCell[] rs; int m, j;
2561 >                                if ((rs = counterCells) != null &&
2562 >                                    (m = rs.length) > 0 &&
2563 >                                    rs[j = (m - 1) & h] == null) {
2564 >                                    rs[j] = r;
2565 >                                    created = true;
2566 >                                }
2567 >                            } finally {
2568 >                                cellsBusy = 0;
2569 >                            }
2570 >                            if (created)
2571 >                                break;
2572 >                            continue;           // Slot is now non-empty
2573 >                        }
2574 >                    }
2575 >                    collide = false;
2576 >                }
2577 >                else if (!wasUncontended)       // CAS already known to fail
2578 >                    wasUncontended = true;      // Continue after rehash
2579 >                else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))
2580 >                    break;
2581 >                else if (counterCells != as || n >= NCPU)
2582 >                    collide = false;            // At max size or stale
2583 >                else if (!collide)
2584 >                    collide = true;
2585 >                else if (cellsBusy == 0 &&
2586 >                         U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
2587 >                    try {
2588 >                        if (counterCells == as) {// Expand table unless stale
2589 >                            CounterCell[] rs = new CounterCell[n << 1];
2590 >                            for (int i = 0; i < n; ++i)
2591 >                                rs[i] = as[i];
2592 >                            counterCells = rs;
2593 >                        }
2594 >                    } finally {
2595 >                        cellsBusy = 0;
2596 >                    }
2597 >                    collide = false;
2598 >                    continue;                   // Retry with expanded table
2599 >                }
2600 >                h = ThreadLocalRandom.advanceProbe(h);
2601 >            }
2602 >            else if (cellsBusy == 0 && counterCells == as &&
2603 >                     U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
2604 >                boolean init = false;
2605 >                try {                           // Initialize table
2606 >                    if (counterCells == as) {
2607 >                        CounterCell[] rs = new CounterCell[2];
2608 >                        rs[h & 1] = new CounterCell(x);
2609 >                        counterCells = rs;
2610 >                        init = true;
2611 >                    }
2612 >                } finally {
2613 >                    cellsBusy = 0;
2614 >                }
2615 >                if (init)
2616 >                    break;
2617 >            }
2618 >            else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x))
2619 >                break;                          // Fall back on using base
2620 >        }
2621 >    }
2622 >
2623 >    /* ---------------- Conversion from/to TreeBins -------------- */
2624 >
2625 >    /**
2626 >     * Replaces all linked nodes in bin at given index unless table is
2627 >     * too small, in which case resizes instead.
2628 >     */
2629 >    private final void treeifyBin(Node<K,V>[] tab, int index) {
2630 >        Node<K,V> b; int n;
2631 >        if (tab != null) {
2632 >            if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
2633 >                tryPresize(n << 1);
2634 >            else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
2635 >                synchronized (b) {
2636 >                    if (tabAt(tab, index) == b) {
2637 >                        TreeNode<K,V> hd = null, tl = null;
2638 >                        for (Node<K,V> e = b; e != null; e = e.next) {
2639 >                            TreeNode<K,V> p =
2640 >                                new TreeNode<K,V>(e.hash, e.key, e.val,
2641 >                                                  null, null);
2642 >                            if ((p.prev = tl) == null)
2643 >                                hd = p;
2644 >                            else
2645 >                                tl.next = p;
2646 >                            tl = p;
2647 >                        }
2648 >                        setTabAt(tab, index, new TreeBin<K,V>(hd));
2649 >                    }
2650 >                }
2651 >            }
2652 >        }
2653 >    }
2654 >
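    // Worked example (illustrative, assuming the usual thresholds defined
    // earlier in this file: TREEIFY_THRESHOLD = 8, MIN_TREEIFY_CAPACITY = 64,
    // UNTREEIFY_THRESHOLD = 6): a bin that accumulates 8 nodes while the table
    // still has only 16 slots is not treeified; tryPresize(32) doubles the
    // table instead. Only once the table has at least 64 bins does an overlong
    // bin become a TreeBin. Conversely, when a resize splits a tree bin into
    // pieces of 6 or fewer nodes, untreeify (below) turns them back into a
    // plain linked list.
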
2655 >    /**
2656 >     * Returns a list of non-TreeNodes replacing those in given list.
2657 >     */
2658 >    static <K,V> Node<K,V> untreeify(Node<K,V> b) {
2659 >        Node<K,V> hd = null, tl = null;
2660 >        for (Node<K,V> q = b; q != null; q = q.next) {
2661 >            Node<K,V> p = new Node<K,V>(q.hash, q.key, q.val, null);
2662 >            if (tl == null)
2663 >                hd = p;
2664 >            else
2665 >                tl.next = p;
2666 >            tl = p;
2667 >        }
2668 >        return hd;
2669 >    }
2670 >
2671 >    /* ---------------- TreeNodes -------------- */
2672 >
2673 >    /**
2674 >     * Nodes for use in TreeBins.
2675 >     */
2676 >    static final class TreeNode<K,V> extends Node<K,V> {
2677 >        TreeNode<K,V> parent;  // red-black tree links
2678 >        TreeNode<K,V> left;
2679 >        TreeNode<K,V> right;
2680 >        TreeNode<K,V> prev;    // needed to unlink next upon deletion
2681 >        boolean red;
2682 >
2683 >        TreeNode(int hash, K key, V val, Node<K,V> next,
2684 >                 TreeNode<K,V> parent) {
2685 >            super(hash, key, val, next);
2686 >            this.parent = parent;
2687 >        }
2688 >
2689 >        Node<K,V> find(int h, Object k) {
2690 >            return findTreeNode(h, k, null);
2691          }
2692  
2693          /**
2694 <         * Sets nextEntry to first node of next non-empty table
2695 <         * (in backwards order, to simplify checks).
2694 >         * Returns the TreeNode (or null if not found) for the given key
2695 >         * starting at given root.
2696           */
2697 <        final void advance() {
2698 <            for (;;) {
2699 <                if (nextTableIndex >= 0) {
2700 <                    if ((nextEntry = entryAt(currentTable,
2701 <                                             nextTableIndex--)) != null)
2702 <                        break;
2697 >        final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) {
2698 >            if (k != null) {
2699 >                TreeNode<K,V> p = this;
2700 >                do {
2701 >                    int ph, dir; K pk; TreeNode<K,V> q;
2702 >                    TreeNode<K,V> pl = p.left, pr = p.right;
2703 >                    if ((ph = p.hash) > h)
2704 >                        p = pl;
2705 >                    else if (ph < h)
2706 >                        p = pr;
2707 >                    else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
2708 >                        return p;
2709 >                    else if (pl == null)
2710 >                        p = pr;
2711 >                    else if (pr == null)
2712 >                        p = pl;
2713 >                    else if ((kc != null ||
2714 >                              (kc = comparableClassFor(k)) != null) &&
2715 >                             (dir = compareComparables(kc, k, pk)) != 0)
2716 >                        p = (dir < 0) ? pl : pr;
2717 >                    else if ((q = pr.findTreeNode(h, k, kc)) != null)
2718 >                        return q;
2719 >                    else
2720 >                        p = pl;
2721 >                } while (p != null);
2722 >            }
2723 >            return null;
2724 >        }
2725 >    }
2726 >
2727 >    /* ---------------- TreeBins -------------- */
2728 >
2729 >    /**
2730 >     * TreeNodes used at the heads of bins. TreeBins do not hold user
2731 >     * keys or values, but instead point to a list of TreeNodes and
2732 >     * their root. They also maintain a parasitic read-write lock
2733 >     * forcing writers (who hold the bin lock) to wait for readers (who do
2734 >     * not) to complete before tree restructuring operations.
2735 >     */
2736 >    static final class TreeBin<K,V> extends Node<K,V> {
2737 >        TreeNode<K,V> root;
2738 >        volatile TreeNode<K,V> first;
2739 >        volatile Thread waiter;
2740 >        volatile int lockState;
2741 >        // values for lockState
2742 >        static final int WRITER = 1; // set while holding write lock
2743 >        static final int WAITER = 2; // set when waiting for write lock
2744 >        static final int READER = 4; // increment value for setting read lock
2745 >
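        // Worked example (illustrative, not part of the original source): the
        // lockState int packs the whole lock. 0 means unlocked; WRITER (1) is
        // set while a restructuring writer (which already holds the bin's
        // synchronized lock) owns the tree; WAITER (2) is set while such a
        // writer is parked waiting for readers to drain; each active reader
        // adds READER (4). For instance, lockState == 10 (WAITER + 2*READER)
        // means two readers are traversing and one writer is waiting; the last
        // reader to decrement unparks the waiter (see find below).
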
2746 >        /**
2747 >         * Tie-breaking utility for ordering insertions when equal
2748 >         * hashCodes and non-comparable. We don't require a total
2749 >         * order, just a consistent insertion rule to maintain
2750 >         * equivalence across rebalancings. Tie-breaking further than
2751 >         * necessary simplifies testing a bit.
2752 >         */
2753 >        static int tieBreakOrder(Object a, Object b) {
2754 >            int d;
2755 >            if (a == null || b == null ||
2756 >                (d = a.getClass().getName().
2757 >                 compareTo(b.getClass().getName())) == 0)
2758 >                d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
2759 >                     -1 : 1);
2760 >            return d;
2761 >        }
2762 >
2763 >        /**
2764 >         * Creates bin with initial set of nodes headed by b.
2765 >         */
2766 >        TreeBin(TreeNode<K,V> b) {
2767 >            super(TREEBIN, null, null, null);
2768 >            this.first = b;
2769 >            TreeNode<K,V> r = null;
2770 >            for (TreeNode<K,V> x = b, next; x != null; x = next) {
2771 >                next = (TreeNode<K,V>)x.next;
2772 >                x.left = x.right = null;
2773 >                if (r == null) {
2774 >                    x.parent = null;
2775 >                    x.red = false;
2776 >                    r = x;
2777                  }
2778 <                else if (nextSegmentIndex >= 0) {
2779 <                    Segment<K,V> seg = segmentAt(segments, nextSegmentIndex--);
2780 <                    if (seg != null && (currentTable = seg.table) != null)
2781 <                        nextTableIndex = currentTable.length - 1;
2778 >                else {
2779 >                    K k = x.key;
2780 >                    int h = x.hash;
2781 >                    Class<?> kc = null;
2782 >                    for (TreeNode<K,V> p = r;;) {
2783 >                        int dir, ph;
2784 >                        K pk = p.key;
2785 >                        if ((ph = p.hash) > h)
2786 >                            dir = -1;
2787 >                        else if (ph < h)
2788 >                            dir = 1;
2789 >                        else if ((kc == null &&
2790 >                                  (kc = comparableClassFor(k)) == null) ||
2791 >                                 (dir = compareComparables(kc, k, pk)) == 0)
2792 >                            dir = tieBreakOrder(k, pk);
2793 >                        TreeNode<K,V> xp = p;
2794 >                        if ((p = (dir <= 0) ? p.left : p.right) == null) {
2795 >                            x.parent = xp;
2796 >                            if (dir <= 0)
2797 >                                xp.left = x;
2798 >                            else
2799 >                                xp.right = x;
2800 >                            r = balanceInsertion(r, x);
2801 >                            break;
2802 >                        }
2803 >                    }
2804                  }
2805 <                else
2805 >            }
2806 >            this.root = r;
2807 >            assert checkInvariants(root);
2808 >        }
2809 >
2810 >        /**
2811 >         * Acquires write lock for tree restructuring.
2812 >         */
2813 >        private final void lockRoot() {
2814 >            if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER))
2815 >                contendedLock(); // offload to separate method
2816 >        }
2817 >
2818 >        /**
2819 >         * Releases write lock for tree restructuring.
2820 >         */
2821 >        private final void unlockRoot() {
2822 >            lockState = 0;
2823 >        }
2824 >
2825 >        /**
2826 >         * Possibly blocks awaiting root lock.
2827 >         */
2828 >        private final void contendedLock() {
2829 >            boolean waiting = false;
2830 >            for (int s;;) {
2831 >                if (((s = lockState) & ~WAITER) == 0) {
2832 >                    if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) {
2833 >                        if (waiting)
2834 >                            waiter = null;
2835 >                        return;
2836 >                    }
2837 >                }
2838 >                else if ((s & WAITER) == 0) {
2839 >                    if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) {
2840 >                        waiting = true;
2841 >                        waiter = Thread.currentThread();
2842 >                    }
2843 >                }
2844 >                else if (waiting)
2845 >                    LockSupport.park(this);
2846 >            }
2847 >        }
2848 >
2849 >        /**
2850 >         * Returns matching node or null if none. Tries to search
2851 >         * using tree comparisons from root, but continues linear
2852 >         * search when lock not available.
2853 >         */
2854 >        final Node<K,V> find(int h, Object k) {
2855 >            if (k != null) {
2856 >                for (Node<K,V> e = first; e != null; ) {
2857 >                    int s; K ek;
2858 >                    if (((s = lockState) & (WAITER|WRITER)) != 0) {
2859 >                        if (e.hash == h &&
2860 >                            ((ek = e.key) == k || (ek != null && k.equals(ek))))
2861 >                            return e;
2862 >                        e = e.next;
2863 >                    }
2864 >                    else if (U.compareAndSwapInt(this, LOCKSTATE, s,
2865 >                                                 s + READER)) {
2866 >                        TreeNode<K,V> r, p;
2867 >                        try {
2868 >                            p = ((r = root) == null ? null :
2869 >                                 r.findTreeNode(h, k, null));
2870 >                        } finally {
2871 >                            Thread w;
2872 >                            if (U.getAndAddInt(this, LOCKSTATE, -READER) ==
2873 >                                (READER|WAITER) && (w = waiter) != null)
2874 >                                LockSupport.unpark(w);
2875 >                        }
2876 >                        return p;
2877 >                    }
2878 >                }
2879 >            }
2880 >            return null;
2881 >        }
2882 >
2883 >        /**
2884 >         * Finds or adds a node.
2885 >         * @return null if added
2886 >         */
2887 >        final TreeNode<K,V> putTreeVal(int h, K k, V v) {
2888 >            Class<?> kc = null;
2889 >            boolean searched = false;
2890 >            for (TreeNode<K,V> p = root;;) {
2891 >                int dir, ph; K pk;
2892 >                if (p == null) {
2893 >                    first = root = new TreeNode<K,V>(h, k, v, null, null);
2894                      break;
2895 +                }
2896 +                else if ((ph = p.hash) > h)
2897 +                    dir = -1;
2898 +                else if (ph < h)
2899 +                    dir = 1;
2900 +                else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
2901 +                    return p;
2902 +                else if ((kc == null &&
2903 +                          (kc = comparableClassFor(k)) == null) ||
2904 +                         (dir = compareComparables(kc, k, pk)) == 0) {
2905 +                    if (!searched) {
2906 +                        TreeNode<K,V> q, ch;
2907 +                        searched = true;
2908 +                        if (((ch = p.left) != null &&
2909 +                             (q = ch.findTreeNode(h, k, kc)) != null) ||
2910 +                            ((ch = p.right) != null &&
2911 +                             (q = ch.findTreeNode(h, k, kc)) != null))
2912 +                            return q;
2913 +                    }
2914 +                    dir = tieBreakOrder(k, pk);
2915 +                }
2916 +
2917 +                TreeNode<K,V> xp = p;
2918 +                if ((p = (dir <= 0) ? p.left : p.right) == null) {
2919 +                    TreeNode<K,V> x, f = first;
2920 +                    first = x = new TreeNode<K,V>(h, k, v, f, xp);
2921 +                    if (f != null)
2922 +                        f.prev = x;
2923 +                    if (dir <= 0)
2924 +                        xp.left = x;
2925 +                    else
2926 +                        xp.right = x;
2927 +                    if (!xp.red)
2928 +                        x.red = true;
2929 +                    else {
2930 +                        lockRoot();
2931 +                        try {
2932 +                            root = balanceInsertion(root, x);
2933 +                        } finally {
2934 +                            unlockRoot();
2935 +                        }
2936 +                    }
2937 +                    break;
2938 +                }
2939              }
2940 +            assert checkInvariants(root);
2941 +            return null;
2942          }
2943  
2944 <        final HashEntry<K,V> nextEntry() {
2945 <            HashEntry<K,V> e = nextEntry;
2946 <            if (e == null)
2947 <                throw new NoSuchElementException();
2948 <            lastReturned = e; // cannot assign until after null check
2949 <            if ((nextEntry = e.next) == null)
2950 <                advance();
2951 <            return e;
2944 >        /**
2945 >         * Removes the given node, which must be present before this
2946 >         * call.  This is messier than typical red-black deletion code
2947 >         * because we cannot swap the contents of an interior node
2948 >         * with a leaf successor that is pinned by "next" pointers
2949 >         * that are accessible independently of lock. So instead we
2950 >         * swap the tree linkages.
2951 >         *
2952 >         * @return true if now too small, so should be untreeified
2953 >         */
2954 >        final boolean removeTreeNode(TreeNode<K,V> p) {
2955 >            TreeNode<K,V> next = (TreeNode<K,V>)p.next;
2956 >            TreeNode<K,V> pred = p.prev;  // unlink traversal pointers
2957 >            TreeNode<K,V> r, rl;
2958 >            if (pred == null)
2959 >                first = next;
2960 >            else
2961 >                pred.next = next;
2962 >            if (next != null)
2963 >                next.prev = pred;
2964 >            if (first == null) {
2965 >                root = null;
2966 >                return true;
2967 >            }
2968 >            if ((r = root) == null || r.right == null || // too small
2969 >                (rl = r.left) == null || rl.left == null)
2970 >                return true;
2971 >            lockRoot();
2972 >            try {
2973 >                TreeNode<K,V> replacement;
2974 >                TreeNode<K,V> pl = p.left;
2975 >                TreeNode<K,V> pr = p.right;
2976 >                if (pl != null && pr != null) {
2977 >                    TreeNode<K,V> s = pr, sl;
2978 >                    while ((sl = s.left) != null) // find successor
2979 >                        s = sl;
2980 >                    boolean c = s.red; s.red = p.red; p.red = c; // swap colors
2981 >                    TreeNode<K,V> sr = s.right;
2982 >                    TreeNode<K,V> pp = p.parent;
2983 >                    if (s == pr) { // p was s's direct parent
2984 >                        p.parent = s;
2985 >                        s.right = p;
2986 >                    }
2987 >                    else {
2988 >                        TreeNode<K,V> sp = s.parent;
2989 >                        if ((p.parent = sp) != null) {
2990 >                            if (s == sp.left)
2991 >                                sp.left = p;
2992 >                            else
2993 >                                sp.right = p;
2994 >                        }
2995 >                        if ((s.right = pr) != null)
2996 >                            pr.parent = s;
2997 >                    }
2998 >                    p.left = null;
2999 >                    if ((p.right = sr) != null)
3000 >                        sr.parent = p;
3001 >                    if ((s.left = pl) != null)
3002 >                        pl.parent = s;
3003 >                    if ((s.parent = pp) == null)
3004 >                        r = s;
3005 >                    else if (p == pp.left)
3006 >                        pp.left = s;
3007 >                    else
3008 >                        pp.right = s;
3009 >                    if (sr != null)
3010 >                        replacement = sr;
3011 >                    else
3012 >                        replacement = p;
3013 >                }
3014 >                else if (pl != null)
3015 >                    replacement = pl;
3016 >                else if (pr != null)
3017 >                    replacement = pr;
3018 >                else
3019 >                    replacement = p;
3020 >                if (replacement != p) {
3021 >                    TreeNode<K,V> pp = replacement.parent = p.parent;
3022 >                    if (pp == null)
3023 >                        r = replacement;
3024 >                    else if (p == pp.left)
3025 >                        pp.left = replacement;
3026 >                    else
3027 >                        pp.right = replacement;
3028 >                    p.left = p.right = p.parent = null;
3029 >                }
3030 >
3031 >                root = (p.red) ? r : balanceDeletion(r, replacement);
3032 >
3033 >                if (p == replacement) {  // detach pointers
3034 >                    TreeNode<K,V> pp;
3035 >                    if ((pp = p.parent) != null) {
3036 >                        if (p == pp.left)
3037 >                            pp.left = null;
3038 >                        else if (p == pp.right)
3039 >                            pp.right = null;
3040 >                        p.parent = null;
3041 >                    }
3042 >                }
3043 >            } finally {
3044 >                unlockRoot();
3045 >            }
3046 >            assert checkInvariants(root);
3047 >            return false;
3048 >        }
3049 >
3050 >        /* ------------------------------------------------------------ */
3051 >        // Red-black tree methods, all adapted from CLR
3052 >
3053 >        static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
3054 >                                              TreeNode<K,V> p) {
3055 >            TreeNode<K,V> r, pp, rl;
3056 >            if (p != null && (r = p.right) != null) {
3057 >                if ((rl = p.right = r.left) != null)
3058 >                    rl.parent = p;
3059 >                if ((pp = r.parent = p.parent) == null)
3060 >                    (root = r).red = false;
3061 >                else if (pp.left == p)
3062 >                    pp.left = r;
3063 >                else
3064 >                    pp.right = r;
3065 >                r.left = p;
3066 >                p.parent = r;
3067 >            }
3068 >            return root;
3069          }
3070  
3071 <        public final boolean hasNext() { return nextEntry != null; }
3072 <        public final boolean hasMoreElements() { return nextEntry != null; }
3071 >        static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
3072 >                                               TreeNode<K,V> p) {
3073 >            TreeNode<K,V> l, pp, lr;
3074 >            if (p != null && (l = p.left) != null) {
3075 >                if ((lr = p.left = l.right) != null)
3076 >                    lr.parent = p;
3077 >                if ((pp = l.parent = p.parent) == null)
3078 >                    (root = l).red = false;
3079 >                else if (pp.right == p)
3080 >                    pp.right = l;
3081 >                else
3082 >                    pp.left = l;
3083 >                l.right = p;
3084 >                p.parent = l;
3085 >            }
3086 >            return root;
3087 >        }
3088 >
3089 >        static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
3090 >                                                    TreeNode<K,V> x) {
3091 >            x.red = true;
3092 >            for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
3093 >                if ((xp = x.parent) == null) {
3094 >                    x.red = false;
3095 >                    return x;
3096 >                }
3097 >                else if (!xp.red || (xpp = xp.parent) == null)
3098 >                    return root;
3099 >                if (xp == (xppl = xpp.left)) {
3100 >                    if ((xppr = xpp.right) != null && xppr.red) {
3101 >                        xppr.red = false;
3102 >                        xp.red = false;
3103 >                        xpp.red = true;
3104 >                        x = xpp;
3105 >                    }
3106 >                    else {
3107 >                        if (x == xp.right) {
3108 >                            root = rotateLeft(root, x = xp);
3109 >                            xpp = (xp = x.parent) == null ? null : xp.parent;
3110 >                        }
3111 >                        if (xp != null) {
3112 >                            xp.red = false;
3113 >                            if (xpp != null) {
3114 >                                xpp.red = true;
3115 >                                root = rotateRight(root, xpp);
3116 >                            }
3117 >                        }
3118 >                    }
3119 >                }
3120 >                else {
3121 >                    if (xppl != null && xppl.red) {
3122 >                        xppl.red = false;
3123 >                        xp.red = false;
3124 >                        xpp.red = true;
3125 >                        x = xpp;
3126 >                    }
3127 >                    else {
3128 >                        if (x == xp.left) {
3129 >                            root = rotateRight(root, x = xp);
3130 >                            xpp = (xp = x.parent) == null ? null : xp.parent;
3131 >                        }
3132 >                        if (xp != null) {
3133 >                            xp.red = false;
3134 >                            if (xpp != null) {
3135 >                                xpp.red = true;
3136 >                                root = rotateLeft(root, xpp);
3137 >                            }
3138 >                        }
3139 >                    }
3140 >                }
3141 >            }
3142 >        }
3143 >
3144 >        static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
3145 >                                                   TreeNode<K,V> x) {
3146 >            for (TreeNode<K,V> xp, xpl, xpr;;) {
3147 >                if (x == null || x == root)
3148 >                    return root;
3149 >                else if ((xp = x.parent) == null) {
3150 >                    x.red = false;
3151 >                    return x;
3152 >                }
3153 >                else if (x.red) {
3154 >                    x.red = false;
3155 >                    return root;
3156 >                }
3157 >                else if ((xpl = xp.left) == x) {
3158 >                    if ((xpr = xp.right) != null && xpr.red) {
3159 >                        xpr.red = false;
3160 >                        xp.red = true;
3161 >                        root = rotateLeft(root, xp);
3162 >                        xpr = (xp = x.parent) == null ? null : xp.right;
3163 >                    }
3164 >                    if (xpr == null)
3165 >                        x = xp;
3166 >                    else {
3167 >                        TreeNode<K,V> sl = xpr.left, sr = xpr.right;
3168 >                        if ((sr == null || !sr.red) &&
3169 >                            (sl == null || !sl.red)) {
3170 >                            xpr.red = true;
3171 >                            x = xp;
3172 >                        }
3173 >                        else {
3174 >                            if (sr == null || !sr.red) {
3175 >                                if (sl != null)
3176 >                                    sl.red = false;
3177 >                                xpr.red = true;
3178 >                                root = rotateRight(root, xpr);
3179 >                                xpr = (xp = x.parent) == null ?
3180 >                                    null : xp.right;
3181 >                            }
3182 >                            if (xpr != null) {
3183 >                                xpr.red = (xp == null) ? false : xp.red;
3184 >                                if ((sr = xpr.right) != null)
3185 >                                    sr.red = false;
3186 >                            }
3187 >                            if (xp != null) {
3188 >                                xp.red = false;
3189 >                                root = rotateLeft(root, xp);
3190 >                            }
3191 >                            x = root;
3192 >                        }
3193 >                    }
3194 >                }
3195 >                else { // symmetric
3196 >                    if (xpl != null && xpl.red) {
3197 >                        xpl.red = false;
3198 >                        xp.red = true;
3199 >                        root = rotateRight(root, xp);
3200 >                        xpl = (xp = x.parent) == null ? null : xp.left;
3201 >                    }
3202 >                    if (xpl == null)
3203 >                        x = xp;
3204 >                    else {
3205 >                        TreeNode<K,V> sl = xpl.left, sr = xpl.right;
3206 >                        if ((sl == null || !sl.red) &&
3207 >                            (sr == null || !sr.red)) {
3208 >                            xpl.red = true;
3209 >                            x = xp;
3210 >                        }
3211 >                        else {
3212 >                            if (sl == null || !sl.red) {
3213 >                                if (sr != null)
3214 >                                    sr.red = false;
3215 >                                xpl.red = true;
3216 >                                root = rotateLeft(root, xpl);
3217 >                                xpl = (xp = x.parent) == null ?
3218 >                                    null : xp.left;
3219 >                            }
3220 >                            if (xpl != null) {
3221 >                                xpl.red = (xp == null) ? false : xp.red;
3222 >                                if ((sl = xpl.left) != null)
3223 >                                    sl.red = false;
3224 >                            }
3225 >                            if (xp != null) {
3226 >                                xp.red = false;
3227 >                                root = rotateRight(root, xp);
3228 >                            }
3229 >                            x = root;
3230 >                        }
3231 >                    }
3232 >                }
3233 >            }
3234 >        }
3235 >
3236 >        /**
3237 >         * Recursive invariant check
3238 >         * Recursive invariant check.
3239 >        static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
3240 >            TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
3241 >                tb = t.prev, tn = (TreeNode<K,V>)t.next;
3242 >            if (tb != null && tb.next != t)
3243 >                return false;
3244 >            if (tn != null && tn.prev != t)
3245 >                return false;
3246 >            if (tp != null && t != tp.left && t != tp.right)
3247 >                return false;
3248 >            if (tl != null && (tl.parent != t || tl.hash > t.hash))
3249 >                return false;
3250 >            if (tr != null && (tr.parent != t || tr.hash < t.hash))
3251 >                return false;
3252 >            if (t.red && tl != null && tl.red && tr != null && tr.red)
3253 >                return false;
3254 >            if (tl != null && !checkInvariants(tl))
3255 >                return false;
3256 >            if (tr != null && !checkInvariants(tr))
3257 >                return false;
3258 >            return true;
3259 >        }
3260 >
3261 >        private static final sun.misc.Unsafe U = sun.misc.Unsafe.getUnsafe();
3262 >        private static final long LOCKSTATE;
3263 >        static {
3264 >            try {
3265 >                LOCKSTATE = U.objectFieldOffset
3266 >                    (TreeBin.class.getDeclaredField("lockState"));
3267 >            } catch (ReflectiveOperationException e) {
3268 >                throw new Error(e);
3269 >            }
3270 >        }
3271 >    }
3272 >
3273 >    /* ---------------- Table Traversal -------------- */
3274 >
3275 >    /**
3276 >     * Records the table, its length, and current traversal index for a
3277 >     * traverser that must process a region of a forwarded table before
3278 >     * proceeding with the current table.
3279 >     */
3280 >    static final class TableStack<K,V> {
3281 >        int length;
3282 >        int index;
3283 >        Node<K,V>[] tab;
3284 >        TableStack<K,V> next;
3285 >    }
3286 >
3287 >    /**
3288 >     * Encapsulates traversal for methods such as containsValue; also
3289 >     * serves as a base class for other iterators and spliterators.
3290 >     *
3291 >     * Method advance visits once each still-valid node that was
3292 >     * reachable upon iterator construction. It might miss some that
3293 >     * were added to a bin after the bin was visited, which is OK wrt
3294 >     * consistency guarantees. Maintaining this property in the face
3295 >     * of possible ongoing resizes requires a fair amount of
3296 >     * bookkeeping state that is difficult to optimize away amidst
3297 >     * volatile accesses.  Even so, traversal maintains reasonable
3298 >     * throughput.
3299 >     *
3300 >     * Normally, iteration proceeds bin-by-bin traversing lists.
3301 >     * However, if the table has been resized, then all future steps
3302 >     * must traverse the bin at the current index as well as the one at
3303 >     * (index + baseSize); and so on for further resizings. To
3304 >     * paranoically cope with potential sharing by users of iterators
3305 >     * across threads, iteration terminates if a bounds check fails
3306 >     * for a table read.
3307 >     */
3308 >    static class Traverser<K,V> {
3309 >        Node<K,V>[] tab;        // current table; updated if resized
3310 >        Node<K,V> next;         // the next entry to use
3311 >        TableStack<K,V> stack, spare; // to save/restore on ForwardingNodes
3312 >        int index;              // index of bin to use next
3313 >        int baseIndex;          // current index of initial table
3314 >        int baseLimit;          // index bound for initial table
3315 >        final int baseSize;     // initial table size
3316 >
3317 >        Traverser(Node<K,V>[] tab, int size, int index, int limit) {
3318 >            this.tab = tab;
3319 >            this.baseSize = size;
3320 >            this.baseIndex = this.index = index;
3321 >            this.baseLimit = limit;
3322 >            this.next = null;
3323 >        }
3324 >
3325 >        /**
3326 >         * Advances if possible, returning next valid node, or null if none.
3327 >         */
3328 >        final Node<K,V> advance() {
3329 >            Node<K,V> e;
3330 >            if ((e = next) != null)
3331 >                e = e.next;
3332 >            for (;;) {
3333 >                Node<K,V>[] t; int i, n;  // must use locals in checks
3334 >                if (e != null)
3335 >                    return next = e;
3336 >                if (baseIndex >= baseLimit || (t = tab) == null ||
3337 >                    (n = t.length) <= (i = index) || i < 0)
3338 >                    return next = null;
3339 >                if ((e = tabAt(t, i)) != null && e.hash < 0) {
3340 >                    if (e instanceof ForwardingNode) {
3341 >                        tab = ((ForwardingNode<K,V>)e).nextTable;
3342 >                        e = null;
3343 >                        pushState(t, i, n);
3344 >                        continue;
3345 >                    }
3346 >                    else if (e instanceof TreeBin)
3347 >                        e = ((TreeBin<K,V>)e).first;
3348 >                    else
3349 >                        e = null;
3350 >                }
3351 >                if (stack != null)
3352 >                    recoverState(n);
3353 >                else if ((index = i + baseSize) >= n)
3354 >                    index = ++baseIndex; // visit upper slots if present
3355 >            }
3356 >        }
3357 >
3358 >        /**
3359 >         * Saves traversal state upon encountering a forwarding node.
3360 >         */
3361 >        private void pushState(Node<K,V>[] t, int i, int n) {
3362 >            TableStack<K,V> s = spare;  // reuse if possible
3363 >            if (s != null)
3364 >                spare = s.next;
3365 >            else
3366 >                s = new TableStack<K,V>();
3367 >            s.tab = t;
3368 >            s.length = n;
3369 >            s.index = i;
3370 >            s.next = stack;
3371 >            stack = s;
3372 >        }
3373 >
3374 >        /**
3375 >         * Possibly pops traversal state.
3376 >         *
3377 >         * @param n length of current table
3378 >         */
3379 >        private void recoverState(int n) {
3380 >            TableStack<K,V> s; int len;
3381 >            while ((s = stack) != null && (index += (len = s.length)) >= n) {
3382 >                n = len;
3383 >                index = s.index;
3384 >                tab = s.tab;
3385 >                s.tab = null;
3386 >                TableStack<K,V> next = s.next;
3387 >                s.next = spare; // save for reuse
3388 >                stack = next;
3389 >                spare = s;
3390 >            }
3391 >            if (s == null && (index += baseSize) >= n)
3392 >                index = ++baseIndex;
3393 >        }
3394 >    }
3395 >
3396 >    /**
3397 >     * Base of key, value, and entry Iterators. Adds fields to
3398 >     * Traverser to support iterator.remove.
3399 >     */
3400 >    static class BaseIterator<K,V> extends Traverser<K,V> {
3401 >        final ConcurrentHashMap<K,V> map;
3402 >        Node<K,V> lastReturned;
3403 >        BaseIterator(Node<K,V>[] tab, int size, int index, int limit,
3404 >                    ConcurrentHashMap<K,V> map) {
3405 >            super(tab, size, index, limit);
3406 >            this.map = map;
3407 >            advance();
3408 >        }
3409 >
3410 >        public final boolean hasNext() { return next != null; }
3411 >        public final boolean hasMoreElements() { return next != null; }
3412  
3413          public final void remove() {
3414 <            if (lastReturned == null)
3414 >            Node<K,V> p;
3415 >            if ((p = lastReturned) == null)
3416                  throw new IllegalStateException();
1271            ConcurrentHashMap.this.remove(lastReturned.key);
3417              lastReturned = null;
3418 +            map.replaceNode(p.key, null, null);
3419 +        }
3420 +    }
3421 +
3422 +    static final class KeyIterator<K,V> extends BaseIterator<K,V>
3423 +        implements Iterator<K>, Enumeration<K> {
3424 +        KeyIterator(Node<K,V>[] tab, int index, int size, int limit,
3425 +                    ConcurrentHashMap<K,V> map) {
3426 +            super(tab, index, size, limit, map);
3427 +        }
3428 +
3429 +        public final K next() {
3430 +            Node<K,V> p;
3431 +            if ((p = next) == null)
3432 +                throw new NoSuchElementException();
3433 +            K k = p.key;
3434 +            lastReturned = p;
3435 +            advance();
3436 +            return k;
3437 +        }
3438 +
3439 +        public final K nextElement() { return next(); }
3440 +    }
3441 +
3442 +    static final class ValueIterator<K,V> extends BaseIterator<K,V>
3443 +        implements Iterator<V>, Enumeration<V> {
3444 +        ValueIterator(Node<K,V>[] tab, int index, int size, int limit,
3445 +                      ConcurrentHashMap<K,V> map) {
3446 +            super(tab, index, size, limit, map);
3447          }
3448 +
3449 +        public final V next() {
3450 +            Node<K,V> p;
3451 +            if ((p = next) == null)
3452 +                throw new NoSuchElementException();
3453 +            V v = p.val;
3454 +            lastReturned = p;
3455 +            advance();
3456 +            return v;
3457 +        }
3458 +
3459 +        public final V nextElement() { return next(); }
3460      }
3461  
3462 <    final class KeyIterator
3463 <        extends HashIterator
3464 <        implements Iterator<K>, Enumeration<K>
3465 <    {
3466 <        public final K next()        { return super.nextEntry().key; }
3467 <        public final K nextElement() { return super.nextEntry().key; }
3468 <    }
3469 <
3470 <    final class ValueIterator
3471 <        extends HashIterator
3472 <        implements Iterator<V>, Enumeration<V>
3473 <    {
3474 <        public final V next()        { return super.nextEntry().value; }
3475 <        public final V nextElement() { return super.nextEntry().value; }
3462 >    static final class EntryIterator<K,V> extends BaseIterator<K,V>
3463 >        implements Iterator<Map.Entry<K,V>> {
3464 >        EntryIterator(Node<K,V>[] tab, int index, int size, int limit,
3465 >                      ConcurrentHashMap<K,V> map) {
3466 >            super(tab, index, size, limit, map);
3467 >        }
3468 >
3469 >        public final Map.Entry<K,V> next() {
3470 >            Node<K,V> p;
3471 >            if ((p = next) == null)
3472 >                throw new NoSuchElementException();
3473 >            K k = p.key;
3474 >            V v = p.val;
3475 >            lastReturned = p;
3476 >            advance();
3477 >            return new MapEntry<K,V>(k, v, map);
3478 >        }
3479      }
3480  
3481      /**
3482 <     * Custom Entry class used by EntryIterator.next(), that relays
3483 <     * setValue changes to the underlying map.
3484 <     */
3485 <    final class WriteThroughEntry
3486 <        extends AbstractMap.SimpleEntry<K,V>
3487 <    {
3488 <        WriteThroughEntry(K k, V v) {
3489 <            super(k,v);
3482 >     * Exported Entry for EntryIterator
3483 >     */
3484 >    static final class MapEntry<K,V> implements Map.Entry<K,V> {
3485 >        final K key; // non-null
3486 >        V val;       // non-null
3487 >        final ConcurrentHashMap<K,V> map;
3488 >        MapEntry(K key, V val, ConcurrentHashMap<K,V> map) {
3489 >            this.key = key;
3490 >            this.val = val;
3491 >            this.map = map;
3492 >        }
3493 >        public K getKey()        { return key; }
3494 >        public V getValue()      { return val; }
3495 >        public int hashCode()    { return key.hashCode() ^ val.hashCode(); }
3496 >        public String toString() {
3497 >            return Helpers.mapEntryToString(key, val);
3498 >        }
3499 >
3500 >        public boolean equals(Object o) {
3501 >            Object k, v; Map.Entry<?,?> e;
3502 >            return ((o instanceof Map.Entry) &&
3503 >                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3504 >                    (v = e.getValue()) != null &&
3505 >                    (k == key || k.equals(key)) &&
3506 >                    (v == val || v.equals(val)));
3507          }
3508  
3509          /**
3510           * Sets our entry's value and writes through to the map. The
3511 <         * value to return is somewhat arbitrary here. Since a
3512 <         * WriteThroughEntry does not necessarily track asynchronous
3513 <         * changes, the most recent "previous" value could be
3514 <         * different from what we return (or could even have been
3515 <         * removed in which case the put will re-establish). We do not
1310 <         * and cannot guarantee more.
3511 >         * value to return is somewhat arbitrary here. Since we do not
3512 >         * necessarily track asynchronous changes, the most recent
3513 >         * "previous" value could be different from what we return (or
3514 >         * could even have been removed, in which case the put will
3515 >         * re-establish). We do not and cannot guarantee more.
3516           */
3517          public V setValue(V value) {
3518              if (value == null) throw new NullPointerException();
3519 <            V v = super.setValue(value);
3520 <            ConcurrentHashMap.this.put(getKey(), value);
3519 >            V v = val;
3520 >            val = value;
3521 >            map.put(key, value);
3522              return v;
3523          }
3524      }
3525  
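    // Usage sketch (illustrative, not part of the original source): entries
    // handed out by the entry-set iterator write through to the map, so
    // setValue both updates the entry and performs a put. Types and the
    // method name are hypothetical examples.
    static void clampNegativeValues(ConcurrentHashMap<String,Integer> map) {
        for (Map.Entry<String,Integer> e : map.entrySet())
            if (e.getValue() < 0)
                e.setValue(0);               // equivalent to map.put(e.getKey(), 0)
    }
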
3526 <    final class EntryIterator
3527 <        extends HashIterator
3528 <        implements Iterator<Entry<K,V>>
3529 <    {
3530 <        public Map.Entry<K,V> next() {
3531 <            HashEntry<K,V> e = super.nextEntry();
3532 <            return new WriteThroughEntry(e.key, e.value);
3526 >    static final class KeySpliterator<K,V> extends Traverser<K,V>
3527 >        implements Spliterator<K> {
3528 >        long est;               // size estimate
3529 >        KeySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3530 >                       long est) {
3531 >            super(tab, size, index, limit);
3532 >            this.est = est;
3533 >        }
3534 >
3535 >        public Spliterator<K> trySplit() {
3536 >            int i, f, h;
3537 >            return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3538 >                new KeySpliterator<K,V>(tab, baseSize, baseLimit = h,
3539 >                                        f, est >>>= 1);
3540 >        }
3541 >
3542 >        public void forEachRemaining(Consumer<? super K> action) {
3543 >            if (action == null) throw new NullPointerException();
3544 >            for (Node<K,V> p; (p = advance()) != null;)
3545 >                action.accept(p.key);
3546 >        }
3547 >
3548 >        public boolean tryAdvance(Consumer<? super K> action) {
3549 >            if (action == null) throw new NullPointerException();
3550 >            Node<K,V> p;
3551 >            if ((p = advance()) == null)
3552 >                return false;
3553 >            action.accept(p.key);
3554 >            return true;
3555 >        }
3556 >
3557 >        public long estimateSize() { return est; }
3558 >
3559 >        public int characteristics() {
3560 >            return Spliterator.DISTINCT | Spliterator.CONCURRENT |
3561 >                Spliterator.NONNULL;
3562          }
3563      }
3564  
3565 <    final class KeySet extends AbstractSet<K> {
3566 <        public Iterator<K> iterator() {
3567 <            return new KeyIterator();
3565 >    static final class ValueSpliterator<K,V> extends Traverser<K,V>
3566 >        implements Spliterator<V> {
3567 >        long est;               // size estimate
3568 >        ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit,
3569 >                         long est) {
3570 >            super(tab, size, index, limit);
3571 >            this.est = est;
3572          }
3573 <        public int size() {
3574 <            return ConcurrentHashMap.this.size();
3573 >
3574 >        public Spliterator<V> trySplit() {
3575 >            int i, f, h;
3576 >            return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3577 >                new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h,
3578 >                                          f, est >>>= 1);
3579          }
3580 <        public boolean isEmpty() {
3581 <            return ConcurrentHashMap.this.isEmpty();
3580 >
3581 >        public void forEachRemaining(Consumer<? super V> action) {
3582 >            if (action == null) throw new NullPointerException();
3583 >            for (Node<K,V> p; (p = advance()) != null;)
3584 >                action.accept(p.val);
3585          }
3586 <        public boolean contains(Object o) {
3587 <            return ConcurrentHashMap.this.containsKey(o);
3586 >
3587 >        public boolean tryAdvance(Consumer<? super V> action) {
3588 >            if (action == null) throw new NullPointerException();
3589 >            Node<K,V> p;
3590 >            if ((p = advance()) == null)
3591 >                return false;
3592 >            action.accept(p.val);
3593 >            return true;
3594          }
3595 <        public boolean remove(Object o) {
3596 <            return ConcurrentHashMap.this.remove(o) != null;
3595 >
3596 >        public long estimateSize() { return est; }
3597 >
3598 >        public int characteristics() {
3599 >            return Spliterator.CONCURRENT | Spliterator.NONNULL;
3600 >        }
3601 >    }
3602 >
3603 >    static final class EntrySpliterator<K,V> extends Traverser<K,V>
3604 >        implements Spliterator<Map.Entry<K,V>> {
3605 >        final ConcurrentHashMap<K,V> map; // To export MapEntry
3606 >        long est;               // size estimate
3607 >        EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3608 >                         long est, ConcurrentHashMap<K,V> map) {
3609 >            super(tab, size, index, limit);
3610 >            this.map = map;
3611 >            this.est = est;
3612 >        }
3613 >
3614 >        public Spliterator<Map.Entry<K,V>> trySplit() {
3615 >            int i, f, h;
3616 >            return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3617 >                new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h,
3618 >                                          f, est >>>= 1, map);
3619 >        }
3620 >
3621 >        public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
3622 >            if (action == null) throw new NullPointerException();
3623 >            for (Node<K,V> p; (p = advance()) != null; )
3624 >                action.accept(new MapEntry<K,V>(p.key, p.val, map));
3625 >        }
3626 >
3627 >        public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
3628 >            if (action == null) throw new NullPointerException();
3629 >            Node<K,V> p;
3630 >            if ((p = advance()) == null)
3631 >                return false;
3632 >            action.accept(new MapEntry<K,V>(p.key, p.val, map));
3633 >            return true;
3634          }
3635 <        public void clear() {
3636 <            ConcurrentHashMap.this.clear();
3635 >
3636 >        public long estimateSize() { return est; }
3637 >
3638 >        public int characteristics() {
3639 >            return Spliterator.DISTINCT | Spliterator.CONCURRENT |
3640 >                Spliterator.NONNULL;
3641          }
3642      }
3643  
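    // Usage sketch (illustrative, not part of the original source): these
    // spliterators back the collection views, so parallel streams over a view
    // split the table by bin ranges. Names below are hypothetical examples.
    static long countLongKeys(ConcurrentHashMap<String,Integer> map) {
        return map.keySet().parallelStream()
                  .filter(k -> k.length() > 8)
                  .count();
    }
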
3644 <    final class Values extends AbstractCollection<V> {
3645 <        public Iterator<V> iterator() {
3646 <            return new ValueIterator();
3644 >    // Parallel bulk operations
3645 >
3646 >    /**
3647 >     * Computes initial batch value for bulk tasks. The returned value
3648 >     * is approximately exp2 of the number of times (minus one) to
3649 >     * split task by two before executing leaf action. This value is
3650 >     * faster to compute and more convenient to use as a guide to
3651 >     * splitting than is the depth, since it is used while dividing by
3652 >     * two anyway.
3653 >     */
3654 >    final int batchFor(long b) {
3655 >        long n;
3656 >        if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
3657 >            return 0;
3658 >        int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
3659 >        return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
3660 >    }
3661 >
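    // Worked example (illustrative, not part of the original source): with a
    // common-pool parallelism of 8, the slack bound sp is 32. For a map holding
    // n = 1_000_000 mappings, parallelismThreshold b = 10_000 gives n/b = 100
    // >= sp, so batchFor returns 32; b = 100_000 gives n/b = 10 < 32, so it
    // returns 10. A threshold of Long.MAX_VALUE, or n <= 1, or n < b, yields 0,
    // meaning the bulk operation runs sequentially.
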
3662 >    /**
3663 >     * Performs the given action for each (key, value).
3664 >     *
3665 >     * @param parallelismThreshold the (estimated) number of elements
3666 >     * needed for this operation to be executed in parallel
3667 >     * @param action the action
3668 >     * @since 1.8
3669 >     */
3670 >    public void forEach(long parallelismThreshold,
3671 >                        BiConsumer<? super K,? super V> action) {
3672 >        if (action == null) throw new NullPointerException();
3673 >        new ForEachMappingTask<K,V>
3674 >            (null, batchFor(parallelismThreshold), 0, 0, table,
3675 >             action).invoke();
3676 >    }
3677 >
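    // Usage sketch (illustrative, not part of the original source): run the
    // action over every mapping, going parallel once the map holds roughly 500
    // or more elements. Types and the threshold are hypothetical examples.
    static void printAll(ConcurrentHashMap<String,Integer> map) {
        map.forEach(500L, (k, v) -> System.out.println(k + " -> " + v));
    }
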
3678 >    /**
3679 >     * Performs the given action for each non-null transformation
3680 >     * of each (key, value).
3681 >     *
3682 >     * @param parallelismThreshold the (estimated) number of elements
3683 >     * needed for this operation to be executed in parallel
3684 >     * @param transformer a function returning the transformation
3685 >     * for an element, or null if there is no transformation (in
3686 >     * which case the action is not applied)
3687 >     * @param action the action
3688 >     * @param <U> the return type of the transformer
3689 >     * @since 1.8
3690 >     */
3691 >    public <U> void forEach(long parallelismThreshold,
3692 >                            BiFunction<? super K, ? super V, ? extends U> transformer,
3693 >                            Consumer<? super U> action) {
3694 >        if (transformer == null || action == null)
3695 >            throw new NullPointerException();
3696 >        new ForEachTransformedMappingTask<K,V,U>
3697 >            (null, batchFor(parallelismThreshold), 0, 0, table,
3698 >             transformer, action).invoke();
3699 >    }
3700 >
3701 >    /**
3702 >     * Returns a non-null result from applying the given search
3703 >     * function on each (key, value), or null if none.  Upon
3704 >     * success, further element processing is suppressed and the
3705 >     * results of any other parallel invocations of the search
3706 >     * function are ignored.
3707 >     *
3708 >     * @param parallelismThreshold the (estimated) number of elements
3709 >     * needed for this operation to be executed in parallel
3710 >     * @param searchFunction a function returning a non-null
3711 >     * result on success, else null
3712 >     * @param <U> the return type of the search function
3713 >     * @return a non-null result from applying the given search
3714 >     * function on each (key, value), or null if none
3715 >     * @since 1.8
3716 >     */
3717 >    public <U> U search(long parallelismThreshold,
3718 >                        BiFunction<? super K, ? super V, ? extends U> searchFunction) {
3719 >        if (searchFunction == null) throw new NullPointerException();
3720 >        return new SearchMappingsTask<K,V,U>
3721 >            (null, batchFor(parallelismThreshold), 0, 0, table,
3722 >             searchFunction, new AtomicReference<U>()).invoke();
3723 >    }
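    // Editor's illustrative sketch (not part of the patch): using search above to
    // return any key whose value exceeds a limit; the map and limit are made up.
    //
    //     // some matching key, or null if none; other parallel workers stop early
    //     String overLimit = hits.search(500L,
    //         (page, count) -> count > 1_000L ? page : null);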
3724 >
3725 >    /**
3726 >     * Returns the result of accumulating the given transformation
3727 >     * of all (key, value) pairs using the given reducer to
3728 >     * combine values, or null if none.
3729 >     *
3730 >     * @param parallelismThreshold the (estimated) number of elements
3731 >     * needed for this operation to be executed in parallel
3732 >     * @param transformer a function returning the transformation
3733 >     * for an element, or null if there is no transformation (in
3734 >     * which case it is not combined)
3735 >     * @param reducer a commutative associative combining function
3736 >     * @param <U> the return type of the transformer
3737 >     * @return the result of accumulating the given transformation
3738 >     * of all (key, value) pairs
3739 >     * @since 1.8
3740 >     */
3741 >    public <U> U reduce(long parallelismThreshold,
3742 >                        BiFunction<? super K, ? super V, ? extends U> transformer,
3743 >                        BiFunction<? super U, ? super U, ? extends U> reducer) {
3744 >        if (transformer == null || reducer == null)
3745 >            throw new NullPointerException();
3746 >        return new MapReduceMappingsTask<K,V,U>
3747 >            (null, batchFor(parallelismThreshold), 0, 0, table,
3748 >             null, transformer, reducer).invoke();
3749 >    }
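    // Editor's illustrative sketch (not part of the patch): a map-reduce over
    // (key, value) pairs with the method above, yielding the longest "key=value"
    // string, or null for an empty map; identifiers are hypothetical.
    //
    //     String longest = hits.reduce(500L,
    //         (page, count) -> page + "=" + count,              // transformer
    //         (a, b) -> a.length() >= b.length() ? a : b);      // associative reducer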
3750 >
3751 >    /**
3752 >     * Returns the result of accumulating the given transformation
3753 >     * of all (key, value) pairs using the given reducer to
3754 >     * combine values, and the given basis as an identity value.
3755 >     *
3756 >     * @param parallelismThreshold the (estimated) number of elements
3757 >     * needed for this operation to be executed in parallel
3758 >     * @param transformer a function returning the transformation
3759 >     * for an element
3760 >     * @param basis the identity (initial default value) for the reduction
3761 >     * @param reducer a commutative associative combining function
3762 >     * @return the result of accumulating the given transformation
3763 >     * of all (key, value) pairs
3764 >     * @since 1.8
3765 >     */
3766 >    public double reduceToDouble(long parallelismThreshold,
3767 >                                 ToDoubleBiFunction<? super K, ? super V> transformer,
3768 >                                 double basis,
3769 >                                 DoubleBinaryOperator reducer) {
3770 >        if (transformer == null || reducer == null)
3771 >            throw new NullPointerException();
3772 >        return new MapReduceMappingsToDoubleTask<K,V>
3773 >            (null, batchFor(parallelismThreshold), 0, 0, table,
3774 >             null, transformer, basis, reducer).invoke();
3775 >    }
3776 >
3777 >    /**
3778 >     * Returns the result of accumulating the given transformation
3779 >     * of all (key, value) pairs using the given reducer to
3780 >     * combine values, and the given basis as an identity value.
3781 >     *
3782 >     * @param parallelismThreshold the (estimated) number of elements
3783 >     * needed for this operation to be executed in parallel
3784 >     * @param transformer a function returning the transformation
3785 >     * for an element
3786 >     * @param basis the identity (initial default value) for the reduction
3787 >     * @param reducer a commutative associative combining function
3788 >     * @return the result of accumulating the given transformation
3789 >     * of all (key, value) pairs
3790 >     * @since 1.8
3791 >     */
3792 >    public long reduceToLong(long parallelismThreshold,
3793 >                             ToLongBiFunction<? super K, ? super V> transformer,
3794 >                             long basis,
3795 >                             LongBinaryOperator reducer) {
3796 >        if (transformer == null || reducer == null)
3797 >            throw new NullPointerException();
3798 >        return new MapReduceMappingsToLongTask<K,V>
3799 >            (null, batchFor(parallelismThreshold), 0, 0, table,
3800 >             null, transformer, basis, reducer).invoke();
3801 >    }
3802 >
3803 >    /**
3804 >     * Returns the result of accumulating the given transformation
3805 >     * of all (key, value) pairs using the given reducer to
3806 >     * combine values, and the given basis as an identity value.
3807 >     *
3808 >     * @param parallelismThreshold the (estimated) number of elements
3809 >     * needed for this operation to be executed in parallel
3810 >     * @param transformer a function returning the transformation
3811 >     * for an element
3812 >     * @param basis the identity (initial default value) for the reduction
3813 >     * @param reducer a commutative associative combining function
3814 >     * @return the result of accumulating the given transformation
3815 >     * of all (key, value) pairs
3816 >     * @since 1.8
3817 >     */
3818 >    public int reduceToInt(long parallelismThreshold,
3819 >                           ToIntBiFunction<? super K, ? super V> transformer,
3820 >                           int basis,
3821 >                           IntBinaryOperator reducer) {
3822 >        if (transformer == null || reducer == null)
3823 >            throw new NullPointerException();
3824 >        return new MapReduceMappingsToIntTask<K,V>
3825 >            (null, batchFor(parallelismThreshold), 0, 0, table,
3826 >             null, transformer, basis, reducer).invoke();
3827 >    }
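    // Editor's illustrative sketch (not part of the patch): summing all values with
    // the primitive long reduction above; the map name is hypothetical.
    //
    //     long total = hits.reduceToLong(500L,
    //         (page, count) -> count,      // ToLongBiFunction: unboxes the value
    //         0L,                          // basis (identity for +)
    //         Long::sum);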
3828 >
3829 >    /**
3830 >     * Performs the given action for each key.
3831 >     *
3832 >     * @param parallelismThreshold the (estimated) number of elements
3833 >     * needed for this operation to be executed in parallel
3834 >     * @param action the action
3835 >     * @since 1.8
3836 >     */
3837 >    public void forEachKey(long parallelismThreshold,
3838 >                           Consumer<? super K> action) {
3839 >        if (action == null) throw new NullPointerException();
3840 >        new ForEachKeyTask<K,V>
3841 >            (null, batchFor(parallelismThreshold), 0, 0, table,
3842 >             action).invoke();
3843 >    }
3844 >
3845 >    /**
3846 >     * Performs the given action for each non-null transformation
3847 >     * of each key.
3848 >     *
3849 >     * @param parallelismThreshold the (estimated) number of elements
3850 >     * needed for this operation to be executed in parallel
3851 >     * @param transformer a function returning the transformation
3852 >     * for an element, or null if there is no transformation (in
3853 >     * which case the action is not applied)
3854 >     * @param action the action
3855 >     * @param <U> the return type of the transformer
3856 >     * @since 1.8
3857 >     */
3858 >    public <U> void forEachKey(long parallelismThreshold,
3859 >                               Function<? super K, ? extends U> transformer,
3860 >                               Consumer<? super U> action) {
3861 >        if (transformer == null || action == null)
3862 >            throw new NullPointerException();
3863 >        new ForEachTransformedKeyTask<K,V,U>
3864 >            (null, batchFor(parallelismThreshold), 0, 0, table,
3865 >             transformer, action).invoke();
3866 >    }
3867 >
3868 >    /**
3869 >     * Returns a non-null result from applying the given search
3870 >     * function on each key, or null if none. Upon success,
3871 >     * further element processing is suppressed and the results of
3872 >     * any other parallel invocations of the search function are
3873 >     * ignored.
3874 >     *
3875 >     * @param parallelismThreshold the (estimated) number of elements
3876 >     * needed for this operation to be executed in parallel
3877 >     * @param searchFunction a function returning a non-null
3878 >     * result on success, else null
3879 >     * @param <U> the return type of the search function
3880 >     * @return a non-null result from applying the given search
3881 >     * function on each key, or null if none
3882 >     * @since 1.8
3883 >     */
3884 >    public <U> U searchKeys(long parallelismThreshold,
3885 >                            Function<? super K, ? extends U> searchFunction) {
3886 >        if (searchFunction == null) throw new NullPointerException();
3887 >        return new SearchKeysTask<K,V,U>
3888 >            (null, batchFor(parallelismThreshold), 0, 0, table,
3889 >             searchFunction, new AtomicReference<U>()).invoke();
3890 >    }
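    // Editor's illustrative sketch (not part of the patch): searchKeys returning some
    // key with a given prefix, or null if none; the prefix is made up.
    //
    //     String errKey = hits.searchKeys(500L,
    //         page -> page.startsWith("error/") ? page : null);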
3891 >
3892 >    /**
3893 >     * Returns the result of accumulating all keys using the given
3894 >     * reducer to combine values, or null if none.
3895 >     *
3896 >     * @param parallelismThreshold the (estimated) number of elements
3897 >     * needed for this operation to be executed in parallel
3898 >     * @param reducer a commutative associative combining function
3899 >     * @return the result of accumulating all keys using the given
3900 >     * reducer to combine values, or null if none
3901 >     * @since 1.8
3902 >     */
3903 >    public K reduceKeys(long parallelismThreshold,
3904 >                        BiFunction<? super K, ? super K, ? extends K> reducer) {
3905 >        if (reducer == null) throw new NullPointerException();
3906 >        return new ReduceKeysTask<K,V>
3907 >            (null, batchFor(parallelismThreshold), 0, 0, table,
3908 >             null, reducer).invoke();
3909 >    }
3910 >
3911 >    /**
3912 >     * Returns the result of accumulating the given transformation
3913 >     * of all keys using the given reducer to combine values, or
3914 >     * null if none.
3915 >     *
3916 >     * @param parallelismThreshold the (estimated) number of elements
3917 >     * needed for this operation to be executed in parallel
3918 >     * @param transformer a function returning the transformation
3919 >     * for an element, or null if there is no transformation (in
3920 >     * which case it is not combined)
3921 >     * @param reducer a commutative associative combining function
3922 >     * @param <U> the return type of the transformer
3923 >     * @return the result of accumulating the given transformation
3924 >     * of all keys
3925 >     * @since 1.8
3926 >     */
3927 >    public <U> U reduceKeys(long parallelismThreshold,
3928 >                            Function<? super K, ? extends U> transformer,
3929 >                            BiFunction<? super U, ? super U, ? extends U> reducer) {
3930 >        if (transformer == null || reducer == null)
3931 >            throw new NullPointerException();
3932 >        return new MapReduceKeysTask<K,V,U>
3933 >            (null, batchFor(parallelismThreshold), 0, 0, table,
3934 >             null, transformer, reducer).invoke();
3935 >    }
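    // Editor's illustrative sketch (not part of the patch): total length of all keys
    // via the transforming key reduction above (null if the map is empty);
    // identifiers are hypothetical.
    //
    //     Integer keyChars = hits.reduceKeys(500L,
    //         String::length,      // transformer: key -> Integer
    //         Integer::sum);       // commutative, associative reducer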
3936 >
3937 >    /**
3938 >     * Returns the result of accumulating the given transformation
3939 >     * of all keys using the given reducer to combine values, and
3940 >     * the given basis as an identity value.
3941 >     *
3942 >     * @param parallelismThreshold the (estimated) number of elements
3943 >     * needed for this operation to be executed in parallel
3944 >     * @param transformer a function returning the transformation
3945 >     * for an element
3946 >     * @param basis the identity (initial default value) for the reduction
3947 >     * @param reducer a commutative associative combining function
3948 >     * @return the result of accumulating the given transformation
3949 >     * of all keys
3950 >     * @since 1.8
3951 >     */
3952 >    public double reduceKeysToDouble(long parallelismThreshold,
3953 >                                     ToDoubleFunction<? super K> transformer,
3954 >                                     double basis,
3955 >                                     DoubleBinaryOperator reducer) {
3956 >        if (transformer == null || reducer == null)
3957 >            throw new NullPointerException();
3958 >        return new MapReduceKeysToDoubleTask<K,V>
3959 >            (null, batchFor(parallelismThreshold), 0, 0, table,
3960 >             null, transformer, basis, reducer).invoke();
3961 >    }
3962 >
3963 >    /**
3964 >     * Returns the result of accumulating the given transformation
3965 >     * of all keys using the given reducer to combine values, and
3966 >     * the given basis as an identity value.
3967 >     *
3968 >     * @param parallelismThreshold the (estimated) number of elements
3969 >     * needed for this operation to be executed in parallel
3970 >     * @param transformer a function returning the transformation
3971 >     * for an element
3972 >     * @param basis the identity (initial default value) for the reduction
3973 >     * @param reducer a commutative associative combining function
3974 >     * @return the result of accumulating the given transformation
3975 >     * of all keys
3976 >     * @since 1.8
3977 >     */
3978 >    public long reduceKeysToLong(long parallelismThreshold,
3979 >                                 ToLongFunction<? super K> transformer,
3980 >                                 long basis,
3981 >                                 LongBinaryOperator reducer) {
3982 >        if (transformer == null || reducer == null)
3983 >            throw new NullPointerException();
3984 >        return new MapReduceKeysToLongTask<K,V>
3985 >            (null, batchFor(parallelismThreshold), 0, 0, table,
3986 >             null, transformer, basis, reducer).invoke();
3987 >    }
3988 >
3989 >    /**
3990 >     * Returns the result of accumulating the given transformation
3991 >     * of all keys using the given reducer to combine values, and
3992 >     * the given basis as an identity value.
3993 >     *
3994 >     * @param parallelismThreshold the (estimated) number of elements
3995 >     * needed for this operation to be executed in parallel
3996 >     * @param transformer a function returning the transformation
3997 >     * for an element
3998 >     * @param basis the identity (initial default value) for the reduction
3999 >     * @param reducer a commutative associative combining function
4000 >     * @return the result of accumulating the given transformation
4001 >     * of all keys
4002 >     * @since 1.8
4003 >     */
4004 >    public int reduceKeysToInt(long parallelismThreshold,
4005 >                               ToIntFunction<? super K> transformer,
4006 >                               int basis,
4007 >                               IntBinaryOperator reducer) {
4008 >        if (transformer == null || reducer == null)
4009 >            throw new NullPointerException();
4010 >        return new MapReduceKeysToIntTask<K,V>
4011 >            (null, batchFor(parallelismThreshold), 0, 0, table,
4012 >             null, transformer, basis, reducer).invoke();
4013 >    }
4014 >
4015 >    /**
4016 >     * Performs the given action for each value.
4017 >     *
4018 >     * @param parallelismThreshold the (estimated) number of elements
4019 >     * needed for this operation to be executed in parallel
4020 >     * @param action the action
4021 >     * @since 1.8
4022 >     */
4023 >    public void forEachValue(long parallelismThreshold,
4024 >                             Consumer<? super V> action) {
4025 >        if (action == null)
4026 >            throw new NullPointerException();
4027 >        new ForEachValueTask<K,V>
4028 >            (null, batchFor(parallelismThreshold), 0, 0, table,
4029 >             action).invoke();
4030 >    }
4031 >
4032 >    /**
4033 >     * Performs the given action for each non-null transformation
4034 >     * of each value.
4035 >     *
4036 >     * @param parallelismThreshold the (estimated) number of elements
4037 >     * needed for this operation to be executed in parallel
4038 >     * @param transformer a function returning the transformation
4039 >     * for an element, or null if there is no transformation (in
4040 >     * which case the action is not applied)
4041 >     * @param action the action
4042 >     * @param <U> the return type of the transformer
4043 >     * @since 1.8
4044 >     */
4045 >    public <U> void forEachValue(long parallelismThreshold,
4046 >                                 Function<? super V, ? extends U> transformer,
4047 >                                 Consumer<? super U> action) {
4048 >        if (transformer == null || action == null)
4049 >            throw new NullPointerException();
4050 >        new ForEachTransformedValueTask<K,V,U>
4051 >            (null, batchFor(parallelismThreshold), 0, 0, table,
4052 >             transformer, action).invoke();
4053 >    }
4054 >
4055 >    /**
4056 >     * Returns a non-null result from applying the given search
4057 >     * function on each value, or null if none.  Upon success,
4058 >     * further element processing is suppressed and the results of
4059 >     * any other parallel invocations of the search function are
4060 >     * ignored.
4061 >     *
4062 >     * @param parallelismThreshold the (estimated) number of elements
4063 >     * needed for this operation to be executed in parallel
4064 >     * @param searchFunction a function returning a non-null
4065 >     * result on success, else null
4066 >     * @param <U> the return type of the search function
4067 >     * @return a non-null result from applying the given search
4068 >     * function on each value, or null if none
4069 >     * @since 1.8
4070 >     */
4071 >    public <U> U searchValues(long parallelismThreshold,
4072 >                              Function<? super V, ? extends U> searchFunction) {
4073 >        if (searchFunction == null) throw new NullPointerException();
4074 >        return new SearchValuesTask<K,V,U>
4075 >            (null, batchFor(parallelismThreshold), 0, 0, table,
4076 >             searchFunction, new AtomicReference<U>()).invoke();
4077 >    }
4078 >
4079 >    /**
4080 >     * Returns the result of accumulating all values using the
4081 >     * given reducer to combine values, or null if none.
4082 >     *
4083 >     * @param parallelismThreshold the (estimated) number of elements
4084 >     * needed for this operation to be executed in parallel
4085 >     * @param reducer a commutative associative combining function
4086 >     * @return the result of accumulating all values
4087 >     * @since 1.8
4088 >     */
4089 >    public V reduceValues(long parallelismThreshold,
4090 >                          BiFunction<? super V, ? super V, ? extends V> reducer) {
4091 >        if (reducer == null) throw new NullPointerException();
4092 >        return new ReduceValuesTask<K,V>
4093 >            (null, batchFor(parallelismThreshold), 0, 0, table,
4094 >             null, reducer).invoke();
4095 >    }
4096 >
4097 >    /**
4098 >     * Returns the result of accumulating the given transformation
4099 >     * of all values using the given reducer to combine values, or
4100 >     * null if none.
4101 >     *
4102 >     * @param parallelismThreshold the (estimated) number of elements
4103 >     * needed for this operation to be executed in parallel
4104 >     * @param transformer a function returning the transformation
4105 >     * for an element, or null if there is no transformation (in
4106 >     * which case it is not combined)
4107 >     * @param reducer a commutative associative combining function
4108 >     * @param <U> the return type of the transformer
4109 >     * @return the result of accumulating the given transformation
4110 >     * of all values
4111 >     * @since 1.8
4112 >     */
4113 >    public <U> U reduceValues(long parallelismThreshold,
4114 >                              Function<? super V, ? extends U> transformer,
4115 >                              BiFunction<? super U, ? super U, ? extends U> reducer) {
4116 >        if (transformer == null || reducer == null)
4117 >            throw new NullPointerException();
4118 >        return new MapReduceValuesTask<K,V,U>
4119 >            (null, batchFor(parallelismThreshold), 0, 0, table,
4120 >             null, transformer, reducer).invoke();
4121 >    }
4122 >
4123 >    /**
4124 >     * Returns the result of accumulating the given transformation
4125 >     * of all values using the given reducer to combine values,
4126 >     * and the given basis as an identity value.
4127 >     *
4128 >     * @param parallelismThreshold the (estimated) number of elements
4129 >     * needed for this operation to be executed in parallel
4130 >     * @param transformer a function returning the transformation
4131 >     * for an element
4132 >     * @param basis the identity (initial default value) for the reduction
4133 >     * @param reducer a commutative associative combining function
4134 >     * @return the result of accumulating the given transformation
4135 >     * of all values
4136 >     * @since 1.8
4137 >     */
4138 >    public double reduceValuesToDouble(long parallelismThreshold,
4139 >                                       ToDoubleFunction<? super V> transformer,
4140 >                                       double basis,
4141 >                                       DoubleBinaryOperator reducer) {
4142 >        if (transformer == null || reducer == null)
4143 >            throw new NullPointerException();
4144 >        return new MapReduceValuesToDoubleTask<K,V>
4145 >            (null, batchFor(parallelismThreshold), 0, 0, table,
4146 >             null, transformer, basis, reducer).invoke();
4147 >    }
4148 >
4149 >    /**
4150 >     * Returns the result of accumulating the given transformation
4151 >     * of all values using the given reducer to combine values,
4152 >     * and the given basis as an identity value.
4153 >     *
4154 >     * @param parallelismThreshold the (estimated) number of elements
4155 >     * needed for this operation to be executed in parallel
4156 >     * @param transformer a function returning the transformation
4157 >     * for an element
4158 >     * @param basis the identity (initial default value) for the reduction
4159 >     * @param reducer a commutative associative combining function
4160 >     * @return the result of accumulating the given transformation
4161 >     * of all values
4162 >     * @since 1.8
4163 >     */
4164 >    public long reduceValuesToLong(long parallelismThreshold,
4165 >                                   ToLongFunction<? super V> transformer,
4166 >                                   long basis,
4167 >                                   LongBinaryOperator reducer) {
4168 >        if (transformer == null || reducer == null)
4169 >            throw new NullPointerException();
4170 >        return new MapReduceValuesToLongTask<K,V>
4171 >            (null, batchFor(parallelismThreshold), 0, 0, table,
4172 >             null, transformer, basis, reducer).invoke();
4173 >    }
4174 >
4175 >    /**
4176 >     * Returns the result of accumulating the given transformation
4177 >     * of all values using the given reducer to combine values,
4178 >     * and the given basis as an identity value.
4179 >     *
4180 >     * @param parallelismThreshold the (estimated) number of elements
4181 >     * needed for this operation to be executed in parallel
4182 >     * @param transformer a function returning the transformation
4183 >     * for an element
4184 >     * @param basis the identity (initial default value) for the reduction
4185 >     * @param reducer a commutative associative combining function
4186 >     * @return the result of accumulating the given transformation
4187 >     * of all values
4188 >     * @since 1.8
4189 >     */
4190 >    public int reduceValuesToInt(long parallelismThreshold,
4191 >                                 ToIntFunction<? super V> transformer,
4192 >                                 int basis,
4193 >                                 IntBinaryOperator reducer) {
4194 >        if (transformer == null || reducer == null)
4195 >            throw new NullPointerException();
4196 >        return new MapReduceValuesToIntTask<K,V>
4197 >            (null, batchFor(parallelismThreshold), 0, 0, table,
4198 >             null, transformer, basis, reducer).invoke();
4199 >    }
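    // Editor's illustrative sketch (not part of the patch): summing all values with
    // the reduceValuesToLong form defined above; the map name is hypothetical.
    //
    //     long sum = hits.reduceValuesToLong(500L, Long::longValue, 0L, Long::sum);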
4200 >
4201 >    /**
4202 >     * Performs the given action for each entry.
4203 >     *
4204 >     * @param parallelismThreshold the (estimated) number of elements
4205 >     * needed for this operation to be executed in parallel
4206 >     * @param action the action
4207 >     * @since 1.8
4208 >     */
4209 >    public void forEachEntry(long parallelismThreshold,
4210 >                             Consumer<? super Map.Entry<K,V>> action) {
4211 >        if (action == null) throw new NullPointerException();
4212 >        new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table,
4213 >                                  action).invoke();
4214 >    }
4215 >
4216 >    /**
4217 >     * Performs the given action for each non-null transformation
4218 >     * of each entry.
4219 >     *
4220 >     * @param parallelismThreshold the (estimated) number of elements
4221 >     * needed for this operation to be executed in parallel
4222 >     * @param transformer a function returning the transformation
4223 >     * for an element, or null if there is no transformation (in
4224 >     * which case the action is not applied)
4225 >     * @param action the action
4226 >     * @param <U> the return type of the transformer
4227 >     * @since 1.8
4228 >     */
4229 >    public <U> void forEachEntry(long parallelismThreshold,
4230 >                                 Function<Map.Entry<K,V>, ? extends U> transformer,
4231 >                                 Consumer<? super U> action) {
4232 >        if (transformer == null || action == null)
4233 >            throw new NullPointerException();
4234 >        new ForEachTransformedEntryTask<K,V,U>
4235 >            (null, batchFor(parallelismThreshold), 0, 0, table,
4236 >             transformer, action).invoke();
4237 >    }
4238 >
4239 >    /**
4240 >     * Returns a non-null result from applying the given search
4241 >     * function on each entry, or null if none.  Upon success,
4242 >     * further element processing is suppressed and the results of
4243 >     * any other parallel invocations of the search function are
4244 >     * ignored.
4245 >     *
4246 >     * @param parallelismThreshold the (estimated) number of elements
4247 >     * needed for this operation to be executed in parallel
4248 >     * @param searchFunction a function returning a non-null
4249 >     * result on success, else null
4250 >     * @param <U> the return type of the search function
4251 >     * @return a non-null result from applying the given search
4252 >     * function on each entry, or null if none
4253 >     * @since 1.8
4254 >     */
4255 >    public <U> U searchEntries(long parallelismThreshold,
4256 >                               Function<Map.Entry<K,V>, ? extends U> searchFunction) {
4257 >        if (searchFunction == null) throw new NullPointerException();
4258 >        return new SearchEntriesTask<K,V,U>
4259 >            (null, batchFor(parallelismThreshold), 0, 0, table,
4260 >             searchFunction, new AtomicReference<U>()).invoke();
4261 >    }
4262 >
4263 >    /**
4264 >     * Returns the result of accumulating all entries using the
4265 >     * given reducer to combine values, or null if none.
4266 >     *
4267 >     * @param parallelismThreshold the (estimated) number of elements
4268 >     * needed for this operation to be executed in parallel
4269 >     * @param reducer a commutative associative combining function
4270 >     * @return the result of accumulating all entries
4271 >     * @since 1.8
4272 >     */
4273 >    public Map.Entry<K,V> reduceEntries(long parallelismThreshold,
4274 >                                        BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4275 >        if (reducer == null) throw new NullPointerException();
4276 >        return new ReduceEntriesTask<K,V>
4277 >            (null, batchFor(parallelismThreshold), 0, 0, table,
4278 >             null, reducer).invoke();
4279 >    }
4280 >
4281 >    /**
4282 >     * Returns the result of accumulating the given transformation
4283 >     * of all entries using the given reducer to combine values,
4284 >     * or null if none.
4285 >     *
4286 >     * @param parallelismThreshold the (estimated) number of elements
4287 >     * needed for this operation to be executed in parallel
4288 >     * @param transformer a function returning the transformation
4289 >     * for an element, or null if there is no transformation (in
4290 >     * which case it is not combined)
4291 >     * @param reducer a commutative associative combining function
4292 >     * @param <U> the return type of the transformer
4293 >     * @return the result of accumulating the given transformation
4294 >     * of all entries
4295 >     * @since 1.8
4296 >     */
4297 >    public <U> U reduceEntries(long parallelismThreshold,
4298 >                               Function<Map.Entry<K,V>, ? extends U> transformer,
4299 >                               BiFunction<? super U, ? super U, ? extends U> reducer) {
4300 >        if (transformer == null || reducer == null)
4301 >            throw new NullPointerException();
4302 >        return new MapReduceEntriesTask<K,V,U>
4303 >            (null, batchFor(parallelismThreshold), 0, 0, table,
4304 >             null, transformer, reducer).invoke();
4305 >    }
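    // Editor's illustrative sketch (not part of the patch): the entry-based forms take
    // Map.Entry arguments; here searchEntries (defined above) returns a key whose
    // value exceeds a hypothetical limit, or null if none.
    //
    //     String hot = hits.searchEntries(500L,
    //         e -> e.getValue() > 1_000L ? e.getKey() : null);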
4306 >
4307 >    /**
4308 >     * Returns the result of accumulating the given transformation
4309 >     * of all entries using the given reducer to combine values,
4310 >     * and the given basis as an identity value.
4311 >     *
4312 >     * @param parallelismThreshold the (estimated) number of elements
4313 >     * needed for this operation to be executed in parallel
4314 >     * @param transformer a function returning the transformation
4315 >     * for an element
4316 >     * @param basis the identity (initial default value) for the reduction
4317 >     * @param reducer a commutative associative combining function
4318 >     * @return the result of accumulating the given transformation
4319 >     * of all entries
4320 >     * @since 1.8
4321 >     */
4322 >    public double reduceEntriesToDouble(long parallelismThreshold,
4323 >                                        ToDoubleFunction<Map.Entry<K,V>> transformer,
4324 >                                        double basis,
4325 >                                        DoubleBinaryOperator reducer) {
4326 >        if (transformer == null || reducer == null)
4327 >            throw new NullPointerException();
4328 >        return new MapReduceEntriesToDoubleTask<K,V>
4329 >            (null, batchFor(parallelismThreshold), 0, 0, table,
4330 >             null, transformer, basis, reducer).invoke();
4331 >    }
4332 >
4333 >    /**
4334 >     * Returns the result of accumulating the given transformation
4335 >     * of all entries using the given reducer to combine values,
4336 >     * and the given basis as an identity value.
4337 >     *
4338 >     * @param parallelismThreshold the (estimated) number of elements
4339 >     * needed for this operation to be executed in parallel
4340 >     * @param transformer a function returning the transformation
4341 >     * for an element
4342 >     * @param basis the identity (initial default value) for the reduction
4343 >     * @param reducer a commutative associative combining function
4344 >     * @return the result of accumulating the given transformation
4345 >     * of all entries
4346 >     * @since 1.8
4347 >     */
4348 >    public long reduceEntriesToLong(long parallelismThreshold,
4349 >                                    ToLongFunction<Map.Entry<K,V>> transformer,
4350 >                                    long basis,
4351 >                                    LongBinaryOperator reducer) {
4352 >        if (transformer == null || reducer == null)
4353 >            throw new NullPointerException();
4354 >        return new MapReduceEntriesToLongTask<K,V>
4355 >            (null, batchFor(parallelismThreshold), 0, 0, table,
4356 >             null, transformer, basis, reducer).invoke();
4357 >    }
4358 >
4359 >    /**
4360 >     * Returns the result of accumulating the given transformation
4361 >     * of all entries using the given reducer to combine values,
4362 >     * and the given basis as an identity value.
4363 >     *
4364 >     * @param parallelismThreshold the (estimated) number of elements
4365 >     * needed for this operation to be executed in parallel
4366 >     * @param transformer a function returning the transformation
4367 >     * for an element
4368 >     * @param basis the identity (initial default value) for the reduction
4369 >     * @param reducer a commutative associative combining function
4370 >     * @return the result of accumulating the given transformation
4371 >     * of all entries
4372 >     * @since 1.8
4373 >     */
4374 >    public int reduceEntriesToInt(long parallelismThreshold,
4375 >                                  ToIntFunction<Map.Entry<K,V>> transformer,
4376 >                                  int basis,
4377 >                                  IntBinaryOperator reducer) {
4378 >        if (transformer == null || reducer == null)
4379 >            throw new NullPointerException();
4380 >        return new MapReduceEntriesToIntTask<K,V>
4381 >            (null, batchFor(parallelismThreshold), 0, 0, table,
4382 >             null, transformer, basis, reducer).invoke();
4383 >    }
4384 >
4385 >
4386 >    /* ----------------Views -------------- */
4387 >
4388 >    /**
4389 >     * Base class for views.
4390 >     */
4391 >    abstract static class CollectionView<K,V,E>
4392 >        implements Collection<E>, java.io.Serializable {
4393 >        private static final long serialVersionUID = 7249069246763182397L;
4394 >        final ConcurrentHashMap<K,V> map;
4395 >        CollectionView(ConcurrentHashMap<K,V> map)  { this.map = map; }
4396 >
4397 >        /**
4398 >         * Returns the map backing this view.
4399 >         *
4400 >         * @return the map backing this view
4401 >         */
4402 >        public ConcurrentHashMap<K,V> getMap() { return map; }
4403 >
4404 >        /**
4405 >         * Removes all of the elements from this view, by removing all
4406 >         * the mappings from the map backing this view.
4407 >         */
4408 >        public final void clear()      { map.clear(); }
4409 >        public final int size()        { return map.size(); }
4410 >        public final boolean isEmpty() { return map.isEmpty(); }
4411 >
4412 >        // implementations below rely on concrete classes supplying these
4413 >        // abstract methods
4414 >        /**
4415 >         * Returns an iterator over the elements in this collection.
4416 >         *
4417 >         * <p>The returned iterator is
4418 >         * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
4419 >         *
4420 >         * @return an iterator over the elements in this collection
4421 >         */
4422 >        public abstract Iterator<E> iterator();
4423 >        public abstract boolean contains(Object o);
4424 >        public abstract boolean remove(Object o);
4425 >
4426 >        private static final String oomeMsg = "Required array size too large";
4427 >
4428 >        public final Object[] toArray() {
4429 >            long sz = map.mappingCount();
4430 >            if (sz > MAX_ARRAY_SIZE)
4431 >                throw new OutOfMemoryError(oomeMsg);
4432 >            int n = (int)sz;
4433 >            Object[] r = new Object[n];
4434 >            int i = 0;
4435 >            for (E e : this) {
4436 >                if (i == n) {
4437 >                    if (n >= MAX_ARRAY_SIZE)
4438 >                        throw new OutOfMemoryError(oomeMsg);
4439 >                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
4440 >                        n = MAX_ARRAY_SIZE;
4441 >                    else
4442 >                        n += (n >>> 1) + 1;
4443 >                    r = Arrays.copyOf(r, n);
4444 >                }
4445 >                r[i++] = e;
4446 >            }
4447 >            return (i == n) ? r : Arrays.copyOf(r, i);
4448 >        }
4449 >
4450 >        @SuppressWarnings("unchecked")
4451 >        public final <T> T[] toArray(T[] a) {
4452 >            long sz = map.mappingCount();
4453 >            if (sz > MAX_ARRAY_SIZE)
4454 >                throw new OutOfMemoryError(oomeMsg);
4455 >            int m = (int)sz;
4456 >            T[] r = (a.length >= m) ? a :
4457 >                (T[])java.lang.reflect.Array
4458 >                .newInstance(a.getClass().getComponentType(), m);
4459 >            int n = r.length;
4460 >            int i = 0;
4461 >            for (E e : this) {
4462 >                if (i == n) {
4463 >                    if (n >= MAX_ARRAY_SIZE)
4464 >                        throw new OutOfMemoryError(oomeMsg);
4465 >                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
4466 >                        n = MAX_ARRAY_SIZE;
4467 >                    else
4468 >                        n += (n >>> 1) + 1;
4469 >                    r = Arrays.copyOf(r, n);
4470 >                }
4471 >                r[i++] = (T)e;
4472 >            }
4473 >            if (a == r && i < n) {
4474 >                r[i] = null; // null-terminate
4475 >                return r;
4476 >            }
4477 >            return (i == n) ? r : Arrays.copyOf(r, i);
4478          }
4479 <        public int size() {
4480 <            return ConcurrentHashMap.this.size();
4479 >
4480 >        /**
4481 >         * Returns a string representation of this collection.
4482 >         * The string representation consists of the string representations
4483 >         * of the collection's elements in the order they are returned by
4484 >         * its iterator, enclosed in square brackets ({@code "[]"}).
4485 >         * Adjacent elements are separated by the characters {@code ", "}
4486 >         * (comma and space).  Elements are converted to strings as by
4487 >         * {@link String#valueOf(Object)}.
4488 >         *
4489 >         * @return a string representation of this collection
4490 >         */
4491 >        public final String toString() {
4492 >            StringBuilder sb = new StringBuilder();
4493 >            sb.append('[');
4494 >            Iterator<E> it = iterator();
4495 >            if (it.hasNext()) {
4496 >                for (;;) {
4497 >                    Object e = it.next();
4498 >                    sb.append(e == this ? "(this Collection)" : e);
4499 >                    if (!it.hasNext())
4500 >                        break;
4501 >                    sb.append(',').append(' ');
4502 >                }
4503 >            }
4504 >            return sb.append(']').toString();
4505          }
4506 <        public boolean isEmpty() {
4507 <            return ConcurrentHashMap.this.isEmpty();
4506 >
4507 >        public final boolean containsAll(Collection<?> c) {
4508 >            if (c != this) {
4509 >                for (Object e : c) {
4510 >                    if (e == null || !contains(e))
4511 >                        return false;
4512 >                }
4513 >            }
4514 >            return true;
4515          }
4516 <        public boolean contains(Object o) {
4517 <            return ConcurrentHashMap.this.containsValue(o);
4516 >
4517 >        public final boolean removeAll(Collection<?> c) {
4518 >            if (c == null) throw new NullPointerException();
4519 >            boolean modified = false;
4520 >            for (Iterator<E> it = iterator(); it.hasNext();) {
4521 >                if (c.contains(it.next())) {
4522 >                    it.remove();
4523 >                    modified = true;
4524 >                }
4525 >            }
4526 >            return modified;
4527          }
4528 <        public void clear() {
4529 <            ConcurrentHashMap.this.clear();
4528 >
4529 >        public final boolean retainAll(Collection<?> c) {
4530 >            if (c == null) throw new NullPointerException();
4531 >            boolean modified = false;
4532 >            for (Iterator<E> it = iterator(); it.hasNext();) {
4533 >                if (!c.contains(it.next())) {
4534 >                    it.remove();
4535 >                    modified = true;
4536 >                }
4537 >            }
4538 >            return modified;
4539          }
4540 +
4541      }
4542  
4543 <    final class EntrySet extends AbstractSet<Map.Entry<K,V>> {
4544 <        public Iterator<Map.Entry<K,V>> iterator() {
4545 <            return new EntryIterator();
4543 >    /**
4544 >     * A view of a ConcurrentHashMap as a {@link Set} of keys, in
4545 >     * which additions may optionally be enabled by mapping to a
4546 >     * common value.  This class cannot be directly instantiated.
4547 >     * See {@link #keySet() keySet()},
4548 >     * {@link #keySet(Object) keySet(V)},
4549 >     * {@link #newKeySet() newKeySet()},
4550 >     * {@link #newKeySet(int) newKeySet(int)}.
4551 >     *
4552 >     * @since 1.8
4553 >     */
4554 >    public static class KeySetView<K,V> extends CollectionView<K,V,K>
4555 >        implements Set<K>, java.io.Serializable {
4556 >        private static final long serialVersionUID = 7249069246763182397L;
4557 >        private final V value;
4558 >        KeySetView(ConcurrentHashMap<K,V> map, V value) {  // non-public
4559 >            super(map);
4560 >            this.value = value;
4561 >        }
4562 >
4563 >        /**
4564 >         * Returns the default mapped value for additions,
4565 >         * or {@code null} if additions are not supported.
4566 >         *
4567 >         * @return the default mapped value for additions, or {@code null}
4568 >         * if not supported
4569 >         */
4570 >        public V getMappedValue() { return value; }
4571 >
4572 >        /**
4573 >         * {@inheritDoc}
4574 >         * @throws NullPointerException if the specified key is null
4575 >         */
4576 >        public boolean contains(Object o) { return map.containsKey(o); }
4577 >
4578 >        /**
4579 >         * Removes the key from this map view, by removing the key (and its
4580 >         * corresponding value) from the backing map.  This method does
4581 >         * nothing if the key is not in the map.
4582 >         *
4583 >         * @param  o the key to be removed from the backing map
4584 >         * @return {@code true} if the backing map contained the specified key
4585 >         * @throws NullPointerException if the specified key is null
4586 >         */
4587 >        public boolean remove(Object o) { return map.remove(o) != null; }
4588 >
4589 >        /**
4590 >         * @return an iterator over the keys of the backing map
4591 >         */
4592 >        public Iterator<K> iterator() {
4593 >            Node<K,V>[] t;
4594 >            ConcurrentHashMap<K,V> m = map;
4595 >            int f = (t = m.table) == null ? 0 : t.length;
4596 >            return new KeyIterator<K,V>(t, f, 0, f, m);
4597 >        }
4598 >
4599 >        /**
4600 >         * Adds the specified key to this set view by mapping the key to
4601 >         * the default mapped value in the backing map, if defined.
4602 >         *
4603 >         * @param e key to be added
4604 >         * @return {@code true} if this set changed as a result of the call
4605 >         * @throws NullPointerException if the specified key is null
4606 >         * @throws UnsupportedOperationException if no default mapped value
4607 >         * for additions was provided
4608 >         */
4609 >        public boolean add(K e) {
4610 >            V v;
4611 >            if ((v = value) == null)
4612 >                throw new UnsupportedOperationException();
4613 >            return map.putVal(e, v, true) == null;
4614          }
4615 +
4616 +        /**
4617 +         * Adds all of the elements in the specified collection to this set,
4618 +         * as if by calling {@link #add} on each one.
4619 +         *
4620 +         * @param c the elements to be inserted into this set
4621 +         * @return {@code true} if this set changed as a result of the call
4622 +         * @throws NullPointerException if the collection or any of its
4623 +         * elements are {@code null}
4624 +         * @throws UnsupportedOperationException if no default mapped value
4625 +         * for additions was provided
4626 +         */
4627 +        public boolean addAll(Collection<? extends K> c) {
4628 +            boolean added = false;
4629 +            V v;
4630 +            if ((v = value) == null)
4631 +                throw new UnsupportedOperationException();
4632 +            for (K e : c) {
4633 +                if (map.putVal(e, v, true) == null)
4634 +                    added = true;
4635 +            }
4636 +            return added;
4637 +        }
4638 +
4639 +        public int hashCode() {
4640 +            int h = 0;
4641 +            for (K e : this)
4642 +                h += e.hashCode();
4643 +            return h;
4644 +        }
4645 +
4646 +        public boolean equals(Object o) {
4647 +            Set<?> c;
4648 +            return ((o instanceof Set) &&
4649 +                    ((c = (Set<?>)o) == this ||
4650 +                     (containsAll(c) && c.containsAll(this))));
4651 +        }
4652 +
4653 +        public Spliterator<K> spliterator() {
4654 +            Node<K,V>[] t;
4655 +            ConcurrentHashMap<K,V> m = map;
4656 +            long n = m.sumCount();
4657 +            int f = (t = m.table) == null ? 0 : t.length;
4658 +            return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
4659 +        }
4660 +
4661 +        public void forEach(Consumer<? super K> action) {
4662 +            if (action == null) throw new NullPointerException();
4663 +            Node<K,V>[] t;
4664 +            if ((t = map.table) != null) {
4665 +                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4666 +                for (Node<K,V> p; (p = it.advance()) != null; )
4667 +                    action.accept(p.key);
4668 +            }
4669 +        }
4670 +    }
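    // Editor's illustrative sketch (not part of the patch): how the key-set views
    // described above are typically obtained; identifiers are hypothetical.
    //
    //     // a concurrent Set<String> backed by a ConcurrentHashMap, additions allowed
    //     Set<String> seen = ConcurrentHashMap.newKeySet();
    //     seen.add("x");
    //
    //     // view of an existing map; add() maps new keys to the given default value
    //     KeySetView<String,Long> keys = hits.keySet(0L);
    //     keys.add("new-page");    // like hits.putIfAbsent("new-page", 0L)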
4671 +
4672 +    /**
4673 +     * A view of a ConcurrentHashMap as a {@link Collection} of
4674 +     * values, in which additions are disabled. This class cannot be
4675 +     * directly instantiated. See {@link #values()}.
4676 +     */
4677 +    static final class ValuesView<K,V> extends CollectionView<K,V,V>
4678 +        implements Collection<V>, java.io.Serializable {
4679 +        private static final long serialVersionUID = 2249069246763182397L;
4680 +        ValuesView(ConcurrentHashMap<K,V> map) { super(map); }
4681 +        public final boolean contains(Object o) {
4682 +            return map.containsValue(o);
4683 +        }
4684 +
4685 +        public final boolean remove(Object o) {
4686 +            if (o != null) {
4687 +                for (Iterator<V> it = iterator(); it.hasNext();) {
4688 +                    if (o.equals(it.next())) {
4689 +                        it.remove();
4690 +                        return true;
4691 +                    }
4692 +                }
4693 +            }
4694 +            return false;
4695 +        }
4696 +
4697 +        public final Iterator<V> iterator() {
4698 +            ConcurrentHashMap<K,V> m = map;
4699 +            Node<K,V>[] t;
4700 +            int f = (t = m.table) == null ? 0 : t.length;
4701 +            return new ValueIterator<K,V>(t, f, 0, f, m);
4702 +        }
4703 +
4704 +        public final boolean add(V e) {
4705 +            throw new UnsupportedOperationException();
4706 +        }
4707 +        public final boolean addAll(Collection<? extends V> c) {
4708 +            throw new UnsupportedOperationException();
4709 +        }
4710 +
4711 +        public boolean removeIf(Predicate<? super V> filter) {
4712 +            return map.removeValueIf(filter);
4713 +        }
4714 +
4715 +        public Spliterator<V> spliterator() {
4716 +            Node<K,V>[] t;
4717 +            ConcurrentHashMap<K,V> m = map;
4718 +            long n = m.sumCount();
4719 +            int f = (t = m.table) == null ? 0 : t.length;
4720 +            return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
4721 +        }
4722 +
4723 +        public void forEach(Consumer<? super V> action) {
4724 +            if (action == null) throw new NullPointerException();
4725 +            Node<K,V>[] t;
4726 +            if ((t = map.table) != null) {
4727 +                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4728 +                for (Node<K,V> p; (p = it.advance()) != null; )
4729 +                    action.accept(p.val);
4730 +            }
4731 +        }
4732 +    }
4733 +
4734 +    /**
4735 +     * A view of a ConcurrentHashMap as a {@link Set} of (key, value)
4736 +     * entries.  This class cannot be directly instantiated. See
4737 +     * {@link #entrySet()}.
4738 +     */
4739 +    static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>>
4740 +        implements Set<Map.Entry<K,V>>, java.io.Serializable {
4741 +        private static final long serialVersionUID = 2249069246763182397L;
4742 +        EntrySetView(ConcurrentHashMap<K,V> map) { super(map); }
4743 +
4744          public boolean contains(Object o) {
4745 <            if (!(o instanceof Map.Entry))
4746 <                return false;
4747 <            Map.Entry<?,?> e = (Map.Entry<?,?>)o;
4748 <            V v = ConcurrentHashMap.this.get(e.getKey());
4749 <            return v != null && v.equals(e.getValue());
4745 >            Object k, v, r; Map.Entry<?,?> e;
4746 >            return ((o instanceof Map.Entry) &&
4747 >                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4748 >                    (r = map.get(k)) != null &&
4749 >                    (v = e.getValue()) != null &&
4750 >                    (v == r || v.equals(r)));
4751          }
4752 +
4753          public boolean remove(Object o) {
4754 <            if (!(o instanceof Map.Entry))
4755 <                return false;
4756 <            Map.Entry<?,?> e = (Map.Entry<?,?>)o;
4757 <            return ConcurrentHashMap.this.remove(e.getKey(), e.getValue());
4754 >            Object k, v; Map.Entry<?,?> e;
4755 >            return ((o instanceof Map.Entry) &&
4756 >                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4757 >                    (v = e.getValue()) != null &&
4758 >                    map.remove(k, v));
4759          }
4760 <        public int size() {
4761 <            return ConcurrentHashMap.this.size();
4760 >
4761 >        /**
4762 >         * @return an iterator over the entries of the backing map
4763 >         */
4764 >        public Iterator<Map.Entry<K,V>> iterator() {
4765 >            ConcurrentHashMap<K,V> m = map;
4766 >            Node<K,V>[] t;
4767 >            int f = (t = m.table) == null ? 0 : t.length;
4768 >            return new EntryIterator<K,V>(t, f, 0, f, m);
4769          }
4770 <        public boolean isEmpty() {
4771 <            return ConcurrentHashMap.this.isEmpty();
4770 >
4771 >        public boolean add(Entry<K,V> e) {
4772 >            return map.putVal(e.getKey(), e.getValue(), false) == null;
4773          }
4774 <        public void clear() {
4775 <            ConcurrentHashMap.this.clear();
4774 >
4775 >        public boolean addAll(Collection<? extends Entry<K,V>> c) {
4776 >            boolean added = false;
4777 >            for (Entry<K,V> e : c) {
4778 >                if (add(e))
4779 >                    added = true;
4780 >            }
4781 >            return added;
4782 >        }
4783 >
4784 >        public boolean removeIf(Predicate<? super Entry<K,V>> filter) {
4785 >            return map.removeEntryIf(filter);
4786 >        }
4787 >
4788 >        public final int hashCode() {
4789 >            int h = 0;
4790 >            Node<K,V>[] t;
4791 >            if ((t = map.table) != null) {
4792 >                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4793 >                for (Node<K,V> p; (p = it.advance()) != null; ) {
4794 >                    h += p.hashCode();
4795 >                }
4796 >            }
4797 >            return h;
4798 >        }
4799 >
4800 >        public final boolean equals(Object o) {
4801 >            Set<?> c;
4802 >            return ((o instanceof Set) &&
4803 >                    ((c = (Set<?>)o) == this ||
4804 >                     (containsAll(c) && c.containsAll(this))));
4805 >        }
4806 >
4807 >        public Spliterator<Map.Entry<K,V>> spliterator() {
4808 >            Node<K,V>[] t;
4809 >            ConcurrentHashMap<K,V> m = map;
4810 >            long n = m.sumCount();
4811 >            int f = (t = m.table) == null ? 0 : t.length;
4812 >            return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m);
4813 >        }
4814 >
4815 >        public void forEach(Consumer<? super Map.Entry<K,V>> action) {
4816 >            if (action == null) throw new NullPointerException();
4817 >            Node<K,V>[] t;
4818 >            if ((t = map.table) != null) {
4819 >                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4820 >                for (Node<K,V> p; (p = it.advance()) != null; )
4821 >                    action.accept(new MapEntry<K,V>(p.key, p.val, map));
4822 >            }
4823          }
4824 +
4825      }
4826  
4827 <    /* ---------------- Serialization Support -------------- */
4827 >    // -------------------------------------------------------
4828  
4829      /**
4830 <     * Saves the state of the <tt>ConcurrentHashMap</tt> instance to a
4831 <     * stream (i.e., serializes it).
1402 <     * @param s the stream
1403 <     * @serialData
1404 <     * the key (Object) and value (Object)
1405 <     * for each key-value mapping, followed by a null pair.
1406 <     * The key-value mappings are emitted in no particular order.
4830 >     * Base class for bulk tasks. Repeats some fields and code from
4831 >     * class Traverser, because we need to subclass CountedCompleter.
4832       */
4833 <    private void writeObject(java.io.ObjectOutputStream s) throws IOException {
4834 <        // force all segments for serialization compatibility
4835 <        for (int k = 0; k < segments.length; ++k)
4836 <            ensureSegment(k);
4837 <        s.defaultWriteObject();
4838 <
4839 <        final Segment<K,V>[] segments = this.segments;
4840 <        for (int k = 0; k < segments.length; ++k) {
4841 <            Segment<K,V> seg = segmentAt(segments, k);
4842 <            seg.lock();
4843 <            try {
4844 <                HashEntry<K,V>[] tab = seg.table;
4845 <                for (int i = 0; i < tab.length; ++i) {
4846 <                    HashEntry<K,V> e;
4847 <                    for (e = entryAt(tab, i); e != null; e = e.next) {
4848 <                        s.writeObject(e.key);
4849 <                        s.writeObject(e.value);
4833 >    @SuppressWarnings("serial")
4834 >    abstract static class BulkTask<K,V,R> extends CountedCompleter<R> {
4835 >        Node<K,V>[] tab;        // same as Traverser
4836 >        Node<K,V> next;
4837 >        TableStack<K,V> stack, spare;
4838 >        int index;
4839 >        int baseIndex;
4840 >        int baseLimit;
4841 >        final int baseSize;
4842 >        int batch;              // split control
4843 >
4844 >        BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) {
4845 >            super(par);
4846 >            this.batch = b;
4847 >            this.index = this.baseIndex = i;
4848 >            if ((this.tab = t) == null)
4849 >                this.baseSize = this.baseLimit = 0;
4850 >            else if (par == null)
4851 >                this.baseSize = this.baseLimit = t.length;
4852 >            else {
4853 >                this.baseLimit = f;
4854 >                this.baseSize = par.baseSize;
4855 >            }
4856 >        }
4857 >
4858 >        /**
4859 >         * Same as Traverser version
4860 >         */
4861 >        final Node<K,V> advance() {
4862 >            Node<K,V> e;
4863 >            if ((e = next) != null)
4864 >                e = e.next;
4865 >            for (;;) {
4866 >                Node<K,V>[] t; int i, n;
4867 >                if (e != null)
4868 >                    return next = e;
4869 >                if (baseIndex >= baseLimit || (t = tab) == null ||
4870 >                    (n = t.length) <= (i = index) || i < 0)
4871 >                    return next = null;
4872 >                if ((e = tabAt(t, i)) != null && e.hash < 0) {
4873 >                    if (e instanceof ForwardingNode) {
4874 >                        tab = ((ForwardingNode<K,V>)e).nextTable;
4875 >                        e = null;
4876 >                        pushState(t, i, n);
4877 >                        continue;
4878                      }
4879 +                    else if (e instanceof TreeBin)
4880 +                        e = ((TreeBin<K,V>)e).first;
4881 +                    else
4882 +                        e = null;
4883                  }
4884 <            } finally {
4885 <                seg.unlock();
4884 >                if (stack != null)
4885 >                    recoverState(n);
4886 >                else if ((index = i + baseSize) >= n)
4887 >                    index = ++baseIndex;
4888              }
4889          }
4890 <        s.writeObject(null);
4891 <        s.writeObject(null);
4890 >
4891 >        private void pushState(Node<K,V>[] t, int i, int n) {
4892 >            TableStack<K,V> s = spare;
4893 >            if (s != null)
4894 >                spare = s.next;
4895 >            else
4896 >                s = new TableStack<K,V>();
4897 >            s.tab = t;
4898 >            s.length = n;
4899 >            s.index = i;
4900 >            s.next = stack;
4901 >            stack = s;
4902 >        }
4903 >
4904 >        private void recoverState(int n) {
4905 >            TableStack<K,V> s; int len;
4906 >            while ((s = stack) != null && (index += (len = s.length)) >= n) {
4907 >                n = len;
4908 >                index = s.index;
4909 >                tab = s.tab;
4910 >                s.tab = null;
4911 >                TableStack<K,V> next = s.next;
4912 >                s.next = spare; // save for reuse
4913 >                stack = next;
4914 >                spare = s;
4915 >            }
4916 >            if (s == null && (index += baseSize) >= n)
4917 >                index = ++baseIndex;
4918 >        }
4919      }
4920  
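    /*
     * A minimal illustrative sketch (hypothetical helper, not part of this file)
     * of the range-splitting pattern that every BulkTask subclass below repeats
     * in compute(): while batch > 0, the upper half [h, f) of the remaining
     * index range is forked to a sibling task and the current task keeps the
     * lower half [i, h), halving batch each time.
     */
    static void splitSketch(int batch, int lo, int hi) {
        int i = lo, f = hi, h;
        while (batch > 0 && (h = (f + i) >>> 1) > i) {
            batch >>>= 1;
            // in the real tasks: new XxxTask(this, batch, h, f, tab, ...).fork()
            System.out.println("fork sibling task for [" + h + ", " + f + ")");
            f = h;                 // this task continues on the lower half
        }
        System.out.println("process [" + i + ", " + f + ") in this task");
    }
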
4921 <    /**
4922 <     * Reconstitutes the <tt>ConcurrentHashMap</tt> instance from a
4923 <     * stream (i.e., deserializes it).
4924 <     * @param s the stream
4925 <     */
4926 <    @SuppressWarnings("unchecked")
4927 <    private void readObject(java.io.ObjectInputStream s)
4928 <        throws IOException, ClassNotFoundException {
4929 <        s.defaultReadObject();
4921 >    /*
4922 >     * Task classes. Coded in a regular but ugly format/style to
4923 >     * simplify checks that each variant differs in the right way from
4924 >     * others. The null screenings exist because compilers cannot tell
4925 >     * that we've already null-checked task arguments, so we force
4926 >     * simplest hoisted bypass to help avoid convoluted traps.
4927 >     */
4928 >    @SuppressWarnings("serial")
4929 >    static final class ForEachKeyTask<K,V>
4930 >        extends BulkTask<K,V,Void> {
4931 >        final Consumer<? super K> action;
4932 >        ForEachKeyTask
4933 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4934 >             Consumer<? super K> action) {
4935 >            super(p, b, i, f, t);
4936 >            this.action = action;
4937 >        }
4938 >        public final void compute() {
4939 >            final Consumer<? super K> action;
4940 >            if ((action = this.action) != null) {
4941 >                for (int i = baseIndex, f, h; batch > 0 &&
4942 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4943 >                    addToPendingCount(1);
4944 >                    new ForEachKeyTask<K,V>
4945 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
4946 >                         action).fork();
4947 >                }
4948 >                for (Node<K,V> p; (p = advance()) != null;)
4949 >                    action.accept(p.key);
4950 >                propagateCompletion();
4951 >            }
4952 >        }
4953 >    }
4954  
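    /*
     * Illustrative usage sketch (not part of this file): ForEachKeyTask backs
     * the public forEachKey bulk method, run in the common fork-join pool. A
     * parallelismThreshold of 1 requests maximal parallelism; Long.MAX_VALUE
     * runs the traversal sequentially in the caller.
     */
    ConcurrentHashMap<String, Integer> m = new ConcurrentHashMap<>();
    m.put("a", 1); m.put("b", 2);
    m.forEachKey(1L, k -> System.out.println(k));        // parallel
    m.forEachKey(Long.MAX_VALUE, System.out::println);   // effectively sequential
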
4955 <        // Re-initialize segments to be minimally sized, and let grow.
4956 <        int cap = MIN_SEGMENT_TABLE_CAPACITY;
4957 <        final Segment<K,V>[] segments = this.segments;
4958 <        for (int k = 0; k < segments.length; ++k) {
4959 <            Segment<K,V> seg = segments[k];
4960 <            if (seg != null) {
4961 <                seg.threshold = (int)(cap * seg.loadFactor);
4962 <                seg.table = (HashEntry<K,V>[]) new HashEntry[cap];
4955 >    @SuppressWarnings("serial")
4956 >    static final class ForEachValueTask<K,V>
4957 >        extends BulkTask<K,V,Void> {
4958 >        final Consumer<? super V> action;
4959 >        ForEachValueTask
4960 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4961 >             Consumer<? super V> action) {
4962 >            super(p, b, i, f, t);
4963 >            this.action = action;
4964 >        }
4965 >        public final void compute() {
4966 >            final Consumer<? super V> action;
4967 >            if ((action = this.action) != null) {
4968 >                for (int i = baseIndex, f, h; batch > 0 &&
4969 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4970 >                    addToPendingCount(1);
4971 >                    new ForEachValueTask<K,V>
4972 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
4973 >                         action).fork();
4974 >                }
4975 >                for (Node<K,V> p; (p = advance()) != null;)
4976 >                    action.accept(p.val);
4977 >                propagateCompletion();
4978              }
4979          }
4980 +    }
4981  
4982 <        // Read the keys and values, and put the mappings in the table
4983 <        for (;;) {
4984 <            K key = (K) s.readObject();
4985 <            V value = (V) s.readObject();
4986 <            if (key == null)
4987 <                break;
4988 <            put(key, value);
4982 >    @SuppressWarnings("serial")
4983 >    static final class ForEachEntryTask<K,V>
4984 >        extends BulkTask<K,V,Void> {
4985 >        final Consumer<? super Entry<K,V>> action;
4986 >        ForEachEntryTask
4987 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4988 >             Consumer<? super Entry<K,V>> action) {
4989 >            super(p, b, i, f, t);
4990 >            this.action = action;
4991 >        }
4992 >        public final void compute() {
4993 >            final Consumer<? super Entry<K,V>> action;
4994 >            if ((action = this.action) != null) {
4995 >                for (int i = baseIndex, f, h; batch > 0 &&
4996 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4997 >                    addToPendingCount(1);
4998 >                    new ForEachEntryTask<K,V>
4999 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
5000 >                         action).fork();
5001 >                }
5002 >                for (Node<K,V> p; (p = advance()) != null; )
5003 >                    action.accept(p);
5004 >                propagateCompletion();
5005 >            }
5006 >        }
5007 >    }
5008 >
5009 >    @SuppressWarnings("serial")
5010 >    static final class ForEachMappingTask<K,V>
5011 >        extends BulkTask<K,V,Void> {
5012 >        final BiConsumer<? super K, ? super V> action;
5013 >        ForEachMappingTask
5014 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5015 >             BiConsumer<? super K,? super V> action) {
5016 >            super(p, b, i, f, t);
5017 >            this.action = action;
5018 >        }
5019 >        public final void compute() {
5020 >            final BiConsumer<? super K, ? super V> action;
5021 >            if ((action = this.action) != null) {
5022 >                for (int i = baseIndex, f, h; batch > 0 &&
5023 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5024 >                    addToPendingCount(1);
5025 >                    new ForEachMappingTask<K,V>
5026 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
5027 >                         action).fork();
5028 >                }
5029 >                for (Node<K,V> p; (p = advance()) != null; )
5030 >                    action.accept(p.key, p.val);
5031 >                propagateCompletion();
5032 >            }
5033 >        }
5034 >    }
5035 >
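    /*
     * Illustrative usage sketch (not part of this file): the four ForEach*Task
     * classes above back the key/value/entry/mapping variants of the public
     * forEach bulk methods.
     */
    ConcurrentHashMap<String, Integer> m = new ConcurrentHashMap<>();
    m.put("a", 1); m.put("b", 2);
    m.forEach(1L, (k, v) -> System.out.println(k + "=" + v));   // ForEachMappingTask
    m.forEachValue(1L, v -> System.out.println(v));             // ForEachValueTask
    m.forEachEntry(1L, e -> System.out.println(e.getKey()));    // ForEachEntryTask
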
5036 >    @SuppressWarnings("serial")
5037 >    static final class ForEachTransformedKeyTask<K,V,U>
5038 >        extends BulkTask<K,V,Void> {
5039 >        final Function<? super K, ? extends U> transformer;
5040 >        final Consumer<? super U> action;
5041 >        ForEachTransformedKeyTask
5042 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5043 >             Function<? super K, ? extends U> transformer, Consumer<? super U> action) {
5044 >            super(p, b, i, f, t);
5045 >            this.transformer = transformer; this.action = action;
5046 >        }
5047 >        public final void compute() {
5048 >            final Function<? super K, ? extends U> transformer;
5049 >            final Consumer<? super U> action;
5050 >            if ((transformer = this.transformer) != null &&
5051 >                (action = this.action) != null) {
5052 >                for (int i = baseIndex, f, h; batch > 0 &&
5053 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5054 >                    addToPendingCount(1);
5055 >                    new ForEachTransformedKeyTask<K,V,U>
5056 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
5057 >                         transformer, action).fork();
5058 >                }
5059 >                for (Node<K,V> p; (p = advance()) != null; ) {
5060 >                    U u;
5061 >                    if ((u = transformer.apply(p.key)) != null)
5062 >                        action.accept(u);
5063 >                }
5064 >                propagateCompletion();
5065 >            }
5066 >        }
5067 >    }
5068 >
5069 >    @SuppressWarnings("serial")
5070 >    static final class ForEachTransformedValueTask<K,V,U>
5071 >        extends BulkTask<K,V,Void> {
5072 >        final Function<? super V, ? extends U> transformer;
5073 >        final Consumer<? super U> action;
5074 >        ForEachTransformedValueTask
5075 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5076 >             Function<? super V, ? extends U> transformer, Consumer<? super U> action) {
5077 >            super(p, b, i, f, t);
5078 >            this.transformer = transformer; this.action = action;
5079 >        }
5080 >        public final void compute() {
5081 >            final Function<? super V, ? extends U> transformer;
5082 >            final Consumer<? super U> action;
5083 >            if ((transformer = this.transformer) != null &&
5084 >                (action = this.action) != null) {
5085 >                for (int i = baseIndex, f, h; batch > 0 &&
5086 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5087 >                    addToPendingCount(1);
5088 >                    new ForEachTransformedValueTask<K,V,U>
5089 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
5090 >                         transformer, action).fork();
5091 >                }
5092 >                for (Node<K,V> p; (p = advance()) != null; ) {
5093 >                    U u;
5094 >                    if ((u = transformer.apply(p.val)) != null)
5095 >                        action.accept(u);
5096 >                }
5097 >                propagateCompletion();
5098 >            }
5099 >        }
5100 >    }
5101 >
5102 >    @SuppressWarnings("serial")
5103 >    static final class ForEachTransformedEntryTask<K,V,U>
5104 >        extends BulkTask<K,V,Void> {
5105 >        final Function<Map.Entry<K,V>, ? extends U> transformer;
5106 >        final Consumer<? super U> action;
5107 >        ForEachTransformedEntryTask
5108 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5109 >             Function<Map.Entry<K,V>, ? extends U> transformer, Consumer<? super U> action) {
5110 >            super(p, b, i, f, t);
5111 >            this.transformer = transformer; this.action = action;
5112 >        }
5113 >        public final void compute() {
5114 >            final Function<Map.Entry<K,V>, ? extends U> transformer;
5115 >            final Consumer<? super U> action;
5116 >            if ((transformer = this.transformer) != null &&
5117 >                (action = this.action) != null) {
5118 >                for (int i = baseIndex, f, h; batch > 0 &&
5119 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5120 >                    addToPendingCount(1);
5121 >                    new ForEachTransformedEntryTask<K,V,U>
5122 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
5123 >                         transformer, action).fork();
5124 >                }
5125 >                for (Node<K,V> p; (p = advance()) != null; ) {
5126 >                    U u;
5127 >                    if ((u = transformer.apply(p)) != null)
5128 >                        action.accept(u);
5129 >                }
5130 >                propagateCompletion();
5131 >            }
5132 >        }
5133 >    }
5134 >
5135 >    @SuppressWarnings("serial")
5136 >    static final class ForEachTransformedMappingTask<K,V,U>
5137 >        extends BulkTask<K,V,Void> {
5138 >        final BiFunction<? super K, ? super V, ? extends U> transformer;
5139 >        final Consumer<? super U> action;
5140 >        ForEachTransformedMappingTask
5141 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5142 >             BiFunction<? super K, ? super V, ? extends U> transformer,
5143 >             Consumer<? super U> action) {
5144 >            super(p, b, i, f, t);
5145 >            this.transformer = transformer; this.action = action;
5146 >        }
5147 >        public final void compute() {
5148 >            final BiFunction<? super K, ? super V, ? extends U> transformer;
5149 >            final Consumer<? super U> action;
5150 >            if ((transformer = this.transformer) != null &&
5151 >                (action = this.action) != null) {
5152 >                for (int i = baseIndex, f, h; batch > 0 &&
5153 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5154 >                    addToPendingCount(1);
5155 >                    new ForEachTransformedMappingTask<K,V,U>
5156 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
5157 >                         transformer, action).fork();
5158 >                }
5159 >                for (Node<K,V> p; (p = advance()) != null; ) {
5160 >                    U u;
5161 >                    if ((u = transformer.apply(p.key, p.val)) != null)
5162 >                        action.accept(u);
5163 >                }
5164 >                propagateCompletion();
5165 >            }
5166 >        }
5167 >    }
5168 >
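    /*
     * Illustrative usage sketch (not part of this file): the transformed
     * variants above correspond to the forEach bulk overloads that take a
     * transformer; a null transformer result is skipped, so the transformer
     * doubles as a filter.
     */
    ConcurrentHashMap<String, Integer> m = new ConcurrentHashMap<>();
    m.put("a", 1); m.put("bb", 2);
    m.forEach(1L,
              (k, v) -> v > 1 ? k + "=" + v : null,   // transformer; null means skip
              s -> System.out.println(s));            // action on non-null results
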
5169 >    @SuppressWarnings("serial")
5170 >    static final class SearchKeysTask<K,V,U>
5171 >        extends BulkTask<K,V,U> {
5172 >        final Function<? super K, ? extends U> searchFunction;
5173 >        final AtomicReference<U> result;
5174 >        SearchKeysTask
5175 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5176 >             Function<? super K, ? extends U> searchFunction,
5177 >             AtomicReference<U> result) {
5178 >            super(p, b, i, f, t);
5179 >            this.searchFunction = searchFunction; this.result = result;
5180 >        }
5181 >        public final U getRawResult() { return result.get(); }
5182 >        public final void compute() {
5183 >            final Function<? super K, ? extends U> searchFunction;
5184 >            final AtomicReference<U> result;
5185 >            if ((searchFunction = this.searchFunction) != null &&
5186 >                (result = this.result) != null) {
5187 >                for (int i = baseIndex, f, h; batch > 0 &&
5188 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5189 >                    if (result.get() != null)
5190 >                        return;
5191 >                    addToPendingCount(1);
5192 >                    new SearchKeysTask<K,V,U>
5193 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
5194 >                         searchFunction, result).fork();
5195 >                }
5196 >                while (result.get() == null) {
5197 >                    U u;
5198 >                    Node<K,V> p;
5199 >                    if ((p = advance()) == null) {
5200 >                        propagateCompletion();
5201 >                        break;
5202 >                    }
5203 >                    if ((u = searchFunction.apply(p.key)) != null) {
5204 >                        if (result.compareAndSet(null, u))
5205 >                            quietlyCompleteRoot();
5206 >                        break;
5207 >                    }
5208 >                }
5209 >            }
5210 >        }
5211 >    }
5212 >
5213 >    @SuppressWarnings("serial")
5214 >    static final class SearchValuesTask<K,V,U>
5215 >        extends BulkTask<K,V,U> {
5216 >        final Function<? super V, ? extends U> searchFunction;
5217 >        final AtomicReference<U> result;
5218 >        SearchValuesTask
5219 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5220 >             Function<? super V, ? extends U> searchFunction,
5221 >             AtomicReference<U> result) {
5222 >            super(p, b, i, f, t);
5223 >            this.searchFunction = searchFunction; this.result = result;
5224 >        }
5225 >        public final U getRawResult() { return result.get(); }
5226 >        public final void compute() {
5227 >            final Function<? super V, ? extends U> searchFunction;
5228 >            final AtomicReference<U> result;
5229 >            if ((searchFunction = this.searchFunction) != null &&
5230 >                (result = this.result) != null) {
5231 >                for (int i = baseIndex, f, h; batch > 0 &&
5232 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5233 >                    if (result.get() != null)
5234 >                        return;
5235 >                    addToPendingCount(1);
5236 >                    new SearchValuesTask<K,V,U>
5237 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
5238 >                         searchFunction, result).fork();
5239 >                }
5240 >                while (result.get() == null) {
5241 >                    U u;
5242 >                    Node<K,V> p;
5243 >                    if ((p = advance()) == null) {
5244 >                        propagateCompletion();
5245 >                        break;
5246 >                    }
5247 >                    if ((u = searchFunction.apply(p.val)) != null) {
5248 >                        if (result.compareAndSet(null, u))
5249 >                            quietlyCompleteRoot();
5250 >                        break;
5251 >                    }
5252 >                }
5253 >            }
5254 >        }
5255 >    }
5256 >
5257 >    @SuppressWarnings("serial")
5258 >    static final class SearchEntriesTask<K,V,U>
5259 >        extends BulkTask<K,V,U> {
5260 >        final Function<Entry<K,V>, ? extends U> searchFunction;
5261 >        final AtomicReference<U> result;
5262 >        SearchEntriesTask
5263 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5264 >             Function<Entry<K,V>, ? extends U> searchFunction,
5265 >             AtomicReference<U> result) {
5266 >            super(p, b, i, f, t);
5267 >            this.searchFunction = searchFunction; this.result = result;
5268 >        }
5269 >        public final U getRawResult() { return result.get(); }
5270 >        public final void compute() {
5271 >            final Function<Entry<K,V>, ? extends U> searchFunction;
5272 >            final AtomicReference<U> result;
5273 >            if ((searchFunction = this.searchFunction) != null &&
5274 >                (result = this.result) != null) {
5275 >                for (int i = baseIndex, f, h; batch > 0 &&
5276 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5277 >                    if (result.get() != null)
5278 >                        return;
5279 >                    addToPendingCount(1);
5280 >                    new SearchEntriesTask<K,V,U>
5281 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
5282 >                         searchFunction, result).fork();
5283 >                }
5284 >                while (result.get() == null) {
5285 >                    U u;
5286 >                    Node<K,V> p;
5287 >                    if ((p = advance()) == null) {
5288 >                        propagateCompletion();
5289 >                        break;
5290 >                    }
5291 >                    if ((u = searchFunction.apply(p)) != null) {
5292 >                        if (result.compareAndSet(null, u))
5293 >                            quietlyCompleteRoot();
5294 >                        return;
5295 >                    }
5296 >                }
5297 >            }
5298 >        }
5299 >    }
5300 >
5301 >    @SuppressWarnings("serial")
5302 >    static final class SearchMappingsTask<K,V,U>
5303 >        extends BulkTask<K,V,U> {
5304 >        final BiFunction<? super K, ? super V, ? extends U> searchFunction;
5305 >        final AtomicReference<U> result;
5306 >        SearchMappingsTask
5307 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5308 >             BiFunction<? super K, ? super V, ? extends U> searchFunction,
5309 >             AtomicReference<U> result) {
5310 >            super(p, b, i, f, t);
5311 >            this.searchFunction = searchFunction; this.result = result;
5312 >        }
5313 >        public final U getRawResult() { return result.get(); }
5314 >        public final void compute() {
5315 >            final BiFunction<? super K, ? super V, ? extends U> searchFunction;
5316 >            final AtomicReference<U> result;
5317 >            if ((searchFunction = this.searchFunction) != null &&
5318 >                (result = this.result) != null) {
5319 >                for (int i = baseIndex, f, h; batch > 0 &&
5320 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5321 >                    if (result.get() != null)
5322 >                        return;
5323 >                    addToPendingCount(1);
5324 >                    new SearchMappingsTask<K,V,U>
5325 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
5326 >                         searchFunction, result).fork();
5327 >                }
5328 >                while (result.get() == null) {
5329 >                    U u;
5330 >                    Node<K,V> p;
5331 >                    if ((p = advance()) == null) {
5332 >                        propagateCompletion();
5333 >                        break;
5334 >                    }
5335 >                    if ((u = searchFunction.apply(p.key, p.val)) != null) {
5336 >                        if (result.compareAndSet(null, u))
5337 >                            quietlyCompleteRoot();
5338 >                        break;
5339 >                    }
5340 >                }
5341 >            }
5342 >        }
5343 >    }
5344 >
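    /*
     * Illustrative usage sketch (not part of this file): the Search*Task
     * classes above implement the searchKeys/searchValues/searchEntries/search
     * bulk methods, which return a non-null result of the search function
     * applied to some element, or null if none; sibling subtasks stop early
     * once a result has been recorded.
     */
    ConcurrentHashMap<String, Integer> m = new ConcurrentHashMap<>();
    m.put("a", 1); m.put("b", 2);
    String hit = m.search(1L, (k, v) -> v == 2 ? k : null);      // "b"
    Integer big = m.searchValues(1L, v -> v > 100 ? v : null);   // null: no match
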
5345 >    @SuppressWarnings("serial")
5346 >    static final class ReduceKeysTask<K,V>
5347 >        extends BulkTask<K,V,K> {
5348 >        final BiFunction<? super K, ? super K, ? extends K> reducer;
5349 >        K result;
5350 >        ReduceKeysTask<K,V> rights, nextRight;
5351 >        ReduceKeysTask
5352 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5353 >             ReduceKeysTask<K,V> nextRight,
5354 >             BiFunction<? super K, ? super K, ? extends K> reducer) {
5355 >            super(p, b, i, f, t); this.nextRight = nextRight;
5356 >            this.reducer = reducer;
5357 >        }
5358 >        public final K getRawResult() { return result; }
5359 >        public final void compute() {
5360 >            final BiFunction<? super K, ? super K, ? extends K> reducer;
5361 >            if ((reducer = this.reducer) != null) {
5362 >                for (int i = baseIndex, f, h; batch > 0 &&
5363 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5364 >                    addToPendingCount(1);
5365 >                    (rights = new ReduceKeysTask<K,V>
5366 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5367 >                      rights, reducer)).fork();
5368 >                }
5369 >                K r = null;
5370 >                for (Node<K,V> p; (p = advance()) != null; ) {
5371 >                    K u = p.key;
5372 >                    r = (r == null) ? u : u == null ? r : reducer.apply(r, u);
5373 >                }
5374 >                result = r;
5375 >                CountedCompleter<?> c;
5376 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5377 >                    @SuppressWarnings("unchecked")
5378 >                    ReduceKeysTask<K,V>
5379 >                        t = (ReduceKeysTask<K,V>)c,
5380 >                        s = t.rights;
5381 >                    while (s != null) {
5382 >                        K tr, sr;
5383 >                        if ((sr = s.result) != null)
5384 >                            t.result = (((tr = t.result) == null) ? sr :
5385 >                                        reducer.apply(tr, sr));
5386 >                        s = t.rights = s.nextRight;
5387 >                    }
5388 >                }
5389 >            }
5390 >        }
5391 >    }
5392 >
5393 >    @SuppressWarnings("serial")
5394 >    static final class ReduceValuesTask<K,V>
5395 >        extends BulkTask<K,V,V> {
5396 >        final BiFunction<? super V, ? super V, ? extends V> reducer;
5397 >        V result;
5398 >        ReduceValuesTask<K,V> rights, nextRight;
5399 >        ReduceValuesTask
5400 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5401 >             ReduceValuesTask<K,V> nextRight,
5402 >             BiFunction<? super V, ? super V, ? extends V> reducer) {
5403 >            super(p, b, i, f, t); this.nextRight = nextRight;
5404 >            this.reducer = reducer;
5405 >        }
5406 >        public final V getRawResult() { return result; }
5407 >        public final void compute() {
5408 >            final BiFunction<? super V, ? super V, ? extends V> reducer;
5409 >            if ((reducer = this.reducer) != null) {
5410 >                for (int i = baseIndex, f, h; batch > 0 &&
5411 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5412 >                    addToPendingCount(1);
5413 >                    (rights = new ReduceValuesTask<K,V>
5414 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5415 >                      rights, reducer)).fork();
5416 >                }
5417 >                V r = null;
5418 >                for (Node<K,V> p; (p = advance()) != null; ) {
5419 >                    V v = p.val;
5420 >                    r = (r == null) ? v : reducer.apply(r, v);
5421 >                }
5422 >                result = r;
5423 >                CountedCompleter<?> c;
5424 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5425 >                    @SuppressWarnings("unchecked")
5426 >                    ReduceValuesTask<K,V>
5427 >                        t = (ReduceValuesTask<K,V>)c,
5428 >                        s = t.rights;
5429 >                    while (s != null) {
5430 >                        V tr, sr;
5431 >                        if ((sr = s.result) != null)
5432 >                            t.result = (((tr = t.result) == null) ? sr :
5433 >                                        reducer.apply(tr, sr));
5434 >                        s = t.rights = s.nextRight;
5435 >                    }
5436 >                }
5437 >            }
5438 >        }
5439 >    }
5440 >
5441 >    @SuppressWarnings("serial")
5442 >    static final class ReduceEntriesTask<K,V>
5443 >        extends BulkTask<K,V,Map.Entry<K,V>> {
5444 >        final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
5445 >        Map.Entry<K,V> result;
5446 >        ReduceEntriesTask<K,V> rights, nextRight;
5447 >        ReduceEntriesTask
5448 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5449 >             ReduceEntriesTask<K,V> nextRight,
5450 >             BiFunction<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
5451 >            super(p, b, i, f, t); this.nextRight = nextRight;
5452 >            this.reducer = reducer;
5453 >        }
5454 >        public final Map.Entry<K,V> getRawResult() { return result; }
5455 >        public final void compute() {
5456 >            final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
5457 >            if ((reducer = this.reducer) != null) {
5458 >                for (int i = baseIndex, f, h; batch > 0 &&
5459 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5460 >                    addToPendingCount(1);
5461 >                    (rights = new ReduceEntriesTask<K,V>
5462 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5463 >                      rights, reducer)).fork();
5464 >                }
5465 >                Map.Entry<K,V> r = null;
5466 >                for (Node<K,V> p; (p = advance()) != null; )
5467 >                    r = (r == null) ? p : reducer.apply(r, p);
5468 >                result = r;
5469 >                CountedCompleter<?> c;
5470 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5471 >                    @SuppressWarnings("unchecked")
5472 >                    ReduceEntriesTask<K,V>
5473 >                        t = (ReduceEntriesTask<K,V>)c,
5474 >                        s = t.rights;
5475 >                    while (s != null) {
5476 >                        Map.Entry<K,V> tr, sr;
5477 >                        if ((sr = s.result) != null)
5478 >                            t.result = (((tr = t.result) == null) ? sr :
5479 >                                        reducer.apply(tr, sr));
5480 >                        s = t.rights = s.nextRight;
5481 >                    }
5482 >                }
5483 >            }
5484 >        }
5485 >    }
5486 >
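    /*
     * Illustrative usage sketch (not part of this file): the Reduce*Task
     * classes above back the reduceKeys/reduceValues/reduceEntries bulk
     * methods, which combine elements pairwise with the given reducer and
     * return null for an empty map.
     */
    ConcurrentHashMap<String, Integer> m = new ConcurrentHashMap<>();
    m.put("a", 1); m.put("b", 2);
    String joinedKeys = m.reduceKeys(1L, (k1, k2) -> k1 + "," + k2);
    Integer maxValue  = m.reduceValues(1L, (v1, v2) -> v1 >= v2 ? v1 : v2);
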
5487 >    @SuppressWarnings("serial")
5488 >    static final class MapReduceKeysTask<K,V,U>
5489 >        extends BulkTask<K,V,U> {
5490 >        final Function<? super K, ? extends U> transformer;
5491 >        final BiFunction<? super U, ? super U, ? extends U> reducer;
5492 >        U result;
5493 >        MapReduceKeysTask<K,V,U> rights, nextRight;
5494 >        MapReduceKeysTask
5495 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5496 >             MapReduceKeysTask<K,V,U> nextRight,
5497 >             Function<? super K, ? extends U> transformer,
5498 >             BiFunction<? super U, ? super U, ? extends U> reducer) {
5499 >            super(p, b, i, f, t); this.nextRight = nextRight;
5500 >            this.transformer = transformer;
5501 >            this.reducer = reducer;
5502 >        }
5503 >        public final U getRawResult() { return result; }
5504 >        public final void compute() {
5505 >            final Function<? super K, ? extends U> transformer;
5506 >            final BiFunction<? super U, ? super U, ? extends U> reducer;
5507 >            if ((transformer = this.transformer) != null &&
5508 >                (reducer = this.reducer) != null) {
5509 >                for (int i = baseIndex, f, h; batch > 0 &&
5510 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5511 >                    addToPendingCount(1);
5512 >                    (rights = new MapReduceKeysTask<K,V,U>
5513 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5514 >                      rights, transformer, reducer)).fork();
5515 >                }
5516 >                U r = null;
5517 >                for (Node<K,V> p; (p = advance()) != null; ) {
5518 >                    U u;
5519 >                    if ((u = transformer.apply(p.key)) != null)
5520 >                        r = (r == null) ? u : reducer.apply(r, u);
5521 >                }
5522 >                result = r;
5523 >                CountedCompleter<?> c;
5524 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5525 >                    @SuppressWarnings("unchecked")
5526 >                    MapReduceKeysTask<K,V,U>
5527 >                        t = (MapReduceKeysTask<K,V,U>)c,
5528 >                        s = t.rights;
5529 >                    while (s != null) {
5530 >                        U tr, sr;
5531 >                        if ((sr = s.result) != null)
5532 >                            t.result = (((tr = t.result) == null) ? sr :
5533 >                                        reducer.apply(tr, sr));
5534 >                        s = t.rights = s.nextRight;
5535 >                    }
5536 >                }
5537 >            }
5538 >        }
5539 >    }
5540 >
5541 >    @SuppressWarnings("serial")
5542 >    static final class MapReduceValuesTask<K,V,U>
5543 >        extends BulkTask<K,V,U> {
5544 >        final Function<? super V, ? extends U> transformer;
5545 >        final BiFunction<? super U, ? super U, ? extends U> reducer;
5546 >        U result;
5547 >        MapReduceValuesTask<K,V,U> rights, nextRight;
5548 >        MapReduceValuesTask
5549 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5550 >             MapReduceValuesTask<K,V,U> nextRight,
5551 >             Function<? super V, ? extends U> transformer,
5552 >             BiFunction<? super U, ? super U, ? extends U> reducer) {
5553 >            super(p, b, i, f, t); this.nextRight = nextRight;
5554 >            this.transformer = transformer;
5555 >            this.reducer = reducer;
5556 >        }
5557 >        public final U getRawResult() { return result; }
5558 >        public final void compute() {
5559 >            final Function<? super V, ? extends U> transformer;
5560 >            final BiFunction<? super U, ? super U, ? extends U> reducer;
5561 >            if ((transformer = this.transformer) != null &&
5562 >                (reducer = this.reducer) != null) {
5563 >                for (int i = baseIndex, f, h; batch > 0 &&
5564 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5565 >                    addToPendingCount(1);
5566 >                    (rights = new MapReduceValuesTask<K,V,U>
5567 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5568 >                      rights, transformer, reducer)).fork();
5569 >                }
5570 >                U r = null;
5571 >                for (Node<K,V> p; (p = advance()) != null; ) {
5572 >                    U u;
5573 >                    if ((u = transformer.apply(p.val)) != null)
5574 >                        r = (r == null) ? u : reducer.apply(r, u);
5575 >                }
5576 >                result = r;
5577 >                CountedCompleter<?> c;
5578 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5579 >                    @SuppressWarnings("unchecked")
5580 >                    MapReduceValuesTask<K,V,U>
5581 >                        t = (MapReduceValuesTask<K,V,U>)c,
5582 >                        s = t.rights;
5583 >                    while (s != null) {
5584 >                        U tr, sr;
5585 >                        if ((sr = s.result) != null)
5586 >                            t.result = (((tr = t.result) == null) ? sr :
5587 >                                        reducer.apply(tr, sr));
5588 >                        s = t.rights = s.nextRight;
5589 >                    }
5590 >                }
5591 >            }
5592 >        }
5593 >    }
5594 >
5595 >    @SuppressWarnings("serial")
5596 >    static final class MapReduceEntriesTask<K,V,U>
5597 >        extends BulkTask<K,V,U> {
5598 >        final Function<Map.Entry<K,V>, ? extends U> transformer;
5599 >        final BiFunction<? super U, ? super U, ? extends U> reducer;
5600 >        U result;
5601 >        MapReduceEntriesTask<K,V,U> rights, nextRight;
5602 >        MapReduceEntriesTask
5603 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5604 >             MapReduceEntriesTask<K,V,U> nextRight,
5605 >             Function<Map.Entry<K,V>, ? extends U> transformer,
5606 >             BiFunction<? super U, ? super U, ? extends U> reducer) {
5607 >            super(p, b, i, f, t); this.nextRight = nextRight;
5608 >            this.transformer = transformer;
5609 >            this.reducer = reducer;
5610 >        }
5611 >        public final U getRawResult() { return result; }
5612 >        public final void compute() {
5613 >            final Function<Map.Entry<K,V>, ? extends U> transformer;
5614 >            final BiFunction<? super U, ? super U, ? extends U> reducer;
5615 >            if ((transformer = this.transformer) != null &&
5616 >                (reducer = this.reducer) != null) {
5617 >                for (int i = baseIndex, f, h; batch > 0 &&
5618 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5619 >                    addToPendingCount(1);
5620 >                    (rights = new MapReduceEntriesTask<K,V,U>
5621 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5622 >                      rights, transformer, reducer)).fork();
5623 >                }
5624 >                U r = null;
5625 >                for (Node<K,V> p; (p = advance()) != null; ) {
5626 >                    U u;
5627 >                    if ((u = transformer.apply(p)) != null)
5628 >                        r = (r == null) ? u : reducer.apply(r, u);
5629 >                }
5630 >                result = r;
5631 >                CountedCompleter<?> c;
5632 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5633 >                    @SuppressWarnings("unchecked")
5634 >                    MapReduceEntriesTask<K,V,U>
5635 >                        t = (MapReduceEntriesTask<K,V,U>)c,
5636 >                        s = t.rights;
5637 >                    while (s != null) {
5638 >                        U tr, sr;
5639 >                        if ((sr = s.result) != null)
5640 >                            t.result = (((tr = t.result) == null) ? sr :
5641 >                                        reducer.apply(tr, sr));
5642 >                        s = t.rights = s.nextRight;
5643 >                    }
5644 >                }
5645 >            }
5646 >        }
5647 >    }
5648 >
5649 >    @SuppressWarnings("serial")
5650 >    static final class MapReduceMappingsTask<K,V,U>
5651 >        extends BulkTask<K,V,U> {
5652 >        final BiFunction<? super K, ? super V, ? extends U> transformer;
5653 >        final BiFunction<? super U, ? super U, ? extends U> reducer;
5654 >        U result;
5655 >        MapReduceMappingsTask<K,V,U> rights, nextRight;
5656 >        MapReduceMappingsTask
5657 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5658 >             MapReduceMappingsTask<K,V,U> nextRight,
5659 >             BiFunction<? super K, ? super V, ? extends U> transformer,
5660 >             BiFunction<? super U, ? super U, ? extends U> reducer) {
5661 >            super(p, b, i, f, t); this.nextRight = nextRight;
5662 >            this.transformer = transformer;
5663 >            this.reducer = reducer;
5664 >        }
5665 >        public final U getRawResult() { return result; }
5666 >        public final void compute() {
5667 >            final BiFunction<? super K, ? super V, ? extends U> transformer;
5668 >            final BiFunction<? super U, ? super U, ? extends U> reducer;
5669 >            if ((transformer = this.transformer) != null &&
5670 >                (reducer = this.reducer) != null) {
5671 >                for (int i = baseIndex, f, h; batch > 0 &&
5672 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5673 >                    addToPendingCount(1);
5674 >                    (rights = new MapReduceMappingsTask<K,V,U>
5675 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5676 >                      rights, transformer, reducer)).fork();
5677 >                }
5678 >                U r = null;
5679 >                for (Node<K,V> p; (p = advance()) != null; ) {
5680 >                    U u;
5681 >                    if ((u = transformer.apply(p.key, p.val)) != null)
5682 >                        r = (r == null) ? u : reducer.apply(r, u);
5683 >                }
5684 >                result = r;
5685 >                CountedCompleter<?> c;
5686 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5687 >                    @SuppressWarnings("unchecked")
5688 >                    MapReduceMappingsTask<K,V,U>
5689 >                        t = (MapReduceMappingsTask<K,V,U>)c,
5690 >                        s = t.rights;
5691 >                    while (s != null) {
5692 >                        U tr, sr;
5693 >                        if ((sr = s.result) != null)
5694 >                            t.result = (((tr = t.result) == null) ? sr :
5695 >                                        reducer.apply(tr, sr));
5696 >                        s = t.rights = s.nextRight;
5697 >                    }
5698 >                }
5699 >            }
5700 >        }
5701 >    }
5702 >
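    /*
     * Illustrative usage sketch (not part of this file): the MapReduce*Task
     * classes above back the transformer-plus-reducer overloads of the bulk
     * reduce methods; null transformer results are skipped before reduction.
     */
    ConcurrentHashMap<String, Integer> m = new ConcurrentHashMap<>();
    m.put("apple", 3); m.put("kiwi", 1);
    Integer longestKeyLength =
        m.reduce(1L,
                 (k, v) -> k.length(),        // transformer over (key, value)
                 (a, b) -> a >= b ? a : b);   // reducer over non-null results
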
5703 >    @SuppressWarnings("serial")
5704 >    static final class MapReduceKeysToDoubleTask<K,V>
5705 >        extends BulkTask<K,V,Double> {
5706 >        final ToDoubleFunction<? super K> transformer;
5707 >        final DoubleBinaryOperator reducer;
5708 >        final double basis;
5709 >        double result;
5710 >        MapReduceKeysToDoubleTask<K,V> rights, nextRight;
5711 >        MapReduceKeysToDoubleTask
5712 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5713 >             MapReduceKeysToDoubleTask<K,V> nextRight,
5714 >             ToDoubleFunction<? super K> transformer,
5715 >             double basis,
5716 >             DoubleBinaryOperator reducer) {
5717 >            super(p, b, i, f, t); this.nextRight = nextRight;
5718 >            this.transformer = transformer;
5719 >            this.basis = basis; this.reducer = reducer;
5720 >        }
5721 >        public final Double getRawResult() { return result; }
5722 >        public final void compute() {
5723 >            final ToDoubleFunction<? super K> transformer;
5724 >            final DoubleBinaryOperator reducer;
5725 >            if ((transformer = this.transformer) != null &&
5726 >                (reducer = this.reducer) != null) {
5727 >                double r = this.basis;
5728 >                for (int i = baseIndex, f, h; batch > 0 &&
5729 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5730 >                    addToPendingCount(1);
5731 >                    (rights = new MapReduceKeysToDoubleTask<K,V>
5732 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5733 >                      rights, transformer, r, reducer)).fork();
5734 >                }
5735 >                for (Node<K,V> p; (p = advance()) != null; )
5736 >                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key));
5737 >                result = r;
5738 >                CountedCompleter<?> c;
5739 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5740 >                    @SuppressWarnings("unchecked")
5741 >                    MapReduceKeysToDoubleTask<K,V>
5742 >                        t = (MapReduceKeysToDoubleTask<K,V>)c,
5743 >                        s = t.rights;
5744 >                    while (s != null) {
5745 >                        t.result = reducer.applyAsDouble(t.result, s.result);
5746 >                        s = t.rights = s.nextRight;
5747 >                    }
5748 >                }
5749 >            }
5750 >        }
5751 >    }
5752 >
5753 >    @SuppressWarnings("serial")
5754 >    static final class MapReduceValuesToDoubleTask<K,V>
5755 >        extends BulkTask<K,V,Double> {
5756 >        final ToDoubleFunction<? super V> transformer;
5757 >        final DoubleBinaryOperator reducer;
5758 >        final double basis;
5759 >        double result;
5760 >        MapReduceValuesToDoubleTask<K,V> rights, nextRight;
5761 >        MapReduceValuesToDoubleTask
5762 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5763 >             MapReduceValuesToDoubleTask<K,V> nextRight,
5764 >             ToDoubleFunction<? super V> transformer,
5765 >             double basis,
5766 >             DoubleBinaryOperator reducer) {
5767 >            super(p, b, i, f, t); this.nextRight = nextRight;
5768 >            this.transformer = transformer;
5769 >            this.basis = basis; this.reducer = reducer;
5770 >        }
5771 >        public final Double getRawResult() { return result; }
5772 >        public final void compute() {
5773 >            final ToDoubleFunction<? super V> transformer;
5774 >            final DoubleBinaryOperator reducer;
5775 >            if ((transformer = this.transformer) != null &&
5776 >                (reducer = this.reducer) != null) {
5777 >                double r = this.basis;
5778 >                for (int i = baseIndex, f, h; batch > 0 &&
5779 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5780 >                    addToPendingCount(1);
5781 >                    (rights = new MapReduceValuesToDoubleTask<K,V>
5782 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5783 >                      rights, transformer, r, reducer)).fork();
5784 >                }
5785 >                for (Node<K,V> p; (p = advance()) != null; )
5786 >                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.val));
5787 >                result = r;
5788 >                CountedCompleter<?> c;
5789 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5790 >                    @SuppressWarnings("unchecked")
5791 >                    MapReduceValuesToDoubleTask<K,V>
5792 >                        t = (MapReduceValuesToDoubleTask<K,V>)c,
5793 >                        s = t.rights;
5794 >                    while (s != null) {
5795 >                        t.result = reducer.applyAsDouble(t.result, s.result);
5796 >                        s = t.rights = s.nextRight;
5797 >                    }
5798 >                }
5799 >            }
5800 >        }
5801 >    }
5802 >
5803 >    @SuppressWarnings("serial")
5804 >    static final class MapReduceEntriesToDoubleTask<K,V>
5805 >        extends BulkTask<K,V,Double> {
5806 >        final ToDoubleFunction<Map.Entry<K,V>> transformer;
5807 >        final DoubleBinaryOperator reducer;
5808 >        final double basis;
5809 >        double result;
5810 >        MapReduceEntriesToDoubleTask<K,V> rights, nextRight;
5811 >        MapReduceEntriesToDoubleTask
5812 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5813 >             MapReduceEntriesToDoubleTask<K,V> nextRight,
5814 >             ToDoubleFunction<Map.Entry<K,V>> transformer,
5815 >             double basis,
5816 >             DoubleBinaryOperator reducer) {
5817 >            super(p, b, i, f, t); this.nextRight = nextRight;
5818 >            this.transformer = transformer;
5819 >            this.basis = basis; this.reducer = reducer;
5820 >        }
5821 >        public final Double getRawResult() { return result; }
5822 >        public final void compute() {
5823 >            final ToDoubleFunction<Map.Entry<K,V>> transformer;
5824 >            final DoubleBinaryOperator reducer;
5825 >            if ((transformer = this.transformer) != null &&
5826 >                (reducer = this.reducer) != null) {
5827 >                double r = this.basis;
5828 >                for (int i = baseIndex, f, h; batch > 0 &&
5829 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5830 >                    addToPendingCount(1);
5831 >                    (rights = new MapReduceEntriesToDoubleTask<K,V>
5832 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5833 >                      rights, transformer, r, reducer)).fork();
5834 >                }
5835 >                for (Node<K,V> p; (p = advance()) != null; )
5836 >                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p));
5837 >                result = r;
5838 >                CountedCompleter<?> c;
5839 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5840 >                    @SuppressWarnings("unchecked")
5841 >                    MapReduceEntriesToDoubleTask<K,V>
5842 >                        t = (MapReduceEntriesToDoubleTask<K,V>)c,
5843 >                        s = t.rights;
5844 >                    while (s != null) {
5845 >                        t.result = reducer.applyAsDouble(t.result, s.result);
5846 >                        s = t.rights = s.nextRight;
5847 >                    }
5848 >                }
5849 >            }
5850 >        }
5851 >    }
5852 >
5853 >    @SuppressWarnings("serial")
5854 >    static final class MapReduceMappingsToDoubleTask<K,V>
5855 >        extends BulkTask<K,V,Double> {
5856 >        final ToDoubleBiFunction<? super K, ? super V> transformer;
5857 >        final DoubleBinaryOperator reducer;
5858 >        final double basis;
5859 >        double result;
5860 >        MapReduceMappingsToDoubleTask<K,V> rights, nextRight;
5861 >        MapReduceMappingsToDoubleTask
5862 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5863 >             MapReduceMappingsToDoubleTask<K,V> nextRight,
5864 >             ToDoubleBiFunction<? super K, ? super V> transformer,
5865 >             double basis,
5866 >             DoubleBinaryOperator reducer) {
5867 >            super(p, b, i, f, t); this.nextRight = nextRight;
5868 >            this.transformer = transformer;
5869 >            this.basis = basis; this.reducer = reducer;
5870 >        }
5871 >        public final Double getRawResult() { return result; }
5872 >        public final void compute() {
5873 >            final ToDoubleBiFunction<? super K, ? super V> transformer;
5874 >            final DoubleBinaryOperator reducer;
5875 >            if ((transformer = this.transformer) != null &&
5876 >                (reducer = this.reducer) != null) {
5877 >                double r = this.basis;
5878 >                for (int i = baseIndex, f, h; batch > 0 &&
5879 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5880 >                    addToPendingCount(1);
5881 >                    (rights = new MapReduceMappingsToDoubleTask<K,V>
5882 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5883 >                      rights, transformer, r, reducer)).fork();
5884 >                }
5885 >                for (Node<K,V> p; (p = advance()) != null; )
5886 >                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key, p.val));
5887 >                result = r;
5888 >                CountedCompleter<?> c;
5889 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5890 >                    @SuppressWarnings("unchecked")
5891 >                    MapReduceMappingsToDoubleTask<K,V>
5892 >                        t = (MapReduceMappingsToDoubleTask<K,V>)c,
5893 >                        s = t.rights;
5894 >                    while (s != null) {
5895 >                        t.result = reducer.applyAsDouble(t.result, s.result);
5896 >                        s = t.rights = s.nextRight;
5897 >                    }
5898 >                }
5899 >            }
5900 >        }
5901 >    }
5902 >
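    /*
     * Illustrative usage sketch (not part of this file): the *ToDouble tasks
     * above (and the *ToLong and *ToInt variants that follow in this file)
     * back the primitive-specialized bulk reductions, which take an identity
     * basis value rather than skipping nulls.
     */
    ConcurrentHashMap<String, Integer> m = new ConcurrentHashMap<>();
    m.put("a", 1); m.put("b", 2);
    double valueSum = m.reduceValuesToDouble(1L, v -> v.doubleValue(), 0.0, Double::sum);
    double combined = m.reduceToDouble(1L, (k, v) -> k.length() + v, 0.0, (x, y) -> x + y);
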
5903 >    @SuppressWarnings("serial")
5904 >    static final class MapReduceKeysToLongTask<K,V>
5905 >        extends BulkTask<K,V,Long> {
5906 >        final ToLongFunction<? super K> transformer;
5907 >        final LongBinaryOperator reducer;
5908 >        final long basis;
5909 >        long result;
5910 >        MapReduceKeysToLongTask<K,V> rights, nextRight;
5911 >        MapReduceKeysToLongTask
5912 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5913 >             MapReduceKeysToLongTask<K,V> nextRight,
5914 >             ToLongFunction<? super K> transformer,
5915 >             long basis,
5916 >             LongBinaryOperator reducer) {
5917 >            super(p, b, i, f, t); this.nextRight = nextRight;
5918 >            this.transformer = transformer;
5919 >            this.basis = basis; this.reducer = reducer;
5920 >        }
5921 >        public final Long getRawResult() { return result; }
5922 >        public final void compute() {
5923 >            final ToLongFunction<? super K> transformer;
5924 >            final LongBinaryOperator reducer;
5925 >            if ((transformer = this.transformer) != null &&
5926 >                (reducer = this.reducer) != null) {
5927 >                long r = this.basis;
5928 >                for (int i = baseIndex, f, h; batch > 0 &&
5929 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5930 >                    addToPendingCount(1);
5931 >                    (rights = new MapReduceKeysToLongTask<K,V>
5932 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5933 >                      rights, transformer, r, reducer)).fork();
5934 >                }
5935 >                for (Node<K,V> p; (p = advance()) != null; )
5936 >                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.key));
5937 >                result = r;
5938 >                CountedCompleter<?> c;
5939 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5940 >                    @SuppressWarnings("unchecked")
5941 >                    MapReduceKeysToLongTask<K,V>
5942 >                        t = (MapReduceKeysToLongTask<K,V>)c,
5943 >                        s = t.rights;
5944 >                    while (s != null) {
5945 >                        t.result = reducer.applyAsLong(t.result, s.result);
5946 >                        s = t.rights = s.nextRight;
5947 >                    }
5948 >                }
5949 >            }
5950 >        }
5951 >    }
5952 >
5953 >    @SuppressWarnings("serial")
5954 >    static final class MapReduceValuesToLongTask<K,V>
5955 >        extends BulkTask<K,V,Long> {
5956 >        final ToLongFunction<? super V> transformer;
5957 >        final LongBinaryOperator reducer;
5958 >        final long basis;
5959 >        long result;
5960 >        MapReduceValuesToLongTask<K,V> rights, nextRight;
5961 >        MapReduceValuesToLongTask
5962 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5963 >             MapReduceValuesToLongTask<K,V> nextRight,
5964 >             ToLongFunction<? super V> transformer,
5965 >             long basis,
5966 >             LongBinaryOperator reducer) {
5967 >            super(p, b, i, f, t); this.nextRight = nextRight;
5968 >            this.transformer = transformer;
5969 >            this.basis = basis; this.reducer = reducer;
5970 >        }
5971 >        public final Long getRawResult() { return result; }
5972 >        public final void compute() {
5973 >            final ToLongFunction<? super V> transformer;
5974 >            final LongBinaryOperator reducer;
5975 >            if ((transformer = this.transformer) != null &&
5976 >                (reducer = this.reducer) != null) {
5977 >                long r = this.basis;
5978 >                for (int i = baseIndex, f, h; batch > 0 &&
5979 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5980 >                    addToPendingCount(1);
5981 >                    (rights = new MapReduceValuesToLongTask<K,V>
5982 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5983 >                      rights, transformer, r, reducer)).fork();
5984 >                }
5985 >                for (Node<K,V> p; (p = advance()) != null; )
5986 >                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.val));
5987 >                result = r;
5988 >                CountedCompleter<?> c;
5989 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5990 >                    @SuppressWarnings("unchecked")
5991 >                    MapReduceValuesToLongTask<K,V>
5992 >                        t = (MapReduceValuesToLongTask<K,V>)c,
5993 >                        s = t.rights;
5994 >                    while (s != null) {
5995 >                        t.result = reducer.applyAsLong(t.result, s.result);
5996 >                        s = t.rights = s.nextRight;
5997 >                    }
5998 >                }
5999 >            }
6000 >        }
6001 >    }
6002 >
6003 >    @SuppressWarnings("serial")
6004 >    static final class MapReduceEntriesToLongTask<K,V>
6005 >        extends BulkTask<K,V,Long> {
6006 >        final ToLongFunction<Map.Entry<K,V>> transformer;
6007 >        final LongBinaryOperator reducer;
6008 >        final long basis;
6009 >        long result;
6010 >        MapReduceEntriesToLongTask<K,V> rights, nextRight;
6011 >        MapReduceEntriesToLongTask
6012 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6013 >             MapReduceEntriesToLongTask<K,V> nextRight,
6014 >             ToLongFunction<Map.Entry<K,V>> transformer,
6015 >             long basis,
6016 >             LongBinaryOperator reducer) {
6017 >            super(p, b, i, f, t); this.nextRight = nextRight;
6018 >            this.transformer = transformer;
6019 >            this.basis = basis; this.reducer = reducer;
6020 >        }
6021 >        public final Long getRawResult() { return result; }
6022 >        public final void compute() {
6023 >            final ToLongFunction<Map.Entry<K,V>> transformer;
6024 >            final LongBinaryOperator reducer;
6025 >            if ((transformer = this.transformer) != null &&
6026 >                (reducer = this.reducer) != null) {
6027 >                long r = this.basis;
6028 >                for (int i = baseIndex, f, h; batch > 0 &&
6029 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
6030 >                    addToPendingCount(1);
6031 >                    (rights = new MapReduceEntriesToLongTask<K,V>
6032 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
6033 >                      rights, transformer, r, reducer)).fork();
6034 >                }
6035 >                for (Node<K,V> p; (p = advance()) != null; )
6036 >                    r = reducer.applyAsLong(r, transformer.applyAsLong(p));
6037 >                result = r;
6038 >                CountedCompleter<?> c;
6039 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
6040 >                    @SuppressWarnings("unchecked")
6041 >                    MapReduceEntriesToLongTask<K,V>
6042 >                        t = (MapReduceEntriesToLongTask<K,V>)c,
6043 >                        s = t.rights;
6044 >                    while (s != null) {
6045 >                        t.result = reducer.applyAsLong(t.result, s.result);
6046 >                        s = t.rights = s.nextRight;
6047 >                    }
6048 >                }
6049 >            }
6050 >        }
6051 >    }
6052 >
6053 >    @SuppressWarnings("serial")
6054 >    static final class MapReduceMappingsToLongTask<K,V>
6055 >        extends BulkTask<K,V,Long> {
6056 >        final ToLongBiFunction<? super K, ? super V> transformer;
6057 >        final LongBinaryOperator reducer;
6058 >        final long basis;
6059 >        long result;
6060 >        MapReduceMappingsToLongTask<K,V> rights, nextRight;
6061 >        MapReduceMappingsToLongTask
6062 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6063 >             MapReduceMappingsToLongTask<K,V> nextRight,
6064 >             ToLongBiFunction<? super K, ? super V> transformer,
6065 >             long basis,
6066 >             LongBinaryOperator reducer) {
6067 >            super(p, b, i, f, t); this.nextRight = nextRight;
6068 >            this.transformer = transformer;
6069 >            this.basis = basis; this.reducer = reducer;
6070 >        }
6071 >        public final Long getRawResult() { return result; }
6072 >        public final void compute() {
6073 >            final ToLongBiFunction<? super K, ? super V> transformer;
6074 >            final LongBinaryOperator reducer;
6075 >            if ((transformer = this.transformer) != null &&
6076 >                (reducer = this.reducer) != null) {
6077 >                long r = this.basis;
6078 >                for (int i = baseIndex, f, h; batch > 0 &&
6079 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
6080 >                    addToPendingCount(1);
6081 >                    (rights = new MapReduceMappingsToLongTask<K,V>
6082 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
6083 >                      rights, transformer, r, reducer)).fork();
6084 >                }
6085 >                for (Node<K,V> p; (p = advance()) != null; )
6086 >                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.key, p.val));
6087 >                result = r;
6088 >                CountedCompleter<?> c;
6089 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
6090 >                    @SuppressWarnings("unchecked")
6091 >                    MapReduceMappingsToLongTask<K,V>
6092 >                        t = (MapReduceMappingsToLongTask<K,V>)c,
6093 >                        s = t.rights;
6094 >                    while (s != null) {
6095 >                        t.result = reducer.applyAsLong(t.result, s.result);
6096 >                        s = t.rights = s.nextRight;
6097 >                    }
6098 >                }
6099 >            }
6100 >        }
6101 >    }
6102 >
6103 >    @SuppressWarnings("serial")
6104 >    static final class MapReduceKeysToIntTask<K,V>
6105 >        extends BulkTask<K,V,Integer> {
6106 >        final ToIntFunction<? super K> transformer;
6107 >        final IntBinaryOperator reducer;
6108 >        final int basis;
6109 >        int result;
6110 >        MapReduceKeysToIntTask<K,V> rights, nextRight;
6111 >        MapReduceKeysToIntTask
6112 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6113 >             MapReduceKeysToIntTask<K,V> nextRight,
6114 >             ToIntFunction<? super K> transformer,
6115 >             int basis,
6116 >             IntBinaryOperator reducer) {
6117 >            super(p, b, i, f, t); this.nextRight = nextRight;
6118 >            this.transformer = transformer;
6119 >            this.basis = basis; this.reducer = reducer;
6120 >        }
6121 >        public final Integer getRawResult() { return result; }
6122 >        public final void compute() {
6123 >            final ToIntFunction<? super K> transformer;
6124 >            final IntBinaryOperator reducer;
6125 >            if ((transformer = this.transformer) != null &&
6126 >                (reducer = this.reducer) != null) {
6127 >                int r = this.basis;
6128 >                for (int i = baseIndex, f, h; batch > 0 &&
6129 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
6130 >                    addToPendingCount(1);
6131 >                    (rights = new MapReduceKeysToIntTask<K,V>
6132 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
6133 >                      rights, transformer, r, reducer)).fork();
6134 >                }
6135 >                for (Node<K,V> p; (p = advance()) != null; )
6136 >                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.key));
6137 >                result = r;
6138 >                CountedCompleter<?> c;
6139 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
6140 >                    @SuppressWarnings("unchecked")
6141 >                    MapReduceKeysToIntTask<K,V>
6142 >                        t = (MapReduceKeysToIntTask<K,V>)c,
6143 >                        s = t.rights;
6144 >                    while (s != null) {
6145 >                        t.result = reducer.applyAsInt(t.result, s.result);
6146 >                        s = t.rights = s.nextRight;
6147 >                    }
6148 >                }
6149 >            }
6150 >        }
6151 >    }
6152 >
6153 >    @SuppressWarnings("serial")
6154 >    static final class MapReduceValuesToIntTask<K,V>
6155 >        extends BulkTask<K,V,Integer> {
6156 >        final ToIntFunction<? super V> transformer;
6157 >        final IntBinaryOperator reducer;
6158 >        final int basis;
6159 >        int result;
6160 >        MapReduceValuesToIntTask<K,V> rights, nextRight;
6161 >        MapReduceValuesToIntTask
6162 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6163 >             MapReduceValuesToIntTask<K,V> nextRight,
6164 >             ToIntFunction<? super V> transformer,
6165 >             int basis,
6166 >             IntBinaryOperator reducer) {
6167 >            super(p, b, i, f, t); this.nextRight = nextRight;
6168 >            this.transformer = transformer;
6169 >            this.basis = basis; this.reducer = reducer;
6170 >        }
6171 >        public final Integer getRawResult() { return result; }
6172 >        public final void compute() {
6173 >            final ToIntFunction<? super V> transformer;
6174 >            final IntBinaryOperator reducer;
6175 >            if ((transformer = this.transformer) != null &&
6176 >                (reducer = this.reducer) != null) {
6177 >                int r = this.basis;
6178 >                for (int i = baseIndex, f, h; batch > 0 &&
6179 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
6180 >                    addToPendingCount(1);
6181 >                    (rights = new MapReduceValuesToIntTask<K,V>
6182 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
6183 >                      rights, transformer, r, reducer)).fork();
6184 >                }
6185 >                for (Node<K,V> p; (p = advance()) != null; )
6186 >                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.val));
6187 >                result = r;
6188 >                CountedCompleter<?> c;
6189 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
6190 >                    @SuppressWarnings("unchecked")
6191 >                    MapReduceValuesToIntTask<K,V>
6192 >                        t = (MapReduceValuesToIntTask<K,V>)c,
6193 >                        s = t.rights;
6194 >                    while (s != null) {
6195 >                        t.result = reducer.applyAsInt(t.result, s.result);
6196 >                        s = t.rights = s.nextRight;
6197 >                    }
6198 >                }
6199 >            }
6200 >        }
6201 >    }
6202 >
6203 >    @SuppressWarnings("serial")
6204 >    static final class MapReduceEntriesToIntTask<K,V>
6205 >        extends BulkTask<K,V,Integer> {
6206 >        final ToIntFunction<Map.Entry<K,V>> transformer;
6207 >        final IntBinaryOperator reducer;
6208 >        final int basis;
6209 >        int result;
6210 >        MapReduceEntriesToIntTask<K,V> rights, nextRight;
6211 >        MapReduceEntriesToIntTask
6212 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6213 >             MapReduceEntriesToIntTask<K,V> nextRight,
6214 >             ToIntFunction<Map.Entry<K,V>> transformer,
6215 >             int basis,
6216 >             IntBinaryOperator reducer) {
6217 >            super(p, b, i, f, t); this.nextRight = nextRight;
6218 >            this.transformer = transformer;
6219 >            this.basis = basis; this.reducer = reducer;
6220 >        }
6221 >        public final Integer getRawResult() { return result; }
6222 >        public final void compute() {
6223 >            final ToIntFunction<Map.Entry<K,V>> transformer;
6224 >            final IntBinaryOperator reducer;
6225 >            if ((transformer = this.transformer) != null &&
6226 >                (reducer = this.reducer) != null) {
6227 >                int r = this.basis;
6228 >                for (int i = baseIndex, f, h; batch > 0 &&
6229 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
6230 >                    addToPendingCount(1);
6231 >                    (rights = new MapReduceEntriesToIntTask<K,V>
6232 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
6233 >                      rights, transformer, r, reducer)).fork();
6234 >                }
6235 >                for (Node<K,V> p; (p = advance()) != null; )
6236 >                    r = reducer.applyAsInt(r, transformer.applyAsInt(p));
6237 >                result = r;
6238 >                CountedCompleter<?> c;
6239 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
6240 >                    @SuppressWarnings("unchecked")
6241 >                    MapReduceEntriesToIntTask<K,V>
6242 >                        t = (MapReduceEntriesToIntTask<K,V>)c,
6243 >                        s = t.rights;
6244 >                    while (s != null) {
6245 >                        t.result = reducer.applyAsInt(t.result, s.result);
6246 >                        s = t.rights = s.nextRight;
6247 >                    }
6248 >                }
6249 >            }
6250 >        }
6251 >    }
6252 >
6253 >    @SuppressWarnings("serial")
6254 >    static final class MapReduceMappingsToIntTask<K,V>
6255 >        extends BulkTask<K,V,Integer> {
6256 >        final ToIntBiFunction<? super K, ? super V> transformer;
6257 >        final IntBinaryOperator reducer;
6258 >        final int basis;
6259 >        int result;
6260 >        MapReduceMappingsToIntTask<K,V> rights, nextRight;
6261 >        MapReduceMappingsToIntTask
6262 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6263 >             MapReduceMappingsToIntTask<K,V> nextRight,
6264 >             ToIntBiFunction<? super K, ? super V> transformer,
6265 >             int basis,
6266 >             IntBinaryOperator reducer) {
6267 >            super(p, b, i, f, t); this.nextRight = nextRight;
6268 >            this.transformer = transformer;
6269 >            this.basis = basis; this.reducer = reducer;
6270 >        }
6271 >        public final Integer getRawResult() { return result; }
6272 >        public final void compute() {
6273 >            final ToIntBiFunction<? super K, ? super V> transformer;
6274 >            final IntBinaryOperator reducer;
6275 >            if ((transformer = this.transformer) != null &&
6276 >                (reducer = this.reducer) != null) {
6277 >                int r = this.basis;
6278 >                for (int i = baseIndex, f, h; batch > 0 &&
6279 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
6280 >                    addToPendingCount(1);
6281 >                    (rights = new MapReduceMappingsToIntTask<K,V>
6282 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
6283 >                      rights, transformer, r, reducer)).fork();
6284 >                }
6285 >                for (Node<K,V> p; (p = advance()) != null; )
6286 >                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.key, p.val));
6287 >                result = r;
6288 >                CountedCompleter<?> c;
6289 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
6290 >                    @SuppressWarnings("unchecked")
6291 >                    MapReduceMappingsToIntTask<K,V>
6292 >                        t = (MapReduceMappingsToIntTask<K,V>)c,
6293 >                        s = t.rights;
6294 >                    while (s != null) {
6295 >                        t.result = reducer.applyAsInt(t.result, s.result);
6296 >                        s = t.rights = s.nextRight;
6297 >                    }
6298 >                }
6299 >            }
6300          }
6301      }
6302  
6303      // Unsafe mechanics
6304 <    private static final sun.misc.Unsafe UNSAFE;
6305 <    private static final long SBASE;
6306 <    private static final int SSHIFT;
6307 <    private static final long TBASE;
6308 <    private static final int TSHIFT;
6304 >    private static final sun.misc.Unsafe U = sun.misc.Unsafe.getUnsafe();
6305 >    private static final long SIZECTL;
6306 >    private static final long TRANSFERINDEX;
6307 >    private static final long BASECOUNT;
6308 >    private static final long CELLSBUSY;
6309 >    private static final long CELLVALUE;
6310 >    private static final int ABASE;
6311 >    private static final int ASHIFT;
6312  
6313      static {
1474        int ss, ts;
6314          try {
6315 <            UNSAFE = sun.misc.Unsafe.getUnsafe();
6316 <            Class tc = HashEntry[].class;
6317 <            Class sc = Segment[].class;
6318 <            TBASE = UNSAFE.arrayBaseOffset(tc);
6319 <            SBASE = UNSAFE.arrayBaseOffset(sc);
6320 <            ts = UNSAFE.arrayIndexScale(tc);
6321 <            ss = UNSAFE.arrayIndexScale(sc);
6322 <        } catch (Exception e) {
6315 >            SIZECTL = U.objectFieldOffset
6316 >                (ConcurrentHashMap.class.getDeclaredField("sizeCtl"));
6317 >            TRANSFERINDEX = U.objectFieldOffset
6318 >                (ConcurrentHashMap.class.getDeclaredField("transferIndex"));
6319 >            BASECOUNT = U.objectFieldOffset
6320 >                (ConcurrentHashMap.class.getDeclaredField("baseCount"));
6321 >            CELLSBUSY = U.objectFieldOffset
6322 >                (ConcurrentHashMap.class.getDeclaredField("cellsBusy"));
6323 >
6324 >            CELLVALUE = U.objectFieldOffset
6325 >                (CounterCell.class.getDeclaredField("value"));
6326 >
6327 >            ABASE = U.arrayBaseOffset(Node[].class);
6328 >            int scale = U.arrayIndexScale(Node[].class);
6329 >            if ((scale & (scale - 1)) != 0)
6330 >                throw new Error("array index scale not a power of two");
6331 >            ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
6332 >        } catch (ReflectiveOperationException e) {
6333              throw new Error(e);
6334          }
1486        if ((ss & (ss-1)) != 0 || (ts & (ts-1)) != 0)
1487            throw new Error("data type scale not a power of two");
1488        SSHIFT = 31 - Integer.numberOfLeadingZeros(ss);
1489        TSHIFT = 31 - Integer.numberOfLeadingZeros(ts);
1490    }
6335  
6336 +        // Reduce the risk of rare disastrous classloading in first call to
6337 +        // LockSupport.park: https://bugs.openjdk.java.net/browse/JDK-8074773
6338 +        Class<?> ensureLoaded = LockSupport.class;
6339 +    }
6340   }
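
Editor's note (not part of the patch): the rewritten static initializer above caches field offsets and computes ABASE/ASHIFT so a table slot can be addressed with a shift-and-add instead of a multiply, and it now rejects a non-power-of-two array index scale up front. A minimal, self-contained sketch of that offset arithmetic follows; the class name, method, and the sample scale/base values (4 and 16) are invented for illustration. In the real class the resulting offset is passed to sun.misc.Unsafe volatile accessors such as getObjectVolatile and compareAndSwapObject.

    public class OffsetArithmeticSketch {
        public static void main(String[] args) {
            int scale = 4;    // stand-in for U.arrayIndexScale(Node[].class)
            int abase = 16;   // stand-in for U.arrayBaseOffset(Node[].class)

            // scale must be a power of two so a shift can replace a multiply
            if ((scale & (scale - 1)) != 0)
                throw new AssertionError("array index scale not a power of two");
            int ashift = 31 - Integer.numberOfLeadingZeros(scale);  // log2(4) == 2

            int i = 10;                                    // table slot index
            long offset = ((long) i << ashift) + abase;    // byte offset of tab[i]
            System.out.println(offset);                    // 16 + 10*4 = 56
        }
    }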

Diff Legend

  (no marker)  Removed lines (present only in the old revision)
  +            Added lines (present only in the new revision)
  <            Changed lines (old revision side)
  >            Changed lines (new revision side)
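
Editor's note (not part of the patch): the MapReduce*ToLongTask and MapReduce*ToIntTask classes added in this revision are the fork/join machinery behind ConcurrentHashMap's public bulk reduction methods (reduceKeysToLong, reduceValuesToInt, reduceToLong, and their relatives). A minimal usage sketch, assuming Java 8 or later; the class name and sample data are invented for the example.

    import java.util.concurrent.ConcurrentHashMap;

    public class BulkReduceDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
            map.put("apple", 3);
            map.put("banana", 5);
            map.put("cherry", 7);

            // Sum of key lengths; internally backed by MapReduceKeysToLongTask.
            long totalKeyChars = map.reduceKeysToLong(
                1L,                // parallelismThreshold
                k -> k.length(),   // transformer: key -> long
                0L,                // basis (identity element for the reducer)
                Long::sum);        // reducer

            // Sum of values; internally backed by MapReduceValuesToIntTask.
            int totalValues = map.reduceValuesToInt(1L, v -> v, 0, Integer::sum);

            System.out.println(totalKeyChars + " " + totalValues); // prints "17 15"
        }
    }

The first argument, parallelismThreshold, is the estimated number of elements needed before the operation is split into parallel subtasks: 1 requests maximal splitting, while Long.MAX_VALUE forces a single sequential pass. Each subtask fork in the loops above corresponds to one of the rights/nextRight links stitched together during completion.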