
Comparing jsr166/src/main/java/util/concurrent/ConcurrentHashMap.java (file contents):
Revision 1.110 by jsr166, Wed Apr 27 14:06:30 2011 UTC vs.
Revision 1.215 by dl, Wed May 22 20:42:30 2013 UTC

# Line 5 | Line 5
5   */
6  
7   package java.util.concurrent;
8 import java.util.concurrent.locks.*;
9 import java.util.*;
8   import java.io.Serializable;
9 < import java.io.IOException;
10 < import java.io.ObjectInputStream;
11 < import java.io.ObjectOutputStream;
9 > import java.io.ObjectStreamField;
10 > import java.lang.reflect.ParameterizedType;
11 > import java.lang.reflect.Type;
12 > import java.util.Arrays;
13 > import java.util.Collection;
14 > import java.util.Comparator;
15 > import java.util.ConcurrentModificationException;
16 > import java.util.Enumeration;
17 > import java.util.HashMap;
18 > import java.util.Hashtable;
19 > import java.util.Iterator;
20 > import java.util.Map;
21 > import java.util.NoSuchElementException;
22 > import java.util.Set;
23 > import java.util.Spliterator;
24 > import java.util.concurrent.ConcurrentMap;
25 > import java.util.concurrent.ForkJoinPool;
26 > import java.util.concurrent.atomic.AtomicReference;
27 > import java.util.concurrent.locks.ReentrantLock;
28 > import java.util.concurrent.locks.StampedLock;
29 > import java.util.function.BiConsumer;
30 > import java.util.function.BiFunction;
31 > import java.util.function.BinaryOperator;
32 > import java.util.function.Consumer;
33 > import java.util.function.DoubleBinaryOperator;
34 > import java.util.function.Function;
35 > import java.util.function.IntBinaryOperator;
36 > import java.util.function.LongBinaryOperator;
37 > import java.util.function.ToDoubleBiFunction;
38 > import java.util.function.ToDoubleFunction;
39 > import java.util.function.ToIntBiFunction;
40 > import java.util.function.ToIntFunction;
41 > import java.util.function.ToLongBiFunction;
42 > import java.util.function.ToLongFunction;
43 > import java.util.stream.Stream;
44  
45   /**
46   * A hash table supporting full concurrency of retrievals and
47 < * adjustable expected concurrency for updates. This class obeys the
47 > * high expected concurrency for updates. This class obeys the
48   * same functional specification as {@link java.util.Hashtable}, and
49   * includes versions of methods corresponding to each method of
50 < * <tt>Hashtable</tt>. However, even though all operations are
50 > * {@code Hashtable}. However, even though all operations are
51   * thread-safe, retrieval operations do <em>not</em> entail locking,
52   * and there is <em>not</em> any support for locking the entire table
53   * in a way that prevents all access.  This class is fully
54 < * interoperable with <tt>Hashtable</tt> in programs that rely on its
54 > * interoperable with {@code Hashtable} in programs that rely on its
55   * thread safety but not on its synchronization details.
56   *
57 < * <p> Retrieval operations (including <tt>get</tt>) generally do not
58 < * block, so may overlap with update operations (including
59 < * <tt>put</tt> and <tt>remove</tt>). Retrievals reflect the results
60 < * of the most recently <em>completed</em> update operations holding
61 < * upon their onset.  For aggregate operations such as <tt>putAll</tt>
62 < * and <tt>clear</tt>, concurrent retrievals may reflect insertion or
63 < * removal of only some entries.  Similarly, Iterators and
64 < * Enumerations return elements reflecting the state of the hash table
65 < * at some point at or since the creation of the iterator/enumeration.
66 < * They do <em>not</em> throw {@link ConcurrentModificationException}.
67 < * However, iterators are designed to be used by only one thread at a time.
68 < *
69 < * <p> The allowed concurrency among update operations is guided by
70 < * the optional <tt>concurrencyLevel</tt> constructor argument
71 < * (default <tt>16</tt>), which is used as a hint for internal sizing.  The
72 < * table is internally partitioned to try to permit the indicated
73 < * number of concurrent updates without contention. Because placement
74 < * in hash tables is essentially random, the actual concurrency will
75 < * vary.  Ideally, you should choose a value to accommodate as many
76 < * threads as will ever concurrently modify the table. Using a
77 < * significantly higher value than you need can waste space and time,
78 < * and a significantly lower value can lead to thread contention. But
79 < * overestimates and underestimates within an order of magnitude do
80 < * not usually have much noticeable impact. A value of one is
81 < * appropriate when it is known that only one thread will modify and
82 < * all others will only read. Also, resizing this or any other kind of
83 < * hash table is a relatively slow operation, so, when possible, it is
84 < * a good idea to provide estimates of expected table sizes in
85 < * constructors.
57 > * <p>Retrieval operations (including {@code get}) generally do not
58 > * block, so may overlap with update operations (including {@code put}
59 > * and {@code remove}). Retrievals reflect the results of the most
60 > * recently <em>completed</em> update operations holding upon their
61 > * onset. (More formally, an update operation for a given key bears a
62 > * <em>happens-before</em> relation with any (non-null) retrieval for
63 > * that key reporting the updated value.)  For aggregate operations
64 > * such as {@code putAll} and {@code clear}, concurrent retrievals may
65 > * reflect insertion or removal of only some entries.  Similarly,
66 > * Iterators and Enumerations return elements reflecting the state of
67 > * the hash table at some point at or since the creation of the
68 > * iterator/enumeration.  They do <em>not</em> throw {@link
69 > * ConcurrentModificationException}.  However, iterators are designed
70 > * to be used by only one thread at a time.  Bear in mind that the
71 > * results of aggregate status methods including {@code size}, {@code
72 > * isEmpty}, and {@code containsValue} are typically useful only when
73 > * a map is not undergoing concurrent updates in other threads.
74 > * Otherwise the results of these methods reflect transient states
75 > * that may be adequate for monitoring or estimation purposes, but not
76 > * for program control.
77 > *
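A minimal sketch of the happens-before guarantee above (the class, key, and payload here are illustrative, not part of this file): a non-null result from get is guaranteed to see all writes made before the corresponding put.

    import java.util.concurrent.ConcurrentHashMap;

    class HandoffSketch {
        static final ConcurrentHashMap<String, int[]> map = new ConcurrentHashMap<>();

        static void writer() {
            int[] payload = {42};         // fully initialized before publication
            map.put("ready", payload);    // the publishing update
        }

        static void reader() {
            int[] p = map.get("ready");  // non-null result happens-after the put
            if (p != null)
                assert p[0] == 42;       // visible with no further synchronization
        }
    }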
78 > * <p>The table is dynamically expanded when there are too many
79 > * collisions (i.e., keys that have distinct hash codes but fall into
80 > * the same slot modulo the table size), with the expected average
81 > * effect of maintaining roughly two bins per mapping (corresponding
82 > * to a 0.75 load factor threshold for resizing). There may be much
83 > * variance around this average as mappings are added and removed, but
84 > * overall, this maintains a commonly accepted time/space tradeoff for
85 > * hash tables.  However, resizing this or any other kind of hash
86 > * table may be a relatively slow operation. When possible, it is a
87 > * good idea to provide a size estimate as an optional {@code
88 > * initialCapacity} constructor argument. An additional optional
89 > * {@code loadFactor} constructor argument provides a further means of
90 > * customizing initial table capacity by specifying the table density
91 > * to be used in calculating the amount of space to allocate for the
92 > * given number of elements.  Also, for compatibility with previous
93 > * versions of this class, constructors may optionally specify an
94 > * expected {@code concurrencyLevel} as an additional hint for
95 > * internal sizing.  Note that using many keys with exactly the same
96 > * {@code hashCode()} is a sure way to slow down performance of any
97 > * hash table. To ameliorate impact, when keys are {@link Comparable},
98 > * this class may use comparison order among keys to help break ties.
99 > *
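A short sketch of those sizing hints at construction (the numbers are arbitrary):

    import java.util.concurrent.ConcurrentHashMap;

    class SizingSketch {
        // Size estimate only:
        static final ConcurrentHashMap<String, String> m1 =
            new ConcurrentHashMap<>(1024);
        // initialCapacity, loadFactor, and the legacy concurrencyLevel hint:
        static final ConcurrentHashMap<String, String> m2 =
            new ConcurrentHashMap<>(1024, 0.75f, 16);
    }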
100 > * <p>A {@link Set} projection of a ConcurrentHashMap may be created
101 > * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
102 > * (using {@link #keySet(Object)}) when only keys are of interest, and the
103 > * mapped values are (perhaps transiently) not used or all take the
104 > * same mapping value.
105 > *
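Both projections in a brief sketch (assuming the KeySetView API this revision introduces; names are illustrative):

    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    class ProjectionSketch {
        public static void main(String[] args) {
            // Created: a concurrent Set backed by a fresh ConcurrentHashMap
            Set<String> created = ConcurrentHashMap.newKeySet();
            created.add("alpha");

            // Viewed: a key set over an existing map; add() inserts the fixed value
            ConcurrentHashMap<String, String> m = new ConcurrentHashMap<>();
            Set<String> viewed = m.keySet("present");
            viewed.add("beta");           // equivalent to m.put("beta", "present")
        }
    }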
106 > * <p>A ConcurrentHashMap can be used as a scalable frequency map (a
107 > * form of histogram or multiset) by using {@link
108 > * java.util.concurrent.atomic.LongAdder} values and initializing via
109 > * {@link #computeIfAbsent computeIfAbsent}. For example, to add a count
110 > * to a {@code ConcurrentHashMap<String,LongAdder> freqs}, you can use
111 > * {@code freqs.computeIfAbsent(key, k -> new LongAdder()).increment();}
112   *
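The same idiom as a runnable sketch, using the two-argument form of computeIfAbsent (the words array is a stand-in input):

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.LongAdder;

    class FreqSketch {
        public static void main(String[] args) {
            ConcurrentHashMap<String, LongAdder> freqs = new ConcurrentHashMap<>();
            String[] words = {"a", "b", "a"};   // stand-in input
            for (String w : words)
                freqs.computeIfAbsent(w, k -> new LongAdder()).increment();
            System.out.println(freqs.get("a").sum()); // 2
        }
    }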
113   * <p>This class and its views and iterators implement all of the
114   * <em>optional</em> methods of the {@link Map} and {@link Iterator}
115   * interfaces.
116   *
117 < * <p> Like {@link Hashtable} but unlike {@link HashMap}, this class
118 < * does <em>not</em> allow <tt>null</tt> to be used as a key or value.
117 > * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
118 > * does <em>not</em> allow {@code null} to be used as a key or value.
119 > *
120 > * <p>ConcurrentHashMaps support a set of sequential and parallel bulk
121 > * operations that, unlike most {@link Stream} methods, are designed
122 > * to be safely, and often sensibly, applied even with maps that are
123 > * being concurrently updated by other threads; for example, when
124 > * computing a snapshot summary of the values in a shared registry.
125 > * There are three kinds of operation, each with four forms, accepting
126 > * functions with Keys, Values, Entries, and (Key, Value) arguments
127 > * and/or return values. Because the elements of a ConcurrentHashMap
128 > * are not ordered in any particular way, and may be processed in
129 > * different orders in different parallel executions, the correctness
130 > * of supplied functions should not depend on any ordering, or on any
131 > * other objects or values that may transiently change while
132 > * computation is in progress; and except for forEach actions, should
133 > * ideally be side-effect-free. Bulk operations on {@link Map.Entry}
134 > * objects do not support method {@code setValue}.
135 > *
136 > * <ul>
137 > * <li> forEach: Perform a given action on each element.
138 > * A variant form applies a given transformation on each element
139 > * before performing the action.</li>
140 > *
141 > * <li> search: Return the first available non-null result of
142 > * applying a given function on each element; skipping further
143 > * search when a result is found.</li>
144 > *
145 > * <li> reduce: Accumulate each element.  The supplied reduction
146 > * function cannot rely on ordering (more formally, it should be
147 > * both associative and commutative).  There are five variants:
148 > *
149 > * <ul>
150 > *
151 > * <li> Plain reductions. (There is not a form of this method for
152 > * (key, value) function arguments since there is no corresponding
153 > * return type.)</li>
154 > *
155 > * <li> Mapped reductions that accumulate the results of a given
156 > * function applied to each element.</li>
157 > *
158 > * <li> Reductions to scalar doubles, longs, and ints, using a
159 > * given basis value.</li>
160 > *
161 > * </ul>
162 > * </li>
163 > * </ul>
164 > *
165 > * <p>These bulk operations accept a {@code parallelismThreshold}
166 > * argument. Methods proceed sequentially if the current map size is
167 > * estimated to be less than the given threshold. Using a value of
168 > * {@code Long.MAX_VALUE} suppresses all parallelism.  Using a value
169 > * of {@code 1} results in maximal parallelism.  In-between values can
170 > * be used to trade off overhead versus throughput. Parallel forms use
171 > * the {@link ForkJoinPool#commonPool()}.
172 > *
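A sketch of the threshold argument in use (the map contents and the threshold of 500 are arbitrary):

    import java.util.concurrent.ConcurrentHashMap;

    class BulkSketch {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Long> scores = new ConcurrentHashMap<>();
            scores.put("a", 1L);
            scores.put("b", -2L);

            // Sequential unless the estimated map size reaches 500
            scores.forEach(500L, (k, v) -> System.out.println(k + "=" + v));

            // search: first non-null function result wins; null if none
            String negative = scores.search(500L, (k, v) -> v < 0 ? k : null);
            System.out.println(negative); // b
        }
    }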
173 > * <p>The concurrency properties of bulk operations follow
174 > * from those of ConcurrentHashMap: Any non-null result returned
175 > * from {@code get(key)} and related access methods bears a
176 > * happens-before relation with the associated insertion or
177 > * update.  The result of any bulk operation reflects the
178 > * composition of these per-element relations (but is not
179 > * necessarily atomic with respect to the map as a whole unless it
180 > * is somehow known to be quiescent).  Conversely, because keys
181 > * and values in the map are never null, null serves as a reliable
182 > * atomic indicator of the current lack of any result.  To
183 > * maintain this property, null serves as an implicit basis for
184 > * all non-scalar reduction operations. For the double, long, and
185 > * int versions, the basis should be one that, when combined with
186 > * any other value, returns that other value (more formally, it
187 > * should be the identity element for the reduction). Most common
188 > * reductions have these properties; for example, computing a sum
189 > * with basis 0 or a minimum with basis MAX_VALUE.
190 > *
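For instance, scalar reductions with their identity bases (a sketch; contents arbitrary):

    import java.util.concurrent.ConcurrentHashMap;

    class BasisSketch {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Integer> m = new ConcurrentHashMap<>();
            m.put("x", 3);
            m.put("y", 7);
            // Basis 0 is the identity for sum
            int sum = m.reduceValuesToInt(1L, Integer::intValue, 0, Integer::sum);
            // Basis MAX_VALUE is the identity for min
            int min = m.reduceValuesToInt(1L, Integer::intValue,
                                          Integer.MAX_VALUE, Math::min);
            System.out.println(sum + " " + min); // 10 3
        }
    }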
191 > * <p>Search and transformation functions provided as arguments
192 > * should similarly return null to indicate the lack of any result
193 > * (in which case it is not used). In the case of mapped
194 > * reductions, this also enables transformations to serve as
195 > * filters, returning null (or, in the case of primitive
196 > * specializations, the identity basis) if the element should not
197 > * be combined. You can create compound transformations and
198 > * filterings by composing them yourself under this "null means
199 > * there is nothing there now" rule before using them in search or
200 > * reduce operations.
201 > *
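A sketch of a transformation doubling as a filter under this rule (the length cutoff of 8 is arbitrary):

    import java.util.concurrent.ConcurrentHashMap;

    class FilterSketch {
        public static void main(String[] args) {
            ConcurrentHashMap<String, String> m = new ConcurrentHashMap<>();
            m.put("k1", "short");
            m.put("k2", "considerably longer");
            // Values shorter than 8 chars map to null and are skipped
            String longest = m.reduceValues(1L,
                v -> v.length() >= 8 ? v : null,
                (a, b) -> a.length() >= b.length() ? a : b);
            System.out.println(longest); // considerably longer
        }
    }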
202 > * <p>Methods accepting and/or returning Entry arguments maintain
203 > * key-value associations. They may be useful for example when
204 > * finding the key for the greatest value. Note that "plain" Entry
205 > * arguments can be supplied using {@code new
206 > * AbstractMap.SimpleEntry(k,v)}.
207 > *
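For example, finding the key of the greatest value via an Entry reduction (a sketch):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    class MaxEntrySketch {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Integer> m = new ConcurrentHashMap<>();
            m.put("a", 1);
            m.put("b", 9);
            Map.Entry<String, Integer> max = m.reduceEntries(1L,
                (e1, e2) -> e1.getValue() >= e2.getValue() ? e1 : e2);
            System.out.println(max == null ? null : max.getKey()); // b
        }
    }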
208 > * <p>Bulk operations may complete abruptly, throwing an
209 > * exception encountered in the application of a supplied
210 > * function. Bear in mind when handling such exceptions that other
211 > * concurrently executing functions could also have thrown
212 > * exceptions, or would have done so if the first exception had
213 > * not occurred.
214 > *
215 > * <p>Speedups for parallel compared to sequential forms are common
216 > * but not guaranteed.  Parallel operations involving brief functions
217 > * on small maps may execute more slowly than sequential forms if the
218 > * underlying work to parallelize the computation is more expensive
219 > * than the computation itself.  Similarly, parallelization may not
220 > * lead to much actual parallelism if all processors are busy
221 > * performing unrelated tasks.
222 > *
223 > * <p>All arguments to all task methods must be non-null.
224   *
225   * <p>This class is a member of the
226   * <a href="{@docRoot}/../technotes/guides/collections/index.html">
# Line 70 | Line 231 | import java.io.ObjectOutputStream;
231   * @param <K> the type of keys maintained by this map
232   * @param <V> the type of mapped values
233   */
234 < public class ConcurrentHashMap<K, V> extends AbstractMap<K, V>
235 <        implements ConcurrentMap<K, V>, Serializable {
234 > @SuppressWarnings({"unchecked", "rawtypes", "serial"})
235 > public class ConcurrentHashMap<K,V> implements ConcurrentMap<K,V>, Serializable {
236      private static final long serialVersionUID = 7249069246763182397L;
237  
238      /*
239 <     * The basic strategy is to subdivide the table among Segments,
240 <     * each of which itself is a concurrently readable hash table.  To
241 <     * reduce footprint, all but the first segment are constructed only
242 <     * when first needed (see ensureSegment). To maintain visibility
243 <     * in the presence of lazy construction, accesses to segments as
244 <     * well as elements of segment's table must use volatile access,
245 <     * which is done via Unsafe within methods segmentAt etc
246 <     * below. These provide the functionality of AtomicReferenceArrays
247 <     * but reduce the levels of indirection. Additionally,
248 <     * volatile-writes of table elements and entry "next" fields
249 <     * within locked operations use the cheaper "lazySet" forms of
250 <     * writes (via putOrderedObject) because these writes are always
251 <     * followed by lock releases that maintain sequential consistency
252 <     * of table updates.
253 <     *
254 <     * Historical note: The previous version of this class relied
255 <     * heavily on "final" fields, which avoided some volatile reads at
256 <     * the expense of a large initial footprint.  Some remnants of
257 <     * that design (including forced construction of segment 0) exist
258 <     * to ensure serialization compatibility.
239 >     * Overview:
240 >     *
241 >     * The primary design goal of this hash table is to maintain
242 >     * concurrent readability (typically method get(), but also
243 >     * iterators and related methods) while minimizing update
244 >     * contention. Secondary goals are to keep space consumption about
245 >     * the same or better than java.util.HashMap, and to support high
246 >     * initial insertion rates on an empty table by many threads.
247 >     *
248 >     * Each key-value mapping is held in a Node.  Because Node key
249 >     * fields can contain special values, they are defined using plain
250 >     * Object types (not type "K"). This leads to a lot of explicit
251 >     * casting (and the use of class-wide warning suppressions).  It
252 >     * also allows some of the public methods to be factored into a
253 >     * smaller number of internal methods (although sadly not so for
254 >     * the five variants of put-related operations). The
255 >     * validation-based approach explained below leads to a lot of
256 >     * code sprawl because retry-control precludes factoring into
257 >     * smaller methods.
258 >     *
259 >     * The table is lazily initialized to a power-of-two size upon the
260 >     * first insertion.  Each bin in the table normally contains a
261 >     * list of Nodes (most often, the list has only zero or one Node).
262 >     * Table accesses require volatile/atomic reads, writes, and
263 >     * CASes.  Because there is no other way to arrange this without
264 >     * adding further indirections, we use intrinsics
265 >     * (sun.misc.Unsafe) operations.
266 >     *
267 >     * We use the top (sign) bit of Node hash fields for control
268 >     * purposes -- it is available anyway because of addressing
269 >     * constraints.  Nodes with negative hash fields are forwarding
270 >     * nodes to either TreeBins or resized tables.  The lower 31 bits
271 >     * of each normal Node's hash field contain a transformation of
272 >     * the key's hash code.
273 >     *
274 >     * Insertion (via put or its variants) of the first node in an
275 >     * empty bin is performed by just CASing it to the bin.  This is
276 >     * by far the most common case for put operations under most
277 >     * key/hash distributions.  Other update operations (insert,
278 >     * delete, and replace) require locks.  We do not want to waste
279 >     * the space required to associate a distinct lock object with
280 >     * each bin, so instead use the first node of a bin list itself as
281 >     * a lock. Locking support for these locks relies on builtin
282 >     * "synchronized" monitors.
283 >     *
284 >     * Using the first node of a list as a lock does not by itself
285 >     * suffice though: When a node is locked, any update must first
286 >     * validate that it is still the first node after locking it, and
287 >     * retry if not. Because new nodes are always appended to lists,
288 >     * once a node is first in a bin, it remains first until deleted
289 >     * or the bin becomes invalidated (upon resizing).
290 >     *
291 >     * The main disadvantage of per-bin locks is that other update
292 >     * operations on other nodes in a bin list protected by the same
293 >     * lock can stall, for example when user equals() or mapping
294 >     * functions take a long time.  However, statistically, under
295 >     * random hash codes, this is not a common problem.  Ideally, the
296 >     * frequency of nodes in bins follows a Poisson distribution
297 >     * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
298 >     * parameter of about 0.5 on average, given the resizing threshold
299 >     * of 0.75, although with a large variance because of resizing
300 >     * granularity. Ignoring variance, the expected occurrences of
301 >     * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
302 >     * first values are:
303 >     *
304 >     * 0:    0.60653066
305 >     * 1:    0.30326533
306 >     * 2:    0.07581633
307 >     * 3:    0.01263606
308 >     * 4:    0.00157952
309 >     * 5:    0.00015795
310 >     * 6:    0.00001316
311 >     * 7:    0.00000094
312 >     * 8:    0.00000006
313 >     * more: less than 1 in ten million
314 >     *
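The table above can be reproduced with a few lines (a sketch of exp(-0.5) * pow(0.5, k) / factorial(k)):

    class PoissonSketch {
        public static void main(String[] args) {
            double f = 1.0;                       // running k!
            for (int k = 0; k <= 8; k++) {
                if (k > 0) f *= k;
                System.out.printf("%d: %.8f%n",
                                  k, Math.exp(-0.5) * Math.pow(0.5, k) / f);
            }
        }
    }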
315 >     * Lock contention probability for two threads accessing distinct
316 >     * elements is roughly 1 / (8 * #elements) under random hashes.
317 >     *
318 >     * Actual hash code distributions encountered in practice
319 >     * sometimes deviate significantly from uniform randomness.  This
320 >     * includes the case when N > (1<<30), so some keys MUST collide.
321 >     * Similarly for dumb or hostile usages in which multiple keys are
322 >     * designed to have identical hash codes. Also, although we guard
323 >     * against the worst effects of this (see method spread), sets of
324 >     * hashes may differ only in bits that do not impact their bin
325 >     * index for a given power-of-two mask.  So we use a secondary
326 >     * strategy that applies when the number of nodes in a bin exceeds
327 >     * a threshold, and at least one of the keys implements
328 >     * Comparable.  These TreeBins use a balanced tree to hold nodes
329 >     * (a specialized form of red-black trees), bounding search time
330 >     * to O(log N).  Each search step in a TreeBin is at least twice as
331 >     * slow as in a regular list, but given that N cannot exceed
332 >     * (1<<64) (before running out of addresses) this bounds search
333 >     * steps, lock hold times, etc, to reasonable constants (roughly
334 >     * 100 nodes inspected per operation worst case) so long as keys
335 >     * are Comparable (which is very common -- String, Long, etc).
336 >     * TreeBin nodes (TreeNodes) also maintain the same "next"
337 >     * traversal pointers as regular nodes, so can be traversed in
338 >     * iterators in the same way.
339 >     *
340 >     * The table is resized when occupancy exceeds a percentage
341 >     * threshold (nominally, 0.75, but see below).  Any thread
342 >     * noticing an overfull bin may assist in resizing after the
343 >     * initiating thread allocates and sets up the replacement
344 >     * array. However, rather than stalling, these other threads may
345 >     * proceed with insertions etc.  The use of TreeBins shields us
346 >     * from the worst case effects of overfilling while resizes are in
347 >     * progress.  Resizing proceeds by transferring bins, one by one,
348 >     * from the table to the next table. To enable concurrency, the
349 >     * next table must be (incrementally) prefilled with place-holders
350 >     * serving as reverse forwarders to the old table.  Because we are
351 >     * using power-of-two expansion, the elements from each bin must
352 >     * either stay at same index, or move with a power of two
353 >     * offset. We eliminate unnecessary node creation by catching
354 >     * cases where old nodes can be reused because their next fields
355 >     * won't change.  On average, only about one-sixth of them need
356 >     * cloning when a table doubles. The nodes they replace will be
357 >     * garbage collectable as soon as they are no longer referenced by
358 >     * any reader thread that may be in the midst of concurrently
359 >     * traversing the table.  Upon transfer, the old table bin contains
360 >     * only a special forwarding node (with hash field "MOVED") that
361 >     * contains the next table as its key. On encountering a
362 >     * forwarding node, access and update operations restart, using
363 >     * the new table.
364 >     *
365 >     * Each bin transfer requires its bin lock, which can stall
366 >     * waiting for locks while resizing. However, because other
367 >     * threads can join in and help resize rather than contend for
368 >     * locks, average aggregate waits become shorter as resizing
369 >     * progresses.  The transfer operation must also ensure that all
370 >     * accessible bins in both the old and new table are usable by any
371 >     * traversal.  This is arranged by proceeding from the last bin
372 >     * (table.length - 1) up towards the first.  Upon seeing a
373 >     * forwarding node, traversals (see class Traverser) arrange to
374 >     * move to the new table without revisiting nodes.  However, to
375 >     * ensure that no intervening nodes are skipped, bin splitting can
376 >     * only begin after the associated reverse-forwarders are in
377 >     * place.
378 >     *
379 >     * The traversal scheme also applies to partial traversals of
380 >     * ranges of bins (via an alternate Traverser constructor)
381 >     * to support partitioned aggregate operations.  Also, read-only
382 >     * operations give up if ever forwarded to a null table, which
383 >     * provides support for shutdown-style clearing, which is also not
384 >     * currently implemented.
385 >     *
386 >     * Lazy table initialization minimizes footprint until first use,
387 >     * and also avoids resizings when the first operation is from a
388 >     * putAll, constructor with map argument, or deserialization.
389 >     * These cases attempt to override the initial capacity settings,
390 >     * but harmlessly fail to take effect in cases of races.
391 >     *
392 >     * The element count is maintained using a specialization of
393 >     * LongAdder. We need to incorporate a specialization rather than
394 >     * just use a LongAdder in order to access implicit
395 >     * contention-sensing that leads to creation of multiple
396 >     * Cells.  The counter mechanics avoid contention on
397 >     * updates but can encounter cache thrashing if read too
398 >     * frequently during concurrent access. To avoid reading so often,
399 >     * resizing under contention is attempted only upon adding to a
400 >     * bin already holding two or more nodes. Under uniform hash
401 >     * distributions, the probability of this occurring at threshold
402 >     * is around 13%, meaning that only about 1 in 8 puts check
403 >     * threshold (and after resizing, many fewer do so). The bulk
404 >     * putAll operation further reduces contention by only committing
405 >     * count updates upon these size checks.
406 >     *
407 >     * Maintaining API and serialization compatibility with previous
408 >     * versions of this class introduces several oddities. Mainly: We
409 >     * leave untouched but unused constructor arguments referring to
410 >     * concurrencyLevel. We accept a loadFactor constructor argument,
411 >     * but apply it only to initial table capacity (which is the only
412 >     * time that we can guarantee to honor it.) We also declare an
413 >     * unused "Segment" class that is instantiated in minimal form
414 >     * only when serializing.
415       */
416  
417      /* ---------------- Constants -------------- */
418  
419      /**
420 <     * The default initial capacity for this table,
421 <     * used when not otherwise specified in a constructor.
420 >     * The largest possible table capacity.  This value must be
421 >     * exactly 1<<30 to stay within Java array allocation and indexing
422 >     * bounds for power of two table sizes, and is further required
423 >     * because the top two bits of 32-bit hash fields are used for
424 >     * control purposes.
425       */
426 <    static final int DEFAULT_INITIAL_CAPACITY = 16;
426 >    private static final int MAXIMUM_CAPACITY = 1 << 30;
427  
428      /**
429 <     * The default load factor for this table, used when not
430 <     * otherwise specified in a constructor.
429 >     * The default initial table capacity.  Must be a power of 2
430 >     * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
431       */
432 <    static final float DEFAULT_LOAD_FACTOR = 0.75f;
432 >    private static final int DEFAULT_CAPACITY = 16;
433  
434      /**
435 <     * The default concurrency level for this table, used when not
436 <     * otherwise specified in a constructor.
435 >     * The largest possible (non-power of two) array size.
436 >     * Needed by toArray and related methods.
437       */
438 <    static final int DEFAULT_CONCURRENCY_LEVEL = 16;
438 >    static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
439  
440      /**
441 <     * The maximum capacity, used if a higher value is implicitly
442 <     * specified by either of the constructors with arguments.  MUST
123 <     * be a power of two <= 1<<30 to ensure that entries are indexable
124 <     * using ints.
441 >     * The default concurrency level for this table. Unused but
442 >     * defined for compatibility with previous versions of this class.
443       */
444 <    static final int MAXIMUM_CAPACITY = 1 << 30;
444 >    private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
445  
446      /**
447 <     * The minimum capacity for per-segment tables.  Must be a power
448 <     * of two, at least two to avoid immediate resizing on next use
449 <     * after lazy construction.
447 >     * The load factor for this table. Overrides of this value in
448 >     * constructors affect only the initial table capacity.  The
449 >     * actual floating point value isn't normally used -- it is
450 >     * simpler to use expressions such as {@code n - (n >>> 2)} for
451 >     * the associated resizing threshold.
452       */
453 <    static final int MIN_SEGMENT_TABLE_CAPACITY = 2;
453 >    private static final float LOAD_FACTOR = 0.75f;
454  
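(As a quick check of that expression: for n = 16, n - (n >>> 2) = 16 - 4 = 12, i.e. 0.75 * 16.)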
455      /**
456 <     * The maximum number of segments to allow; used to bound
457 <     * constructor arguments. Must be power of two less than 1 << 24.
456 >     * The bin count threshold for using a tree rather than list for a
457 >     * bin.  The value reflects the approximate break-even point for
458 >     * using tree-based operations.
459       */
460 <    static final int MAX_SEGMENTS = 1 << 16; // slightly conservative
460 >    private static final int TREE_THRESHOLD = 8;
461  
462      /**
463 <     * Number of unsynchronized retries in size and containsValue
464 <     * methods before resorting to locking. This is used to avoid
465 <     * unbounded retries if tables undergo continuous modification
466 <     * which would make it impossible to obtain an accurate result.
463 >     * Minimum number of rebinnings per transfer step. Ranges are
464 >     * subdivided to allow multiple resizer threads.  This value
465 >     * serves as a lower bound to avoid resizers encountering
466 >     * excessive memory contention.  The value should be at least
467 >     * DEFAULT_CAPACITY.
468       */
469 <    static final int RETRIES_BEFORE_LOCK = 2;
469 >    private static final int MIN_TRANSFER_STRIDE = 16;
470 >
471 >    /*
472 >     * Encodings for Node hash fields. See above for explanation.
473 >     */
474 >    static final int MOVED     = 0x80000000; // hash field for forwarding nodes
475 >    static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
476 >
477 >    /** Number of CPUS, to place bounds on some sizings */
478 >    static final int NCPU = Runtime.getRuntime().availableProcessors();
479 >
480 >    /** For serialization compatibility. */
481 >    private static final ObjectStreamField[] serialPersistentFields = {
482 >        new ObjectStreamField("segments", Segment[].class),
483 >        new ObjectStreamField("segmentMask", Integer.TYPE),
484 >        new ObjectStreamField("segmentShift", Integer.TYPE)
485 >    };
486 >
487 >    /**
488 >     * A padded cell for distributing counts.  Adapted from LongAdder
489 >     * and Striped64.  See their internal docs for explanation.
490 >     */
491 >    @sun.misc.Contended static final class Cell {
492 >        volatile long value;
493 >        Cell(long x) { value = x; }
494 >    }
495  
496      /* ---------------- Fields -------------- */
497  
498      /**
499 <     * Mask value for indexing into segments. The upper bits of a
500 <     * key's hash code are used to choose the segment.
499 >     * The array of bins. Lazily initialized upon first insertion.
500 >     * Size is always a power of two. Accessed directly by iterators.
501 >     */
502 >    transient volatile Node<K,V>[] table;
503 >
504 >    /**
505 >     * The next table to use; non-null only while resizing.
506 >     */
507 >    private transient volatile Node<K,V>[] nextTable;
508 >
509 >    /**
510 >     * Base counter value, used mainly when there is no contention,
511 >     * but also as a fallback during table initialization
512 >     * races. Updated via CAS.
513 >     */
514 >    private transient volatile long baseCount;
515 >
516 >    /**
517 >     * Table initialization and resizing control.  When negative, the
518 >     * table is being initialized or resized: -1 for initialization,
519 >     * else -(1 + the number of active resizing threads).  Otherwise,
520 >     * when table is null, holds the initial table size to use upon
521 >     * creation, or 0 for default. After initialization, holds the
522 >     * next element count value upon which to resize the table.
523 >     */
524 >    private transient volatile int sizeCtl;
525 >
526 >    /**
527 >     * The next table index (plus one) to split while resizing.
528 >     */
529 >    private transient volatile int transferIndex;
530 >
531 >    /**
532 >     * The least available table index to split while resizing.
533       */
534 <    final int segmentMask;
534 >    private transient volatile int transferOrigin;
535  
536      /**
537 <     * Shift value for indexing within segments.
537 >     * Spinlock (locked via CAS) used when resizing and/or creating Cells.
538       */
539 <    final int segmentShift;
539 >    private transient volatile int cellsBusy;
540  
541      /**
542 <     * The segments, each of which is a specialized hash table.
542 >     * Table of counter cells. When non-null, size is a power of 2.
543 >     */
544 >    private transient volatile Cell[] counterCells;
545 >
546 >    // views
547 >    private transient KeySetView<K,V> keySet;
548 >    private transient ValuesView<K,V> values;
549 >    private transient EntrySetView<K,V> entrySet;
550 >
551 >    /* ---------------- Table element access -------------- */
552 >
553 >    /*
554 >     * Volatile access methods are used for table elements as well as
555 >     * elements of in-progress next table while resizing.  Uses are
556 >     * null checked by callers, and implicitly bounds-checked, relying
557 >     * on the invariants that tab arrays have non-zero size, and all
558 >     * indices are masked with (tab.length - 1) which is never
559 >     * negative and always less than length. Note that, to be correct
560 >     * wrt arbitrary concurrency errors by users, bounds checks must
561 >     * operate on local variables, which accounts for some odd-looking
562 >     * inline assignments below.
563       */
165    final Segment<K,V>[] segments;
564  
565 <    transient Set<K> keySet;
566 <    transient Set<Map.Entry<K,V>> entrySet;
567 <    transient Collection<V> values;
565 >    static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
566 >        return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
567 >    }
568 >
569 >    static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
570 >                                        Node<K,V> c, Node<K,V> v) {
571 >        return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
572 >    }
573 >
574 >    static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
575 >        U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v);
576 >    }
577 >
578 >    /* ---------------- Nodes -------------- */
579  
580      /**
581 <     * ConcurrentHashMap list entry. Note that this is never exported
582 <     * out as a user-visible Map.Entry.
581 >     * Key-value entry.  This class is never exported out as a
582 >     * user-mutable Map.Entry (i.e., one supporting setValue; see
583 >     * MapEntry below), but can be used for read-only traversals used
584 >     * in bulk tasks.  Nodes with a hash field of MOVED are special,
585 >     * and do not contain user keys or values (and are never
586 >     * exported).  Otherwise, keys and vals are never null.
587       */
588 <    static final class HashEntry<K,V> {
588 >    static class Node<K,V> implements Map.Entry<K,V> {
589          final int hash;
590 <        final K key;
591 <        volatile V value;
592 <        volatile HashEntry<K,V> next;
590 >        final Object key;
591 >        volatile V val;
592 >        Node<K,V> next;
593  
594 <        HashEntry(int hash, K key, V value, HashEntry<K,V> next) {
594 >        Node(int hash, Object key, V val, Node<K,V> next) {
595              this.hash = hash;
596              this.key = key;
597 <            this.value = value;
597 >            this.val = val;
598              this.next = next;
599          }
600  
601 <        /**
602 <         * Sets next field with volatile write semantics.  (See above
603 <         * about use of putOrderedObject.)
604 <         */
605 <        final void setNext(HashEntry<K,V> n) {
606 <            UNSAFE.putOrderedObject(this, nextOffset, n);
601 >        public final K getKey()       { return (K)key; }
602 >        public final V getValue()     { return val; }
603 >        public final int hashCode()   { return key.hashCode() ^ val.hashCode(); }
604 >        public final String toString(){ return key + "=" + val; }
605 >        public final V setValue(V value) {
606 >            throw new UnsupportedOperationException();
607          }
608  
609 <        // Unsafe mechanics
610 <        static final sun.misc.Unsafe UNSAFE;
611 <        static final long nextOffset;
612 <        static {
613 <            try {
614 <                UNSAFE = sun.misc.Unsafe.getUnsafe();
615 <                Class k = HashEntry.class;
203 <                nextOffset = UNSAFE.objectFieldOffset
204 <                    (k.getDeclaredField("next"));
205 <            } catch (Exception e) {
206 <                throw new Error(e);
207 <            }
609 >        public final boolean equals(Object o) {
610 >            Object k, v, u; Map.Entry<?,?> e;
611 >            return ((o instanceof Map.Entry) &&
612 >                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
613 >                    (v = e.getValue()) != null &&
614 >                    (k == key || k.equals(key)) &&
615 >                    (v == (u = val) || v.equals(u)));
616          }
617      }
618  
619      /**
620 <     * Gets the ith element of given table (if nonnull) with volatile
213 <     * read semantics. Note: This is manually integrated into a few
214 <     * performance-sensitive methods to reduce call overhead.
620 >     * Exported Entry for EntryIterator
621       */
622 <    @SuppressWarnings("unchecked")
623 <    static final <K,V> HashEntry<K,V> entryAt(HashEntry<K,V>[] tab, int i) {
624 <        return (tab == null) ? null :
625 <            (HashEntry<K,V>) UNSAFE.getObjectVolatile
626 <            (tab, ((long)i << TSHIFT) + TBASE);
622 >    static final class MapEntry<K,V> implements Map.Entry<K,V> {
623 >        final K key; // non-null
624 >        V val;       // non-null
625 >        final ConcurrentHashMap<K,V> map;
626 >        MapEntry(K key, V val, ConcurrentHashMap<K,V> map) {
627 >            this.key = key;
628 >            this.val = val;
629 >            this.map = map;
630 >        }
631 >        public K getKey()        { return key; }
632 >        public V getValue()      { return val; }
633 >        public int hashCode()    { return key.hashCode() ^ val.hashCode(); }
634 >        public String toString() { return key + "=" + val; }
635 >
636 >        public boolean equals(Object o) {
637 >            Object k, v; Map.Entry<?,?> e;
638 >            return ((o instanceof Map.Entry) &&
639 >                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
640 >                    (v = e.getValue()) != null &&
641 >                    (k == key || k.equals(key)) &&
642 >                    (v == val || v.equals(val)));
643 >        }
644 >
645 >        /**
646 >         * Sets our entry's value and writes through to the map. The
647 >         * value to return is somewhat arbitrary here. Since we do not
648 >         * necessarily track asynchronous changes, the most recent
649 >         * "previous" value could be different from what we return (or
650 >         * could even have been removed, in which case the put will
651 >         * re-establish). We do not and cannot guarantee more.
652 >         */
653 >        public V setValue(V value) {
654 >            if (value == null) throw new NullPointerException();
655 >            V v = val;
656 >            val = value;
657 >            map.put(key, value);
658 >            return v;
659 >        }
660      }
661  
662 >
663 >    /* ---------------- TreeBins -------------- */
664 >
665      /**
666 <     * Sets the ith element of given table, with volatile write
225 <     * semantics. (See above about use of putOrderedObject.)
666 >     * Nodes for use in TreeBins
667       */
668 <    static final <K,V> void setEntryAt(HashEntry<K,V>[] tab, int i,
669 <                                       HashEntry<K,V> e) {
670 <        UNSAFE.putOrderedObject(tab, ((long)i << TSHIFT) + TBASE, e);
668 >    static final class TreeNode<K,V> extends Node<K,V> {
669 >        TreeNode<K,V> parent;  // red-black tree links
670 >        TreeNode<K,V> left;
671 >        TreeNode<K,V> right;
672 >        TreeNode<K,V> prev;    // needed to unlink next upon deletion
673 >        boolean red;
674 >
675 >        TreeNode(int hash, Object key, V val, Node<K,V> next,
676 >                 TreeNode<K,V> parent) {
677 >            super(hash, key, val, next);
678 >            this.parent = parent;
679 >        }
680      }
681  
682      /**
683 <     * Applies a supplemental hash function to a given hashCode, which
684 <     * defends against poor quality hash functions.  This is critical
685 <     * because ConcurrentHashMap uses power-of-two length hash tables,
686 <     * that otherwise encounter collisions for hashCodes that do not
687 <     * differ in lower or upper bits.
688 <     */
689 <    private static int hash(int h) {
690 <        // Spread bits to regularize both segment and index locations,
691 <        // using variant of single-word Wang/Jenkins hash.
692 <        h += (h <<  15) ^ 0xffffcd7d;
693 <        h ^= (h >>> 10);
694 <        h += (h <<   3);
695 <        h ^= (h >>>  6);
696 <        h += (h <<   2) + (h << 14);
697 <        return h ^ (h >>> 16);
683 >     * Returns a Class for the given object of the form "class C
684 >     * implements Comparable<C>", if one exists, else null.  See below
685 >     * for explanation.
686 >     */
687 >    static Class<?> comparableClassFor(Object x) {
688 >        Class<?> c, s, cmpc; Type[] ts, as; Type t; ParameterizedType p;
689 >        if ((c = x.getClass()) == String.class) // bypass checks
690 >            return c;
691 >        if ((cmpc = Comparable.class).isAssignableFrom(c)) {
692 >            while (cmpc.isAssignableFrom(s = c.getSuperclass()))
693 >                c = s; // find topmost comparable class
694 >            if ((ts = c.getGenericInterfaces()) != null) {
695 >                for (int i = 0; i < ts.length; ++i) {
696 >                    if (((t = ts[i]) instanceof ParameterizedType) &&
697 >                        ((p = (ParameterizedType)t).getRawType() == cmpc) &&
698 >                        (as = p.getActualTypeArguments()) != null &&
699 >                        as.length == 1 && as[0] == c) // type arg is c
700 >                        return c;
701 >                }
702 >            }
703 >        }
704 >        return null;
705      }
706  
707      /**
708 <     * Segments are specialized versions of hash tables.  This
709 <     * subclasses from ReentrantLock opportunistically, just to
710 <     * simplify some locking and avoid separate construction.
708 >     * A specialized form of red-black tree for use in bins
709 >     * whose size exceeds a threshold.
710 >     *
711 >     * TreeBins use a special form of comparison for search and
712 >     * related operations (which is the main reason we cannot use
713 >     * existing collections such as TreeMaps). TreeBins contain
714 >     * Comparable elements, but may contain others, as well as
715 >     * elements that are Comparable but not necessarily Comparable
716 >     * for the same T, so we cannot invoke compareTo among them. To
717 >     * handle this, the tree is ordered primarily by hash value, then
718 >     * by Comparable.compareTo order if applicable.  On lookup at a
719 >     * node, if elements are not comparable or compare as 0 then both
720 >     * left and right children may need to be searched in the case of
721 >     * tied hash values. (This corresponds to the full list search
722 >     * that would be necessary if all elements were non-Comparable and
723 >     * had tied hashes.)  The red-black balancing code is updated from
724 >     * pre-jdk-collections
725 >     * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
726 >     * based in turn on Cormen, Leiserson, and Rivest "Introduction to
727 >     * Algorithms" (CLR).
728 >     *
729 >     * TreeBins also maintain a locking discipline separate from
730 >     * that of regular bins. Because they are forwarded via special MOVED
731 >     * nodes at bin heads (which can never change once established),
732 >     * we cannot use those nodes as locks. Instead, TreeBin extends
733 >     * StampedLock to support a form of read-write lock. For update
734 >     * operations and table validation, the exclusive form of lock
735 >     * behaves in the same way as bin-head locks. However, lookups use
736 >     * shared read-lock mechanics to allow multiple readers in the
737 >     * absence of writers.  Additionally, these lookups do not ever
738 >     * block: While the lock is not available, they proceed along the
739 >     * slow traversal path (via next-pointers) until the lock becomes
740 >     * available or the list is exhausted, whichever comes
741 >     * first. These cases are not fast, but maximize aggregate
742 >     * expected throughput.
743       */
744 <    static final class Segment<K,V> extends ReentrantLock implements Serializable {
256 <        /*
257 <         * Segments maintain a table of entry lists that are always
258 <         * kept in a consistent state, so can be read (via volatile
259 <         * reads of segments and tables) without locking.  This
260 <         * requires replicating nodes when necessary during table
261 <         * resizing, so the old lists can be traversed by readers
262 <         * still using old version of table.
263 <         *
264 <         * This class defines only mutative methods requiring locking.
265 <         * Except as noted, the methods of this class perform the
266 <         * per-segment versions of ConcurrentHashMap methods.  (Other
267 <         * methods are integrated directly into ConcurrentHashMap
268 <         * methods.) These mutative methods use a form of controlled
269 <         * spinning on contention via methods scanAndLock and
270 <         * scanAndLockForPut. These intersperse tryLocks with
271 <         * traversals to locate nodes.  The main benefit is to absorb
272 <         * cache misses (which are very common for hash tables) while
273 <         * obtaining locks so that traversal is faster once
274 <         * acquired. We do not actually use the found nodes since they
275 <         * must be re-acquired under lock anyway to ensure sequential
276 <         * consistency of updates (and in any case may be undetectably
277 <         * stale), but they will normally be much faster to re-locate.
278 <         * Also, scanAndLockForPut speculatively creates a fresh node
279 <         * to use in put if no node is found.
280 <         */
281 <
744 >    static final class TreeBin<K,V> extends StampedLock {
745          private static final long serialVersionUID = 2249069246763182397L;
746 +        transient TreeNode<K,V> root;  // root of tree
747 +        transient TreeNode<K,V> first; // head of next-pointer list
748  
749 <        /**
750 <         * The maximum number of times to tryLock in a prescan before
751 <         * possibly blocking on acquire in preparation for a locked
752 <         * segment operation. On multiprocessors, using a bounded
753 <         * number of retries maintains cache acquired while locating
754 <         * nodes.
755 <         */
756 <        static final int MAX_SCAN_RETRIES =
757 <            Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1;
749 >        /** From CLR */
750 >        private void rotateLeft(TreeNode<K,V> p) {
751 >            if (p != null) {
752 >                TreeNode<K,V> r = p.right, pp, rl;
753 >                if ((rl = p.right = r.left) != null)
754 >                    rl.parent = p;
755 >                if ((pp = r.parent = p.parent) == null)
756 >                    root = r;
757 >                else if (pp.left == p)
758 >                    pp.left = r;
759 >                else
760 >                    pp.right = r;
761 >                r.left = p;
762 >                p.parent = r;
763 >            }
764 >        }
765  
766 <        /**
767 <         * The per-segment table. Elements are accessed via
768 <         * entryAt/setEntryAt providing volatile semantics.
769 <         */
770 <        transient volatile HashEntry<K,V>[] table;
766 >        /** From CLR */
767 >        private void rotateRight(TreeNode<K,V> p) {
768 >            if (p != null) {
769 >                TreeNode<K,V> l = p.left, pp, lr;
770 >                if ((lr = p.left = l.right) != null)
771 >                    lr.parent = p;
772 >                if ((pp = l.parent = p.parent) == null)
773 >                    root = l;
774 >                else if (pp.right == p)
775 >                    pp.right = l;
776 >                else
777 >                    pp.left = l;
778 >                l.right = p;
779 >                p.parent = l;
780 >            }
781 >        }
782  
783          /**
784 <         * The number of elements. Accessed only either within locks
785 <         * or among other volatile reads that maintain visibility.
784 >         * Returns the TreeNode (or null if not found) for the given key
785 >         * starting at given root.
786           */
787 <        transient int count;
787 >        final TreeNode<K,V> getTreeNode(int h, Object k, TreeNode<K,V> p,
788 >                                        Class<?> cc) {
789 >            while (p != null) {
790 >                int dir, ph; Object pk;
791 >                if ((ph = p.hash) != h)
792 >                    dir = (h < ph) ? -1 : 1;
793 >                else if ((pk = p.key) == k || k.equals(pk))
794 >                    return p;
795 >                else if (cc == null || comparableClassFor(pk) != cc ||
796 >                         (dir = ((Comparable<Object>)k).compareTo(pk)) == 0) {
797 >                    TreeNode<K,V> r, pr; // check both sides
798 >                    if ((pr = p.right) != null && h >= pr.hash &&
799 >                        (r = getTreeNode(h, k, pr, cc)) != null)
800 >                        return r;
801 >                    else // continue left
802 >                        dir = -1;
803 >                }
804 >                p = (dir > 0) ? p.right : p.left;
805 >            }
806 >            return null;
807 >        }
808  
809          /**
810 <         * The total number of mutative operations in this segment.
811 <         * Even though this may overflow 32 bits, it provides
812 <         * sufficient accuracy for stability checks in CHM isEmpty()
310 <         * and size() methods.  Accessed only either within locks or
311 <         * among other volatile reads that maintain visibility.
810 >         * Wrapper for getTreeNode used by CHM.get. Tries to obtain
811 >         * read-lock to call getTreeNode, but during failure to get
812 >         * lock, searches along next links.
813           */
814 <        transient int modCount;
814 >        final V getValue(int h, Object k) {
815 >            Class<?> cc = comparableClassFor(k);
816 >            Node<K,V> r = null;
817 >            for (Node<K,V> e = first; e != null; e = e.next) {
818 >                long s;
819 >                if ((s = tryReadLock()) != 0L) {
820 >                    try {
821 >                        r = getTreeNode(h, k, root, cc);
822 >                    } finally {
823 >                        unlockRead(s);
824 >                    }
825 >                    break;
826 >                }
827 >                else if (e.hash == h && k.equals(e.key)) {
828 >                    r = e;
829 >                    break;
830 >                }
831 >            }
832 >            return r == null ? null : r.val;
833 >        }
834  
835          /**
836 <         * The table is rehashed when its size exceeds this threshold.
837 <         * (The value of this field is always <tt>(int)(capacity *
318 <         * loadFactor)</tt>.)
836 >         * Finds or adds a node.
837 >         * @return null if added
838           */
839 <        transient int threshold;
839 >        final TreeNode<K,V> putTreeNode(int h, Object k, V v) {
840 >            Class<?> cc = comparableClassFor(k);
841 >            TreeNode<K,V> pp = root, p = null;
842 >            int dir = 0;
843 >            while (pp != null) { // find existing node or leaf to insert at
844 >                int ph; Object pk;
845 >                p = pp;
846 >                if ((ph = p.hash) != h)
847 >                    dir = (h < ph) ? -1 : 1;
848 >                else if ((pk = p.key) == k || k.equals(pk))
849 >                    return p;
850 >                else if (cc == null || comparableClassFor(pk) != cc ||
851 >                         (dir = ((Comparable<Object>)k).compareTo(pk)) == 0) {
852 >                    TreeNode<K,V> r, pr;
853 >                    if ((pr = p.right) != null && h >= pr.hash &&
854 >                        (r = getTreeNode(h, k, pr, cc)) != null)
855 >                        return r;
856 >                    else // continue left
857 >                        dir = -1;
858 >                }
859 >                pp = (dir > 0) ? p.right : p.left;
860 >            }
861 >
862 >            TreeNode<K,V> f = first;
863 >            TreeNode<K,V> x = first = new TreeNode<K,V>(h, k, v, f, p);
864 >            if (p == null)
865 >                root = x;
866 >            else { // attach and rebalance; adapted from CLR
867 >                TreeNode<K,V> xp, xpp;
868 >                if (f != null)
869 >                    f.prev = x;
870 >                if (dir <= 0)
871 >                    p.left = x;
872 >                else
873 >                    p.right = x;
874 >                x.red = true;
875 >                while (x != null && (xp = x.parent) != null && xp.red &&
876 >                       (xpp = xp.parent) != null) {
877 >                    TreeNode<K,V> xppl = xpp.left;
878 >                    if (xp == xppl) {
879 >                        TreeNode<K,V> y = xpp.right;
880 >                        if (y != null && y.red) {
881 >                            y.red = false;
882 >                            xp.red = false;
883 >                            xpp.red = true;
884 >                            x = xpp;
885 >                        }
886 >                        else {
887 >                            if (x == xp.right) {
888 >                                rotateLeft(x = xp);
889 >                                xpp = (xp = x.parent) == null ? null : xp.parent;
890 >                            }
891 >                            if (xp != null) {
892 >                                xp.red = false;
893 >                                if (xpp != null) {
894 >                                    xpp.red = true;
895 >                                    rotateRight(xpp);
896 >                                }
897 >                            }
898 >                        }
899 >                    }
900 >                    else {
901 >                        TreeNode<K,V> y = xppl;
902 >                        if (y != null && y.red) {
903 >                            y.red = false;
904 >                            xp.red = false;
905 >                            xpp.red = true;
906 >                            x = xpp;
907 >                        }
908 >                        else {
909 >                            if (x == xp.left) {
910 >                                rotateRight(x = xp);
911 >                                xpp = (xp = x.parent) == null ? null : xp.parent;
912 >                            }
913 >                            if (xp != null) {
914 >                                xp.red = false;
915 >                                if (xpp != null) {
916 >                                    xpp.red = true;
917 >                                    rotateLeft(xpp);
918 >                                }
919 >                            }
920 >                        }
921 >                    }
922 >                }
923 >                TreeNode<K,V> r = root;
924 >                if (r != null && r.red)
925 >                    r.red = false;
926 >            }
927 >            return null;
928 >        }
929  
930          /**
931 <         * The load factor for the hash table.  Even though this value
932 <         * is same for all segments, it is replicated to avoid needing
933 <         * links to outer object.
934 <         * @serial
931 >         * Removes the given node, which must be present before this
932 >         * call.  This is messier than typical red-black deletion code
933 >         * because we cannot swap the contents of an interior node
934 >         * with a leaf successor that is pinned by "next" pointers
935 >         * that are accessible independently of the lock. So instead we
936 >         * swap the tree linkages.
937           */
938 <        final float loadFactor;
938 >        final void deleteTreeNode(TreeNode<K,V> p) {
939 >            TreeNode<K,V> next = (TreeNode<K,V>)p.next;
940 >            TreeNode<K,V> pred = p.prev;  // unlink traversal pointers
941 >            if (pred == null)
942 >                first = next;
943 >            else
944 >                pred.next = next;
945 >            if (next != null)
946 >                next.prev = pred;
947 >            TreeNode<K,V> replacement;
948 >            TreeNode<K,V> pl = p.left;
949 >            TreeNode<K,V> pr = p.right;
950 >            if (pl != null && pr != null) {
951 >                TreeNode<K,V> s = pr, sl;
952 >                while ((sl = s.left) != null) // find successor
953 >                    s = sl;
954 >                boolean c = s.red; s.red = p.red; p.red = c; // swap colors
955 >                TreeNode<K,V> sr = s.right;
956 >                TreeNode<K,V> pp = p.parent;
957 >                if (s == pr) { // p was s's direct parent
958 >                    p.parent = s;
959 >                    s.right = p;
960 >                }
961 >                else {
962 >                    TreeNode<K,V> sp = s.parent;
963 >                    if ((p.parent = sp) != null) {
964 >                        if (s == sp.left)
965 >                            sp.left = p;
966 >                        else
967 >                            sp.right = p;
968 >                    }
969 >                    if ((s.right = pr) != null)
970 >                        pr.parent = s;
971 >                }
972 >                p.left = null;
973 >                if ((p.right = sr) != null)
974 >                    sr.parent = p;
975 >                if ((s.left = pl) != null)
976 >                    pl.parent = s;
977 >                if ((s.parent = pp) == null)
978 >                    root = s;
979 >                else if (p == pp.left)
980 >                    pp.left = s;
981 >                else
982 >                    pp.right = s;
983 >                replacement = sr;
984 >            }
985 >            else
986 >                replacement = (pl != null) ? pl : pr;
987 >            TreeNode<K,V> pp = p.parent;
988 >            if (replacement == null) {
989 >                if (pp == null) {
990 >                    root = null;
991 >                    return;
992 >                }
993 >                replacement = p;
994 >            }
995 >            else {
996 >                replacement.parent = pp;
997 >                if (pp == null)
998 >                    root = replacement;
999 >                else if (p == pp.left)
1000 >                    pp.left = replacement;
1001 >                else
1002 >                    pp.right = replacement;
1003 >                p.left = p.right = p.parent = null;
1004 >            }
1005 >            if (!p.red) { // rebalance, from CLR
1006 >                TreeNode<K,V> x = replacement;
1007 >                while (x != null) {
1008 >                    TreeNode<K,V> xp, xpl;
1009 >                    if (x.red || (xp = x.parent) == null) {
1010 >                        x.red = false;
1011 >                        break;
1012 >                    }
1013 >                    if (x == (xpl = xp.left)) {
1014 >                        TreeNode<K,V> sib = xp.right;
1015 >                        if (sib != null && sib.red) {
1016 >                            sib.red = false;
1017 >                            xp.red = true;
1018 >                            rotateLeft(xp);
1019 >                            sib = (xp = x.parent) == null ? null : xp.right;
1020 >                        }
1021 >                        if (sib == null)
1022 >                            x = xp;
1023 >                        else {
1024 >                            TreeNode<K,V> sl = sib.left, sr = sib.right;
1025 >                            if ((sr == null || !sr.red) &&
1026 >                                (sl == null || !sl.red)) {
1027 >                                sib.red = true;
1028 >                                x = xp;
1029 >                            }
1030 >                            else {
1031 >                                if (sr == null || !sr.red) {
1032 >                                    if (sl != null)
1033 >                                        sl.red = false;
1034 >                                    sib.red = true;
1035 >                                    rotateRight(sib);
1036 >                                    sib = (xp = x.parent) == null ?
1037 >                                        null : xp.right;
1038 >                                }
1039 >                                if (sib != null) {
1040 >                                    sib.red = (xp == null) ? false : xp.red;
1041 >                                    if ((sr = sib.right) != null)
1042 >                                        sr.red = false;
1043 >                                }
1044 >                                if (xp != null) {
1045 >                                    xp.red = false;
1046 >                                    rotateLeft(xp);
1047 >                                }
1048 >                                x = root;
1049 >                            }
1050 >                        }
1051 >                    }
1052 >                    else { // symmetric
1053 >                        TreeNode<K,V> sib = xpl;
1054 >                        if (sib != null && sib.red) {
1055 >                            sib.red = false;
1056 >                            xp.red = true;
1057 >                            rotateRight(xp);
1058 >                            sib = (xp = x.parent) == null ? null : xp.left;
1059 >                        }
1060 >                        if (sib == null)
1061 >                            x = xp;
1062 >                        else {
1063 >                            TreeNode<K,V> sl = sib.left, sr = sib.right;
1064 >                            if ((sl == null || !sl.red) &&
1065 >                                (sr == null || !sr.red)) {
1066 >                                sib.red = true;
1067 >                                x = xp;
1068 >                            }
1069 >                            else {
1070 >                                if (sl == null || !sl.red) {
1071 >                                    if (sr != null)
1072 >                                        sr.red = false;
1073 >                                    sib.red = true;
1074 >                                    rotateLeft(sib);
1075 >                                    sib = (xp = x.parent) == null ?
1076 >                                        null : xp.left;
1077 >                                }
1078 >                                if (sib != null) {
1079 >                                    sib.red = (xp == null) ? false : xp.red;
1080 >                                    if ((sl = sib.left) != null)
1081 >                                        sl.red = false;
1082 >                                }
1083 >                                if (xp != null) {
1084 >                                    xp.red = false;
1085 >                                    rotateRight(xp);
1086 >                                }
1087 >                                x = root;
1088 >                            }
1089 >                        }
1090 >                    }
1091 >                }
1092 >            }
1093 >            if (p == replacement && (pp = p.parent) != null) {
1094 >                if (p == pp.left) // detach pointers
1095 >                    pp.left = null;
1096 >                else if (p == pp.right)
1097 >                    pp.right = null;
1098 >                p.parent = null;
1099 >            }
1100 >        }
1101 >    }
1102  
1103 <        Segment(float lf, int threshold, HashEntry<K,V>[] tab) {
1104 <            this.loadFactor = lf;
1105 <            this.threshold = threshold;
1106 <            this.table = tab;
1103 >    /* ---------------- Collision reduction methods -------------- */
1104 >
1105 >    /**
1106 >     * Spreads higher bits to lower, and also forces top bit to 0.
1107 >     * Because the table uses power-of-two masking, sets of hashes
1108 >     * that vary only in bits above the current mask will always
1109 >     * collide. (Among known examples are sets of Float keys holding
1110 >     * consecutive whole numbers in small tables.)  To counter this,
1111 >     * we apply a transform that spreads the impact of higher bits
1112 >     * downward. There is a tradeoff between speed, utility, and
1113 >     * quality of bit-spreading. Because many common sets of hashes
1114 >     * are already reasonably distributed across bits (so don't benefit
1115 >     * from spreading), and because we use trees to handle large sets
1116 >     * of collisions in bins, we don't need excessively high quality.
1117 >     */
1118 >    private static final int spread(int h) {
1119 >        h ^= (h >>> 18) ^ (h >>> 12);
1120 >        return (h ^ (h >>> 10)) & HASH_BITS;
1121 >    }
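
A minimal standalone sketch makes the Float example above concrete (HASH_BITS
is assumed to be 0x7fffffff, as defined elsewhere in this class): consecutive
Float keys vary only in high hash bits, so in a small power-of-two table every
raw hash masks to bucket 0, while the spread hashes do not.

    public class SpreadDemo {
        static final int HASH_BITS = 0x7fffffff; // assumed value
        static int spread(int h) {
            h ^= (h >>> 18) ^ (h >>> 12);
            return (h ^ (h >>> 10)) & HASH_BITS;
        }
        public static void main(String[] args) {
            int mask = 15; // a 16-bucket table
            for (float f = 1.0f; f <= 4.0f; f += 1.0f) {
                int h = Float.valueOf(f).hashCode(); // differs only in high bits
                System.out.println(f + ": raw bucket " + (h & mask)
                                   + ", spread bucket " + (spread(h) & mask));
            }
        }
    }
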
1122 >
1123 >    /**
1124 >     * Replaces a list bin with a tree bin if key is comparable.  Call
1125 >     * only when locked.
1126 >     */
1127 >    private final void replaceWithTreeBin(Node<K,V>[] tab, int index, Object key) {
1128 >        if (tab != null && comparableClassFor(key) != null) {
1129 >            TreeBin<K,V> t = new TreeBin<K,V>();
1130 >            for (Node<K,V> e = tabAt(tab, index); e != null; e = e.next)
1131 >                t.putTreeNode(e.hash, e.key, e.val);
1132 >            setTabAt(tab, index, new Node<K,V>(MOVED, t, null, null));
1133          }
1134 +    }
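
Note the sentinel used here: the TreeBin is stored as the key of a Node whose
hash is MOVED, a negative value. Readers such as internalGet below treat any
head node with a negative hash as special, dispatching on whether its key is a
TreeBin (search the tree) or a forwarding Node[] (retry the lookup in the new
table during a resize).
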
1135  
1136 <        final V put(K key, int hash, V value, boolean onlyIfAbsent) {
1137 <            HashEntry<K,V> node = tryLock() ? null :
1138 <                scanAndLockForPut(key, hash, value);
1139 <            V oldValue;
1140 <            try {
1141 <                HashEntry<K,V>[] tab = table;
1142 <                int index = (tab.length - 1) & hash;
1143 <                HashEntry<K,V> first = entryAt(tab, index);
1144 <                for (HashEntry<K,V> e = first;;) {
1145 <                    if (e != null) {
1146 <                        K k;
1147 <                        if ((k = e.key) == key ||
1148 <                            (e.hash == hash && key.equals(k))) {
1149 <                            oldValue = e.value;
1150 <                            if (!onlyIfAbsent) {
1151 <                                e.value = value;
1152 <                                ++modCount;
1136 >    /* ---------------- Internal access and update methods -------------- */
1137 >
1138 >    /** Implementation for get and containsKey */
1139 >    private final V internalGet(Object k) {
1140 >        int h = spread(k.hashCode());
1141 >        V v = null;
1142 >        Node<K,V>[] tab; Node<K,V> e;
1143 >        if ((tab = table) != null &&
1144 >            (e = tabAt(tab, (tab.length - 1) & h)) != null) {
1145 >            for (;;) {
1146 >                int eh; Object ek;
1147 >                if ((eh = e.hash) < 0) {
1148 >                    if ((ek = e.key) instanceof TreeBin) { // search TreeBin
1149 >                        v = ((TreeBin<K,V>)ek).getValue(h, k);
1150 >                        break;
1151 >                    }
1152 >                    else if (!(ek instanceof Node[]) ||    // try new table
1153 >                             (e = tabAt(tab = (Node<K,V>[])ek,
1154 >                                        (tab.length - 1) & h)) == null)
1155 >                        break;
1156 >                }
1157 >                else if (eh == h && ((ek = e.key) == k || k.equals(ek))) {
1158 >                    v = e.val;
1159 >                    break;
1160 >                }
1161 >                else if ((e = e.next) == null)
1162 >                    break;
1163 >            }
1164 >        }
1165 >        return v;
1166 >    }
1167 >
1168 >    /**
1169 >     * Implementation for the four public remove/replace methods:
1170 >     * Replaces the node value with v, conditional upon a match with cv
1171 >     * if cv is non-null.  If the resulting value is null, the node is deleted.
1172 >     */
1173 >    private final V internalReplace(Object k, V v, Object cv) {
1174 >        int h = spread(k.hashCode());
1175 >        V oldVal = null;
1176 >        for (Node<K,V>[] tab = table;;) {
1177 >            Node<K,V> f; int i, fh; Object fk;
1178 >            if (tab == null ||
1179 >                (f = tabAt(tab, i = (tab.length - 1) & h)) == null)
1180 >                break;
1181 >            else if ((fh = f.hash) < 0) {
1182 >                if ((fk = f.key) instanceof TreeBin) {
1183 >                    TreeBin<K,V> t = (TreeBin<K,V>)fk;
1184 >                    long stamp = t.writeLock();
1185 >                    boolean validated = false;
1186 >                    boolean deleted = false;
1187 >                    try {
1188 >                        if (tabAt(tab, i) == f) {
1189 >                            validated = true;
1190 >                            Class<?> cc = comparableClassFor(k);
1191 >                            TreeNode<K,V> p = t.getTreeNode(h, k, t.root, cc);
1192 >                            if (p != null) {
1193 >                                V pv = p.val;
1194 >                                if (cv == null || cv == pv || cv.equals(pv)) {
1195 >                                    oldVal = pv;
1196 >                                    if (v != null)
1197 >                                        p.val = v;
1198 >                                    else {
1199 >                                        deleted = true;
1200 >                                        t.deleteTreeNode(p);
1201 >                                    }
1202 >                                }
1203                              }
354                            break;
1204                          }
1205 <                        e = e.next;
1205 >                    } finally {
1206 >                        t.unlockWrite(stamp);
1207                      }
1208 <                    else {
1209 <                        if (node != null)
1210 <                            node.setNext(first);
361 <                        else
362 <                            node = new HashEntry<K,V>(hash, key, value, first);
363 <                        int c = count + 1;
364 <                        if (c > threshold && tab.length < MAXIMUM_CAPACITY)
365 <                            rehash(node);
366 <                        else
367 <                            setEntryAt(tab, index, node);
368 <                        ++modCount;
369 <                        count = c;
370 <                        oldValue = null;
1208 >                    if (validated) {
1209 >                        if (deleted)
1210 >                            addCount(-1L, -1);
1211                          break;
1212                      }
1213                  }
1214 <            } finally {
1215 <                unlock();
1214 >                else
1215 >                    tab = (Node<K,V>[])fk;
1216 >            }
1217 >            else {
1218 >                boolean validated = false;
1219 >                boolean deleted = false;
1220 >                synchronized (f) {
1221 >                    if (tabAt(tab, i) == f) {
1222 >                        validated = true;
1223 >                        for (Node<K,V> e = f, pred = null;;) {
1224 >                            Object ek;
1225 >                            if (e.hash == h &&
1226 >                                ((ek = e.key) == k || k.equals(ek))) {
1227 >                                V ev = e.val;
1228 >                                if (cv == null || cv == ev || cv.equals(ev)) {
1229 >                                    oldVal = ev;
1230 >                                    if (v != null)
1231 >                                        e.val = v;
1232 >                                    else {
1233 >                                        deleted = true;
1234 >                                        Node<K,V> en = e.next;
1235 >                                        if (pred != null)
1236 >                                            pred.next = en;
1237 >                                        else
1238 >                                            setTabAt(tab, i, en);
1239 >                                    }
1240 >                                }
1241 >                                break;
1242 >                            }
1243 >                            pred = e;
1244 >                            if ((e = e.next) == null)
1245 >                                break;
1246 >                        }
1247 >                    }
1248 >                }
1249 >                if (validated) {
1250 >                    if (deleted)
1251 >                        addCount(-1L, -1);
1252 >                    break;
1253 >                }
1254              }
377            return oldValue;
1255          }
1256 +        return oldVal;
1257 +    }
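
A usage sketch of the (v, cv) semantics above: cv non-null means "only if the
current value equals cv", and v == null means "delete". The four public
operations funnel into these two parameters (the exact delegation lives
elsewhere in this file).

    import java.util.concurrent.ConcurrentHashMap;

    public class ReplaceDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Integer> m = new ConcurrentHashMap<>();
            m.put("a", 1);
            m.replace("a", 2);      // unconditional replace: v = 2, cv = null
            m.replace("a", 2, 3);   // conditional replace:   v = 3, cv = 2
            m.remove("a", 99);      // conditional remove fails: cv = 99 != 3
            m.remove("a");          // unconditional remove:  v = null, cv = null
            System.out.println(m.isEmpty()); // true
        }
    }
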
1258  
1259 <        /**
1260 <         * Doubles size of table and repacks entries, also adding the
1261 <         * given node to new table
1262 <         */
1263 <        @SuppressWarnings("unchecked")
1264 <        private void rehash(HashEntry<K,V> node) {
1265 <            /*
1266 <             * Reclassify nodes in each list to new table.  Because we
1267 <             * are using power-of-two expansion, the elements from
1268 <             * each bin must either stay at same index, or move with a
1269 <             * power of two offset. We eliminate unnecessary node
1270 <             * creation by catching cases where old nodes can be
1271 <             * reused because their next fields won't change.
1272 <             * Statistically, at the default threshold, only about
1273 <             * one-sixth of them need cloning when a table
1274 <             * doubles. The nodes they replace will be garbage
1275 <             * collectable as soon as they are no longer referenced by
1276 <             * any reader thread that may be in the midst of
1277 <             * concurrently traversing table. Entry accesses use plain
1278 <             * array indexing because they are followed by volatile
1279 <             * table write.
1280 <             */
1281 <            HashEntry<K,V>[] oldTable = table;
1282 <            int oldCapacity = oldTable.length;
1283 <            int newCapacity = oldCapacity << 1;
1284 <            threshold = (int)(newCapacity * loadFactor);
1285 <            HashEntry<K,V>[] newTable =
1286 <                (HashEntry<K,V>[]) new HashEntry[newCapacity];
1287 <            int sizeMask = newCapacity - 1;
1288 <            for (int i = 0; i < oldCapacity ; i++) {
1289 <                HashEntry<K,V> e = oldTable[i];
1290 <                if (e != null) {
1291 <                    HashEntry<K,V> next = e.next;
1292 <                    int idx = e.hash & sizeMask;
1293 <                    if (next == null)   //  Single node on list
1294 <                        newTable[idx] = e;
1295 <                    else { // Reuse consecutive sequence at same slot
1296 <                        HashEntry<K,V> lastRun = e;
1297 <                        int lastIdx = idx;
1298 <                        for (HashEntry<K,V> last = next;
1299 <                             last != null;
1300 <                             last = last.next) {
1301 <                            int k = last.hash & sizeMask;
1302 <                            if (k != lastIdx) {
1303 <                                lastIdx = k;
425 <                                lastRun = last;
426 <                            }
427 <                        }
428 <                        newTable[lastIdx] = lastRun;
429 <                        // Clone remaining nodes
430 <                        for (HashEntry<K,V> p = e; p != lastRun; p = p.next) {
431 <                            V v = p.value;
432 <                            int h = p.hash;
433 <                            int k = h & sizeMask;
434 <                            HashEntry<K,V> n = newTable[k];
435 <                            newTable[k] = new HashEntry<K,V>(h, p.key, v, n);
1259 >    /*
1260 >     * Internal versions of insertion methods
1261 >     * All have the same basic structure as the first (internalPut):
1262 >     *  1. If table uninitialized, create
1263 >     *  2. If bin empty, try to CAS new node
1264 >     *  3. If bin stale, use new table
1265 >     *  4. If bin converted to TreeBin, validate and relay to TreeBin methods
1266 >     *  5. Lock and validate; if valid, scan and add or update
1267 >     *
1268 >     * The putAll method differs mainly in attempting to pre-allocate
1269 >     * enough table space, and in performing count updates and checks
1270 >     * more lazily.
1271 >     *
1272 >     * Most of the function-accepting methods can't be factored nicely
1273 >     * because they require different functional forms, so they instead
1274 >     * sprawl out similar mechanics.
1275 >     */
1276 >
1277 >    /** Implementation for put and putIfAbsent */
1278 >    private final V internalPut(K k, V v, boolean onlyIfAbsent) {
1279 >        if (k == null || v == null) throw new NullPointerException();
1280 >        int h = spread(k.hashCode());
1281 >        int len = 0;
1282 >        for (Node<K,V>[] tab = table;;) {
1283 >            int i, fh; Node<K,V> f; Object fk;
1284 >            if (tab == null)
1285 >                tab = initTable();
1286 >            else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1287 >                if (casTabAt(tab, i, null, new Node<K,V>(h, k, v, null)))
1288 >                    break;                   // no lock when adding to empty bin
1289 >            }
1290 >            else if ((fh = f.hash) < 0) {
1291 >                if ((fk = f.key) instanceof TreeBin) {
1292 >                    TreeBin<K,V> t = (TreeBin<K,V>)fk;
1293 >                    long stamp = t.writeLock();
1294 >                    V oldVal = null;
1295 >                    try {
1296 >                        if (tabAt(tab, i) == f) {
1297 >                            len = 2;
1298 >                            TreeNode<K,V> p = t.putTreeNode(h, k, v);
1299 >                            if (p != null) {
1300 >                                oldVal = p.val;
1301 >                                if (!onlyIfAbsent)
1302 >                                    p.val = v;
1303 >                            }
1304                          }
1305 +                    } finally {
1306 +                        t.unlockWrite(stamp);
1307 +                    }
1308 +                    if (len != 0) {
1309 +                        if (oldVal != null)
1310 +                            return oldVal;
1311 +                        break;
1312                      }
1313                  }
1314 +                else
1315 +                    tab = (Node<K,V>[])fk;
1316 +            }
1317 +            else {
1318 +                V oldVal = null;
1319 +                synchronized (f) {
1320 +                    if (tabAt(tab, i) == f) {
1321 +                        len = 1;
1322 +                        for (Node<K,V> e = f;; ++len) {
1323 +                            Object ek;
1324 +                            if (e.hash == h &&
1325 +                                ((ek = e.key) == k || k.equals(ek))) {
1326 +                                oldVal = e.val;
1327 +                                if (!onlyIfAbsent)
1328 +                                    e.val = v;
1329 +                                break;
1330 +                            }
1331 +                            Node<K,V> last = e;
1332 +                            if ((e = e.next) == null) {
1333 +                                last.next = new Node<K,V>(h, k, v, null);
1334 +                                if (len > TREE_THRESHOLD)
1335 +                                    replaceWithTreeBin(tab, i, k);
1336 +                                break;
1337 +                            }
1338 +                        }
1339 +                    }
1340 +                }
1341 +                if (len != 0) {
1342 +                    if (oldVal != null)
1343 +                        return oldVal;
1344 +                    break;
1345 +                }
1346              }
440            int nodeIndex = node.hash & sizeMask; // add the new node
441            node.setNext(newTable[nodeIndex]);
442            newTable[nodeIndex] = node;
443            table = newTable;
1347          }
1348 +        addCount(1L, len);
1349 +        return null;
1350 +    }
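
The numbered recipe in the block comment before internalPut can be distilled
into a standalone sketch. The class below is hypothetical and greatly reduced
(no resizing, tree bins, or counters); it shows steps 2 and 5: CAS a new node
into an empty bin with no lock, otherwise lock the bin's head node and
re-validate it before scanning.

    import java.util.concurrent.atomic.AtomicReferenceArray;

    class BinPutSketch<K, V> {
        static final class Node<K, V> {
            final K key; volatile V val; volatile Node<K, V> next;
            Node(K key, V val, Node<K, V> next) {
                this.key = key; this.val = val; this.next = next;
            }
        }
        final AtomicReferenceArray<Node<K, V>> table =
            new AtomicReferenceArray<>(16);

        V put(K key, V val) {
            int i = (table.length() - 1) & key.hashCode();
            for (;;) {
                Node<K, V> f = table.get(i);
                if (f == null) {                  // step 2: lock-free CAS
                    if (table.compareAndSet(i, null, new Node<>(key, val, null)))
                        return null;
                }
                else {
                    synchronized (f) {            // step 5: lock head, validate
                        if (table.get(i) == f) {
                            for (Node<K, V> e = f;;) {
                                if (e.key.equals(key)) {
                                    V old = e.val; e.val = val; return old;
                                }
                                Node<K, V> last = e;
                                if ((e = e.next) == null) {
                                    last.next = new Node<>(key, val, null);
                                    return null;
                                }
                            }
                        }
                    }                             // head changed; retry
                }
            }
        }
    }
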
1351  
1352 <        /**
1353 <         * Scans for a node containing given key while trying to
1354 <         * acquire lock, creating and returning one if not found. Upon
1355 <         * return, guarantees that lock is held. Unlike in most
1356 <         * methods, calls to method equals are not screened: Since
1357 <         * traversal speed doesn't matter, we might as well help warm
1358 <         * up the associated code and accesses as well.
1359 <         *
1360 <         * @return a new node if key not found, else null
1361 <         */
1362 <        private HashEntry<K,V> scanAndLockForPut(K key, int hash, V value) {
1363 <            HashEntry<K,V> first = entryForHash(this, hash);
1364 <            HashEntry<K,V> e = first;
1365 <            HashEntry<K,V> node = null;
1366 <            int retries = -1; // negative while locating node
1367 <            while (!tryLock()) {
1368 <                HashEntry<K,V> f; // to recheck first below
1369 <                if (retries < 0) {
1370 <                    if (e == null) {
1371 <                        if (node == null) // speculatively create node
1372 <                            node = new HashEntry<K,V>(hash, key, value, null);
1373 <                        retries = 0;
1352 >    /** Implementation for computeIfAbsent */
1353 >    private final V internalComputeIfAbsent(K k, Function<? super K, ? extends V> mf) {
1354 >        if (k == null || mf == null)
1355 >            throw new NullPointerException();
1356 >        int h = spread(k.hashCode());
1357 >        V val = null;
1358 >        int len = 0;
1359 >        for (Node<K,V>[] tab = table;;) {
1360 >            Node<K,V> f; int i; Object fk;
1361 >            if (tab == null)
1362 >                tab = initTable();
1363 >            else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1364 >                Node<K,V> node = new Node<K,V>(h, k, null, null);
1365 >                synchronized (node) {
1366 >                    if (casTabAt(tab, i, null, node)) {
1367 >                        len = 1;
1368 >                        try {
1369 >                            if ((val = mf.apply(k)) != null)
1370 >                                node.val = val;
1371 >                        } finally {
1372 >                            if (val == null)
1373 >                                setTabAt(tab, i, null);
1374 >                        }
1375                      }
469                    else if (key.equals(e.key))
470                        retries = 0;
471                    else
472                        e = e.next;
1376                  }
1377 <                else if (++retries > MAX_SCAN_RETRIES) {
475 <                    lock();
1377 >                if (len != 0)
1378                      break;
1379 +            }
1380 +            else if (f.hash < 0) {
1381 +                if ((fk = f.key) instanceof TreeBin) {
1382 +                    TreeBin<K,V> t = (TreeBin<K,V>)fk;
1383 +                    long stamp = t.writeLock();
1384 +                    boolean added = false;
1385 +                    try {
1386 +                        if (tabAt(tab, i) == f) {
1387 +                            len = 2;
1388 +                            Class<?> cc = comparableClassFor(k);
1389 +                            TreeNode<K,V> p = t.getTreeNode(h, k, t.root, cc);
1390 +                            if (p != null)
1391 +                                val = p.val;
1392 +                            else if ((val = mf.apply(k)) != null) {
1393 +                                added = true;
1394 +                                t.putTreeNode(h, k, val);
1395 +                            }
1396 +                        }
1397 +                    } finally {
1398 +                        t.unlockWrite(stamp);
1399 +                    }
1400 +                    if (len != 0) {
1401 +                        if (!added)
1402 +                            return val;
1403 +                        break;
1404 +                    }
1405 +                }
1406 +                else
1407 +                    tab = (Node<K,V>[])fk;
1408 +            }
1409 +            else {
1410 +                boolean added = false;
1411 +                synchronized (f) {
1412 +                    if (tabAt(tab, i) == f) {
1413 +                        len = 1;
1414 +                        for (Node<K,V> e = f;; ++len) {
1415 +                            Object ek; V ev;
1416 +                            if (e.hash == h &&
1417 +                                ((ek = e.key) == k || k.equals(ek))) {
1418 +                                val = e.val;
1419 +                                break;
1420 +                            }
1421 +                            Node<K,V> last = e;
1422 +                            if ((e = e.next) == null) {
1423 +                                if ((val = mf.apply(k)) != null) {
1424 +                                    added = true;
1425 +                                    last.next = new Node<K,V>(h, k, val, null);
1426 +                                    if (len > TREE_THRESHOLD)
1427 +                                        replaceWithTreeBin(tab, i, k);
1428 +                                }
1429 +                                break;
1430 +                            }
1431 +                        }
1432 +                    }
1433                  }
1434 <                else if ((retries & 1) == 0 &&
1435 <                         (f = entryForHash(this, hash)) != first) {
1436 <                    e = first = f; // re-traverse if entry changed
1437 <                    retries = -1;
1434 >                if (len != 0) {
1435 >                    if (!added)
1436 >                        return val;
1437 >                    break;
1438                  }
1439              }
484            return node;
1440          }
1441 +        if (val != null)
1442 +            addCount(1L, len);
1443 +        return val;
1444 +    }
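
A usage sketch of the public method this backs: the mapping function runs at
most once per absent key (under the bin lock, or the lock on the reserved node
CASed in above), and a null result installs no mapping.

    import java.util.concurrent.ConcurrentHashMap;

    public class ComputeIfAbsentDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Integer> cache = new ConcurrentHashMap<>();
            Integer a = cache.computeIfAbsent("x", k -> k.length()); // computes 1
            Integer b = cache.computeIfAbsent("x", k -> 42);         // cached: 1
            Integer c = cache.computeIfAbsent("y", k -> null);       // no entry
            System.out.println(a + " " + b + " " + cache.containsKey("y"));
            // prints: 1 1 false
        }
    }
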
1445  
1446 <        /**
1447 <         * Scans for a node containing the given key while trying to
1448 <         * acquire lock for a remove or replace operation. Upon
1449 <         * return, guarantees that lock is held.  Note that we must
1450 <         * lock even if the key is not found, to ensure sequential
1451 <         * consistency of updates.
1452 <         */
1453 <        private void scanAndLock(Object key, int hash) {
1454 <            // similar to but simpler than scanAndLockForPut
1455 <            HashEntry<K,V> first = entryForHash(this, hash);
1456 <            HashEntry<K,V> e = first;
1457 <            int retries = -1;
1458 <            while (!tryLock()) {
1459 <                HashEntry<K,V> f;
1460 <                if (retries < 0) {
1461 <                    if (e == null || key.equals(e.key))
1462 <                        retries = 0;
1463 <                    else
1464 <                        e = e.next;
1446 >    /** Implementation for compute */
1447 >    private final V internalCompute(K k, boolean onlyIfPresent,
1448 >                                    BiFunction<? super K, ? super V, ? extends V> mf) {
1449 >        if (k == null || mf == null)
1450 >            throw new NullPointerException();
1451 >        int h = spread(k.hashCode());
1452 >        V val = null;
1453 >        int delta = 0;
1454 >        int len = 0;
1455 >        for (Node<K,V>[] tab = table;;) {
1456 >            Node<K,V> f; int i, fh; Object fk;
1457 >            if (tab == null)
1458 >                tab = initTable();
1459 >            else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1460 >                if (onlyIfPresent)
1461 >                    break;
1462 >                Node<K,V> node = new Node<K,V>(h, k, null, null);
1463 >                synchronized (node) {
1464 >                    if (casTabAt(tab, i, null, node)) {
1465 >                        try {
1466 >                            len = 1;
1467 >                            if ((val = mf.apply(k, null)) != null) {
1468 >                                node.val = val;
1469 >                                delta = 1;
1470 >                            }
1471 >                        } finally {
1472 >                            if (delta == 0)
1473 >                                setTabAt(tab, i, null);
1474 >                        }
1475 >                    }
1476                  }
1477 <                else if (++retries > MAX_SCAN_RETRIES) {
508 <                    lock();
1477 >                if (len != 0)
1478                      break;
1479 +            }
1480 +            else if ((fh = f.hash) < 0) {
1481 +                if ((fk = f.key) instanceof TreeBin) {
1482 +                    TreeBin<K,V> t = (TreeBin<K,V>)fk;
1483 +                    long stamp = t.writeLock();
1484 +                    try {
1485 +                        if (tabAt(tab, i) == f) {
1486 +                            len = 2;
1487 +                            Class<?> cc = comparableClassFor(k);
1488 +                            TreeNode<K,V> p = t.getTreeNode(h, k, t.root, cc);
1489 +                            if (p != null || !onlyIfPresent) {
1490 +                                V pv = (p == null) ? null : p.val;
1491 +                                if ((val = mf.apply(k, pv)) != null) {
1492 +                                    if (p != null)
1493 +                                        p.val = val;
1494 +                                    else {
1495 +                                        delta = 1;
1496 +                                        t.putTreeNode(h, k, val);
1497 +                                    }
1498 +                                }
1499 +                                else if (p != null) {
1500 +                                    delta = -1;
1501 +                                    t.deleteTreeNode(p);
1502 +                                }
1503 +                            }
1504 +                        }
1505 +                    } finally {
1506 +                        t.unlockWrite(stamp);
1507 +                    }
1508 +                    if (len != 0)
1509 +                        break;
1510                  }
1511 <                else if ((retries & 1) == 0 &&
1512 <                         (f = entryForHash(this, hash)) != first) {
1513 <                    e = first = f;
1514 <                    retries = -1;
1511 >                else
1512 >                    tab = (Node<K,V>[])fk;
1513 >            }
1514 >            else {
1515 >                synchronized (f) {
1516 >                    if (tabAt(tab, i) == f) {
1517 >                        len = 1;
1518 >                        for (Node<K,V> e = f, pred = null;; ++len) {
1519 >                            Object ek;
1520 >                            if (e.hash == h &&
1521 >                                ((ek = e.key) == k || k.equals(ek))) {
1522 >                                val = mf.apply(k, e.val);
1523 >                                if (val != null)
1524 >                                    e.val = val;
1525 >                                else {
1526 >                                    delta = -1;
1527 >                                    Node<K,V> en = e.next;
1528 >                                    if (pred != null)
1529 >                                        pred.next = en;
1530 >                                    else
1531 >                                        setTabAt(tab, i, en);
1532 >                                }
1533 >                                break;
1534 >                            }
1535 >                            pred = e;
1536 >                            if ((e = e.next) == null) {
1537 >                                if (!onlyIfPresent &&
1538 >                                    (val = mf.apply(k, null)) != null) {
1539 >                                    pred.next = new Node<K,V>(h, k, val, null);
1540 >                                    delta = 1;
1541 >                                    if (len > TREE_THRESHOLD)
1542 >                                        replaceWithTreeBin(tab, i, k);
1543 >                                }
1544 >                                break;
1545 >                            }
1546 >                        }
1547 >                    }
1548                  }
1549 +                if (len != 0)
1550 +                    break;
1551              }
1552          }
1553 +        if (delta != 0)
1554 +            addCount((long)delta, len);
1555 +        return val;
1556 +    }
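
A usage sketch: delta above records the net size change (+1 for an insert,
-1 for a delete), and a null result from the function removes an existing
mapping.

    import java.util.concurrent.ConcurrentHashMap;

    public class ComputeDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Integer> m = new ConcurrentHashMap<>();
            m.compute("hits", (k, v) -> (v == null) ? 1 : v + 1); // insert, delta +1
            m.compute("hits", (k, v) -> (v == null) ? 1 : v + 1); // update, delta 0
            m.compute("hits", (k, v) -> null);                    // remove, delta -1
            System.out.println(m.containsKey("hits")); // false
        }
    }
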
1557  
1558 <        /**
1559 <         * Remove; match on key only if value null, else match both.
1560 <         */
1561 <        final V remove(Object key, int hash, Object value) {
1562 <            if (!tryLock())
1563 <                scanAndLock(key, hash);
1564 <            V oldValue = null;
1565 <            try {
1566 <                HashEntry<K,V>[] tab = table;
1567 <                int index = (tab.length - 1) & hash;
1568 <                HashEntry<K,V> e = entryAt(tab, index);
1569 <                HashEntry<K,V> pred = null;
1570 <                while (e != null) {
1571 <                    K k;
1572 <                    HashEntry<K,V> next = e.next;
1573 <                    if ((k = e.key) == key ||
1574 <                        (e.hash == hash && key.equals(k))) {
1575 <                        V v = e.value;
1576 <                        if (value == null || value == v || value.equals(v)) {
1577 <                            if (pred == null)
1578 <                                setEntryAt(tab, index, next);
1579 <                            else
1580 <                                pred.setNext(next);
1581 <                            ++modCount;
1582 <                            --count;
1583 <                            oldValue = v;
1558 >    /** Implementation for merge */
1559 >    private final V internalMerge(K k, V v,
1560 >                                  BiFunction<? super V, ? super V, ? extends V> mf) {
1561 >        if (k == null || v == null || mf == null)
1562 >            throw new NullPointerException();
1563 >        int h = spread(k.hashCode());
1564 >        V val = null;
1565 >        int delta = 0;
1566 >        int len = 0;
1567 >        for (Node<K,V>[] tab = table;;) {
1568 >            int i; Node<K,V> f; Object fk;
1569 >            if (tab == null)
1570 >                tab = initTable();
1571 >            else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1572 >                if (casTabAt(tab, i, null, new Node<K,V>(h, k, v, null))) {
1573 >                    delta = 1;
1574 >                    val = v;
1575 >                    break;
1576 >                }
1577 >            }
1578 >            else if (f.hash < 0) {
1579 >                if ((fk = f.key) instanceof TreeBin) {
1580 >                    TreeBin<K,V> t = (TreeBin<K,V>)fk;
1581 >                    long stamp = t.writeLock();
1582 >                    try {
1583 >                        if (tabAt(tab, i) == f) {
1584 >                            len = 2;
1585 >                            Class<?> cc = comparableClassFor(k);
1586 >                            TreeNode<K,V> p = t.getTreeNode(h, k, t.root, cc);
1587 >                            val = (p == null) ? v : mf.apply(p.val, v);
1588 >                            if (val != null) {
1589 >                                if (p != null)
1590 >                                    p.val = val;
1591 >                                else {
1592 >                                    delta = 1;
1593 >                                    t.putTreeNode(h, k, val);
1594 >                                }
1595 >                            }
1596 >                            else if (p != null) {
1597 >                                delta = -1;
1598 >                                t.deleteTreeNode(p);
1599 >                            }
1600                          }
1601 +                    } finally {
1602 +                        t.unlockWrite(stamp);
1603 +                    }
1604 +                    if (len != 0)
1605                          break;
1606 +                }
1607 +                else
1608 +                    tab = (Node<K,V>[])fk;
1609 +            }
1610 +            else {
1611 +                synchronized (f) {
1612 +                    if (tabAt(tab, i) == f) {
1613 +                        len = 1;
1614 +                        for (Node<K,V> e = f, pred = null;; ++len) {
1615 +                            Object ek;
1616 +                            if (e.hash == h &&
1617 +                                ((ek = e.key) == k || k.equals(ek))) {
1618 +                                val = mf.apply(e.val, v);
1619 +                                if (val != null)
1620 +                                    e.val = val;
1621 +                                else {
1622 +                                    delta = -1;
1623 +                                    Node<K,V> en = e.next;
1624 +                                    if (pred != null)
1625 +                                        pred.next = en;
1626 +                                    else
1627 +                                        setTabAt(tab, i, en);
1628 +                                }
1629 +                                break;
1630 +                            }
1631 +                            pred = e;
1632 +                            if ((e = e.next) == null) {
1633 +                                delta = 1;
1634 +                                val = v;
1635 +                                pred.next = new Node<K,V>(h, k, val, null);
1636 +                                if (len > TREE_THRESHOLD)
1637 +                                    replaceWithTreeBin(tab, i, k);
1638 +                                break;
1639 +                            }
1640 +                        }
1641                      }
548                    pred = e;
549                    e = next;
1642                  }
1643 <            } finally {
1644 <                unlock();
1643 >                if (len != 0)
1644 >                    break;
1645              }
554            return oldValue;
1646          }
1647 +        if (delta != 0)
1648 +            addCount((long)delta, len);
1649 +        return val;
1650 +    }
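
A usage sketch: merge inserts v for an absent key, otherwise applies the
remapping function to the existing and given values; a null result removes the
entry, mirroring the delta = -1 path above.

    import java.util.concurrent.ConcurrentHashMap;

    public class MergeDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Integer> counts = new ConcurrentHashMap<>();
            for (String w : new String[] { "a", "b", "a" })
                counts.merge(w, 1, Integer::sum);        // a=2, b=1
            counts.merge("b", -1, (old, d) ->
                (old + d == 0) ? null : old + d);        // b removed
            System.out.println(counts); // {a=2}
        }
    }
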
1651 +
1652 +    /** Implementation for putAll */
1653 +    private final void internalPutAll(Map<? extends K, ? extends V> m) {
1654 +        tryPresize(m.size());
1655 +        long delta = 0L;     // number of uncommitted additions
1656 +        boolean npe = false; // to throw exception on exit for nulls
1657 +        try {                // to clean up counts on other exceptions
1658 +            for (Map.Entry<?, ? extends V> entry : m.entrySet()) {
1659 +                Object k; V v;
1660 +                if (entry == null || (k = entry.getKey()) == null ||
1661 +                    (v = entry.getValue()) == null) {
1662 +                    npe = true;
1663 +                    break;
1664 +                }
1665 +                int h = spread(k.hashCode());
1666 +                for (Node<K,V>[] tab = table;;) {
1667 +                    int i; Node<K,V> f; int fh; Object fk;
1668 +                    if (tab == null)
1669 +                        tab = initTable();
1670 +                    else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null){
1671 +                        if (casTabAt(tab, i, null, new Node<K,V>(h, k, v, null))) {
1672 +                            ++delta;
1673 +                            break;
1674 +                        }
1675 +                    }
1676 +                    else if ((fh = f.hash) < 0) {
1677 +                        if ((fk = f.key) instanceof TreeBin) {
1678 +                            TreeBin<K,V> t = (TreeBin<K,V>)fk;
1679 +                            long stamp = t.writeLock();
1680 +                            boolean validated = false;
1681 +                            try {
1682 +                                if (tabAt(tab, i) == f) {
1683 +                                    validated = true;
1684 +                                    Class<?> cc = comparableClassFor(k);
1685 +                                    TreeNode<K,V> p = t.getTreeNode(h, k,
1686 +                                                                    t.root, cc);
1687 +                                    if (p != null)
1688 +                                        p.val = v;
1689 +                                    else {
1690 +                                        ++delta;
1691 +                                        t.putTreeNode(h, k, v);
1692 +                                    }
1693 +                                }
1694 +                            } finally {
1695 +                                t.unlockWrite(stamp);
1696 +                            }
1697 +                            if (validated)
1698 +                                break;
1699 +                        }
1700 +                        else
1701 +                            tab = (Node<K,V>[])fk;
1702 +                    }
1703 +                    else {
1704 +                        int len = 0;
1705 +                        synchronized (f) {
1706 +                            if (tabAt(tab, i) == f) {
1707 +                                len = 1;
1708 +                                for (Node<K,V> e = f;; ++len) {
1709 +                                    Object ek;
1710 +                                    if (e.hash == h &&
1711 +                                        ((ek = e.key) == k || k.equals(ek))) {
1712 +                                        e.val = v;
1713 +                                        break;
1714 +                                    }
1715 +                                    Node<K,V> last = e;
1716 +                                    if ((e = e.next) == null) {
1717 +                                        ++delta;
1718 +                                        last.next = new Node<K,V>(h, k, v, null);
1719 +                                        if (len > TREE_THRESHOLD)
1720 +                                            replaceWithTreeBin(tab, i, k);
1721 +                                        break;
1722 +                                    }
1723 +                                }
1724 +                            }
1725 +                        }
1726 +                        if (len != 0) {
1727 +                            if (len > 1) {
1728 +                                addCount(delta, len);
1729 +                                delta = 0L;
1730 +                            }
1731 +                            break;
1732 +                        }
1733 +                    }
1734 +                }
1735 +            }
1736 +        } finally {
1737 +            if (delta != 0L)
1738 +                addCount(delta, 2);
1739 +        }
1740 +        if (npe)
1741 +            throw new NullPointerException();
1742 +    }
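
Two details worth noting above: the running delta of uncommitted insertions is
flushed to addCount in a finally block, so size bookkeeping stays consistent
even if a bin update throws; and a null key or value only sets the npe flag,
deferring the NullPointerException until after that cleanup.
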
1743  
1744 <        final boolean replace(K key, int hash, V oldValue, V newValue) {
1745 <            if (!tryLock())
1746 <                scanAndLock(key, hash);
1747 <            boolean replaced = false;
1748 <            try {
1749 <                HashEntry<K,V> e;
1750 <                for (e = entryForHash(this, hash); e != null; e = e.next) {
1751 <                    K k;
1752 <                    if ((k = e.key) == key ||
1753 <                        (e.hash == hash && key.equals(k))) {
1754 <                        if (oldValue.equals(e.value)) {
1755 <                            e.value = newValue;
1756 <                            ++modCount;
1757 <                            replaced = true;
1744 >    /**
1745 >     * Implementation for clear. Steps through each bin, removing all
1746 >     * nodes.
1747 >     */
1748 >    private final void internalClear() {
1749 >        long delta = 0L; // negated number of deletions
1750 >        int i = 0;
1751 >        Node<K,V>[] tab = table;
1752 >        while (tab != null && i < tab.length) {
1753 >            Node<K,V> f = tabAt(tab, i);
1754 >            if (f == null)
1755 >                ++i;
1756 >            else if (f.hash < 0) {
1757 >                Object fk;
1758 >                if ((fk = f.key) instanceof TreeBin) {
1759 >                    TreeBin<K,V> t = (TreeBin<K,V>)fk;
1760 >                    long stamp = t.writeLock();
1761 >                    try {
1762 >                        if (tabAt(tab, i) == f) {
1763 >                            for (Node<K,V> p = t.first; p != null; p = p.next)
1764 >                                --delta;
1765 >                            t.first = null;
1766 >                            t.root = null;
1767 >                            ++i;
1768                          }
1769 <                        break;
1769 >                    } finally {
1770 >                        t.unlockWrite(stamp);
1771 >                    }
1772 >                }
1773 >                else
1774 >                    tab = (Node<K,V>[])fk;
1775 >            }
1776 >            else {
1777 >                synchronized (f) {
1778 >                    if (tabAt(tab, i) == f) {
1779 >                        for (Node<K,V> e = f; e != null; e = e.next)
1780 >                            --delta;
1781 >                        setTabAt(tab, i, null);
1782 >                        ++i;
1783                      }
1784                  }
575            } finally {
576                unlock();
1785              }
578            return replaced;
1786          }
1787 +        if (delta != 0L)
1788 +            addCount(delta, -1);
1789 +    }
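
clear passes check = -1 here; per the @param note on addCount below, a
negative check suppresses the resize test, so removals never trigger a
transfer.
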
1790  
1791 <        final V replace(K key, int hash, V value) {
1792 <            if (!tryLock())
1793 <                scanAndLock(key, hash);
1794 <            V oldValue = null;
1795 <            try {
1796 <                HashEntry<K,V> e;
1797 <                for (e = entryForHash(this, hash); e != null; e = e.next) {
1798 <                    K k;
1799 <                    if ((k = e.key) == key ||
1800 <                        (e.hash == hash && key.equals(k))) {
1801 <                        oldValue = e.value;
1802 <                        e.value = value;
1803 <                        ++modCount;
1791 >    /* ---------------- Table Initialization and Resizing -------------- */
1792 >
1793 >    /**
1794 >     * Returns a power of two table size for the given desired capacity.
1795 >     * See Hacker's Delight, sec. 3.2.
1796 >     */
1797 >    private static final int tableSizeFor(int c) {
1798 >        int n = c - 1;
1799 >        n |= n >>> 1;
1800 >        n |= n >>> 2;
1801 >        n |= n >>> 4;
1802 >        n |= n >>> 8;
1803 >        n |= n >>> 16;
1804 >        return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
1805 >    }
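
Worked example: the or-shift cascade smears the highest set bit of n = c - 1
into every lower position, leaving a mask of all ones, so n + 1 is the next
power of two. For c = 17:

    n = 16        10000
    n |= n >>> 1  11000
    n |= n >>> 2  11110
    n |= n >>> 4  11111   (further shifts change nothing)

returning 31 + 1 = 32. Exact powers are preserved (tableSizeFor(32) == 32),
and tableSizeFor(1) == 1 since n = 0 falls through to n + 1.
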
1806 >
1807 >    /**
1808 >     * Initializes table, using the size recorded in sizeCtl.
1809 >     */
1810 >    private final Node<K,V>[] initTable() {
1811 >        Node<K,V>[] tab; int sc;
1812 >        while ((tab = table) == null) {
1813 >            if ((sc = sizeCtl) < 0)
1814 >                Thread.yield(); // lost initialization race; just spin
1815 >            else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
1816 >                try {
1817 >                    if ((tab = table) == null) {
1818 >                        int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
1819 >                        table = tab = (Node<K,V>[])new Node[n];
1820 >                        sc = n - (n >>> 2);
1821 >                    }
1822 >                } finally {
1823 >                    sizeCtl = sc;
1824 >                }
1825 >                break;
1826 >            }
1827 >        }
1828 >        return tab;
1829 >    }
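
The assignment sc = n - (n >>> 2) sets the next resize threshold to 0.75 * n
in pure integer arithmetic (n minus a quarter of n); for the default capacity
of 16 this gives 16 - 4 = 12, the traditional 0.75 load factor without
storing a float.
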
1830 >
1831 >    /**
1832 >     * Adds to count, and if table is too small and not already
1833 >     * resizing, initiates transfer. If already resizing, helps
1834 >     * perform transfer if work is available.  Rechecks occupancy
1835 >     * after a transfer to see if another resize is already needed
1836 >     * because resizings are lagging additions.
1837 >     *
1838 >     * @param x the count to add
1839 >     * @param check if < 0, don't check resize; if <= 1, only check if uncontended
1840 >     */
1841 >    private final void addCount(long x, int check) {
1842 >        Cell[] as; long b, s;
1843 >        if ((as = counterCells) != null ||
1844 >            !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
1845 >            Cell a; long v; int m;
1846 >            boolean uncontended = true;
1847 >            if (as == null || (m = as.length - 1) < 0 ||
1848 >                (a = as[ThreadLocalRandom.getProbe() & m]) == null ||
1849 >                !(uncontended =
1850 >                  U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
1851 >                fullAddCount(x, uncontended);
1852 >                return;
1853 >            }
1854 >            if (check <= 1)
1855 >                return;
1856 >            s = sumCount();
1857 >        }
1858 >        if (check >= 0) {
1859 >            Node<K,V>[] tab, nt; int sc;
1860 >            while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
1861 >                   tab.length < MAXIMUM_CAPACITY) {
1862 >                if (sc < 0) {
1863 >                    if (sc == -1 || transferIndex <= transferOrigin ||
1864 >                        (nt = nextTable) == null)
1865                          break;
1866 +                    if (U.compareAndSwapInt(this, SIZECTL, sc, sc - 1))
1867 +                        transfer(tab, nt);
1868 +                }
1869 +                else if (U.compareAndSwapInt(this, SIZECTL, sc, -2))
1870 +                    transfer(tab, null);
1871 +                s = sumCount();
1872 +            }
1873 +        }
1874 +    }
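
The counter maintenance at the top of addCount follows the striped LongAdder
pattern: a base count updated by CAS, with contended updates diverted to
per-thread cells that are summed on demand. A reduced sketch with illustrative
names (not this class's fields):

    import java.util.concurrent.ThreadLocalRandom;
    import java.util.concurrent.atomic.AtomicLong;

    class StripedCounterSketch {
        final AtomicLong base = new AtomicLong();
        final AtomicLong[] cells = new AtomicLong[8];
        { for (int i = 0; i < cells.length; ++i) cells[i] = new AtomicLong(); }

        void add(long x) {
            long b = base.get();
            if (!base.compareAndSet(b, b + x))   // contended: divert to a cell
                cells[ThreadLocalRandom.current()
                      .nextInt(cells.length)].addAndGet(x);
        }
        long sum() {                             // like sumCount()
            long s = base.get();
            for (AtomicLong c : cells) s += c.get();
            return s;
        }
    }
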
1875 +
1876 +    /**
1877 +     * Tries to presize table to accommodate the given number of elements.
1878 +     *
1879 +     * @param size number of elements (doesn't need to be perfectly accurate)
1880 +     */
1881 +    private final void tryPresize(int size) {
1882 +        int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
1883 +            tableSizeFor(size + (size >>> 1) + 1);
1884 +        int sc;
1885 +        while ((sc = sizeCtl) >= 0) {
1886 +            Node<K,V>[] tab = table; int n;
1887 +            if (tab == null || (n = tab.length) == 0) {
1888 +                n = (sc > c) ? sc : c;
1889 +                if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
1890 +                    try {
1891 +                        if (table == tab) {
1892 +                            table = (Node<K,V>[])new Node[n];
1893 +                            sc = n - (n >>> 2);
1894 +                        }
1895 +                    } finally {
1896 +                        sizeCtl = sc;
1897                      }
1898                  }
597            } finally {
598                unlock();
1899              }
1900 <            return oldValue;
1900 >            else if (c <= sc || n >= MAXIMUM_CAPACITY)
1901 >                break;
1902 >            else if (tab == table &&
1903 >                     U.compareAndSwapInt(this, SIZECTL, sc, -2))
1904 >                transfer(tab, null);
1905          }
1906 +    }
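
The padding by half the requested size plus one mirrors a 0.75 load
factor, so a presized table holds `size` elements without an immediate
resize. A sketch of the arithmetic with an illustrative size (not from the
source), assuming MAXIMUM_CAPACITY is not reached:

        int size = 100;
        int c = size + (size >>> 1) + 1;   // 100 + 50 + 1 = 151
        // tableSizeFor(151) == 256; resize threshold 256 - 64 = 192 > 100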
1907  
1908 <        final void clear() {
1909 <            lock();
1908 >    /**
1909 >     * Moves and/or copies the nodes in each bin to new table. See
1910 >     * above for explanation.
1911 >     */
1912 >    private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
1913 >        int n = tab.length, stride;
1914 >        if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
1915 >            stride = MIN_TRANSFER_STRIDE; // subdivide range
1916 >        if (nextTab == null) {            // initiating
1917              try {
1918 <                HashEntry<K,V>[] tab = table;
1919 <                for (int i = 0; i < tab.length ; i++)
1920 <                    setEntryAt(tab, i, null);
1921 <                ++modCount;
1922 <                count = 0;
1923 <            } finally {
1924 <                unlock();
1918 >                nextTab = (Node<K,V>[])new Node[n << 1];
1919 >            } catch (Throwable ex) {      // try to cope with OOME
1920 >                sizeCtl = Integer.MAX_VALUE;
1921 >                return;
1922 >            }
1923 >            nextTable = nextTab;
1924 >            transferOrigin = n;
1925 >            transferIndex = n;
1926 >            Node<K,V> rev = new Node<K,V>(MOVED, tab, null, null);
1927 >            for (int k = n; k > 0;) {    // progressively reveal ready slots
1928 >                int nextk = (k > stride) ? k - stride : 0;
1929 >                for (int m = nextk; m < k; ++m)
1930 >                    nextTab[m] = rev;
1931 >                for (int m = n + nextk; m < n + k; ++m)
1932 >                    nextTab[m] = rev;
1933 >                U.putOrderedInt(this, TRANSFERORIGIN, k = nextk);
1934              }
1935          }
1936 +        int nextn = nextTab.length;
1937 +        Node<K,V> fwd = new Node<K,V>(MOVED, nextTab, null, null);
1938 +        boolean advance = true;
1939 +        for (int i = 0, bound = 0;;) {
1940 +            int nextIndex, nextBound; Node<K,V> f; Object fk;
1941 +            while (advance) {
1942 +                if (--i >= bound)
1943 +                    advance = false;
1944 +                else if ((nextIndex = transferIndex) <= transferOrigin) {
1945 +                    i = -1;
1946 +                    advance = false;
1947 +                }
1948 +                else if (U.compareAndSwapInt
1949 +                         (this, TRANSFERINDEX, nextIndex,
1950 +                          nextBound = (nextIndex > stride ?
1951 +                                       nextIndex - stride : 0))) {
1952 +                    bound = nextBound;
1953 +                    i = nextIndex - 1;
1954 +                    advance = false;
1955 +                }
1956 +            }
1957 +            if (i < 0 || i >= n || i + n >= nextn) {
1958 +                for (int sc;;) {
1959 +                    if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, ++sc)) {
1960 +                        if (sc == -1) {
1961 +                            nextTable = null;
1962 +                            table = nextTab;
1963 +                            sizeCtl = (n << 1) - (n >>> 1);
1964 +                        }
1965 +                        return;
1966 +                    }
1967 +                }
1968 +            }
1969 +            else if ((f = tabAt(tab, i)) == null) {
1970 +                if (casTabAt(tab, i, null, fwd)) {
1971 +                    setTabAt(nextTab, i, null);
1972 +                    setTabAt(nextTab, i + n, null);
1973 +                    advance = true;
1974 +                }
1975 +            }
1976 +            else if (f.hash >= 0) {
1977 +                synchronized (f) {
1978 +                    if (tabAt(tab, i) == f) {
1979 +                        int runBit = f.hash & n;
1980 +                        Node<K,V> lastRun = f, lo = null, hi = null;
1981 +                        for (Node<K,V> p = f.next; p != null; p = p.next) {
1982 +                            int b = p.hash & n;
1983 +                            if (b != runBit) {
1984 +                                runBit = b;
1985 +                                lastRun = p;
1986 +                            }
1987 +                        }
1988 +                        if (runBit == 0)
1989 +                            lo = lastRun;
1990 +                        else
1991 +                            hi = lastRun;
1992 +                        for (Node<K,V> p = f; p != lastRun; p = p.next) {
1993 +                            int ph = p.hash; Object pk = p.key; V pv = p.val;
1994 +                            if ((ph & n) == 0)
1995 +                                lo = new Node<K,V>(ph, pk, pv, lo);
1996 +                            else
1997 +                                hi = new Node<K,V>(ph, pk, pv, hi);
1998 +                        }
1999 +                        setTabAt(nextTab, i, lo);
2000 +                        setTabAt(nextTab, i + n, hi);
2001 +                        setTabAt(tab, i, fwd);
2002 +                        advance = true;
2003 +                    }
2004 +                }
2005 +            }
2006 +            else if ((fk = f.key) instanceof TreeBin) {
2007 +                TreeBin<K,V> t = (TreeBin<K,V>)fk;
2008 +                long stamp = t.writeLock();
2009 +                try {
2010 +                    if (tabAt(tab, i) == f) {
2011 +                        TreeNode<K,V> root;
2012 +                        Node<K,V> ln = null, hn = null;
2013 +                        if ((root = t.root) != null) {
2014 +                            Node<K,V> e, p; TreeNode<K,V> lr, rr; int lh;
2015 +                            TreeBin<K,V> lt = null, ht = null;
2016 +                            for (lr = root; lr.left != null; lr = lr.left);
2017 +                            for (rr = root; rr.right != null; rr = rr.right);
2018 +                            if ((lh = lr.hash) == rr.hash) { // move entire tree
2019 +                                if ((lh & n) == 0)
2020 +                                    lt = t;
2021 +                                else
2022 +                                    ht = t;
2023 +                            }
2024 +                            else {
2025 +                                lt = new TreeBin<K,V>();
2026 +                                ht = new TreeBin<K,V>();
2027 +                                int lc = 0, hc = 0;
2028 +                                for (e = t.first; e != null; e = e.next) {
2029 +                                    int h = e.hash;
2030 +                                    Object k = e.key; V v = e.val;
2031 +                                    if ((h & n) == 0) {
2032 +                                        ++lc;
2033 +                                        lt.putTreeNode(h, k, v);
2034 +                                    }
2035 +                                    else {
2036 +                                        ++hc;
2037 +                                        ht.putTreeNode(h, k, v);
2038 +                                    }
2039 +                                }
2040 +                                if (lc < TREE_THRESHOLD) { // throw away
2041 +                                    for (p = lt.first; p != null; p = p.next)
2042 +                                        ln = new Node<K,V>(p.hash, p.key,
2043 +                                                           p.val, ln);
2044 +                                    lt = null;
2045 +                                }
2046 +                                if (hc < TREE_THRESHOLD) {
2047 +                                    for (p = ht.first; p != null; p = p.next)
2048 +                                        hn = new Node<K,V>(p.hash, p.key,
2049 +                                                           p.val, hn);
2050 +                                    ht = null;
2051 +                                }
2052 +                            }
2053 +                            if (ln == null && lt != null)
2054 +                                ln = new Node<K,V>(MOVED, lt, null, null);
2055 +                            if (hn == null && ht != null)
2056 +                                hn = new Node<K,V>(MOVED, ht, null, null);
2057 +                        }
2058 +                        setTabAt(nextTab, i, ln);
2059 +                        setTabAt(nextTab, i + n, hn);
2060 +                        setTabAt(tab, i, fwd);
2061 +                        advance = true;
2062 +                    }
2063 +                } finally {
2064 +                    t.unlockWrite(stamp);
2065 +                }
2066 +            }
2067 +            else
2068 +                advance = true; // already processed
2069 +        }
2070      }
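
Since the table doubles, a node in bin i can only move to bin i or bin
i + n of the new table, selected by the single hash bit (hash & n); the
lastRun scan above finds the trailing run of nodes that all share that bit
so the run can be linked in place rather than copied. A small sketch of
the split rule, with example values:

        int n = 16;                 // old table length, doubling to 32
        int h = 23;                 // some node's hash: 0b10111
        int oldBin = h & (n - 1);   // 7
        int newBin = ((h & n) == 0) ? oldBin : oldBin + n;  // bit 16 set -> 23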
2071  
2072 <    // Accessing segments
2073 <
2074 <    /**
2075 <     * Gets the jth element of given segment array (if nonnull) with
2076 <     * volatile element access semantics via Unsafe. (The null check
2077 <     * can trigger harmlessly only during deserialization.) Note:
2078 <     * because each element of segments array is set only once (using
2079 <     * fully ordered writes), some performance-sensitive methods rely
2080 <     * on this method only as a recheck upon null reads.
2081 <     */
2082 <    @SuppressWarnings("unchecked")
2083 <    static final <K,V> Segment<K,V> segmentAt(Segment<K,V>[] ss, int j) {
629 <        long u = (j << SSHIFT) + SBASE;
630 <        return ss == null ? null :
631 <            (Segment<K,V>) UNSAFE.getObjectVolatile(ss, u);
2072 >    /* ---------------- Counter support -------------- */
2073 >
2074 >    final long sumCount() {
2075 >        Cell[] as = counterCells; Cell a;
2076 >        long sum = baseCount;
2077 >        if (as != null) {
2078 >            for (int i = 0; i < as.length; ++i) {
2079 >                if ((a = as[i]) != null)
2080 >                    sum += a.value;
2081 >            }
2082 >        }
2083 >        return sum;
2084      }
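
The map's size is thus the base count plus the sum over all counter cells,
each maintained by CAS so contended updates scatter across cells rather
than serializing on one word. The public LongAdder class uses the same
design (per the comment below); a stripped-down usage sketch:

        import java.util.concurrent.atomic.LongAdder;

        LongAdder counter = new LongAdder();
        counter.increment();          // contended adds land in striped cells
        long total = counter.sum();   // base + all cells, like sumCount()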
2085  
2086 <    /**
2087 <     * Returns the segment for the given index, creating it and
2088 <     * recording in segment table (via CAS) if not already present.
2089 <     *
2090 <     * @param k the index
2091 <     * @return the segment
2092 <     */
2093 <    @SuppressWarnings("unchecked")
2094 <    private Segment<K,V> ensureSegment(int k) {
2095 <        final Segment<K,V>[] ss = this.segments;
2096 <        long u = (k << SSHIFT) + SBASE; // raw offset
2097 <        Segment<K,V> seg;
2098 <        if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u)) == null) {
2099 <            Segment<K,V> proto = ss[0]; // use segment 0 as prototype
2100 <            int cap = proto.table.length;
2101 <            float lf = proto.loadFactor;
2102 <            int threshold = (int)(cap * lf);
2103 <            HashEntry<K,V>[] tab = (HashEntry<K,V>[])new HashEntry[cap];
2104 <            if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
2105 <                == null) { // recheck
2106 <                Segment<K,V> s = new Segment<K,V>(lf, threshold, tab);
2107 <                while ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
2108 <                       == null) {
2109 <                    if (UNSAFE.compareAndSwapObject(ss, u, null, seg = s))
2110 <                        break;
2086 >    // See LongAdder version for explanation
2087 >    private final void fullAddCount(long x, boolean wasUncontended) {
2088 >        int h;
2089 >        if ((h = ThreadLocalRandom.getProbe()) == 0) {
2090 >            ThreadLocalRandom.localInit();      // force initialization
2091 >            h = ThreadLocalRandom.getProbe();
2092 >            wasUncontended = true;
2093 >        }
2094 >        boolean collide = false;                // True if last slot nonempty
2095 >        for (;;) {
2096 >            Cell[] as; Cell a; int n; long v;
2097 >            if ((as = counterCells) != null && (n = as.length) > 0) {
2098 >                if ((a = as[(n - 1) & h]) == null) {
2099 >                    if (cellsBusy == 0) {            // Try to attach new Cell
2100 >                        Cell r = new Cell(x); // Optimistic create
2101 >                        if (cellsBusy == 0 &&
2102 >                            U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
2103 >                            boolean created = false;
2104 >                            try {               // Recheck under lock
2105 >                                Cell[] rs; int m, j;
2106 >                                if ((rs = counterCells) != null &&
2107 >                                    (m = rs.length) > 0 &&
2108 >                                    rs[j = (m - 1) & h] == null) {
2109 >                                    rs[j] = r;
2110 >                                    created = true;
2111 >                                }
2112 >                            } finally {
2113 >                                cellsBusy = 0;
2114 >                            }
2115 >                            if (created)
2116 >                                break;
2117 >                            continue;           // Slot is now non-empty
2118 >                        }
2119 >                    }
2120 >                    collide = false;
2121 >                }
2122 >                else if (!wasUncontended)       // CAS already known to fail
2123 >                    wasUncontended = true;      // Continue after rehash
2124 >                else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))
2125 >                    break;
2126 >                else if (counterCells != as || n >= NCPU)
2127 >                    collide = false;            // At max size or stale
2128 >                else if (!collide)
2129 >                    collide = true;
2130 >                else if (cellsBusy == 0 &&
2131 >                         U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
2132 >                    try {
2133 >                        if (counterCells == as) {// Expand table unless stale
2134 >                            Cell[] rs = new Cell[n << 1];
2135 >                            for (int i = 0; i < n; ++i)
2136 >                                rs[i] = as[i];
2137 >                            counterCells = rs;
2138 >                        }
2139 >                    } finally {
2140 >                        cellsBusy = 0;
2141 >                    }
2142 >                    collide = false;
2143 >                    continue;                   // Retry with expanded table
2144                  }
2145 +                h = ThreadLocalRandom.advanceProbe(h);
2146 +            }
2147 +            else if (cellsBusy == 0 && counterCells == as &&
2148 +                     U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
2149 +                boolean init = false;
2150 +                try {                           // Initialize table
2151 +                    if (counterCells == as) {
2152 +                        Cell[] rs = new Cell[2];
2153 +                        rs[h & 1] = new Cell(x);
2154 +                        counterCells = rs;
2155 +                        init = true;
2156 +                    }
2157 +                } finally {
2158 +                    cellsBusy = 0;
2159 +                }
2160 +                if (init)
2161 +                    break;
2162              }
2163 +            else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x))
2164 +                break;                          // Fall back on using base
2165          }
662        return seg;
2166      }
2167  
2168 <    // Hash-based segment and entry accesses
2168 >    /* ---------------- Table Traversal -------------- */
2169  
2170      /**
2171 <     * Gets the segment for the given hash code.
2172 <     */
2173 <    @SuppressWarnings("unchecked")
2174 <    private Segment<K,V> segmentForHash(int h) {
2175 <        long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
2176 <        return (Segment<K,V>) UNSAFE.getObjectVolatile(segments, u);
2171 >     * Encapsulates traversal for methods such as containsValue; also
2172 >     * serves as a base class for other iterators and spliterators.
2173 >     *
2174 >     * Method advance visits once each still-valid node that was
2175 >     * reachable upon iterator construction. It might miss some that
2176 >     * were added to a bin after the bin was visited, which is OK wrt
2177 >     * consistency guarantees. Maintaining this property in the face
2178 >     * of possible ongoing resizes requires a fair amount of
2179 >     * bookkeeping state that is difficult to optimize away amidst
2180 >     * volatile accesses.  Even so, traversal maintains reasonable
2181 >     * throughput.
2182 >     *
2183 >     * Normally, iteration proceeds bin-by-bin traversing lists.
2184 >     * However, if the table has been resized, then all future steps
2185 >     * must traverse both the bin at the current index as well as at
2186 >     * (index + baseSize); and so on for further resizings. To
2187 >     * paranoically cope with potential sharing by users of iterators
2188 >     * across threads, iteration terminates if a bounds check fails
2189 >     * for a table read.
2190 >     */
2191 >    static class Traverser<K,V> {
2192 >        Node<K,V>[] tab;        // current table; updated if resized
2193 >        Node<K,V> next;         // the next entry to use
2194 >        int index;              // index of bin to use next
2195 >        int baseIndex;          // current index of initial table
2196 >        int baseLimit;          // index bound for initial table
2197 >        final int baseSize;     // initial table size
2198 >
2199 >        Traverser(Node<K,V>[] tab, int size, int index, int limit) {
2200 >            this.tab = tab;
2201 >            this.baseSize = size;
2202 >            this.baseIndex = this.index = index;
2203 >            this.baseLimit = limit;
2204 >            this.next = null;
2205 >        }
2206 >
2207 >        /**
2208 >         * Advances if possible, returning next valid node, or null if none.
2209 >         */
2210 >        final Node<K,V> advance() {
2211 >            Node<K,V> e;
2212 >            if ((e = next) != null)
2213 >                e = e.next;
2214 >            for (;;) {
2215 >                Node<K,V>[] t; int i, n; Object ek;  // must use locals in checks
2216 >                if (e != null)
2217 >                    return next = e;
2218 >                if (baseIndex >= baseLimit || (t = tab) == null ||
2219 >                    (n = t.length) <= (i = index) || i < 0)
2220 >                    return next = null;
2221 >                if ((e = tabAt(t, index)) != null && e.hash < 0) {
2222 >                    if ((ek = e.key) instanceof TreeBin)
2223 >                        e = ((TreeBin<K,V>)ek).first;
2224 >                    else {
2225 >                        tab = (Node<K,V>[])ek;
2226 >                        e = null;
2227 >                        continue;
2228 >                    }
2229 >                }
2230 >                if ((index += baseSize) >= n)
2231 >                    index = ++baseIndex;    // visit upper slots if present
2232 >            }
2233 >        }
2234      }
2235  
2236      /**
2237 <     * Gets the table entry for the given segment and hash code.
2237 >     * Base of key, value, and entry Iterators. Adds fields to
2238 >     * Traverser to support iterator.remove
2239       */
2240 <    @SuppressWarnings("unchecked")
2241 <    static final <K,V> HashEntry<K,V> entryForHash(Segment<K,V> seg, int h) {
2242 <        HashEntry<K,V>[] tab;
2243 <        return (seg == null || (tab = seg.table) == null) ? null :
2244 <            (HashEntry<K,V>) UNSAFE.getObjectVolatile
2245 <            (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
2240 >    static class BaseIterator<K,V> extends Traverser<K,V> {
2241 >        final ConcurrentHashMap<K,V> map;
2242 >        Node<K,V> lastReturned;
2243 >        BaseIterator(Node<K,V>[] tab, int size, int index, int limit,
2244 >                    ConcurrentHashMap<K,V> map) {
2245 >            super(tab, size, index, limit);
2246 >            this.map = map;
2247 >            advance();
2248 >        }
2249 >
2250 >        public final boolean hasNext() { return next != null; }
2251 >        public final boolean hasMoreElements() { return next != null; }
2252 >
2253 >        public final void remove() {
2254 >            Node<K,V> p;
2255 >            if ((p = lastReturned) == null)
2256 >                throw new IllegalStateException();
2257 >            lastReturned = null;
2258 >            map.internalReplace((K)p.key, null, null);
2259 >        }
2260 >    }
2261 >
2262 >    static final class KeyIterator<K,V> extends BaseIterator<K,V>
2263 >        implements Iterator<K>, Enumeration<K> {
2264 >        KeyIterator(Node<K,V>[] tab, int index, int size, int limit,
2265 >                    ConcurrentHashMap<K,V> map) {
2266 >            super(tab, index, size, limit, map);
2267 >        }
2268 >
2269 >        public final K next() {
2270 >            Node<K,V> p;
2271 >            if ((p = next) == null)
2272 >                throw new NoSuchElementException();
2273 >            K k = (K)p.key;
2274 >            lastReturned = p;
2275 >            advance();
2276 >            return k;
2277 >        }
2278 >
2279 >        public final K nextElement() { return next(); }
2280 >    }
2281 >
2282 >    static final class ValueIterator<K,V> extends BaseIterator<K,V>
2283 >        implements Iterator<V>, Enumeration<V> {
2284 >        ValueIterator(Node<K,V>[] tab, int index, int size, int limit,
2285 >                      ConcurrentHashMap<K,V> map) {
2286 >            super(tab, index, size, limit, map);
2287 >        }
2288 >
2289 >        public final V next() {
2290 >            Node<K,V> p;
2291 >            if ((p = next) == null)
2292 >                throw new NoSuchElementException();
2293 >            V v = p.val;
2294 >            lastReturned = p;
2295 >            advance();
2296 >            return v;
2297 >        }
2298 >
2299 >        public final V nextElement() { return next(); }
2300      }
2301  
2302 +    static final class EntryIterator<K,V> extends BaseIterator<K,V>
2303 +        implements Iterator<Map.Entry<K,V>> {
2304 +        EntryIterator(Node<K,V>[] tab, int index, int size, int limit,
2305 +                      ConcurrentHashMap<K,V> map) {
2306 +            super(tab, index, size, limit, map);
2307 +        }
2308 +
2309 +        public final Map.Entry<K,V> next() {
2310 +            Node<K,V> p;
2311 +            if ((p = next) == null)
2312 +                throw new NoSuchElementException();
2313 +            K k = (K)p.key;
2314 +            V v = p.val;
2315 +            lastReturned = p;
2316 +            advance();
2317 +            return new MapEntry<K,V>(k, v, map);
2318 +        }
2319 +    }
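
Because remove() above routes through the map's own removal machinery
(internalReplace with a null value), removal through any of the three
iterators is just an ordinary concurrent update. Typical use:

        import java.util.Iterator;
        import java.util.Map;

        ConcurrentHashMap<String,Integer> map = new ConcurrentHashMap<>();
        map.put("keep", 1);
        map.put("drop", -1);
        for (Iterator<Map.Entry<String,Integer>> it = map.entrySet().iterator();
             it.hasNext(); ) {
            if (it.next().getValue() < 0)
                it.remove();
        }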
2320 +
2321 +    static final class KeySpliterator<K,V> extends Traverser<K,V>
2322 +        implements Spliterator<K> {
2323 +        long est;               // size estimate
2324 +        KeySpliterator(Node<K,V>[] tab, int size, int index, int limit,
2325 +                       long est) {
2326 +            super(tab, size, index, limit);
2327 +            this.est = est;
2328 +        }
2329 +
2330 +        public Spliterator<K> trySplit() {
2331 +            int i, f, h;
2332 +            return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
2333 +                new KeySpliterator<K,V>(tab, baseSize, baseLimit = h,
2334 +                                        f, est >>>= 1);
2335 +        }
2336 +
2337 +        public void forEachRemaining(Consumer<? super K> action) {
2338 +            if (action == null) throw new NullPointerException();
2339 +            for (Node<K,V> p; (p = advance()) != null;)
2340 +                action.accept((K)p.key);
2341 +        }
2342 +
2343 +        public boolean tryAdvance(Consumer<? super K> action) {
2344 +            if (action == null) throw new NullPointerException();
2345 +            Node<K,V> p;
2346 +            if ((p = advance()) == null)
2347 +                return false;
2348 +            action.accept((K)p.key);
2349 +            return true;
2350 +        }
2351 +
2352 +        public long estimateSize() { return est; }
2353 +
2354 +        public int characteristics() {
2355 +            return Spliterator.DISTINCT | Spliterator.CONCURRENT |
2356 +                Spliterator.NONNULL;
2357 +        }
2358 +    }
2359 +
2360 +    static final class ValueSpliterator<K,V> extends Traverser<K,V>
2361 +        implements Spliterator<V> {
2362 +        long est;               // size estimate
2363 +        ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit,
2364 +                         long est) {
2365 +            super(tab, size, index, limit);
2366 +            this.est = est;
2367 +        }
2368 +
2369 +        public Spliterator<V> trySplit() {
2370 +            int i, f, h;
2371 +            return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
2372 +                new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h,
2373 +                                          f, est >>>= 1);
2374 +        }
2375 +
2376 +        public void forEachRemaining(Consumer<? super V> action) {
2377 +            if (action == null) throw new NullPointerException();
2378 +            for (Node<K,V> p; (p = advance()) != null;)
2379 +                action.accept(p.val);
2380 +        }
2381 +
2382 +        public boolean tryAdvance(Consumer<? super V> action) {
2383 +            if (action == null) throw new NullPointerException();
2384 +            Node<K,V> p;
2385 +            if ((p = advance()) == null)
2386 +                return false;
2387 +            action.accept(p.val);
2388 +            return true;
2389 +        }
2390 +
2391 +        public long estimateSize() { return est; }
2392 +
2393 +        public int characteristics() {
2394 +            return Spliterator.CONCURRENT | Spliterator.NONNULL;
2395 +        }
2396 +    }
2397 +
2398 +    static final class EntrySpliterator<K,V> extends Traverser<K,V>
2399 +        implements Spliterator<Map.Entry<K,V>> {
2400 +        final ConcurrentHashMap<K,V> map; // To export MapEntry
2401 +        long est;               // size estimate
2402 +        EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit,
2403 +                         long est, ConcurrentHashMap<K,V> map) {
2404 +            super(tab, size, index, limit);
2405 +            this.map = map;
2406 +            this.est = est;
2407 +        }
2408 +
2409 +        public Spliterator<Map.Entry<K,V>> trySplit() {
2410 +            int i, f, h;
2411 +            return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
2412 +                new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h,
2413 +                                          f, est >>>= 1, map);
2414 +        }
2415 +
2416 +        public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
2417 +            if (action == null) throw new NullPointerException();
2418 +            for (Node<K,V> p; (p = advance()) != null; )
2419 +                action.accept(new MapEntry<K,V>((K)p.key, p.val, map));
2420 +        }
2421 +
2422 +        public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
2423 +            if (action == null) throw new NullPointerException();
2424 +            Node<K,V> p;
2425 +            if ((p = advance()) == null)
2426 +                return false;
2427 +            action.accept(new MapEntry<K,V>((K)p.key, p.val, map));
2428 +            return true;
2429 +        }
2430 +
2431 +        public long estimateSize() { return est; }
2432 +
2433 +        public int characteristics() {
2434 +            return Spliterator.DISTINCT | Spliterator.CONCURRENT |
2435 +                Spliterator.NONNULL;
2436 +        }
2437 +    }
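
All three spliterators report CONCURRENT and NONNULL (plus DISTINCT for
keys and entries) but not SIZED, since est is only an estimate; trySplit
halves the remaining range of bins. A brief usage sketch, assuming the
Java 8 collection views:

        import java.util.Spliterator;

        ConcurrentHashMap<String,Integer> map = new ConcurrentHashMap<>();
        map.put("a", 1);
        map.put("b", 2);
        Spliterator<String> s = map.keySet().spliterator();
        Spliterator<String> half = s.trySplit();  // null when too small to split
        boolean weak = s.hasCharacteristics(Spliterator.CONCURRENT);  // true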
2438 +
2439 +
2440      /* ---------------- Public operations -------------- */
2441  
2442      /**
2443 <     * Creates a new, empty map with the specified initial
691 <     * capacity, load factor and concurrency level.
692 <     *
693 <     * @param initialCapacity the initial capacity. The implementation
694 <     * performs internal sizing to accommodate this many elements.
695 <     * @param loadFactor  the load factor threshold, used to control resizing.
696 <     * Resizing may be performed when the average number of elements per
697 <     * bin exceeds this threshold.
698 <     * @param concurrencyLevel the estimated number of concurrently
699 <     * updating threads. The implementation performs internal sizing
700 <     * to try to accommodate this many threads.
701 <     * @throws IllegalArgumentException if the initial capacity is
702 <     * negative or the load factor or concurrencyLevel are
703 <     * nonpositive.
2443 >     * Creates a new, empty map with the default initial table size (16).
2444       */
2445 <    @SuppressWarnings("unchecked")
706 <    public ConcurrentHashMap(int initialCapacity,
707 <                             float loadFactor, int concurrencyLevel) {
708 <        if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0)
709 <            throw new IllegalArgumentException();
710 <        if (concurrencyLevel > MAX_SEGMENTS)
711 <            concurrencyLevel = MAX_SEGMENTS;
712 <        // Find power-of-two sizes best matching arguments
713 <        int sshift = 0;
714 <        int ssize = 1;
715 <        while (ssize < concurrencyLevel) {
716 <            ++sshift;
717 <            ssize <<= 1;
718 <        }
719 <        this.segmentShift = 32 - sshift;
720 <        this.segmentMask = ssize - 1;
721 <        if (initialCapacity > MAXIMUM_CAPACITY)
722 <            initialCapacity = MAXIMUM_CAPACITY;
723 <        int c = initialCapacity / ssize;
724 <        if (c * ssize < initialCapacity)
725 <            ++c;
726 <        int cap = MIN_SEGMENT_TABLE_CAPACITY;
727 <        while (cap < c)
728 <            cap <<= 1;
729 <        // create segments and segments[0]
730 <        Segment<K,V> s0 =
731 <            new Segment<K,V>(loadFactor, (int)(cap * loadFactor),
732 <                             (HashEntry<K,V>[])new HashEntry[cap]);
733 <        Segment<K,V>[] ss = (Segment<K,V>[])new Segment[ssize];
734 <        UNSAFE.putOrderedObject(ss, SBASE, s0); // ordered write of segments[0]
735 <        this.segments = ss;
2445 >    public ConcurrentHashMap() {
2446      }
2447  
2448      /**
2449 <     * Creates a new, empty map with the specified initial capacity
2450 <     * and load factor and with the default concurrencyLevel (16).
2449 >     * Creates a new, empty map with an initial table size
2450 >     * accommodating the specified number of elements without the need
2451 >     * to dynamically resize.
2452       *
2453       * @param initialCapacity The implementation performs internal
2454       * sizing to accommodate this many elements.
2455 <     * @param loadFactor  the load factor threshold, used to control resizing.
2456 <     * Resizing may be performed when the average number of elements per
2457 <     * bin exceeds this threshold.
2455 >     * @throws IllegalArgumentException if the initial capacity of
2456 >     * elements is negative
2457 >     */
2458 >    public ConcurrentHashMap(int initialCapacity) {
2459 >        if (initialCapacity < 0)
2460 >            throw new IllegalArgumentException();
2461 >        int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
2462 >                   MAXIMUM_CAPACITY :
2463 >                   tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
2464 >        this.sizeCtl = cap;
2465 >    }
2466 >
2467 >    /**
2468 >     * Creates a new map with the same mappings as the given map.
2469 >     *
2470 >     * @param m the map
2471 >     */
2472 >    public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
2473 >        this.sizeCtl = DEFAULT_CAPACITY;
2474 >        internalPutAll(m);
2475 >    }
2476 >
2477 >    /**
2478 >     * Creates a new, empty map with an initial table size based on
2479 >     * the given number of elements ({@code initialCapacity}) and
2480 >     * initial table density ({@code loadFactor}).
2481 >     *
2482 >     * @param initialCapacity the initial capacity. The implementation
2483 >     * performs internal sizing to accommodate this many elements,
2484 >     * given the specified load factor.
2485 >     * @param loadFactor the load factor (table density) for
2486 >     * establishing the initial table size
2487       * @throws IllegalArgumentException if the initial capacity of
2488       * elements is negative or the load factor is nonpositive
2489       *
2490       * @since 1.6
2491       */
2492      public ConcurrentHashMap(int initialCapacity, float loadFactor) {
2493 <        this(initialCapacity, loadFactor, DEFAULT_CONCURRENCY_LEVEL);
2493 >        this(initialCapacity, loadFactor, 1);
2494      }
2495  
2496      /**
2497 <     * Creates a new, empty map with the specified initial capacity,
2498 <     * and with default load factor (0.75) and concurrencyLevel (16).
2497 >     * Creates a new, empty map with an initial table size based on
2498 >     * the given number of elements ({@code initialCapacity}), table
2499 >     * density ({@code loadFactor}), and number of concurrently
2500 >     * updating threads ({@code concurrencyLevel}).
2501       *
2502       * @param initialCapacity the initial capacity. The implementation
2503 <     * performs internal sizing to accommodate this many elements.
2504 <     * @throws IllegalArgumentException if the initial capacity of
2505 <     * elements is negative.
2503 >     * performs internal sizing to accommodate this many elements,
2504 >     * given the specified load factor.
2505 >     * @param loadFactor the load factor (table density) for
2506 >     * establishing the initial table size
2507 >     * @param concurrencyLevel the estimated number of concurrently
2508 >     * updating threads. The implementation may use this value as
2509 >     * a sizing hint.
2510 >     * @throws IllegalArgumentException if the initial capacity is
2511 >     * negative or the load factor or concurrencyLevel are
2512 >     * nonpositive
2513       */
2514 <    public ConcurrentHashMap(int initialCapacity) {
2515 <        this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
2514 >    public ConcurrentHashMap(int initialCapacity,
2515 >                             float loadFactor, int concurrencyLevel) {
2516 >        if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
2517 >            throw new IllegalArgumentException();
2518 >        if (initialCapacity < concurrencyLevel)   // Use at least as many bins
2519 >            initialCapacity = concurrencyLevel;   // as estimated threads
2520 >        long size = (long)(1.0 + (long)initialCapacity / loadFactor);
2521 >        int cap = (size >= (long)MAXIMUM_CAPACITY) ?
2522 >            MAXIMUM_CAPACITY : tableSizeFor((int)size);
2523 >        this.sizeCtl = cap;
2524      }
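
Here concurrencyLevel only sets a floor on the initial capacity, and
loadFactor is used once to size the initial table; neither constrains
later behavior. A sketch of the sizing arithmetic for the historical
defaults:

        int initialCapacity = 16;
        float loadFactor = 0.75f;
        long size = (long)(1.0 + (long)initialCapacity / loadFactor);  // 22
        // tableSizeFor(22) == 32, so the map starts with 32 bins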
2525  
2526      /**
2527 <     * Creates a new, empty map with a default initial capacity (16),
2528 <     * load factor (0.75) and concurrencyLevel (16).
2527 >     * Creates a new {@link Set} backed by a ConcurrentHashMap
2528 >     * from the given type to {@code Boolean.TRUE}.
2529 >     *
2530 >     * @return the new set
2531       */
2532 <    public ConcurrentHashMap() {
2533 <        this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
2532 >    public static <K> KeySetView<K,Boolean> newKeySet() {
2533 >        return new KeySetView<K,Boolean>
2534 >            (new ConcurrentHashMap<K,Boolean>(), Boolean.TRUE);
2535      }
2536  
2537      /**
2538 <     * Creates a new map with the same mappings as the given map.
2539 <     * The map is created with a capacity of 1.5 times the number
780 <     * of mappings in the given map or 16 (whichever is greater),
781 <     * and a default load factor (0.75) and concurrencyLevel (16).
2538 >     * Creates a new {@link Set} backed by a ConcurrentHashMap
2539 >     * from the given type to {@code Boolean.TRUE}.
2540       *
2541 <     * @param m the map
2541 >     * @param initialCapacity The implementation performs internal
2542 >     * sizing to accommodate this many elements.
2543 >     * @throws IllegalArgumentException if the initial capacity of
2544 >     * elements is negative
2545 >     * @return the new set
2546       */
2547 <    public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
2548 <        this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1,
2549 <                      DEFAULT_INITIAL_CAPACITY),
788 <             DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
789 <        putAll(m);
2547 >    public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
2548 >        return new KeySetView<K,Boolean>
2549 >            (new ConcurrentHashMap<K,Boolean>(initialCapacity), Boolean.TRUE);
2550      }
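
These factories yield a concurrent Set view without exposing the backing
map; every element added through the set is stored mapped to Boolean.TRUE.
Typical use:

        import java.util.Set;

        Set<String> seen = ConcurrentHashMap.newKeySet();
        boolean firstTime = seen.add("task-42");  // true only for the first adder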
2551  
2552      /**
2553 <     * Returns <tt>true</tt> if this map contains no key-value mappings.
794 <     *
795 <     * @return <tt>true</tt> if this map contains no key-value mappings
2553 >     * {@inheritDoc}
2554       */
2555      public boolean isEmpty() {
2556 <        /*
799 <         * Sum per-segment modCounts to avoid mis-reporting when
800 <         * elements are concurrently added and removed in one segment
801 <         * while checking another, in which case the table was never
802 <         * actually empty at any point. (The sum ensures accuracy up
803 <         * through at least 1<<31 per-segment modifications before
804 <         * recheck.)  Methods size() and containsValue() use similar
805 <         * constructions for stability checks.
806 <         */
807 <        long sum = 0L;
808 <        final Segment<K,V>[] segments = this.segments;
809 <        for (int j = 0; j < segments.length; ++j) {
810 <            Segment<K,V> seg = segmentAt(segments, j);
811 <            if (seg != null) {
812 <                if (seg.count != 0)
813 <                    return false;
814 <                sum += seg.modCount;
815 <            }
816 <        }
817 <        if (sum != 0L) { // recheck unless no modifications
818 <            for (int j = 0; j < segments.length; ++j) {
819 <                Segment<K,V> seg = segmentAt(segments, j);
820 <                if (seg != null) {
821 <                    if (seg.count != 0)
822 <                        return false;
823 <                    sum -= seg.modCount;
824 <                }
825 <            }
826 <            if (sum != 0L)
827 <                return false;
828 <        }
829 <        return true;
2556 >        return sumCount() <= 0L; // ignore transient negative values
2557      }
2558  
2559      /**
2560 <     * Returns the number of key-value mappings in this map.  If the
834 <     * map contains more than <tt>Integer.MAX_VALUE</tt> elements, returns
835 <     * <tt>Integer.MAX_VALUE</tt>.
836 <     *
837 <     * @return the number of key-value mappings in this map
2560 >     * {@inheritDoc}
2561       */
2562      public int size() {
2563 <        // Try a few times to get accurate count. On failure due to
2564 <        // continuous async changes in table, resort to locking.
2565 <        final Segment<K,V>[] segments = this.segments;
2566 <        int size;
2567 <        boolean overflow; // true if size overflows 32 bits
2568 <        long sum;         // sum of modCounts
2569 <        long last = 0L;   // previous sum
2570 <        int retries = -1; // first iteration isn't retry
2571 <        try {
2572 <            for (;;) {
2573 <                if (retries++ == RETRIES_BEFORE_LOCK) {
2574 <                    for (int j = 0; j < segments.length; ++j)
2575 <                        ensureSegment(j).lock(); // force creation
2576 <                }
2577 <                sum = 0L;
2578 <                size = 0;
2579 <                overflow = false;
2580 <                for (int j = 0; j < segments.length; ++j) {
858 <                    Segment<K,V> seg = segmentAt(segments, j);
859 <                    if (seg != null) {
860 <                        sum += seg.modCount;
861 <                        int c = seg.count;
862 <                        if (c < 0 || (size += c) < 0)
863 <                            overflow = true;
864 <                    }
865 <                }
866 <                if (sum == last)
867 <                    break;
868 <                last = sum;
869 <            }
870 <        } finally {
871 <            if (retries > RETRIES_BEFORE_LOCK) {
872 <                for (int j = 0; j < segments.length; ++j)
873 <                    segmentAt(segments, j).unlock();
874 <            }
875 <        }
876 <        return overflow ? Integer.MAX_VALUE : size;
2563 >        long n = sumCount();
2564 >        return ((n < 0L) ? 0 :
2565 >                (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
2566 >                (int)n);
2567 >    }
2568 >
2569 >    /**
2570 >     * Returns the number of mappings. This method should be used
2571 >     * instead of {@link #size} because a ConcurrentHashMap may
2572 >     * contain more mappings than can be represented as an int. The
2573 >     * value returned is an estimate; the actual count may differ if
2574 >     * there are concurrent insertions or removals.
2575 >     *
2576 >     * @return the number of mappings
2577 >     */
2578 >    public long mappingCount() {
2579 >        long n = sumCount();
2580 >        return (n < 0L) ? 0L : n; // ignore transient negative values
2581      }
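
Since the underlying count is a long, size() must clamp to the int range
while mappingCount() need not; both are estimates in the presence of
concurrent updates. For example:

        ConcurrentHashMap<String,Integer> map = new ConcurrentHashMap<>();
        map.put("a", 1);
        long n = map.mappingCount();  // long-valued count: 1
        int s = map.size();           // clamped to [0, Integer.MAX_VALUE]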
2582  
2583      /**
# Line 888 | Line 2592 | public class ConcurrentHashMap<K, V> ext
2592       * @throws NullPointerException if the specified key is null
2593       */
2594      public V get(Object key) {
2595 <        Segment<K,V> s; // manually integrate access methods to reduce overhead
2596 <        HashEntry<K,V>[] tab;
2597 <        int h = hash(key.hashCode());
2598 <        long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
2599 <        if ((s = (Segment<K,V>)UNSAFE.getObjectVolatile(segments, u)) != null &&
2600 <            (tab = s.table) != null) {
2601 <            for (HashEntry<K,V> e = (HashEntry<K,V>) UNSAFE.getObjectVolatile
2602 <                     (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
2603 <                 e != null; e = e.next) {
2604 <                K k;
2605 <                if ((k = e.key) == key || (e.hash == h && key.equals(k)))
2606 <                    return e.value;
2607 <            }
2608 <        }
2609 <        return null;
2595 >        return internalGet(key);
2596 >    }
2597 >
2598 >    /**
2599 >     * Returns the value to which the specified key is mapped,
2600 >     * or the given defaultValue if this map contains no mapping for the key.
2601 >     *
2602 >     * @param key the key
2603 >     * @param defaultValue the value to return if this map contains
2604 >     * no mapping for the given key
2605 >     * @return the mapping for the key, if present; else the defaultValue
2606 >     * @throws NullPointerException if the specified key is null
2607 >     */
2608 >    public V getOrDefault(Object key, V defaultValue) {
2609 >        V v;
2610 >        return (v = internalGet(key)) == null ? defaultValue : v;
2611      }
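
Because this map never stores null values, a null from internalGet
unambiguously means absence, so no separate containsKey recheck is needed.
Usage:

        ConcurrentHashMap<String,Integer> hits = new ConcurrentHashMap<>();
        int count = hits.getOrDefault("page", 0);  // 0 while "page" is absent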
2612  
2613      /**
2614       * Tests if the specified object is a key in this table.
2615       *
2616 <     * @param  key   possible key
2617 <     * @return <tt>true</tt> if and only if the specified object
2616 >     * @param  key possible key
2617 >     * @return {@code true} if and only if the specified object
2618       *         is a key in this table, as determined by the
2619 <     *         <tt>equals</tt> method; <tt>false</tt> otherwise.
2619 >     *         {@code equals} method; {@code false} otherwise
2620       * @throws NullPointerException if the specified key is null
2621       */
917    @SuppressWarnings("unchecked")
2622      public boolean containsKey(Object key) {
2623 <        Segment<K,V> s; // same as get() except no need for volatile value read
920 <        HashEntry<K,V>[] tab;
921 <        int h = hash(key.hashCode());
922 <        long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
923 <        if ((s = (Segment<K,V>)UNSAFE.getObjectVolatile(segments, u)) != null &&
924 <            (tab = s.table) != null) {
925 <            for (HashEntry<K,V> e = (HashEntry<K,V>) UNSAFE.getObjectVolatile
926 <                     (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
927 <                 e != null; e = e.next) {
928 <                K k;
929 <                if ((k = e.key) == key || (e.hash == h && key.equals(k)))
930 <                    return true;
931 <            }
932 <        }
933 <        return false;
2623 >        return internalGet(key) != null;
2624      }
2625  
2626      /**
2627 <     * Returns <tt>true</tt> if this map maps one or more keys to the
2628 <     * specified value. Note: This method requires a full internal
2629 <     * traversal of the hash table, and so is much slower than
940 <     * method <tt>containsKey</tt>.
2627 >     * Returns {@code true} if this map maps one or more keys to the
2628 >     * specified value. Note: This method may require a full traversal
2629 >     * of the map, and is much slower than method {@code containsKey}.
2630       *
2631       * @param value value whose presence in this map is to be tested
2632 <     * @return <tt>true</tt> if this map maps one or more keys to the
2632 >     * @return {@code true} if this map maps one or more keys to the
2633       *         specified value
2634       * @throws NullPointerException if the specified value is null
2635       */
2636      public boolean containsValue(Object value) {
948        // Same idea as size()
2637          if (value == null)
2638              throw new NullPointerException();
2639 <        final Segment<K,V>[] segments = this.segments;
2640 <        boolean found = false;
2641 <        long last = 0L;   // previous sum
2642 <        int retries = -1;
2643 <        try {
2644 <            outer: for (;;) {
2645 <                if (retries++ == RETRIES_BEFORE_LOCK) {
958 <                    for (int j = 0; j < segments.length; ++j)
959 <                        ensureSegment(j).lock(); // force creation
960 <                }
961 <                long sum = 0L;
962 <                for (int j = 0; j < segments.length; ++j) {
963 <                    HashEntry<K,V>[] tab;
964 <                    Segment<K,V> seg = segmentAt(segments, j);
965 <                    if (seg != null && (tab = seg.table) != null) {
966 <                        for (int i = 0 ; i < tab.length; i++) {
967 <                            HashEntry<K,V> e;
968 <                            for (e = entryAt(tab, i); e != null; e = e.next) {
969 <                                V v = e.value;
970 <                                if (v != null && value.equals(v)) {
971 <                                    found = true;
972 <                                    break outer;
973 <                                }
974 <                            }
975 <                        }
976 <                        sum += seg.modCount;
977 <                    }
978 <                }
979 <                if (retries > 0 && sum == last)
980 <                    break;
981 <                last = sum;
982 <            }
983 <        } finally {
984 <            if (retries > RETRIES_BEFORE_LOCK) {
985 <                for (int j = 0; j < segments.length; ++j)
986 <                    segmentAt(segments, j).unlock();
2639 >        Node<K,V>[] t;
2640 >        if ((t = table) != null) {
2641 >            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
2642 >            for (Node<K,V> p; (p = it.advance()) != null; ) {
2643 >                V v;
2644 >                if ((v = p.val) == value || value.equals(v))
2645 >                    return true;
2646              }
2647          }
2648 <        return found;
2648 >        return false;
2649      }
2650  
2651      /**
2652       * Legacy method testing if some key maps into the specified value
2653       * in this table.  This method is identical in functionality to
2654 <     * {@link #containsValue}, and exists solely to ensure
2654 >     * {@link #containsValue(Object)}, and exists solely to ensure
2655       * full compatibility with class {@link java.util.Hashtable},
2656       * which supported this method prior to introduction of the
2657       * Java Collections framework.
2658       *
2659       * @param  value a value to search for
2660 <     * @return <tt>true</tt> if and only if some key maps to the
2661 <     *         <tt>value</tt> argument in this table as
2662 <     *         determined by the <tt>equals</tt> method;
2663 <     *         <tt>false</tt> otherwise
2660 >     * @return {@code true} if and only if some key maps to the
2661 >     *         {@code value} argument in this table as
2662 >     *         determined by the {@code equals} method;
2663 >     *         {@code false} otherwise
2664       * @throws NullPointerException if the specified value is null
2665       */
2666 <    public boolean contains(Object value) {
2666 >    @Deprecated public boolean contains(Object value) {
2667          return containsValue(value);
2668      }
2669  
# Line 1012 | Line 2671 | public class ConcurrentHashMap<K, V> ext
2671       * Maps the specified key to the specified value in this table.
2672       * Neither the key nor the value can be null.
2673       *
2674 <     * <p> The value can be retrieved by calling the <tt>get</tt> method
2674 >     * <p>The value can be retrieved by calling the {@code get} method
2675       * with a key that is equal to the original key.
2676       *
2677       * @param key key with which the specified value is to be associated
2678       * @param value value to be associated with the specified key
2679 <     * @return the previous value associated with <tt>key</tt>, or
2680 <     *         <tt>null</tt> if there was no mapping for <tt>key</tt>
2679 >     * @return the previous value associated with {@code key}, or
2680 >     *         {@code null} if there was no mapping for {@code key}
2681       * @throws NullPointerException if the specified key or value is null
2682       */
1024    @SuppressWarnings("unchecked")
2683      public V put(K key, V value) {
2684 <        Segment<K,V> s;
1027 <        if (value == null)
1028 <            throw new NullPointerException();
1029 <        int hash = hash(key.hashCode());
1030 <        int j = (hash >>> segmentShift) & segmentMask;
1031 <        if ((s = (Segment<K,V>)UNSAFE.getObject          // nonvolatile; recheck
1032 <             (segments, (j << SSHIFT) + SBASE)) == null) //  in ensureSegment
1033 <            s = ensureSegment(j);
1034 <        return s.put(key, hash, value, false);
2684 >        return internalPut(key, value, false);
2685      }
2686  
2687      /**
2688       * {@inheritDoc}
2689       *
2690       * @return the previous value associated with the specified key,
2691 <     *         or <tt>null</tt> if there was no mapping for the key
2691 >     *         or {@code null} if there was no mapping for the key
2692       * @throws NullPointerException if the specified key or value is null
2693       */
1044    @SuppressWarnings("unchecked")
2694      public V putIfAbsent(K key, V value) {
2695 <        Segment<K,V> s;
1047 <        if (value == null)
1048 <            throw new NullPointerException();
1049 <        int hash = hash(key.hashCode());
1050 <        int j = (hash >>> segmentShift) & segmentMask;
1051 <        if ((s = (Segment<K,V>)UNSAFE.getObject
1052 <             (segments, (j << SSHIFT) + SBASE)) == null)
1053 <            s = ensureSegment(j);
1054 <        return s.put(key, hash, value, true);
2695 >        return internalPut(key, value, true);
2696      }
2697  
2698      /**
# Line 1062 | Line 2703 | public class ConcurrentHashMap<K, V> ext
2703       * @param m mappings to be stored in this map
2704       */
2705      public void putAll(Map<? extends K, ? extends V> m) {
2706 <        for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
2707 <            put(e.getKey(), e.getValue());
2706 >        internalPutAll(m);
2707 >    }
2708 >
2709 >    /**
2710 >     * If the specified key is not already associated with a value (or
2711 >     * is mapped to {@code null}), attempts to compute its value using
2712 >     * the given mapping function and enters it into this map unless
2713 >     * {@code null}. The entire method invocation is performed
2714 >     * atomically, so the function is applied at most once per key.
2715 >     * Some attempted update operations on this map by other threads
2716 >     * may be blocked while computation is in progress, so the
2717 >     * computation should be short and simple, and must not attempt to
2718 >     * update any other mappings of this Map.
2719 >     *
2720 >     * @param key key with which the specified value is to be associated
2721 >     * @param mappingFunction the function to compute a value
2722 >     * @return the current (existing or computed) value associated with
2723 >     *         the specified key, or null if the computed value is null
2724 >     * @throws NullPointerException if the specified key or mappingFunction
2725 >     *         is null
2726 >     * @throws IllegalStateException if the computation detectably
2727 >     *         attempts a recursive update to this map that would
2728 >     *         otherwise never complete
2729 >     * @throws RuntimeException or Error if the mappingFunction does so,
2730 >     *         in which case the mapping is left unestablished
2731 >     */
2732 >    public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
2733 >        return internalComputeIfAbsent(key, mappingFunction);
2734 >    }
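
The at-most-once guarantee makes this the idiomatic building block for
small concurrent caches, with the stated caveats that the function be
short and not touch other keys of this map. A typical memoization sketch:

        ConcurrentHashMap<Integer,Integer> squares = new ConcurrentHashMap<>();
        int v = squares.computeIfAbsent(7, k -> k * k);  // computed once, cached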
2735 >
2736 >    /**
2737 >     * If the value for the specified key is present and non-null,
2738 >     * attempts to compute a new mapping given the key and its current
2739 >     * mapped value.  The entire method invocation is performed
2740 >     * atomically.  Some attempted update operations on this map by
2741 >     * other threads may be blocked while computation is in progress,
2742 >     * so the computation should be short and simple, and must not
2743 >     * attempt to update any other mappings of this Map.
2744 >     *
2745 >     * @param key key with which a value may be associated
2746 >     * @param remappingFunction the function to compute a value
2747 >     * @return the new value associated with the specified key, or null if none
2748 >     * @throws NullPointerException if the specified key or remappingFunction
2749 >     *         is null
2750 >     * @throws IllegalStateException if the computation detectably
2751 >     *         attempts a recursive update to this map that would
2752 >     *         otherwise never complete
2753 >     * @throws RuntimeException or Error if the remappingFunction does so,
2754 >     *         in which case the mapping is unchanged
2755 >     */
2756 >    public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
2757 >        return internalCompute(key, true, remappingFunction);
2758 >    }
2759 >
2760 >    /**
2761 >     * Attempts to compute a mapping for the specified key and its
2762 >     * current mapped value (or {@code null} if there is no current
2763 >     * mapping). The entire method invocation is performed atomically.
2764 >     * Some attempted update operations on this map by other threads
2765 >     * may be blocked while computation is in progress, so the
2766 >     * computation should be short and simple, and must not attempt to
2767 >     * update any other mappings of this Map.
2768 >     *
2769 >     * @param key key with which the specified value is to be associated
2770 >     * @param remappingFunction the function to compute a value
2771 >     * @return the new value associated with the specified key, or null if none
2772 >     * @throws NullPointerException if the specified key or remappingFunction
2773 >     *         is null
2774 >     * @throws IllegalStateException if the computation detectably
2775 >     *         attempts a recursive update to this map that would
2776 >     *         otherwise never complete
2777 >     * @throws RuntimeException or Error if the remappingFunction does so,
2778 >     *         in which case the mapping is unchanged
2779 >     */
2780 >    public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
2781 >        return internalCompute(key, false, remappingFunction);
2782 >    }
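     >
     >    /*
     >     * Usage sketch (illustrative only): atomically decrementing a counter
     >     * and removing the mapping once it reaches zero, by returning null
     >     * from the remapping function. "key" is a hypothetical argument.
     >     *
     >     *   ConcurrentHashMap<String,Integer> counts = new ConcurrentHashMap<>();
     >     *   counts.compute(key, (k, v) -> (v == null || v == 1) ? null : v - 1);
     >     */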
2783 >
2784 >    /**
2785 >     * If the specified key is not already associated with a
2786 >     * (non-null) value, associates it with the given value.
2787 >     * Otherwise, replaces the value with the results of the given
2788 >     * remapping function, or removes if {@code null}. The entire
2789 >     * method invocation is performed atomically.  Some attempted
2790 >     * update operations on this map by other threads may be blocked
2791 >     * while computation is in progress, so the computation should be
2792 >     * short and simple, and must not attempt to update any other
2793 >     * mappings of this Map.
2794 >     *
2795 >     * @param key key with which the specified value is to be associated
2796 >     * @param value the value to use if absent
2797 >     * @param remappingFunction the function to recompute a value if present
2798 >     * @return the new value associated with the specified key, or null if none
2799 >     * @throws NullPointerException if the specified key or the
2800 >     *         remappingFunction is null
2801 >     * @throws RuntimeException or Error if the remappingFunction does so,
2802 >     *         in which case the mapping is unchanged
2803 >     */
2804 >    public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
2805 >        return internalMerge(key, value, remappingFunction);
2806      }
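     >
     >    /*
     >     * Usage sketch (illustrative only): maintaining a frequency count over
     >     * a hypothetical collection of words. When a key is absent the given
     >     * value (1) is installed; otherwise the remapping function combines it
     >     * with the existing count.
     >     *
     >     *   ConcurrentHashMap<String,Integer> freq = new ConcurrentHashMap<>();
     >     *   for (String word : words)
     >     *       freq.merge(word, 1, Integer::sum);
     >     */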
2807  
2808      /**
# Line 1071 | Line 2810 | public class ConcurrentHashMap<K, V> ext
2810       * This method does nothing if the key is not in the map.
2811       *
2812       * @param  key the key that needs to be removed
2813 <     * @return the previous value associated with <tt>key</tt>, or
2814 <     *         <tt>null</tt> if there was no mapping for <tt>key</tt>
2813 >     * @return the previous value associated with {@code key}, or
2814 >     *         {@code null} if there was no mapping for {@code key}
2815       * @throws NullPointerException if the specified key is null
2816       */
2817      public V remove(Object key) {
2818 <        int hash = hash(key.hashCode());
1080 <        Segment<K,V> s = segmentForHash(hash);
1081 <        return s == null ? null : s.remove(key, hash, null);
2818 >        return internalReplace(key, null, null);
2819      }
2820  
2821      /**
# Line 1087 | Line 2824 | public class ConcurrentHashMap<K, V> ext
2824       * @throws NullPointerException if the specified key is null
2825       */
2826      public boolean remove(Object key, Object value) {
2827 <        int hash = hash(key.hashCode());
2828 <        Segment<K,V> s;
2829 <        return value != null && (s = segmentForHash(hash)) != null &&
1093 <            s.remove(key, hash, value) != null;
2827 >        if (key == null)
2828 >            throw new NullPointerException();
2829 >        return value != null && internalReplace(key, null, value) != null;
2830      }
2831  
2832      /**
# Line 1099 | Line 2835 | public class ConcurrentHashMap<K, V> ext
2835       * @throws NullPointerException if any of the arguments are null
2836       */
2837      public boolean replace(K key, V oldValue, V newValue) {
2838 <        int hash = hash(key.hashCode());
1103 <        if (oldValue == null || newValue == null)
2838 >        if (key == null || oldValue == null || newValue == null)
2839              throw new NullPointerException();
2840 <        Segment<K,V> s = segmentForHash(hash);
1106 <        return s != null && s.replace(key, hash, oldValue, newValue);
2840 >        return internalReplace(key, newValue, oldValue) != null;
2841      }
2842  
2843      /**
2844       * {@inheritDoc}
2845       *
2846       * @return the previous value associated with the specified key,
2847 <     *         or <tt>null</tt> if there was no mapping for the key
2847 >     *         or {@code null} if there was no mapping for the key
2848       * @throws NullPointerException if the specified key or value is null
2849       */
2850      public V replace(K key, V value) {
2851 <        int hash = hash(key.hashCode());
1118 <        if (value == null)
2851 >        if (key == null || value == null)
2852              throw new NullPointerException();
2853 <        Segment<K,V> s = segmentForHash(hash);
1121 <        return s == null ? null : s.replace(key, hash, value);
2853 >        return internalReplace(key, value, null);
2854      }
2855  
2856      /**
2857       * Removes all of the mappings from this map.
2858       */
2859      public void clear() {
2860 <        final Segment<K,V>[] segments = this.segments;
1129 <        for (int j = 0; j < segments.length; ++j) {
1130 <            Segment<K,V> s = segmentAt(segments, j);
1131 <            if (s != null)
1132 <                s.clear();
1133 <        }
2860 >        internalClear();
2861      }
2862  
2863      /**
2864       * Returns a {@link Set} view of the keys contained in this map.
2865       * The set is backed by the map, so changes to the map are
2866 <     * reflected in the set, and vice-versa.  The set supports element
2866 >     * reflected in the set, and vice-versa. The set supports element
2867       * removal, which removes the corresponding mapping from this map,
2868 <     * via the <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
2869 <     * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
2870 <     * operations.  It does not support the <tt>add</tt> or
2871 <     * <tt>addAll</tt> operations.
2868 >     * via the {@code Iterator.remove}, {@code Set.remove},
2869 >     * {@code removeAll}, {@code retainAll}, and {@code clear}
2870 >     * operations.  It does not support the {@code add} or
2871 >     * {@code addAll} operations.
2872       *
2873 <     * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
2873 >     * <p>The view's {@code iterator} is a "weakly consistent" iterator
2874       * that will never throw {@link ConcurrentModificationException},
2875       * and guarantees to traverse elements as they existed upon
2876       * construction of the iterator, and may (but is not guaranteed to)
2877       * reflect any modifications subsequent to construction.
2878 +     *
2879 +     * @return the set view
2880       */
2881 <    public Set<K> keySet() {
2882 <        Set<K> ks = keySet;
2883 <        return (ks != null) ? ks : (keySet = new KeySet());
2881 >    public KeySetView<K,V> keySet() {
2882 >        KeySetView<K,V> ks = keySet;
2883 >        return (ks != null) ? ks : (keySet = new KeySetView<K,V>(this, null));
2884 >    }
2885 >
2886 >    /**
2887 >     * Returns a {@link Set} view of the keys in this map, using the
2888 >     * given common mapped value for any additions (i.e., {@link
2889 >     * Collection#add} and {@link Collection#addAll(Collection)}).
2890 >     * This is of course only appropriate if it is acceptable to use
2891 >     * the same value for all additions from this view.
2892 >     *
2893 >     * @param mappedValue the mapped value to use for any additions
2894 >     * @return the set view
2895 >     * @throws NullPointerException if the mappedValue is null
2896 >     */
2897 >    public KeySetView<K,V> keySet(V mappedValue) {
2898 >        if (mappedValue == null)
2899 >            throw new NullPointerException();
2900 >        return new KeySetView<K,V>(this, mappedValue);
2901      }
2902  
2903      /**
# Line 1159 | Line 2905 | public class ConcurrentHashMap<K, V> ext
2905       * The collection is backed by the map, so changes to the map are
2906       * reflected in the collection, and vice-versa.  The collection
2907       * supports element removal, which removes the corresponding
2908 <     * mapping from this map, via the <tt>Iterator.remove</tt>,
2909 <     * <tt>Collection.remove</tt>, <tt>removeAll</tt>,
2910 <     * <tt>retainAll</tt>, and <tt>clear</tt> operations.  It does not
2911 <     * support the <tt>add</tt> or <tt>addAll</tt> operations.
2908 >     * mapping from this map, via the {@code Iterator.remove},
2909 >     * {@code Collection.remove}, {@code removeAll},
2910 >     * {@code retainAll}, and {@code clear} operations.  It does not
2911 >     * support the {@code add} or {@code addAll} operations.
2912       *
2913 <     * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
2913 >     * <p>The view's {@code iterator} is a "weakly consistent" iterator
2914       * that will never throw {@link ConcurrentModificationException},
2915       * and guarantees to traverse elements as they existed upon
2916       * construction of the iterator, and may (but is not guaranteed to)
2917       * reflect any modifications subsequent to construction.
2918 +     *
2919 +     * @return the collection view
2920       */
2921      public Collection<V> values() {
2922 <        Collection<V> vs = values;
2923 <        return (vs != null) ? vs : (values = new Values());
2922 >        ValuesView<K,V> vs = values;
2923 >        return (vs != null) ? vs : (values = new ValuesView<K,V>(this));
2924      }
2925  
2926      /**
# Line 1180 | Line 2928 | public class ConcurrentHashMap<K, V> ext
2928       * The set is backed by the map, so changes to the map are
2929       * reflected in the set, and vice-versa.  The set supports element
2930       * removal, which removes the corresponding mapping from the map,
2931 <     * via the <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
2932 <     * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
2933 <     * operations.  It does not support the <tt>add</tt> or
1186 <     * <tt>addAll</tt> operations.
2931 >     * via the {@code Iterator.remove}, {@code Set.remove},
2932 >     * {@code removeAll}, {@code retainAll}, and {@code clear}
2933 >     * operations.
2934       *
2935 <     * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
2935 >     * <p>The view's {@code iterator} is a "weakly consistent" iterator
2936       * that will never throw {@link ConcurrentModificationException},
2937       * and guarantees to traverse elements as they existed upon
2938       * construction of the iterator, and may (but is not guaranteed to)
2939       * reflect any modifications subsequent to construction.
2940 +     *
2941 +     * @return the set view
2942       */
2943      public Set<Map.Entry<K,V>> entrySet() {
2944 <        Set<Map.Entry<K,V>> es = entrySet;
2945 <        return (es != null) ? es : (entrySet = new EntrySet());
2944 >        EntrySetView<K,V> es = entrySet;
2945 >        return (es != null) ? es : (entrySet = new EntrySetView<K,V>(this));
2946      }
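     >
     >    /*
     >     * Usage sketch (illustrative only, for a hypothetical
     >     * ConcurrentHashMap<String,Long> map): because the returned view's
     >     * iterator is weakly consistent, a traversal may remove entries
     >     * without risking ConcurrentModificationException. The two-argument
     >     * remove avoids deleting an entry that was concurrently updated.
     >     *
     >     *   for (Map.Entry<String,Long> e : map.entrySet())
     >     *       if (e.getValue() == 0L)
     >     *           map.remove(e.getKey(), e.getValue());
     >     */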
2947  
2948      /**
# Line 1203 | Line 2952 | public class ConcurrentHashMap<K, V> ext
2952       * @see #keySet()
2953       */
2954      public Enumeration<K> keys() {
2955 <        return new KeyIterator();
2955 >        Node<K,V>[] t;
2956 >        int f = (t = table) == null ? 0 : t.length;
2957 >        return new KeyIterator<K,V>(t, f, 0, f, this);
2958      }
2959  
2960      /**
# Line 1213 | Line 2964 | public class ConcurrentHashMap<K, V> ext
2964       * @see #values()
2965       */
2966      public Enumeration<V> elements() {
2967 <        return new ValueIterator();
2967 >        Node<K,V>[] t;
2968 >        int f = (t = table) == null ? 0 : t.length;
2969 >        return new ValueIterator<K,V>(t, f, 0, f, this);
2970      }
2971  
2972 <    /* ---------------- Iterator Support -------------- */
2972 >    /**
2973 >     * Returns the hash code value for this {@link Map}, i.e.,
2974 >     * the sum of, for each key-value pair in the map,
2975 >     * {@code key.hashCode() ^ value.hashCode()}.
2976 >     *
2977 >     * @return the hash code value for this map
2978 >     */
2979 >    public int hashCode() {
2980 >        int h = 0;
2981 >        Node<K,V>[] t;
2982 >        if ((t = table) != null) {
2983 >            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
2984 >            for (Node<K,V> p; (p = it.advance()) != null; )
2985 >                h += p.key.hashCode() ^ p.val.hashCode();
2986 >        }
2987 >        return h;
2988 >    }
2989  
2990 <    abstract class HashIterator {
2991 <        int nextSegmentIndex;
2992 <        int nextTableIndex;
2993 <        HashEntry<K,V>[] currentTable;
2994 <        HashEntry<K, V> nextEntry;
2995 <        HashEntry<K, V> lastReturned;
2990 >    /**
2991 >     * Returns a string representation of this map.  The string
2992 >     * representation consists of a list of key-value mappings (in no
2993 >     * particular order) enclosed in braces ("{@code {}}").  Adjacent
2994 >     * mappings are separated by the characters {@code ", "} (comma
2995 >     * and space).  Each key-value mapping is rendered as the key
2996 >     * followed by an equals sign ("{@code =}") followed by the
2997 >     * associated value.
2998 >     *
2999 >     * @return a string representation of this map
3000 >     */
3001 >    public String toString() {
3002 >        Node<K,V>[] t;
3003 >        int f = (t = table) == null ? 0 : t.length;
3004 >        Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
3005 >        StringBuilder sb = new StringBuilder();
3006 >        sb.append('{');
3007 >        Node<K,V> p;
3008 >        if ((p = it.advance()) != null) {
3009 >            for (;;) {
3010 >                K k = (K)p.key;
3011 >                V v = p.val;
3012 >                sb.append(k == this ? "(this Map)" : k);
3013 >                sb.append('=');
3014 >                sb.append(v == this ? "(this Map)" : v);
3015 >                if ((p = it.advance()) == null)
3016 >                    break;
3017 >                sb.append(',').append(' ');
3018 >            }
3019 >        }
3020 >        return sb.append('}').toString();
3021 >    }
3022  
3023 <        HashIterator() {
3024 <            nextSegmentIndex = segments.length - 1;
3025 <            nextTableIndex = -1;
3026 <            advance();
3023 >    /**
3024 >     * Compares the specified object with this map for equality.
3025 >     * Returns {@code true} if the given object is a map with the same
3026 >     * mappings as this map.  This operation may return misleading
3027 >     * results if either map is concurrently modified during execution
3028 >     * of this method.
3029 >     *
3030 >     * @param o object to be compared for equality with this map
3031 >     * @return {@code true} if the specified object is equal to this map
3032 >     */
3033 >    public boolean equals(Object o) {
3034 >        if (o != this) {
3035 >            if (!(o instanceof Map))
3036 >                return false;
3037 >            Map<?,?> m = (Map<?,?>) o;
3038 >            Node<K,V>[] t;
3039 >            int f = (t = table) == null ? 0 : t.length;
3040 >            Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
3041 >            for (Node<K,V> p; (p = it.advance()) != null; ) {
3042 >                V val = p.val;
3043 >                Object v = m.get(p.key);
3044 >                if (v == null || (v != val && !v.equals(val)))
3045 >                    return false;
3046 >            }
3047 >            for (Map.Entry<?,?> e : m.entrySet()) {
3048 >                Object mk, mv, v;
3049 >                if ((mk = e.getKey()) == null ||
3050 >                    (mv = e.getValue()) == null ||
3051 >                    (v = internalGet(mk)) == null ||
3052 >                    (mv != v && !mv.equals(v)))
3053 >                    return false;
3054 >            }
3055 >        }
3056 >        return true;
3057 >    }
3058 >
3059 >    /* ---------------- Serialization Support -------------- */
3060 >
3061 >    /**
3062 >     * Stripped-down version of helper class used in previous version,
3063 >     * declared for the sake of serialization compatibility
3064 >     */
3065 >    static class Segment<K,V> extends ReentrantLock implements Serializable {
3066 >        private static final long serialVersionUID = 2249069246763182397L;
3067 >        final float loadFactor;
3068 >        Segment(float lf) { this.loadFactor = lf; }
3069 >    }
3070 >
3071 >    /**
3072 >     * Saves the state of the {@code ConcurrentHashMap} instance to a
3073 >     * stream (i.e., serializes it).
3074 >     * @param s the stream
3075 >     * @serialData
3076 >     * the key (Object) and value (Object)
3077 >     * for each key-value mapping, followed by a null pair.
3078 >     * The key-value mappings are emitted in no particular order.
3079 >     */
3080 >    private void writeObject(java.io.ObjectOutputStream s)
3081 >        throws java.io.IOException {
3082 >        // For serialization compatibility
3083 >        // Emulate segment calculation from previous version of this class
3084 >        int sshift = 0;
3085 >        int ssize = 1;
3086 >        while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
3087 >            ++sshift;
3088 >            ssize <<= 1;
3089          }
3090 +        int segmentShift = 32 - sshift;
3091 +        int segmentMask = ssize - 1;
3092 +        Segment<K,V>[] segments = (Segment<K,V>[])
3093 +            new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
3094 +        for (int i = 0; i < segments.length; ++i)
3095 +            segments[i] = new Segment<K,V>(LOAD_FACTOR);
3096 +        s.putFields().put("segments", segments);
3097 +        s.putFields().put("segmentShift", segmentShift);
3098 +        s.putFields().put("segmentMask", segmentMask);
3099 +        s.writeFields();
3100 +
3101 +        Node<K,V>[] t;
3102 +        if ((t = table) != null) {
3103 +            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
3104 +            for (Node<K,V> p; (p = it.advance()) != null; ) {
3105 +                s.writeObject(p.key);
3106 +                s.writeObject(p.val);
3107 +            }
3108 +        }
3109 +        s.writeObject(null);
3110 +        s.writeObject(null);
3111 +        segments = null; // throw away
3112 +    }
3113 +
3114 +    /**
3115 +     * Reconstitutes the instance from a stream (that is, deserializes it).
3116 +     * @param s the stream
3117 +     */
3118 +    private void readObject(java.io.ObjectInputStream s)
3119 +        throws java.io.IOException, ClassNotFoundException {
3120 +        s.defaultReadObject();
3121 +
3122 +        // Create all nodes, then place in table once size is known
3123 +        long size = 0L;
3124 +        Node<K,V> p = null;
3125 +        for (;;) {
3126 +            K k = (K) s.readObject();
3127 +            V v = (V) s.readObject();
3128 +            if (k != null && v != null) {
3129 +                int h = spread(k.hashCode());
3130 +                p = new Node<K,V>(h, k, v, p);
3131 +                ++size;
3132 +            }
3133 +            else
3134 +                break;
3135 +        }
3136 +        if (p != null) {
3137 +            boolean init = false;
3138 +            int n;
3139 +            if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
3140 +                n = MAXIMUM_CAPACITY;
3141 +            else {
3142 +                int sz = (int)size;
3143 +                n = tableSizeFor(sz + (sz >>> 1) + 1);
3144 +            }
3145 +            int sc = sizeCtl;
3146 +            boolean collide = false;
3147 +            if (n > sc &&
3148 +                U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
3149 +                try {
3150 +                    if (table == null) {
3151 +                        init = true;
3152 +                        Node<K,V>[] tab = (Node<K,V>[])new Node[n];
3153 +                        int mask = n - 1;
3154 +                        while (p != null) {
3155 +                            int j = p.hash & mask;
3156 +                            Node<K,V> next = p.next;
3157 +                            Node<K,V> q = p.next = tabAt(tab, j);
3158 +                            setTabAt(tab, j, p);
3159 +                            if (!collide && q != null && q.hash == p.hash)
3160 +                                collide = true;
3161 +                            p = next;
3162 +                        }
3163 +                        table = tab;
3164 +                        addCount(size, -1);
3165 +                        sc = n - (n >>> 2);
3166 +                    }
3167 +                } finally {
3168 +                    sizeCtl = sc;
3169 +                }
3170 +                if (collide) { // rescan and convert to TreeBins
3171 +                    Node<K,V>[] tab = table;
3172 +                    for (int i = 0; i < tab.length; ++i) {
3173 +                        int c = 0;
3174 +                        for (Node<K,V> e = tabAt(tab, i); e != null; e = e.next) {
3175 +                            if (++c > TREE_THRESHOLD &&
3176 +                                (e.key instanceof Comparable)) {
3177 +                                replaceWithTreeBin(tab, i, e.key);
3178 +                                break;
3179 +                            }
3180 +                        }
3181 +                    }
3182 +                }
3183 +            }
3184 +            if (!init) { // Can only happen if unsafely published.
3185 +                while (p != null) {
3186 +                    internalPut((K)p.key, p.val, false);
3187 +                    p = p.next;
3188 +                }
3189 +            }
3190 +        }
3191 +    }
3192 +
3193 +    // -------------------------------------------------------
3194 +
3195 +    // Overrides of other default Map methods
3196 +
3197 +    public void forEach(BiConsumer<? super K, ? super V> action) {
3198 +        if (action == null) throw new NullPointerException();
3199 +        Node<K,V>[] t;
3200 +        if ((t = table) != null) {
3201 +            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
3202 +            for (Node<K,V> p; (p = it.advance()) != null; ) {
3203 +                action.accept((K)p.key, p.val);
3204 +            }
3205 +        }
3206 +    }
3207 +
3208 +    public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
3209 +        if (function == null) throw new NullPointerException();
3210 +        Node<K,V>[] t;
3211 +        if ((t = table) != null) {
3212 +            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
3213 +            for (Node<K,V> p; (p = it.advance()) != null; ) {
3214 +                K k = (K)p.key;
3215 +                internalPut(k, function.apply(k, p.val), false);
3216 +            }
3217 +        }
3218 +    }
3219 +
3220 +    // -------------------------------------------------------
3221 +
3222 +    // Parallel bulk operations
3223 +
3224 +    /**
3225 +     * Computes the initial batch value for bulk tasks. The returned
3226 +     * value is approximately exp2 of the number of times (minus one)
3227 +     * to split the task in two before executing the leaf action. This
3228 +     * value is faster to compute and more convenient to use as a guide
3229 +     * to splitting than the depth would be, since it is used while
3230 +     * dividing by two anyway.
3231 +     */
3232 +    final int batchFor(long b) {
3233 +        long n;
3234 +        if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
3235 +            return 0;
3236 +        int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
3237 +        return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
3238 +    }
3239 +
3240 +    /**
3241 +     * Performs the given action for each (key, value).
3242 +     *
3243 +     * @param parallelismThreshold the (estimated) number of elements
3244 +     * needed for this operation to be executed in parallel
3245 +     * @param action the action
3246 +     */
3247 +    public void forEach(long parallelismThreshold,
3248 +                        BiConsumer<? super K,? super V> action) {
3249 +        if (action == null) throw new NullPointerException();
3250 +        new ForEachMappingTask<K,V>
3251 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3252 +             action).invoke();
3253 +    }
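     +
     +    /*
     +     * Usage sketch (illustrative only, for a hypothetical map): printing
     +     * every mapping, splitting into parallel subtasks only if the map
     +     * holds at least 1024 elements. A threshold of Long.MAX_VALUE forces
     +     * sequential execution; a value of 1 requests maximal parallelism.
     +     *
     +     *   map.forEach(1024L, (k, v) -> System.out.println(k + "=" + v));
     +     */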
3254 +
3255 +    /**
3256 +     * Performs the given action for each non-null transformation
3257 +     * of each (key, value).
3258 +     *
3259 +     * @param parallelismThreshold the (estimated) number of elements
3260 +     * needed for this operation to be executed in parallel
3261 +     * @param transformer a function returning the transformation
3262 +     * for an element, or null if there is no transformation (in
3263 +     * which case the action is not applied)
3264 +     * @param action the action
3265 +     */
3266 +    public <U> void forEach(long parallelismThreshold,
3267 +                            BiFunction<? super K, ? super V, ? extends U> transformer,
3268 +                            Consumer<? super U> action) {
3269 +        if (transformer == null || action == null)
3270 +            throw new NullPointerException();
3271 +        new ForEachTransformedMappingTask<K,V,U>
3272 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3273 +             transformer, action).invoke();
3274 +    }
3275 +
3276 +    /**
3277 +     * Returns a non-null result from applying the given search
3278 +     * function on each (key, value), or null if none.  Upon
3279 +     * success, further element processing is suppressed and the
3280 +     * results of any other parallel invocations of the search
3281 +     * function are ignored.
3282 +     *
3283 +     * @param parallelismThreshold the (estimated) number of elements
3284 +     * needed for this operation to be executed in parallel
3285 +     * @param searchFunction a function returning a non-null
3286 +     * result on success, else null
3287 +     * @return a non-null result from applying the given search
3288 +     * function on each (key, value), or null if none
3289 +     */
3290 +    public <U> U search(long parallelismThreshold,
3291 +                        BiFunction<? super K, ? super V, ? extends U> searchFunction) {
3292 +        if (searchFunction == null) throw new NullPointerException();
3293 +        return new SearchMappingsTask<K,V,U>
3294 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3295 +             searchFunction, new AtomicReference<U>()).invoke();
3296 +    }
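     +
     +    /*
     +     * Usage sketch (illustrative only, for a hypothetical
     +     * ConcurrentHashMap<String,Long> map and bound "limit"): finding any
     +     * key whose value exceeds the bound. Returning a non-null result
     +     * stops the scan; results of other in-flight invocations are ignored.
     +     *
     +     *   String hot = map.search(1024L, (k, v) -> v > limit ? k : null);
     +     */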
3297 +
3298 +    /**
3299 +     * Returns the result of accumulating the given transformation
3300 +     * of all (key, value) pairs using the given reducer to
3301 +     * combine values, or null if none.
3302 +     *
3303 +     * @param parallelismThreshold the (estimated) number of elements
3304 +     * needed for this operation to be executed in parallel
3305 +     * @param transformer a function returning the transformation
3306 +     * for an element, or null if there is no transformation (in
3307 +     * which case it is not combined)
3308 +     * @param reducer a commutative associative combining function
3309 +     * @return the result of accumulating the given transformation
3310 +     * of all (key, value) pairs
3311 +     */
3312 +    public <U> U reduce(long parallelismThreshold,
3313 +                        BiFunction<? super K, ? super V, ? extends U> transformer,
3314 +                        BiFunction<? super U, ? super U, ? extends U> reducer) {
3315 +        if (transformer == null || reducer == null)
3316 +            throw new NullPointerException();
3317 +        return new MapReduceMappingsTask<K,V,U>
3318 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3319 +             null, transformer, reducer).invoke();
3320 +    }
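     +
     +    /*
     +     * Usage sketch (illustrative only, for a hypothetical
     +     * ConcurrentHashMap<String,String> map): computing the maximum
     +     * combined key/value length. The transformer maps each pair to an
     +     * Integer, and Integer::max is a commutative associative reducer.
     +     *
     +     *   Integer maxLen = map.reduce(1024L,
     +     *       (k, v) -> k.length() + v.length(),
     +     *       Integer::max);
     +     */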
3321 +
3322 +    /**
3323 +     * Returns the result of accumulating the given transformation
3324 +     * of all (key, value) pairs using the given reducer to
3325 +     * combine values, and the given basis as an identity value.
3326 +     *
3327 +     * @param parallelismThreshold the (estimated) number of elements
3328 +     * needed for this operation to be executed in parallel
3329 +     * @param transformer a function returning the transformation
3330 +     * for an element
3331 +     * @param basis the identity (initial default value) for the reduction
3332 +     * @param reducer a commutative associative combining function
3333 +     * @return the result of accumulating the given transformation
3334 +     * of all (key, value) pairs
3335 +     */
3336 +    public double reduceToDouble(long parallelismThreshold,
3337 +                                 ToDoubleBiFunction<? super K, ? super V> transformer,
3338 +                                 double basis,
3339 +                                 DoubleBinaryOperator reducer) {
3340 +        if (transformer == null || reducer == null)
3341 +            throw new NullPointerException();
3342 +        return new MapReduceMappingsToDoubleTask<K,V>
3343 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3344 +             null, transformer, basis, reducer).invoke();
3345 +    }
3346 +
3347 +    /**
3348 +     * Returns the result of accumulating the given transformation
3349 +     * of all (key, value) pairs using the given reducer to
3350 +     * combine values, and the given basis as an identity value.
3351 +     *
3352 +     * @param parallelismThreshold the (estimated) number of elements
3353 +     * needed for this operation to be executed in parallel
3354 +     * @param transformer a function returning the transformation
3355 +     * for an element
3356 +     * @param basis the identity (initial default value) for the reduction
3357 +     * @param reducer a commutative associative combining function
3358 +     * @return the result of accumulating the given transformation
3359 +     * of all (key, value) pairs
3360 +     */
3361 +    public long reduceToLong(long parallelismThreshold,
3362 +                             ToLongBiFunction<? super K, ? super V> transformer,
3363 +                             long basis,
3364 +                             LongBinaryOperator reducer) {
3365 +        if (transformer == null || reducer == null)
3366 +            throw new NullPointerException();
3367 +        return new MapReduceMappingsToLongTask<K,V>
3368 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3369 +             null, transformer, basis, reducer).invoke();
3370 +    }
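     +
     +    /*
     +     * Usage sketch (illustrative only, for a hypothetical
     +     * ConcurrentHashMap<String,Long> map): summing all values without
     +     * boxing, using 0 as the identity for the sum reduction.
     +     *
     +     *   long total = map.reduceToLong(1024L, (k, v) -> v, 0L, Long::sum);
     +     */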
3371 +
3372 +    /**
3373 +     * Returns the result of accumulating the given transformation
3374 +     * of all (key, value) pairs using the given reducer to
3375 +     * combine values, and the given basis as an identity value.
3376 +     *
3377 +     * @param parallelismThreshold the (estimated) number of elements
3378 +     * needed for this operation to be executed in parallel
3379 +     * @param transformer a function returning the transformation
3380 +     * for an element
3381 +     * @param basis the identity (initial default value) for the reduction
3382 +     * @param reducer a commutative associative combining function
3383 +     * @return the result of accumulating the given transformation
3384 +     * of all (key, value) pairs
3385 +     */
3386 +    public int reduceToInt(long parallelismThreshold,
3387 +                           ToIntBiFunction<? super K, ? super V> transformer,
3388 +                           int basis,
3389 +                           IntBinaryOperator reducer) {
3390 +        if (transformer == null || reducer == null)
3391 +            throw new NullPointerException();
3392 +        return new MapReduceMappingsToIntTask<K,V>
3393 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3394 +             null, transformer, basis, reducer).invoke();
3395 +    }
3396 +
3397 +    /**
3398 +     * Performs the given action for each key.
3399 +     *
3400 +     * @param parallelismThreshold the (estimated) number of elements
3401 +     * needed for this operation to be executed in parallel
3402 +     * @param action the action
3403 +     */
3404 +    public void forEachKey(long parallelismThreshold,
3405 +                           Consumer<? super K> action) {
3406 +        if (action == null) throw new NullPointerException();
3407 +        new ForEachKeyTask<K,V>
3408 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3409 +             action).invoke();
3410 +    }
3411 +
3412 +    /**
3413 +     * Performs the given action for each non-null transformation
3414 +     * of each key.
3415 +     *
3416 +     * @param parallelismThreshold the (estimated) number of elements
3417 +     * needed for this operation to be executed in parallel
3418 +     * @param transformer a function returning the transformation
3419 +     * for an element, or null if there is no transformation (in
3420 +     * which case the action is not applied)
3421 +     * @param action the action
3422 +     */
3423 +    public <U> void forEachKey(long parallelismThreshold,
3424 +                               Function<? super K, ? extends U> transformer,
3425 +                               Consumer<? super U> action) {
3426 +        if (transformer == null || action == null)
3427 +            throw new NullPointerException();
3428 +        new ForEachTransformedKeyTask<K,V,U>
3429 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3430 +             transformer, action).invoke();
3431 +    }
3432 +
3433 +    /**
3434 +     * Returns a non-null result from applying the given search
3435 +     * function on each key, or null if none. Upon success,
3436 +     * further element processing is suppressed and the results of
3437 +     * any other parallel invocations of the search function are
3438 +     * ignored.
3439 +     *
3440 +     * @param parallelismThreshold the (estimated) number of elements
3441 +     * needed for this operation to be executed in parallel
3442 +     * @param searchFunction a function returning a non-null
3443 +     * result on success, else null
3444 +     * @return a non-null result from applying the given search
3445 +     * function on each key, or null if none
3446 +     */
3447 +    public <U> U searchKeys(long parallelismThreshold,
3448 +                            Function<? super K, ? extends U> searchFunction) {
3449 +        if (searchFunction == null) throw new NullPointerException();
3450 +        return new SearchKeysTask<K,V,U>
3451 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3452 +             searchFunction, new AtomicReference<U>()).invoke();
3453 +    }
3454 +
3455 +    /**
3456 +     * Returns the result of accumulating all keys using the given
3457 +     * reducer to combine values, or null if none.
3458 +     *
3459 +     * @param parallelismThreshold the (estimated) number of elements
3460 +     * needed for this operation to be executed in parallel
3461 +     * @param reducer a commutative associative combining function
3462 +     * @return the result of accumulating all keys using the given
3463 +     * reducer to combine values, or null if none
3464 +     */
3465 +    public K reduceKeys(long parallelismThreshold,
3466 +                        BiFunction<? super K, ? super K, ? extends K> reducer) {
3467 +        if (reducer == null) throw new NullPointerException();
3468 +        return new ReduceKeysTask<K,V>
3469 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3470 +             null, reducer).invoke();
3471 +    }
3472 +
3473 +    /**
3474 +     * Returns the result of accumulating the given transformation
3475 +     * of all keys using the given reducer to combine values, or
3476 +     * null if none.
3477 +     *
3478 +     * @param parallelismThreshold the (estimated) number of elements
3479 +     * needed for this operation to be executed in parallel
3480 +     * @param transformer a function returning the transformation
3481 +     * for an element, or null if there is no transformation (in
3482 +     * which case it is not combined)
3483 +     * @param reducer a commutative associative combining function
3484 +     * @return the result of accumulating the given transformation
3485 +     * of all keys
3486 +     */
3487 +    public <U> U reduceKeys(long parallelismThreshold,
3488 +                            Function<? super K, ? extends U> transformer,
3489 +                            BiFunction<? super U, ? super U, ? extends U> reducer) {
3490 +        if (transformer == null || reducer == null)
3491 +            throw new NullPointerException();
3492 +        return new MapReduceKeysTask<K,V,U>
3493 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3494 +             null, transformer, reducer).invoke();
3495 +    }
3496 +
3497 +    /**
3498 +     * Returns the result of accumulating the given transformation
3499 +     * of all keys using the given reducer to combine values, and
3500 +     * the given basis as an identity value.
3501 +     *
3502 +     * @param parallelismThreshold the (estimated) number of elements
3503 +     * needed for this operation to be executed in parallel
3504 +     * @param transformer a function returning the transformation
3505 +     * for an element
3506 +     * @param basis the identity (initial default value) for the reduction
3507 +     * @param reducer a commutative associative combining function
3508 +     * @return the result of accumulating the given transformation
3509 +     * of all keys
3510 +     */
3511 +    public double reduceKeysToDouble(long parallelismThreshold,
3512 +                                     ToDoubleFunction<? super K> transformer,
3513 +                                     double basis,
3514 +                                     DoubleBinaryOperator reducer) {
3515 +        if (transformer == null || reducer == null)
3516 +            throw new NullPointerException();
3517 +        return new MapReduceKeysToDoubleTask<K,V>
3518 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3519 +             null, transformer, basis, reducer).invoke();
3520 +    }
3521 +
3522 +    /**
3523 +     * Returns the result of accumulating the given transformation
3524 +     * of all keys using the given reducer to combine values, and
3525 +     * the given basis as an identity value.
3526 +     *
3527 +     * @param parallelismThreshold the (estimated) number of elements
3528 +     * needed for this operation to be executed in parallel
3529 +     * @param transformer a function returning the transformation
3530 +     * for an element
3531 +     * @param basis the identity (initial default value) for the reduction
3532 +     * @param reducer a commutative associative combining function
3533 +     * @return the result of accumulating the given transformation
3534 +     * of all keys
3535 +     */
3536 +    public long reduceKeysToLong(long parallelismThreshold,
3537 +                                 ToLongFunction<? super K> transformer,
3538 +                                 long basis,
3539 +                                 LongBinaryOperator reducer) {
3540 +        if (transformer == null || reducer == null)
3541 +            throw new NullPointerException();
3542 +        return new MapReduceKeysToLongTask<K,V>
3543 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3544 +             null, transformer, basis, reducer).invoke();
3545 +    }
3546 +
3547 +    /**
3548 +     * Returns the result of accumulating the given transformation
3549 +     * of all keys using the given reducer to combine values, and
3550 +     * the given basis as an identity value.
3551 +     *
3552 +     * @param parallelismThreshold the (estimated) number of elements
3553 +     * needed for this operation to be executed in parallel
3554 +     * @param transformer a function returning the transformation
3555 +     * for an element
3556 +     * @param basis the identity (initial default value) for the reduction
3557 +     * @param reducer a commutative associative combining function
3558 +     * @return the result of accumulating the given transformation
3559 +     * of all keys
3560 +     */
3561 +    public int reduceKeysToInt(long parallelismThreshold,
3562 +                               ToIntFunction<? super K> transformer,
3563 +                               int basis,
3564 +                               IntBinaryOperator reducer) {
3565 +        if (transformer == null || reducer == null)
3566 +            throw new NullPointerException();
3567 +        return new MapReduceKeysToIntTask<K,V>
3568 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3569 +             null, transformer, basis, reducer).invoke();
3570 +    }
3571 +
3572 +    /**
3573 +     * Performs the given action for each value.
3574 +     *
3575 +     * @param parallelismThreshold the (estimated) number of elements
3576 +     * needed for this operation to be executed in parallel
3577 +     * @param action the action
3578 +     */
3579 +    public void forEachValue(long parallelismThreshold,
3580 +                             Consumer<? super V> action) {
3581 +        if (action == null)
3582 +            throw new NullPointerException();
3583 +        new ForEachValueTask<K,V>
3584 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3585 +             action).invoke();
3586 +    }
3587 +
3588 +    /**
3589 +     * Performs the given action for each non-null transformation
3590 +     * of each value.
3591 +     *
3592 +     * @param parallelismThreshold the (estimated) number of elements
3593 +     * needed for this operation to be executed in parallel
3594 +     * @param transformer a function returning the transformation
3595 +     * for an element, or null if there is no transformation (in
3596 +     * which case the action is not applied)
3597 +     * @param action the action
3598 +     */
3599 +    public <U> void forEachValue(long parallelismThreshold,
3600 +                                 Function<? super V, ? extends U> transformer,
3601 +                                 Consumer<? super U> action) {
3602 +        if (transformer == null || action == null)
3603 +            throw new NullPointerException();
3604 +        new ForEachTransformedValueTask<K,V,U>
3605 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3606 +             transformer, action).invoke();
3607 +    }
3608 +
3609 +    /**
3610 +     * Returns a non-null result from applying the given search
3611 +     * function on each value, or null if none.  Upon success,
3612 +     * further element processing is suppressed and the results of
3613 +     * any other parallel invocations of the search function are
3614 +     * ignored.
3615 +     *
3616 +     * @param parallelismThreshold the (estimated) number of elements
3617 +     * needed for this operation to be executed in parallel
3618 +     * @param searchFunction a function returning a non-null
3619 +     * result on success, else null
3620 +     * @return a non-null result from applying the given search
3621 +     * function on each value, or null if none
3622 +     */
3623 +    public <U> U searchValues(long parallelismThreshold,
3624 +                              Function<? super V, ? extends U> searchFunction) {
3625 +        if (searchFunction == null) throw new NullPointerException();
3626 +        return new SearchValuesTask<K,V,U>
3627 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3628 +             searchFunction, new AtomicReference<U>()).invoke();
3629 +    }
3630 +
3631 +    /**
3632 +     * Returns the result of accumulating all values using the
3633 +     * given reducer to combine values, or null if none.
3634 +     *
3635 +     * @param parallelismThreshold the (estimated) number of elements
3636 +     * needed for this operation to be executed in parallel
3637 +     * @param reducer a commutative associative combining function
3638 +     * @return the result of accumulating all values
3639 +     */
3640 +    public V reduceValues(long parallelismThreshold,
3641 +                          BiFunction<? super V, ? super V, ? extends V> reducer) {
3642 +        if (reducer == null) throw new NullPointerException();
3643 +        return new ReduceValuesTask<K,V>
3644 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3645 +             null, reducer).invoke();
3646 +    }
3647 +
3648 +    /**
3649 +     * Returns the result of accumulating the given transformation
3650 +     * of all values using the given reducer to combine values, or
3651 +     * null if none.
3652 +     *
3653 +     * @param parallelismThreshold the (estimated) number of elements
3654 +     * needed for this operation to be executed in parallel
3655 +     * @param transformer a function returning the transformation
3656 +     * for an element, or null if there is no transformation (in
3657 +     * which case it is not combined)
3658 +     * @param reducer a commutative associative combining function
3659 +     * @return the result of accumulating the given transformation
3660 +     * of all values
3661 +     */
3662 +    public <U> U reduceValues(long parallelismThreshold,
3663 +                              Function<? super V, ? extends U> transformer,
3664 +                              BiFunction<? super U, ? super U, ? extends U> reducer) {
3665 +        if (transformer == null || reducer == null)
3666 +            throw new NullPointerException();
3667 +        return new MapReduceValuesTask<K,V,U>
3668 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3669 +             null, transformer, reducer).invoke();
3670 +    }
3671 +
3672 +    /**
3673 +     * Returns the result of accumulating the given transformation
3674 +     * of all values using the given reducer to combine values,
3675 +     * and the given basis as an identity value.
3676 +     *
3677 +     * @param parallelismThreshold the (estimated) number of elements
3678 +     * needed for this operation to be executed in parallel
3679 +     * @param transformer a function returning the transformation
3680 +     * for an element
3681 +     * @param basis the identity (initial default value) for the reduction
3682 +     * @param reducer a commutative associative combining function
3683 +     * @return the result of accumulating the given transformation
3684 +     * of all values
3685 +     */
3686 +    public double reduceValuesToDouble(long parallelismThreshold,
3687 +                                       ToDoubleFunction<? super V> transformer,
3688 +                                       double basis,
3689 +                                       DoubleBinaryOperator reducer) {
3690 +        if (transformer == null || reducer == null)
3691 +            throw new NullPointerException();
3692 +        return new MapReduceValuesToDoubleTask<K,V>
3693 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3694 +             null, transformer, basis, reducer).invoke();
3695 +    }
3696 +
3697 +    /**
3698 +     * Returns the result of accumulating the given transformation
3699 +     * of all values using the given reducer to combine values,
3700 +     * and the given basis as an identity value.
3701 +     *
3702 +     * @param parallelismThreshold the (estimated) number of elements
3703 +     * needed for this operation to be executed in parallel
3704 +     * @param transformer a function returning the transformation
3705 +     * for an element
3706 +     * @param basis the identity (initial default value) for the reduction
3707 +     * @param reducer a commutative associative combining function
3708 +     * @return the result of accumulating the given transformation
3709 +     * of all values
3710 +     */
3711 +    public long reduceValuesToLong(long parallelismThreshold,
3712 +                                   ToLongFunction<? super V> transformer,
3713 +                                   long basis,
3714 +                                   LongBinaryOperator reducer) {
3715 +        if (transformer == null || reducer == null)
3716 +            throw new NullPointerException();
3717 +        return new MapReduceValuesToLongTask<K,V>
3718 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3719 +             null, transformer, basis, reducer).invoke();
3720 +    }
3721 +
3722 +    /**
3723 +     * Returns the result of accumulating the given transformation
3724 +     * of all values using the given reducer to combine values,
3725 +     * and the given basis as an identity value.
3726 +     *
3727 +     * @param parallelismThreshold the (estimated) number of elements
3728 +     * needed for this operation to be executed in parallel
3729 +     * @param transformer a function returning the transformation
3730 +     * for an element
3731 +     * @param basis the identity (initial default value) for the reduction
3732 +     * @param reducer a commutative associative combining function
3733 +     * @return the result of accumulating the given transformation
3734 +     * of all values
3735 +     */
3736 +    public int reduceValuesToInt(long parallelismThreshold,
3737 +                                 ToIntFunction<? super V> transformer,
3738 +                                 int basis,
3739 +                                 IntBinaryOperator reducer) {
3740 +        if (transformer == null || reducer == null)
3741 +            throw new NullPointerException();
3742 +        return new MapReduceValuesToIntTask<K,V>
3743 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3744 +             null, transformer, basis, reducer).invoke();
3745 +    }
3746 +
3747 +    /**
3748 +     * Performs the given action for each entry.
3749 +     *
3750 +     * @param parallelismThreshold the (estimated) number of elements
3751 +     * needed for this operation to be executed in parallel
3752 +     * @param action the action
3753 +     */
3754 +    public void forEachEntry(long parallelismThreshold,
3755 +                             Consumer<? super Map.Entry<K,V>> action) {
3756 +        if (action == null) throw new NullPointerException();
3757 +        new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table,
3758 +                                  action).invoke();
3759 +    }
3760 +
3761 +    /**
3762 +     * Performs the given action for each non-null transformation
3763 +     * of each entry.
3764 +     *
3765 +     * @param parallelismThreshold the (estimated) number of elements
3766 +     * needed for this operation to be executed in parallel
3767 +     * @param transformer a function returning the transformation
3768 +     * for an element, or null if there is no transformation (in
3769 +     * which case the action is not applied)
3770 +     * @param action the action
3771 +     */
3772 +    public <U> void forEachEntry(long parallelismThreshold,
3773 +                                 Function<Map.Entry<K,V>, ? extends U> transformer,
3774 +                                 Consumer<? super U> action) {
3775 +        if (transformer == null || action == null)
3776 +            throw new NullPointerException();
3777 +        new ForEachTransformedEntryTask<K,V,U>
3778 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3779 +             transformer, action).invoke();
3780 +    }
3781 +
3782 +    /**
3783 +     * Returns a non-null result from applying the given search
3784 +     * function on each entry, or null if none.  Upon success,
3785 +     * further element processing is suppressed and the results of
3786 +     * any other parallel invocations of the search function are
3787 +     * ignored.
3788 +     *
3789 +     * @param parallelismThreshold the (estimated) number of elements
3790 +     * needed for this operation to be executed in parallel
3791 +     * @param searchFunction a function returning a non-null
3792 +     * result on success, else null
3793 +     * @return a non-null result from applying the given search
3794 +     * function on each entry, or null if none
3795 +     */
3796 +    public <U> U searchEntries(long parallelismThreshold,
3797 +                               Function<Map.Entry<K,V>, ? extends U> searchFunction) {
3798 +        if (searchFunction == null) throw new NullPointerException();
3799 +        return new SearchEntriesTask<K,V,U>
3800 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3801 +             searchFunction, new AtomicReference<U>()).invoke();
3802 +    }
3803 +
3804 +    /**
3805 +     * Returns the result of accumulating all entries using the
3806 +     * given reducer to combine values, or null if none.
3807 +     *
3808 +     * @param parallelismThreshold the (estimated) number of elements
3809 +     * needed for this operation to be executed in parallel
3810 +     * @param reducer a commutative associative combining function
3811 +     * @return the result of accumulating all entries
3812 +     */
3813 +    public Map.Entry<K,V> reduceEntries(long parallelismThreshold,
3814 +                                        BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
3815 +        if (reducer == null) throw new NullPointerException();
3816 +        return new ReduceEntriesTask<K,V>
3817 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3818 +             null, reducer).invoke();
3819 +    }
3820 +
3821 +    /**
3822 +     * Returns the result of accumulating the given transformation
3823 +     * of all entries using the given reducer to combine values,
3824 +     * or null if none.
3825 +     *
3826 +     * @param parallelismThreshold the (estimated) number of elements
3827 +     * needed for this operation to be executed in parallel
3828 +     * @param transformer a function returning the transformation
3829 +     * for an element, or null if there is no transformation (in
3830 +     * which case it is not combined)
3831 +     * @param reducer a commutative associative combining function
3832 +     * @return the result of accumulating the given transformation
3833 +     * of all entries
3834 +     */
3835 +    public <U> U reduceEntries(long parallelismThreshold,
3836 +                               Function<Map.Entry<K,V>, ? extends U> transformer,
3837 +                               BiFunction<? super U, ? super U, ? extends U> reducer) {
3838 +        if (transformer == null || reducer == null)
3839 +            throw new NullPointerException();
3840 +        return new MapReduceEntriesTask<K,V,U>
3841 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3842 +             null, transformer, reducer).invoke();
3843 +    }
3844 +
3845 +    /**
3846 +     * Returns the result of accumulating the given transformation
3847 +     * of all entries using the given reducer to combine values,
3848 +     * and the given basis as an identity value.
3849 +     *
3850 +     * @param parallelismThreshold the (estimated) number of elements
3851 +     * needed for this operation to be executed in parallel
3852 +     * @param transformer a function returning the transformation
3853 +     * for an element
3854 +     * @param basis the identity (initial default value) for the reduction
3855 +     * @param reducer a commutative associative combining function
3856 +     * @return the result of accumulating the given transformation
3857 +     * of all entries
3858 +     */
3859 +    public double reduceEntriesToDouble(long parallelismThreshold,
3860 +                                        ToDoubleFunction<Map.Entry<K,V>> transformer,
3861 +                                        double basis,
3862 +                                        DoubleBinaryOperator reducer) {
3863 +        if (transformer == null || reducer == null)
3864 +            throw new NullPointerException();
3865 +        return new MapReduceEntriesToDoubleTask<K,V>
3866 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3867 +             null, transformer, basis, reducer).invoke();
3868 +    }
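
A sketch (same hypothetical map): the basis must be the identity of the reducer, here 0.0 for addition, and it is also the result for an empty map:

    // Sum of all values as doubles; 0.0 for an empty map.
    double total = map.reduceEntriesToDouble(1L,
        e -> e.getValue().doubleValue(), 0.0, Double::sum);
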
3869 +
3870 +    /**
3871 +     * Returns the result of accumulating the given transformation
3872 +     * of all entries using the given reducer to combine values,
3873 +     * and the given basis as an identity value.
3874 +     *
3875 +     * @param parallelismThreshold the (estimated) number of elements
3876 +     * needed for this operation to be executed in parallel
3877 +     * @param transformer a function returning the transformation
3878 +     * for an element
3879 +     * @param basis the identity (initial default value) for the reduction
3880 +     * @param reducer a commutative associative combining function
3881 +     * @return the result of accumulating the given transformation
3882 +     * of all entries
3883 +     */
3884 +    public long reduceEntriesToLong(long parallelismThreshold,
3885 +                                    ToLongFunction<Map.Entry<K,V>> transformer,
3886 +                                    long basis,
3887 +                                    LongBinaryOperator reducer) {
3888 +        if (transformer == null || reducer == null)
3889 +            throw new NullPointerException();
3890 +        return new MapReduceEntriesToLongTask<K,V>
3891 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3892 +             null, transformer, basis, reducer).invoke();
3893 +    }
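
Because the primitive variants cannot skip elements by returning null, filtering is expressed in the transformer instead; a sketch (same hypothetical map) that counts matching entries:

    // Number of entries whose value exceeds 100.
    long matches = map.reduceEntriesToLong(1L,
        e -> e.getValue() > 100 ? 1L : 0L, 0L, Long::sum);
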
3894 +
3895 +    /**
3896 +     * Returns the result of accumulating the given transformation
3897 +     * of all entries using the given reducer to combine values,
3898 +     * and the given basis as an identity value.
3899 +     *
3900 +     * @param parallelismThreshold the (estimated) number of elements
3901 +     * needed for this operation to be executed in parallel
3902 +     * @param transformer a function returning the transformation
3903 +     * for an element
3904 +     * @param basis the identity (initial default value) for the reduction
3905 +     * @param reducer a commutative associative combining function
3906 +     * @return the result of accumulating the given transformation
3907 +     * of all entries
3908 +     */
3909 +    public int reduceEntriesToInt(long parallelismThreshold,
3910 +                                  ToIntFunction<Map.Entry<K,V>> transformer,
3911 +                                  int basis,
3912 +                                  IntBinaryOperator reducer) {
3913 +        if (transformer == null || reducer == null)
3914 +            throw new NullPointerException();
3915 +        return new MapReduceEntriesToIntTask<K,V>
3916 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3917 +             null, transformer, basis, reducer).invoke();
3918 +    }
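
A sketch (same hypothetical map) with a non-additive reducer; 0 is a safe identity for a maximum over non-negative lengths:

    // Length of the longest key, or 0 if the map is empty.
    int longest = map.reduceEntriesToInt(1L,
        e -> e.getKey().length(), 0, Math::max);
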
3919 +
3920 +
3921 +    /* ---------------- Views -------------- */
3922 +
3923 +    /**
3924 +     * Base class for views.
3925 +     */
3926 +    abstract static class CollectionView<K,V,E>
3927 +        implements Collection<E>, java.io.Serializable {
3928 +        private static final long serialVersionUID = 7249069246763182397L;
3929 +        final ConcurrentHashMap<K,V> map;
3930 +        CollectionView(ConcurrentHashMap<K,V> map)  { this.map = map; }
3931 +
3932 +        /**
3933 +         * Returns the map backing this view.
3934 +         *
3935 +         * @return the map backing this view
3936 +         */
3937 +        public ConcurrentHashMap<K,V> getMap() { return map; }
3938  
3939          /**
3940 <         * Sets nextEntry to first node of next non-empty table
3941 <         * (in backwards order, to simplify checks).
3940 >         * Removes all of the elements from this view, by removing all
3941 >         * the mappings from the map backing this view.
3942           */
3943 <        final void advance() {
3944 <            for (;;) {
3945 <                if (nextTableIndex >= 0) {
3946 <                    if ((nextEntry = entryAt(currentTable,
3947 <                                             nextTableIndex--)) != null)
3943 >        public final void clear()      { map.clear(); }
3944 >        public final int size()        { return map.size(); }
3945 >        public final boolean isEmpty() { return map.isEmpty(); }
3946 >
3947 >        // implementations below rely on concrete classes supplying these
3948 >        // abstract methods
3949 >        /**
3950 >         * Returns a "weakly consistent" iterator that will never
3951 >         * throw {@link ConcurrentModificationException}, that
3952 >         * guarantees to traverse elements as they existed upon
3953 >         * construction of the iterator, and that may (but is not
3954 >         * guaranteed to) reflect any modifications subsequent to
3955 >         * construction.
3956 >         */
3957 >        public abstract Iterator<E> iterator();
3958 >        public abstract boolean contains(Object o);
3959 >        public abstract boolean remove(Object o);
3960 >
3961 >        private static final String oomeMsg = "Required array size too large";
3962 >
3963 >        public final Object[] toArray() {
3964 >            long sz = map.mappingCount();
3965 >            if (sz > MAX_ARRAY_SIZE)
3966 >                throw new OutOfMemoryError(oomeMsg);
3967 >            int n = (int)sz;
3968 >            Object[] r = new Object[n];
3969 >            int i = 0;
3970 >            for (E e : this) {
3971 >                if (i == n) {
3972 >                    if (n >= MAX_ARRAY_SIZE)
3973 >                        throw new OutOfMemoryError(oomeMsg);
3974 >                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
3975 >                        n = MAX_ARRAY_SIZE;
3976 >                    else
3977 >                        n += (n >>> 1) + 1;
3978 >                    r = Arrays.copyOf(r, n);
3979 >                }
3980 >                r[i++] = e;
3981 >            }
3982 >            return (i == n) ? r : Arrays.copyOf(r, i);
3983 >        }
3984 >
3985 >        public final <T> T[] toArray(T[] a) {
3986 >            long sz = map.mappingCount();
3987 >            if (sz > MAX_ARRAY_SIZE)
3988 >                throw new OutOfMemoryError(oomeMsg);
3989 >            int m = (int)sz;
3990 >            T[] r = (a.length >= m) ? a :
3991 >                (T[])java.lang.reflect.Array
3992 >                .newInstance(a.getClass().getComponentType(), m);
3993 >            int n = r.length;
3994 >            int i = 0;
3995 >            for (E e : this) {
3996 >                if (i == n) {
3997 >                    if (n >= MAX_ARRAY_SIZE)
3998 >                        throw new OutOfMemoryError(oomeMsg);
3999 >                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
4000 >                        n = MAX_ARRAY_SIZE;
4001 >                    else
4002 >                        n += (n >>> 1) + 1;
4003 >                    r = Arrays.copyOf(r, n);
4004 >                }
4005 >                r[i++] = (T)e;
4006 >            }
4007 >            if (a == r && i < n) {
4008 >                r[i] = null; // null-terminate
4009 >                return r;
4010 >            }
4011 >            return (i == n) ? r : Arrays.copyOf(r, i);
4012 >        }
4013 >
4014 >        /**
4015 >         * Returns a string representation of this collection.
4016 >         * The string representation consists of the string representations
4017 >         * of the collection's elements in the order they are returned by
4018 >         * its iterator, enclosed in square brackets ({@code "[]"}).
4019 >         * Adjacent elements are separated by the characters {@code ", "}
4020 >         * (comma and space).  Elements are converted to strings as by
4021 >         * {@link String#valueOf(Object)}.
4022 >         *
4023 >         * @return a string representation of this collection
4024 >         */
4025 >        public final String toString() {
4026 >            StringBuilder sb = new StringBuilder();
4027 >            sb.append('[');
4028 >            Iterator<E> it = iterator();
4029 >            if (it.hasNext()) {
4030 >                for (;;) {
4031 >                    Object e = it.next();
4032 >                    sb.append(e == this ? "(this Collection)" : e);
4033 >                    if (!it.hasNext())
4034                          break;
4035 +                    sb.append(',').append(' ');
4036                  }
4037 <                else if (nextSegmentIndex >= 0) {
4038 <                    Segment<K,V> seg = segmentAt(segments, nextSegmentIndex--);
4039 <                    if (seg != null && (currentTable = seg.table) != null)
4040 <                        nextTableIndex = currentTable.length - 1;
4037 >            }
4038 >            return sb.append(']').toString();
4039 >        }
4040 >
4041 >        public final boolean containsAll(Collection<?> c) {
4042 >            if (c != this) {
4043 >                for (Object e : c) {
4044 >                    if (e == null || !contains(e))
4045 >                        return false;
4046                  }
1250                else
1251                    break;
4047              }
4048 +            return true;
4049          }
4050  
4051 <        final HashEntry<K,V> nextEntry() {
4052 <            HashEntry<K,V> e = nextEntry;
4053 <            if (e == null)
4054 <                throw new NoSuchElementException();
4055 <            lastReturned = e; // cannot assign until after null check
4056 <            if ((nextEntry = e.next) == null)
4057 <                advance();
4058 <            return e;
4051 >        public final boolean removeAll(Collection<?> c) {
4052 >            boolean modified = false;
4053 >            for (Iterator<E> it = iterator(); it.hasNext();) {
4054 >                if (c.contains(it.next())) {
4055 >                    it.remove();
4056 >                    modified = true;
4057 >                }
4058 >            }
4059 >            return modified;
4060          }
4061  
4062 <        public final boolean hasNext() { return nextEntry != null; }
4063 <        public final boolean hasMoreElements() { return nextEntry != null; }
4062 >        public final boolean retainAll(Collection<?> c) {
4063 >            boolean modified = false;
4064 >            for (Iterator<E> it = iterator(); it.hasNext();) {
4065 >                if (!c.contains(it.next())) {
4066 >                    it.remove();
4067 >                    modified = true;
4068 >                }
4069 >            }
4070 >            return modified;
4071 >        }
4072  
4073 <        public final void remove() {
4074 <            if (lastReturned == null)
4075 <                throw new IllegalStateException();
4076 <            ConcurrentHashMap.this.remove(lastReturned.key);
4077 <            lastReturned = null;
4073 >    }
4074 >
4075 >    /**
4076 >     * A view of a ConcurrentHashMap as a {@link Set} of keys, in
4077 >     * which additions may optionally be enabled by mapping to a
4078 >     * common value.  This class cannot be directly instantiated.
4079 >     * See {@link #keySet() keySet()},
4080 >     * {@link #keySet(Object) keySet(V)},
4081 >     * {@link #newKeySet() newKeySet()},
4082 >     * {@link #newKeySet(int) newKeySet(int)}.
4083 >     */
4084 >    public static class KeySetView<K,V> extends CollectionView<K,V,K>
4085 >        implements Set<K>, java.io.Serializable {
4086 >        private static final long serialVersionUID = 7249069246763182397L;
4087 >        private final V value;
4088 >        KeySetView(ConcurrentHashMap<K,V> map, V value) {  // non-public
4089 >            super(map);
4090 >            this.value = value;
4091 >        }
4092 >
4093 >        /**
4094 >         * Returns the default mapped value for additions,
4095 >         * or {@code null} if additions are not supported.
4096 >         *
4097 >         * @return the default mapped value for additions, or {@code null}
4098 >         * if not supported
4099 >         */
4100 >        public V getMappedValue() { return value; }
4101 >
4102 >        /**
4103 >         * {@inheritDoc}
4104 >         * @throws NullPointerException if the specified key is null
4105 >         */
4106 >        public boolean contains(Object o) { return map.containsKey(o); }
4107 >
4108 >        /**
4109 >         * Removes the key from this map view, by removing the key (and its
4110 >         * corresponding value) from the backing map.  This method does
4111 >         * nothing if the key is not in the map.
4112 >         *
4113 >         * @param  o the key to be removed from the backing map
4114 >         * @return {@code true} if the backing map contained the specified key
4115 >         * @throws NullPointerException if the specified key is null
4116 >         */
4117 >        public boolean remove(Object o) { return map.remove(o) != null; }
4118 >
4119 >        /**
4120 >         * @return an iterator over the keys of the backing map
4121 >         */
4122 >        public Iterator<K> iterator() {
4123 >            Node<K,V>[] t;
4124 >            ConcurrentHashMap<K,V> m = map;
4125 >            int f = (t = m.table) == null ? 0 : t.length;
4126 >            return new KeyIterator<K,V>(t, f, 0, f, m);
4127 >        }
4128 >
4129 >        /**
4130 >         * Adds the specified key to this set view by mapping the key to
4131 >         * the default mapped value in the backing map, if defined.
4132 >         *
4133 >         * @param e key to be added
4134 >         * @return {@code true} if this set changed as a result of the call
4135 >         * @throws NullPointerException if the specified key is null
4136 >         * @throws UnsupportedOperationException if no default mapped value
4137 >         * for additions was provided
4138 >         */
4139 >        public boolean add(K e) {
4140 >            V v;
4141 >            if ((v = value) == null)
4142 >                throw new UnsupportedOperationException();
4143 >            return map.internalPut(e, v, true) == null;
4144 >        }
4145 >
4146 >        /**
4147 >         * Adds all of the elements in the specified collection to this set,
4148 >         * as if by calling {@link #add} on each one.
4149 >         *
4150 >         * @param c the elements to be inserted into this set
4151 >         * @return {@code true} if this set changed as a result of the call
4152 >         * @throws NullPointerException if the collection or any of its
4153 >         * elements are {@code null}
4154 >         * @throws UnsupportedOperationException if no default mapped value
4155 >         * for additions was provided
4156 >         */
4157 >        public boolean addAll(Collection<? extends K> c) {
4158 >            boolean added = false;
4159 >            V v;
4160 >            if ((v = value) == null)
4161 >                throw new UnsupportedOperationException();
4162 >            for (K e : c) {
4163 >                if (map.internalPut(e, v, true) == null)
4164 >                    added = true;
4165 >            }
4166 >            return added;
4167 >        }
4168 >
4169 >        public int hashCode() {
4170 >            int h = 0;
4171 >            for (K e : this)
4172 >                h += e.hashCode();
4173 >            return h;
4174 >        }
4175 >
4176 >        public boolean equals(Object o) {
4177 >            Set<?> c;
4178 >            return ((o instanceof Set) &&
4179 >                    ((c = (Set<?>)o) == this ||
4180 >                     (containsAll(c) && c.containsAll(this))));
4181 >        }
4182 >
4183 >        public Spliterator<K> spliterator() {
4184 >            Node<K,V>[] t;
4185 >            ConcurrentHashMap<K,V> m = map;
4186 >            long n = m.sumCount();
4187 >            int f = (t = m.table) == null ? 0 : t.length;
4188 >            return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
4189 >        }
4190 >
4191 >        public void forEach(Consumer<? super K> action) {
4192 >            if (action == null) throw new NullPointerException();
4193 >            Node<K,V>[] t;
4194 >            if ((t = map.table) != null) {
4195 >                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4196 >                for (Node<K,V> p; (p = it.advance()) != null; )
4197 >                    action.accept((K)p.key);
4198 >            }
4199          }
4200      }
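
A usage sketch for the key-set views (hypothetical names, not from the source): newKeySet() yields a standalone concurrent set, while keySet(V) adapts an existing map so that additions write the given default value through to it:

    // Standalone concurrent set backed by a ConcurrentHashMap.
    Set<String> seen = ConcurrentHashMap.newKeySet();
    seen.add("alpha");                // "alpha" now maps to an internal value

    // Set view over an existing map; add() writes TRUE through to it.
    ConcurrentHashMap<String,Boolean> flags = new ConcurrentHashMap<>();
    Set<String> keys = flags.keySet(Boolean.TRUE);
    keys.add("beta");                 // flags.get("beta") == Boolean.TRUE
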
4201  
4202 <    final class KeyIterator
4203 <        extends HashIterator
4204 <        implements Iterator<K>, Enumeration<K>
4205 <    {
4206 <        public final K next()        { return super.nextEntry().key; }
4207 <        public final K nextElement() { return super.nextEntry().key; }
4208 <    }
4209 <
4210 <    final class ValueIterator
4211 <        extends HashIterator
4212 <        implements Iterator<V>, Enumeration<V>
4213 <    {
4214 <        public final V next()        { return super.nextEntry().value; }
4215 <        public final V nextElement() { return super.nextEntry().value; }
4202 >    /**
4203 >     * A view of a ConcurrentHashMap as a {@link Collection} of
4204 >     * values, in which additions are disabled. This class cannot be
4205 >     * directly instantiated. See {@link #values()}.
4206 >     */
4207 >    static final class ValuesView<K,V> extends CollectionView<K,V,V>
4208 >        implements Collection<V>, java.io.Serializable {
4209 >        private static final long serialVersionUID = 2249069246763182397L;
4210 >        ValuesView(ConcurrentHashMap<K,V> map) { super(map); }
4211 >        public final boolean contains(Object o) {
4212 >            return map.containsValue(o);
4213 >        }
4214 >
4215 >        public final boolean remove(Object o) {
4216 >            if (o != null) {
4217 >                for (Iterator<V> it = iterator(); it.hasNext();) {
4218 >                    if (o.equals(it.next())) {
4219 >                        it.remove();
4220 >                        return true;
4221 >                    }
4222 >                }
4223 >            }
4224 >            return false;
4225 >        }
4226 >
4227 >        public final Iterator<V> iterator() {
4228 >            ConcurrentHashMap<K,V> m = map;
4229 >            Node<K,V>[] t;
4230 >            int f = (t = m.table) == null ? 0 : t.length;
4231 >            return new ValueIterator<K,V>(t, f, 0, f, m);
4232 >        }
4233 >
4234 >        public final boolean add(V e) {
4235 >            throw new UnsupportedOperationException();
4236 >        }
4237 >        public final boolean addAll(Collection<? extends V> c) {
4238 >            throw new UnsupportedOperationException();
4239 >        }
4240 >
4241 >        public Spliterator<V> spliterator() {
4242 >            Node<K,V>[] t;
4243 >            ConcurrentHashMap<K,V> m = map;
4244 >            long n = m.sumCount();
4245 >            int f = (t = m.table) == null ? 0 : t.length;
4246 >            return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
4247 >        }
4248 >
4249 >        public void forEach(Consumer<? super V> action) {
4250 >            if (action == null) throw new NullPointerException();
4251 >            Node<K,V>[] t;
4252 >            if ((t = map.table) != null) {
4253 >                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4254 >                for (Node<K,V> p; (p = it.advance()) != null; )
4255 >                    action.accept(p.val);
4256 >            }
4257 >        }
4258      }
4259  
4260      /**
4261 <     * Custom Entry class used by EntryIterator.next(), that relays
4262 <     * setValue changes to the underlying map.
4263 <     */
4264 <    final class WriteThroughEntry
4265 <        extends AbstractMap.SimpleEntry<K,V>
4266 <    {
4267 <        WriteThroughEntry(K k, V v) {
4268 <            super(k,v);
4261 >     * A view of a ConcurrentHashMap as a {@link Set} of (key, value)
4262 >     * entries.  This class cannot be directly instantiated. See
4263 >     * {@link #entrySet()}.
4264 >     */
4265 >    static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>>
4266 >        implements Set<Map.Entry<K,V>>, java.io.Serializable {
4267 >        private static final long serialVersionUID = 2249069246763182397L;
4268 >        EntrySetView(ConcurrentHashMap<K,V> map) { super(map); }
4269 >
4270 >        public boolean contains(Object o) {
4271 >            Object k, v, r; Map.Entry<?,?> e;
4272 >            return ((o instanceof Map.Entry) &&
4273 >                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4274 >                    (r = map.get(k)) != null &&
4275 >                    (v = e.getValue()) != null &&
4276 >                    (v == r || v.equals(r)));
4277 >        }
4278 >
4279 >        public boolean remove(Object o) {
4280 >            Object k, v; Map.Entry<?,?> e;
4281 >            return ((o instanceof Map.Entry) &&
4282 >                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4283 >                    (v = e.getValue()) != null &&
4284 >                    map.remove(k, v));
4285          }
4286  
4287          /**
4288 <         * Sets our entry's value and writes through to the map. The
1305 <         * value to return is somewhat arbitrary here. Since a
1306 <         * WriteThroughEntry does not necessarily track asynchronous
1307 <         * changes, the most recent "previous" value could be
1308 <         * different from what we return (or could even have been
1309 <         * removed in which case the put will re-establish). We do not
1310 <         * and cannot guarantee more.
4288 >         * @return an iterator over the entries of the backing map
4289           */
4290 <        public V setValue(V value) {
4291 <            if (value == null) throw new NullPointerException();
4292 <            V v = super.setValue(value);
4293 <            ConcurrentHashMap.this.put(getKey(), value);
4294 <            return v;
4290 >        public Iterator<Map.Entry<K,V>> iterator() {
4291 >            ConcurrentHashMap<K,V> m = map;
4292 >            Node<K,V>[] t;
4293 >            int f = (t = m.table) == null ? 0 : t.length;
4294 >            return new EntryIterator<K,V>(t, f, 0, f, m);
4295 >        }
4296 >
4297 >        public boolean add(Entry<K,V> e) {
4298 >            return map.internalPut(e.getKey(), e.getValue(), false) == null;
4299          }
4300 +
4301 +        public boolean addAll(Collection<? extends Entry<K,V>> c) {
4302 +            boolean added = false;
4303 +            for (Entry<K,V> e : c) {
4304 +                if (add(e))
4305 +                    added = true;
4306 +            }
4307 +            return added;
4308 +        }
4309 +
4310 +        public final int hashCode() {
4311 +            int h = 0;
4312 +            Node<K,V>[] t;
4313 +            if ((t = map.table) != null) {
4314 +                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4315 +                for (Node<K,V> p; (p = it.advance()) != null; ) {
4316 +                    h += p.hashCode();
4317 +                }
4318 +            }
4319 +            return h;
4320 +        }
4321 +
4322 +        public final boolean equals(Object o) {
4323 +            Set<?> c;
4324 +            return ((o instanceof Set) &&
4325 +                    ((c = (Set<?>)o) == this ||
4326 +                     (containsAll(c) && c.containsAll(this))));
4327 +        }
4328 +
4329 +        public Spliterator<Map.Entry<K,V>> spliterator() {
4330 +            Node<K,V>[] t;
4331 +            ConcurrentHashMap<K,V> m = map;
4332 +            long n = m.sumCount();
4333 +            int f = (t = m.table) == null ? 0 : t.length;
4334 +            return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m);
4335 +        }
4336 +
4337 +        public void forEach(Consumer<? super Map.Entry<K,V>> action) {
4338 +            if (action == null) throw new NullPointerException();
4339 +            Node<K,V>[] t;
4340 +            if ((t = map.table) != null) {
4341 +                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4342 +                for (Node<K,V> p; (p = it.advance()) != null; )
4343 +                    action.accept(new MapEntry<K,V>((K)p.key, p.val, map));
4344 +            }
4345 +        }
4346 +
4347      }
4348  
4349 <    final class EntryIterator
4350 <        extends HashIterator
4351 <        implements Iterator<Entry<K,V>>
4352 <    {
4353 <        public Map.Entry<K,V> next() {
4354 <            HashEntry<K,V> e = super.nextEntry();
4355 <            return new WriteThroughEntry(e.key, e.value);
4349 >    // -------------------------------------------------------
4350 >
4351 >    /**
4352 >     * Base class for bulk tasks. Repeats some fields and code from
4353 >     * class Traverser, because we need to subclass CountedCompleter.
4354 >     */
4355 >    abstract static class BulkTask<K,V,R> extends CountedCompleter<R> {
4356 >        Node<K,V>[] tab;        // same as Traverser
4357 >        Node<K,V> next;
4358 >        int index;
4359 >        int baseIndex;
4360 >        int baseLimit;
4361 >        final int baseSize;
4362 >        int batch;              // split control
4363 >
4364 >        BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) {
4365 >            super(par);
4366 >            this.batch = b;
4367 >            this.index = this.baseIndex = i;
4368 >            if ((this.tab = t) == null)
4369 >                this.baseSize = this.baseLimit = 0;
4370 >            else if (par == null)
4371 >                this.baseSize = this.baseLimit = t.length;
4372 >            else {
4373 >                this.baseLimit = f;
4374 >                this.baseSize = par.baseSize;
4375 >            }
4376 >        }
4377 >
4378 >        /**
4379 >         * Same as the Traverser version.
4380 >         */
4381 >        final Node<K,V> advance() {
4382 >            Node<K,V> e;
4383 >            if ((e = next) != null)
4384 >                e = e.next;
4385 >            for (;;) {
4386 >                Node<K,V>[] t; int i, n; Object ek;
4387 >                if (e != null)
4388 >                    return next = e;
4389 >                if (baseIndex >= baseLimit || (t = tab) == null ||
4390 >                    (n = t.length) <= (i = index) || i < 0)
4391 >                    return next = null;
4392 >                if ((e = tabAt(t, index)) != null && e.hash < 0) {
4393 >                    if ((ek = e.key) instanceof TreeBin)
4394 >                        e = ((TreeBin<K,V>)ek).first;
4395 >                    else {
4396 >                        tab = (Node<K,V>[])ek;
4397 >                        e = null;
4398 >                        continue;
4399 >                    }
4400 >                }
4401 >                if ((index += baseSize) >= n)
4402 >                    index = ++baseIndex;
4403 >            }
4404          }
4405      }
4406  
4407 <    final class KeySet extends AbstractSet<K> {
4408 <        public Iterator<K> iterator() {
4409 <            return new KeyIterator();
4407 >    /*
4408 >     * Task classes. Coded in a regular but ugly format/style to
4409 >     * simplify checks that each variant differs in the right way from
4410 >     * the others. The null screenings exist because compilers cannot
4411 >     * tell that we've already null-checked task arguments, so each method
4412 >     * begins with simple hoisted null screenings that sidestep convoluted traps.
4413 >     */
4414 >
4415 >    static final class ForEachKeyTask<K,V>
4416 >        extends BulkTask<K,V,Void> {
4417 >        final Consumer<? super K> action;
4418 >        ForEachKeyTask
4419 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4420 >             Consumer<? super K> action) {
4421 >            super(p, b, i, f, t);
4422 >            this.action = action;
4423 >        }
4424 >        public final void compute() {
4425 >            final Consumer<? super K> action;
4426 >            if ((action = this.action) != null) {
4427 >                for (int i = baseIndex, f, h; batch > 0 &&
4428 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4429 >                    addToPendingCount(1);
4430 >                    new ForEachKeyTask<K,V>
4431 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
4432 >                         action).fork();
4433 >                }
4434 >                for (Node<K,V> p; (p = advance()) != null;)
4435 >                    action.accept((K)p.key);
4436 >                propagateCompletion();
4437 >            }
4438          }
4439 <        public int size() {
4440 <            return ConcurrentHashMap.this.size();
4439 >    }
4440 >
4441 >    static final class ForEachValueTask<K,V>
4442 >        extends BulkTask<K,V,Void> {
4443 >        final Consumer<? super V> action;
4444 >        ForEachValueTask
4445 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4446 >             Consumer<? super V> action) {
4447 >            super(p, b, i, f, t);
4448 >            this.action = action;
4449 >        }
4450 >        public final void compute() {
4451 >            final Consumer<? super V> action;
4452 >            if ((action = this.action) != null) {
4453 >                for (int i = baseIndex, f, h; batch > 0 &&
4454 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4455 >                    addToPendingCount(1);
4456 >                    new ForEachValueTask<K,V>
4457 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
4458 >                         action).fork();
4459 >                }
4460 >                for (Node<K,V> p; (p = advance()) != null;)
4461 >                    action.accept(p.val);
4462 >                propagateCompletion();
4463 >            }
4464          }
4465 <        public boolean isEmpty() {
4466 <            return ConcurrentHashMap.this.isEmpty();
4465 >    }
4466 >
4467 >    static final class ForEachEntryTask<K,V>
4468 >        extends BulkTask<K,V,Void> {
4469 >        final Consumer<? super Entry<K,V>> action;
4470 >        ForEachEntryTask
4471 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4472 >             Consumer<? super Entry<K,V>> action) {
4473 >            super(p, b, i, f, t);
4474 >            this.action = action;
4475 >        }
4476 >        public final void compute() {
4477 >            final Consumer<? super Entry<K,V>> action;
4478 >            if ((action = this.action) != null) {
4479 >                for (int i = baseIndex, f, h; batch > 0 &&
4480 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4481 >                    addToPendingCount(1);
4482 >                    new ForEachEntryTask<K,V>
4483 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
4484 >                         action).fork();
4485 >                }
4486 >                for (Node<K,V> p; (p = advance()) != null; )
4487 >                    action.accept(p);
4488 >                propagateCompletion();
4489 >            }
4490          }
4491 <        public boolean contains(Object o) {
4492 <            return ConcurrentHashMap.this.containsKey(o);
4491 >    }
4492 >
4493 >    static final class ForEachMappingTask<K,V>
4494 >        extends BulkTask<K,V,Void> {
4495 >        final BiConsumer<? super K, ? super V> action;
4496 >        ForEachMappingTask
4497 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4498 >             BiConsumer<? super K,? super V> action) {
4499 >            super(p, b, i, f, t);
4500 >            this.action = action;
4501 >        }
4502 >        public final void compute() {
4503 >            final BiConsumer<? super K, ? super V> action;
4504 >            if ((action = this.action) != null) {
4505 >                for (int i = baseIndex, f, h; batch > 0 &&
4506 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4507 >                    addToPendingCount(1);
4508 >                    new ForEachMappingTask<K,V>
4509 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
4510 >                         action).fork();
4511 >                }
4512 >                for (Node<K,V> p; (p = advance()) != null; )
4513 >                    action.accept((K)p.key, p.val);
4514 >                propagateCompletion();
4515 >            }
4516          }
4517 <        public boolean remove(Object o) {
4518 <            return ConcurrentHashMap.this.remove(o) != null;
4517 >    }
4518 >
4519 >    static final class ForEachTransformedKeyTask<K,V,U>
4520 >        extends BulkTask<K,V,Void> {
4521 >        final Function<? super K, ? extends U> transformer;
4522 >        final Consumer<? super U> action;
4523 >        ForEachTransformedKeyTask
4524 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4525 >             Function<? super K, ? extends U> transformer, Consumer<? super U> action) {
4526 >            super(p, b, i, f, t);
4527 >            this.transformer = transformer; this.action = action;
4528 >        }
4529 >        public final void compute() {
4530 >            final Function<? super K, ? extends U> transformer;
4531 >            final Consumer<? super U> action;
4532 >            if ((transformer = this.transformer) != null &&
4533 >                (action = this.action) != null) {
4534 >                for (int i = baseIndex, f, h; batch > 0 &&
4535 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4536 >                    addToPendingCount(1);
4537 >                    new ForEachTransformedKeyTask<K,V,U>
4538 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
4539 >                         transformer, action).fork();
4540 >                }
4541 >                for (Node<K,V> p; (p = advance()) != null; ) {
4542 >                    U u;
4543 >                    if ((u = transformer.apply((K)p.key)) != null)
4544 >                        action.accept(u);
4545 >                }
4546 >                propagateCompletion();
4547 >            }
4548          }
4549 <        public void clear() {
4550 <            ConcurrentHashMap.this.clear();
4549 >    }
4550 >
4551 >    static final class ForEachTransformedValueTask<K,V,U>
4552 >        extends BulkTask<K,V,Void> {
4553 >        final Function<? super V, ? extends U> transformer;
4554 >        final Consumer<? super U> action;
4555 >        ForEachTransformedValueTask
4556 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4557 >             Function<? super V, ? extends U> transformer, Consumer<? super U> action) {
4558 >            super(p, b, i, f, t);
4559 >            this.transformer = transformer; this.action = action;
4560 >        }
4561 >        public final void compute() {
4562 >            final Function<? super V, ? extends U> transformer;
4563 >            final Consumer<? super U> action;
4564 >            if ((transformer = this.transformer) != null &&
4565 >                (action = this.action) != null) {
4566 >                for (int i = baseIndex, f, h; batch > 0 &&
4567 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4568 >                    addToPendingCount(1);
4569 >                    new ForEachTransformedValueTask<K,V,U>
4570 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
4571 >                         transformer, action).fork();
4572 >                }
4573 >                for (Node<K,V> p; (p = advance()) != null; ) {
4574 >                    U u;
4575 >                    if ((u = transformer.apply(p.val)) != null)
4576 >                        action.accept(u);
4577 >                }
4578 >                propagateCompletion();
4579 >            }
4580          }
4581      }
4582  
4583 <    final class Values extends AbstractCollection<V> {
4584 <        public Iterator<V> iterator() {
4585 <            return new ValueIterator();
4583 >    static final class ForEachTransformedEntryTask<K,V,U>
4584 >        extends BulkTask<K,V,Void> {
4585 >        final Function<Map.Entry<K,V>, ? extends U> transformer;
4586 >        final Consumer<? super U> action;
4587 >        ForEachTransformedEntryTask
4588 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4589 >             Function<Map.Entry<K,V>, ? extends U> transformer, Consumer<? super U> action) {
4590 >            super(p, b, i, f, t);
4591 >            this.transformer = transformer; this.action = action;
4592 >        }
4593 >        public final void compute() {
4594 >            final Function<Map.Entry<K,V>, ? extends U> transformer;
4595 >            final Consumer<? super U> action;
4596 >            if ((transformer = this.transformer) != null &&
4597 >                (action = this.action) != null) {
4598 >                for (int i = baseIndex, f, h; batch > 0 &&
4599 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4600 >                    addToPendingCount(1);
4601 >                    new ForEachTransformedEntryTask<K,V,U>
4602 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
4603 >                         transformer, action).fork();
4604 >                }
4605 >                for (Node<K,V> p; (p = advance()) != null; ) {
4606 >                    U u;
4607 >                    if ((u = transformer.apply(p)) != null)
4608 >                        action.accept(u);
4609 >                }
4610 >                propagateCompletion();
4611 >            }
4612          }
4613 <        public int size() {
4614 <            return ConcurrentHashMap.this.size();
4613 >    }
4614 >
4615 >    static final class ForEachTransformedMappingTask<K,V,U>
4616 >        extends BulkTask<K,V,Void> {
4617 >        final BiFunction<? super K, ? super V, ? extends U> transformer;
4618 >        final Consumer<? super U> action;
4619 >        ForEachTransformedMappingTask
4620 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4621 >             BiFunction<? super K, ? super V, ? extends U> transformer,
4622 >             Consumer<? super U> action) {
4623 >            super(p, b, i, f, t);
4624 >            this.transformer = transformer; this.action = action;
4625 >        }
4626 >        public final void compute() {
4627 >            final BiFunction<? super K, ? super V, ? extends U> transformer;
4628 >            final Consumer<? super U> action;
4629 >            if ((transformer = this.transformer) != null &&
4630 >                (action = this.action) != null) {
4631 >                for (int i = baseIndex, f, h; batch > 0 &&
4632 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4633 >                    addToPendingCount(1);
4634 >                    new ForEachTransformedMappingTask<K,V,U>
4635 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
4636 >                         transformer, action).fork();
4637 >                }
4638 >                for (Node<K,V> p; (p = advance()) != null; ) {
4639 >                    U u;
4640 >                    if ((u = transformer.apply((K)p.key, p.val)) != null)
4641 >                        action.accept(u);
4642 >                }
4643 >                propagateCompletion();
4644 >            }
4645          }
4646 <        public boolean isEmpty() {
4647 <            return ConcurrentHashMap.this.isEmpty();
4646 >    }
4647 >
4648 >    static final class SearchKeysTask<K,V,U>
4649 >        extends BulkTask<K,V,U> {
4650 >        final Function<? super K, ? extends U> searchFunction;
4651 >        final AtomicReference<U> result;
4652 >        SearchKeysTask
4653 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4654 >             Function<? super K, ? extends U> searchFunction,
4655 >             AtomicReference<U> result) {
4656 >            super(p, b, i, f, t);
4657 >            this.searchFunction = searchFunction; this.result = result;
4658 >        }
4659 >        public final U getRawResult() { return result.get(); }
4660 >        public final void compute() {
4661 >            final Function<? super K, ? extends U> searchFunction;
4662 >            final AtomicReference<U> result;
4663 >            if ((searchFunction = this.searchFunction) != null &&
4664 >                (result = this.result) != null) {
4665 >                for (int i = baseIndex, f, h; batch > 0 &&
4666 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4667 >                    if (result.get() != null)
4668 >                        return;
4669 >                    addToPendingCount(1);
4670 >                    new SearchKeysTask<K,V,U>
4671 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
4672 >                         searchFunction, result).fork();
4673 >                }
4674 >                while (result.get() == null) {
4675 >                    U u;
4676 >                    Node<K,V> p;
4677 >                    if ((p = advance()) == null) {
4678 >                        propagateCompletion();
4679 >                        break;
4680 >                    }
4681 >                    if ((u = searchFunction.apply((K)p.key)) != null) {
4682 >                        if (result.compareAndSet(null, u))
4683 >                            quietlyCompleteRoot();
4684 >                        break;
4685 >                    }
4686 >                }
4687 >            }
4688          }
4689 <        public boolean contains(Object o) {
4690 <            return ConcurrentHashMap.this.containsValue(o);
4689 >    }
4690 >
4691 >    static final class SearchValuesTask<K,V,U>
4692 >        extends BulkTask<K,V,U> {
4693 >        final Function<? super V, ? extends U> searchFunction;
4694 >        final AtomicReference<U> result;
4695 >        SearchValuesTask
4696 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4697 >             Function<? super V, ? extends U> searchFunction,
4698 >             AtomicReference<U> result) {
4699 >            super(p, b, i, f, t);
4700 >            this.searchFunction = searchFunction; this.result = result;
4701 >        }
4702 >        public final U getRawResult() { return result.get(); }
4703 >        public final void compute() {
4704 >            final Function<? super V, ? extends U> searchFunction;
4705 >            final AtomicReference<U> result;
4706 >            if ((searchFunction = this.searchFunction) != null &&
4707 >                (result = this.result) != null) {
4708 >                for (int i = baseIndex, f, h; batch > 0 &&
4709 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4710 >                    if (result.get() != null)
4711 >                        return;
4712 >                    addToPendingCount(1);
4713 >                    new SearchValuesTask<K,V,U>
4714 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
4715 >                         searchFunction, result).fork();
4716 >                }
4717 >                while (result.get() == null) {
4718 >                    U u;
4719 >                    Node<K,V> p;
4720 >                    if ((p = advance()) == null) {
4721 >                        propagateCompletion();
4722 >                        break;
4723 >                    }
4724 >                    if ((u = searchFunction.apply(p.val)) != null) {
4725 >                        if (result.compareAndSet(null, u))
4726 >                            quietlyCompleteRoot();
4727 >                        break;
4728 >                    }
4729 >                }
4730 >            }
4731          }
4732 <        public void clear() {
4733 <            ConcurrentHashMap.this.clear();
4732 >    }
4733 >
4734 >    static final class SearchEntriesTask<K,V,U>
4735 >        extends BulkTask<K,V,U> {
4736 >        final Function<Entry<K,V>, ? extends U> searchFunction;
4737 >        final AtomicReference<U> result;
4738 >        SearchEntriesTask
4739 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4740 >             Function<Entry<K,V>, ? extends U> searchFunction,
4741 >             AtomicReference<U> result) {
4742 >            super(p, b, i, f, t);
4743 >            this.searchFunction = searchFunction; this.result = result;
4744 >        }
4745 >        public final U getRawResult() { return result.get(); }
4746 >        public final void compute() {
4747 >            final Function<Entry<K,V>, ? extends U> searchFunction;
4748 >            final AtomicReference<U> result;
4749 >            if ((searchFunction = this.searchFunction) != null &&
4750 >                (result = this.result) != null) {
4751 >                for (int i = baseIndex, f, h; batch > 0 &&
4752 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4753 >                    if (result.get() != null)
4754 >                        return;
4755 >                    addToPendingCount(1);
4756 >                    new SearchEntriesTask<K,V,U>
4757 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
4758 >                         searchFunction, result).fork();
4759 >                }
4760 >                while (result.get() == null) {
4761 >                    U u;
4762 >                    Node<K,V> p;
4763 >                    if ((p = advance()) == null) {
4764 >                        propagateCompletion();
4765 >                        break;
4766 >                    }
4767 >                    if ((u = searchFunction.apply(p)) != null) {
4768 >                        if (result.compareAndSet(null, u))
4769 >                            quietlyCompleteRoot();
4770 >                        return;
4771 >                    }
4772 >                }
4773 >            }
4774          }
4775      }
4776  
4777 <    final class EntrySet extends AbstractSet<Map.Entry<K,V>> {
4778 <        public Iterator<Map.Entry<K,V>> iterator() {
4779 <            return new EntryIterator();
4777 >    static final class SearchMappingsTask<K,V,U>
4778 >        extends BulkTask<K,V,U> {
4779 >        final BiFunction<? super K, ? super V, ? extends U> searchFunction;
4780 >        final AtomicReference<U> result;
4781 >        SearchMappingsTask
4782 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4783 >             BiFunction<? super K, ? super V, ? extends U> searchFunction,
4784 >             AtomicReference<U> result) {
4785 >            super(p, b, i, f, t);
4786 >            this.searchFunction = searchFunction; this.result = result;
4787 >        }
4788 >        public final U getRawResult() { return result.get(); }
4789 >        public final void compute() {
4790 >            final BiFunction<? super K, ? super V, ? extends U> searchFunction;
4791 >            final AtomicReference<U> result;
4792 >            if ((searchFunction = this.searchFunction) != null &&
4793 >                (result = this.result) != null) {
4794 >                for (int i = baseIndex, f, h; batch > 0 &&
4795 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4796 >                    if (result.get() != null)
4797 >                        return;
4798 >                    addToPendingCount(1);
4799 >                    new SearchMappingsTask<K,V,U>
4800 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
4801 >                         searchFunction, result).fork();
4802 >                }
4803 >                while (result.get() == null) {
4804 >                    U u;
4805 >                    Node<K,V> p;
4806 >                    if ((p = advance()) == null) {
4807 >                        propagateCompletion();
4808 >                        break;
4809 >                    }
4810 >                    if ((u = searchFunction.apply((K)p.key, p.val)) != null) {
4811 >                        if (result.compareAndSet(null, u))
4812 >                            quietlyCompleteRoot();
4813 >                        break;
4814 >                    }
4815 >                }
4816 >            }
4817          }
4818 <        public boolean contains(Object o) {
4819 <            if (!(o instanceof Map.Entry))
4820 <                return false;
4821 <            Map.Entry<?,?> e = (Map.Entry<?,?>)o;
4822 <            V v = ConcurrentHashMap.this.get(e.getKey());
4823 <            return v != null && v.equals(e.getValue());
4818 >    }
4819 >
4820 >    static final class ReduceKeysTask<K,V>
4821 >        extends BulkTask<K,V,K> {
4822 >        final BiFunction<? super K, ? super K, ? extends K> reducer;
4823 >        K result;
4824 >        ReduceKeysTask<K,V> rights, nextRight;
4825 >        ReduceKeysTask
4826 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4827 >             ReduceKeysTask<K,V> nextRight,
4828 >             BiFunction<? super K, ? super K, ? extends K> reducer) {
4829 >            super(p, b, i, f, t); this.nextRight = nextRight;
4830 >            this.reducer = reducer;
4831 >        }
4832 >        public final K getRawResult() { return result; }
4833 >        public final void compute() {
4834 >            final BiFunction<? super K, ? super K, ? extends K> reducer;
4835 >            if ((reducer = this.reducer) != null) {
4836 >                for (int i = baseIndex, f, h; batch > 0 &&
4837 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4838 >                    addToPendingCount(1);
4839 >                    (rights = new ReduceKeysTask<K,V>
4840 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
4841 >                      rights, reducer)).fork();
4842 >                }
4843 >                K r = null;
4844 >                for (Node<K,V> p; (p = advance()) != null; ) {
4845 >                    K u = (K)p.key;
4846 >                    r = (r == null) ? u : u == null ? r : reducer.apply(r, u);
4847 >                }
4848 >                result = r;
4849 >                CountedCompleter<?> c;
4850 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
4851 >                    ReduceKeysTask<K,V>
4852 >                        t = (ReduceKeysTask<K,V>)c,
4853 >                        s = t.rights;
4854 >                    while (s != null) {
4855 >                        K tr, sr;
4856 >                        if ((sr = s.result) != null)
4857 >                            t.result = (((tr = t.result) == null) ? sr :
4858 >                                        reducer.apply(tr, sr));
4859 >                        s = t.rights = s.nextRight;
4860 >                    }
4861 >                }
4862 >            }
4863          }
4864 <        public boolean remove(Object o) {
4865 <            if (!(o instanceof Map.Entry))
4866 <                return false;
4867 <            Map.Entry<?,?> e = (Map.Entry<?,?>)o;
4868 <            return ConcurrentHashMap.this.remove(e.getKey(), e.getValue());
4864 >    }
4865 >
4866 >    static final class ReduceValuesTask<K,V>
4867 >        extends BulkTask<K,V,V> {
4868 >        final BiFunction<? super V, ? super V, ? extends V> reducer;
4869 >        V result;
4870 >        ReduceValuesTask<K,V> rights, nextRight;
4871 >        ReduceValuesTask
4872 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4873 >             ReduceValuesTask<K,V> nextRight,
4874 >             BiFunction<? super V, ? super V, ? extends V> reducer) {
4875 >            super(p, b, i, f, t); this.nextRight = nextRight;
4876 >            this.reducer = reducer;
4877 >        }
4878 >        public final V getRawResult() { return result; }
4879 >        public final void compute() {
4880 >            final BiFunction<? super V, ? super V, ? extends V> reducer;
4881 >            if ((reducer = this.reducer) != null) {
4882 >                for (int i = baseIndex, f, h; batch > 0 &&
4883 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4884 >                    addToPendingCount(1);
4885 >                    (rights = new ReduceValuesTask<K,V>
4886 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
4887 >                      rights, reducer)).fork();
4888 >                }
4889 >                V r = null;
4890 >                for (Node<K,V> p; (p = advance()) != null; ) {
4891 >                    V v = p.val;
4892 >                    r = (r == null) ? v : reducer.apply(r, v);
4893 >                }
4894 >                result = r;
4895 >                CountedCompleter<?> c;
4896 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
4897 >                    ReduceValuesTask<K,V>
4898 >                        t = (ReduceValuesTask<K,V>)c,
4899 >                        s = t.rights;
4900 >                    while (s != null) {
4901 >                        V tr, sr;
4902 >                        if ((sr = s.result) != null)
4903 >                            t.result = (((tr = t.result) == null) ? sr :
4904 >                                        reducer.apply(tr, sr));
4905 >                        s = t.rights = s.nextRight;
4906 >                    }
4907 >                }
4908 >            }
4909          }
4910 <        public int size() {
4911 <            return ConcurrentHashMap.this.size();
4910 >    }
4911 >
4912 >    static final class ReduceEntriesTask<K,V>
4913 >        extends BulkTask<K,V,Map.Entry<K,V>> {
4914 >        final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
4915 >        Map.Entry<K,V> result;
4916 >        ReduceEntriesTask<K,V> rights, nextRight;
4917 >        ReduceEntriesTask
4918 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4919 >             ReduceEntriesTask<K,V> nextRight,
4920 >             BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4921 >            super(p, b, i, f, t); this.nextRight = nextRight;
4922 >            this.reducer = reducer;
4923 >        }
4924 >        public final Map.Entry<K,V> getRawResult() { return result; }
4925 >        public final void compute() {
4926 >            final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
4927 >            if ((reducer = this.reducer) != null) {
4928 >                for (int i = baseIndex, f, h; batch > 0 &&
4929 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4930 >                    addToPendingCount(1);
4931 >                    (rights = new ReduceEntriesTask<K,V>
4932 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
4933 >                      rights, reducer)).fork();
4934 >                }
4935 >                Map.Entry<K,V> r = null;
4936 >                for (Node<K,V> p; (p = advance()) != null; )
4937 >                    r = (r == null) ? p : reducer.apply(r, p);
4938 >                result = r;
4939 >                CountedCompleter<?> c;
4940 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
4941 >                    ReduceEntriesTask<K,V>
4942 >                        t = (ReduceEntriesTask<K,V>)c,
4943 >                        s = t.rights;
4944 >                    while (s != null) {
4945 >                        Map.Entry<K,V> tr, sr;
4946 >                        if ((sr = s.result) != null)
4947 >                            t.result = (((tr = t.result) == null) ? sr :
4948 >                                        reducer.apply(tr, sr));
4949 >                        s = t.rights = s.nextRight;
4950 >                    }
4951 >                }
4952 >            }
4953          }
4954 <        public boolean isEmpty() {
4955 <            return ConcurrentHashMap.this.isEmpty();
4954 >    }
4955 >
4956 >    static final class MapReduceKeysTask<K,V,U>
4957 >        extends BulkTask<K,V,U> {
4958 >        final Function<? super K, ? extends U> transformer;
4959 >        final BiFunction<? super U, ? super U, ? extends U> reducer;
4960 >        U result;
4961 >        MapReduceKeysTask<K,V,U> rights, nextRight;
4962 >        MapReduceKeysTask
4963 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4964 >             MapReduceKeysTask<K,V,U> nextRight,
4965 >             Function<? super K, ? extends U> transformer,
4966 >             BiFunction<? super U, ? super U, ? extends U> reducer) {
4967 >            super(p, b, i, f, t); this.nextRight = nextRight;
4968 >            this.transformer = transformer;
4969 >            this.reducer = reducer;
4970 >        }
4971 >        public final U getRawResult() { return result; }
4972 >        public final void compute() {
4973 >            final Function<? super K, ? extends U> transformer;
4974 >            final BiFunction<? super U, ? super U, ? extends U> reducer;
4975 >            if ((transformer = this.transformer) != null &&
4976 >                (reducer = this.reducer) != null) {
4977 >                for (int i = baseIndex, f, h; batch > 0 &&
4978 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4979 >                    addToPendingCount(1);
4980 >                    (rights = new MapReduceKeysTask<K,V,U>
4981 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
4982 >                      rights, transformer, reducer)).fork();
4983 >                }
4984 >                U r = null;
4985 >                for (Node<K,V> p; (p = advance()) != null; ) {
4986 >                    U u;
4987 >                    if ((u = transformer.apply((K)p.key)) != null)
4988 >                        r = (r == null) ? u : reducer.apply(r, u);
4989 >                }
4990 >                result = r;
4991 >                CountedCompleter<?> c;
4992 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
4993 >                    MapReduceKeysTask<K,V,U>
4994 >                        t = (MapReduceKeysTask<K,V,U>)c,
4995 >                        s = t.rights;
4996 >                    while (s != null) {
4997 >                        U tr, sr;
4998 >                        if ((sr = s.result) != null)
4999 >                            t.result = (((tr = t.result) == null) ? sr :
5000 >                                        reducer.apply(tr, sr));
5001 >                        s = t.rights = s.nextRight;
5002 >                    }
5003 >                }
5004 >            }
5005          }
5006 <        public void clear() {
5007 <            ConcurrentHashMap.this.clear();
5006 >    }
5007 >
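[Editor's note] MapReduceKeysTask backs reduceKeys(parallelismThreshold, transformer, reducer). Note the null check in the element loop of compute(): keys whose transformer result is null are skipped, which doubles as a filter. A hedged sketch against the JDK 8 signature:

    import java.util.concurrent.ConcurrentHashMap;

    public class ReduceKeysDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
            map.put("apple", 1);
            map.put("fig", 2);
            map.put("banana", 3);
            // Map each key to its length, reduce by max. Returning null from
            // the transformer skips that key, mirroring the null test in
            // compute() above; here keys shorter than 4 chars are ignored.
            Integer longest = map.reduceKeys(1L,
                k -> k.length() >= 4 ? (Integer) k.length() : null,
                Integer::max);
            System.out.println(longest); // 6
        }
    }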
5008 >    static final class MapReduceValuesTask<K,V,U>
5009 >        extends BulkTask<K,V,U> {
5010 >        final Function<? super V, ? extends U> transformer;
5011 >        final BiFunction<? super U, ? super U, ? extends U> reducer;
5012 >        U result;
5013 >        MapReduceValuesTask<K,V,U> rights, nextRight;
5014 >        MapReduceValuesTask
5015 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5016 >             MapReduceValuesTask<K,V,U> nextRight,
5017 >             Function<? super V, ? extends U> transformer,
5018 >             BiFunction<? super U, ? super U, ? extends U> reducer) {
5019 >            super(p, b, i, f, t); this.nextRight = nextRight;
5020 >            this.transformer = transformer;
5021 >            this.reducer = reducer;
5022 >        }
5023 >        public final U getRawResult() { return result; }
5024 >        public final void compute() {
5025 >            final Function<? super V, ? extends U> transformer;
5026 >            final BiFunction<? super U, ? super U, ? extends U> reducer;
5027 >            if ((transformer = this.transformer) != null &&
5028 >                (reducer = this.reducer) != null) {
5029 >                for (int i = baseIndex, f, h; batch > 0 &&
5030 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5031 >                    addToPendingCount(1);
5032 >                    (rights = new MapReduceValuesTask<K,V,U>
5033 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5034 >                      rights, transformer, reducer)).fork();
5035 >                }
5036 >                U r = null;
5037 >                for (Node<K,V> p; (p = advance()) != null; ) {
5038 >                    U u;
5039 >                    if ((u = transformer.apply(p.val)) != null)
5040 >                        r = (r == null) ? u : reducer.apply(r, u);
5041 >                }
5042 >                result = r;
5043 >                CountedCompleter<?> c;
5044 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5045 >                    MapReduceValuesTask<K,V,U>
5046 >                        t = (MapReduceValuesTask<K,V,U>)c,
5047 >                        s = t.rights;
5048 >                    while (s != null) {
5049 >                        U tr, sr;
5050 >                        if ((sr = s.result) != null)
5051 >                            t.result = (((tr = t.result) == null) ? sr :
5052 >                                        reducer.apply(tr, sr));
5053 >                        s = t.rights = s.nextRight;
5054 >                    }
5055 >                }
5056 >            }
5057          }
5058      }
5059  
5060 <    /* ---------------- Serialization Support -------------- */
5060 >    static final class MapReduceEntriesTask<K,V,U>
5061 >        extends BulkTask<K,V,U> {
5062 >        final Function<Map.Entry<K,V>, ? extends U> transformer;
5063 >        final BiFunction<? super U, ? super U, ? extends U> reducer;
5064 >        U result;
5065 >        MapReduceEntriesTask<K,V,U> rights, nextRight;
5066 >        MapReduceEntriesTask
5067 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5068 >             MapReduceEntriesTask<K,V,U> nextRight,
5069 >             Function<Map.Entry<K,V>, ? extends U> transformer,
5070 >             BiFunction<? super U, ? super U, ? extends U> reducer) {
5071 >            super(p, b, i, f, t); this.nextRight = nextRight;
5072 >            this.transformer = transformer;
5073 >            this.reducer = reducer;
5074 >        }
5075 >        public final U getRawResult() { return result; }
5076 >        public final void compute() {
5077 >            final Function<Map.Entry<K,V>, ? extends U> transformer;
5078 >            final BiFunction<? super U, ? super U, ? extends U> reducer;
5079 >            if ((transformer = this.transformer) != null &&
5080 >                (reducer = this.reducer) != null) {
5081 >                for (int i = baseIndex, f, h; batch > 0 &&
5082 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5083 >                    addToPendingCount(1);
5084 >                    (rights = new MapReduceEntriesTask<K,V,U>
5085 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5086 >                      rights, transformer, reducer)).fork();
5087 >                }
5088 >                U r = null;
5089 >                for (Node<K,V> p; (p = advance()) != null; ) {
5090 >                    U u;
5091 >                    if ((u = transformer.apply(p)) != null)
5092 >                        r = (r == null) ? u : reducer.apply(r, u);
5093 >                }
5094 >                result = r;
5095 >                CountedCompleter<?> c;
5096 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5097 >                    MapReduceEntriesTask<K,V,U>
5098 >                        t = (MapReduceEntriesTask<K,V,U>)c,
5099 >                        s = t.rights;
5100 >                    while (s != null) {
5101 >                        U tr, sr;
5102 >                        if ((sr = s.result) != null)
5103 >                            t.result = (((tr = t.result) == null) ? sr :
5104 >                                        reducer.apply(tr, sr));
5105 >                        s = t.rights = s.nextRight;
5106 >                    }
5107 >                }
5108 >            }
5109 >        }
5110 >    }
5111  
5112 <    /**
5113 <     * Saves the state of the <tt>ConcurrentHashMap</tt> instance to a
5114 <     * stream (i.e., serializes it).
5115 <     * @param s the stream
5116 <     * @serialData
5117 <     * the key (Object) and value (Object)
5118 <     * for each key-value mapping, followed by a null pair.
5119 <     * The key-value mappings are emitted in no particular order.
5120 <     */
5121 <    private void writeObject(java.io.ObjectOutputStream s) throws IOException {
5122 <        // force all segments for serialization compatibility
5123 <        for (int k = 0; k < segments.length; ++k)
5124 <            ensureSegment(k);
5125 <        s.defaultWriteObject();
5126 <
5127 <        final Segment<K,V>[] segments = this.segments;
5128 <        for (int k = 0; k < segments.length; ++k) {
5129 <            Segment<K,V> seg = segmentAt(segments, k);
5130 <            seg.lock();
5131 <            try {
5132 <                HashEntry<K,V>[] tab = seg.table;
5133 <                for (int i = 0; i < tab.length; ++i) {
5134 <                    HashEntry<K,V> e;
5135 <                    for (e = entryAt(tab, i); e != null; e = e.next) {
5136 <                        s.writeObject(e.key);
5137 <                        s.writeObject(e.value);
5112 >    static final class MapReduceMappingsTask<K,V,U>
5113 >        extends BulkTask<K,V,U> {
5114 >        final BiFunction<? super K, ? super V, ? extends U> transformer;
5115 >        final BiFunction<? super U, ? super U, ? extends U> reducer;
5116 >        U result;
5117 >        MapReduceMappingsTask<K,V,U> rights, nextRight;
5118 >        MapReduceMappingsTask
5119 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5120 >             MapReduceMappingsTask<K,V,U> nextRight,
5121 >             BiFunction<? super K, ? super V, ? extends U> transformer,
5122 >             BiFunction<? super U, ? super U, ? extends U> reducer) {
5123 >            super(p, b, i, f, t); this.nextRight = nextRight;
5124 >            this.transformer = transformer;
5125 >            this.reducer = reducer;
5126 >        }
5127 >        public final U getRawResult() { return result; }
5128 >        public final void compute() {
5129 >            final BiFunction<? super K, ? super V, ? extends U> transformer;
5130 >            final BiFunction<? super U, ? super U, ? extends U> reducer;
5131 >            if ((transformer = this.transformer) != null &&
5132 >                (reducer = this.reducer) != null) {
5133 >                for (int i = baseIndex, f, h; batch > 0 &&
5134 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5135 >                    addToPendingCount(1);
5136 >                    (rights = new MapReduceMappingsTask<K,V,U>
5137 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5138 >                      rights, transformer, reducer)).fork();
5139 >                }
5140 >                U r = null;
5141 >                for (Node<K,V> p; (p = advance()) != null; ) {
5142 >                    U u;
5143 >                    if ((u = transformer.apply((K)p.key, p.val)) != null)
5144 >                        r = (r == null) ? u : reducer.apply(r, u);
5145 >                }
5146 >                result = r;
5147 >                CountedCompleter<?> c;
5148 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5149 >                    MapReduceMappingsTask<K,V,U>
5150 >                        t = (MapReduceMappingsTask<K,V,U>)c,
5151 >                        s = t.rights;
5152 >                    while (s != null) {
5153 >                        U tr, sr;
5154 >                        if ((sr = s.result) != null)
5155 >                            t.result = (((tr = t.result) == null) ? sr :
5156 >                                        reducer.apply(tr, sr));
5157 >                        s = t.rights = s.nextRight;
5158                      }
5159                  }
1427            } finally {
1428                seg.unlock();
5160              }
5161          }
1431        s.writeObject(null);
1432        s.writeObject(null);
5162      }
5163  
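[Editor's note] MapReduceMappingsTask is the engine behind the general reduce(parallelismThreshold, transformer, reducer) over (key, value) pairs. A small sketch, with the same JDK 8 signature caveat as above; since the task tree reduces subranges independently, the reducer should be associative and not depend on encounter order:

    import java.util.concurrent.ConcurrentHashMap;

    public class ReduceMappingsDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
            map.put("x", 2);
            map.put("yy", 3);
            // Fuse each (key, value) into a String, then concatenate.
            // Subranges are reduced independently by the task tree, so the
            // order of the pieces in the result is unspecified.
            String summary = map.reduce(1L,
                (k, v) -> k + "=" + v,
                (a, b) -> a + ", " + b);
            System.out.println(summary); // e.g. "x=2, yy=3"
        }
    }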
5164 <    /**
5165 <     * Reconstitutes the <tt>ConcurrentHashMap</tt> instance from a
5166 <     * stream (i.e., deserializes it).
5167 <     * @param s the stream
5168 <     */
5169 <    @SuppressWarnings("unchecked")
5170 <    private void readObject(java.io.ObjectInputStream s)
5171 <        throws IOException, ClassNotFoundException {
5172 <        s.defaultReadObject();
5164 >    static final class MapReduceKeysToDoubleTask<K,V>
5165 >        extends BulkTask<K,V,Double> {
5166 >        final ToDoubleFunction<? super K> transformer;
5167 >        final DoubleBinaryOperator reducer;
5168 >        final double basis;
5169 >        double result;
5170 >        MapReduceKeysToDoubleTask<K,V> rights, nextRight;
5171 >        MapReduceKeysToDoubleTask
5172 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5173 >             MapReduceKeysToDoubleTask<K,V> nextRight,
5174 >             ToDoubleFunction<? super K> transformer,
5175 >             double basis,
5176 >             DoubleBinaryOperator reducer) {
5177 >            super(p, b, i, f, t); this.nextRight = nextRight;
5178 >            this.transformer = transformer;
5179 >            this.basis = basis; this.reducer = reducer;
5180 >        }
5181 >        public final Double getRawResult() { return result; }
5182 >        public final void compute() {
5183 >            final ToDoubleFunction<? super K> transformer;
5184 >            final DoubleBinaryOperator reducer;
5185 >            if ((transformer = this.transformer) != null &&
5186 >                (reducer = this.reducer) != null) {
5187 >                double r = this.basis;
5188 >                for (int i = baseIndex, f, h; batch > 0 &&
5189 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5190 >                    addToPendingCount(1);
5191 >                    (rights = new MapReduceKeysToDoubleTask<K,V>
5192 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5193 >                      rights, transformer, r, reducer)).fork();
5194 >                }
5195 >                for (Node<K,V> p; (p = advance()) != null; )
5196 >                    r = reducer.applyAsDouble(r, transformer.applyAsDouble((K)p.key));
5197 >                result = r;
5198 >                CountedCompleter<?> c;
5199 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5200 >                    MapReduceKeysToDoubleTask<K,V>
5201 >                        t = (MapReduceKeysToDoubleTask<K,V>)c,
5202 >                        s = t.rights;
5203 >                    while (s != null) {
5204 >                        t.result = reducer.applyAsDouble(t.result, s.result);
5205 >                        s = t.rights = s.nextRight;
5206 >                    }
5207 >                }
5208 >            }
5209 >        }
5210 >    }
5211  
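[Editor's note] The primitive specializations add a basis argument. As the constructor calls in compute() show, the running value r (still equal to basis when the fork loop runs) is handed to every forked right-hand subtask as its basis, so basis is folded into the combined result once per subtask and must be an identity for the reducer (0 for sum, Double.NEGATIVE_INFINITY for max, and so on). A sketch, again assuming the JDK 8 signatures:

    import java.util.concurrent.ConcurrentHashMap;

    public class ReduceKeysToDoubleDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Double> map = new ConcurrentHashMap<>();
            map.put("a", 1.0);
            map.put("bb", 2.0);
            // Sum of key lengths. The basis (0.0) ends up folded into every
            // subtask's result, so it must be an identity for the reducer.
            double total = map.reduceKeysToDouble(1L,
                k -> k.length(),   // int widens to double
                0.0,
                Double::sum);
            System.out.println(total); // 3.0
        }
    }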
5212 <        // Re-initialize segments to be minimally sized, and let grow.
5213 <        int cap = MIN_SEGMENT_TABLE_CAPACITY;
5214 <        final Segment<K,V>[] segments = this.segments;
5215 <        for (int k = 0; k < segments.length; ++k) {
5216 <            Segment<K,V> seg = segments[k];
5217 <            if (seg != null) {
5218 <                seg.threshold = (int)(cap * seg.loadFactor);
5219 <                seg.table = (HashEntry<K,V>[]) new HashEntry[cap];
5212 >    static final class MapReduceValuesToDoubleTask<K,V>
5213 >        extends BulkTask<K,V,Double> {
5214 >        final ToDoubleFunction<? super V> transformer;
5215 >        final DoubleBinaryOperator reducer;
5216 >        final double basis;
5217 >        double result;
5218 >        MapReduceValuesToDoubleTask<K,V> rights, nextRight;
5219 >        MapReduceValuesToDoubleTask
5220 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5221 >             MapReduceValuesToDoubleTask<K,V> nextRight,
5222 >             ToDoubleFunction<? super V> transformer,
5223 >             double basis,
5224 >             DoubleBinaryOperator reducer) {
5225 >            super(p, b, i, f, t); this.nextRight = nextRight;
5226 >            this.transformer = transformer;
5227 >            this.basis = basis; this.reducer = reducer;
5228 >        }
5229 >        public final Double getRawResult() { return result; }
5230 >        public final void compute() {
5231 >            final ToDoubleFunction<? super V> transformer;
5232 >            final DoubleBinaryOperator reducer;
5233 >            if ((transformer = this.transformer) != null &&
5234 >                (reducer = this.reducer) != null) {
5235 >                double r = this.basis;
5236 >                for (int i = baseIndex, f, h; batch > 0 &&
5237 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5238 >                    addToPendingCount(1);
5239 >                    (rights = new MapReduceValuesToDoubleTask<K,V>
5240 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5241 >                      rights, transformer, r, reducer)).fork();
5242 >                }
5243 >                for (Node<K,V> p; (p = advance()) != null; )
5244 >                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.val));
5245 >                result = r;
5246 >                CountedCompleter<?> c;
5247 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5248 >                    MapReduceValuesToDoubleTask<K,V>
5249 >                        t = (MapReduceValuesToDoubleTask<K,V>)c,
5250 >                        s = t.rights;
5251 >                    while (s != null) {
5252 >                        t.result = reducer.applyAsDouble(t.result, s.result);
5253 >                        s = t.rights = s.nextRight;
5254 >                    }
5255 >                }
5256              }
5257          }
5258 +    }
5259  
5260 <        // Read the keys and values, and put the mappings in the table
5261 <        for (;;) {
5262 <            K key = (K) s.readObject();
5263 <            V value = (V) s.readObject();
5264 <            if (key == null)
5265 <                break;
5266 <            put(key, value);
5260 >    static final class MapReduceEntriesToDoubleTask<K,V>
5261 >        extends BulkTask<K,V,Double> {
5262 >        final ToDoubleFunction<Map.Entry<K,V>> transformer;
5263 >        final DoubleBinaryOperator reducer;
5264 >        final double basis;
5265 >        double result;
5266 >        MapReduceEntriesToDoubleTask<K,V> rights, nextRight;
5267 >        MapReduceEntriesToDoubleTask
5268 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5269 >             MapReduceEntriesToDoubleTask<K,V> nextRight,
5270 >             ToDoubleFunction<Map.Entry<K,V>> transformer,
5271 >             double basis,
5272 >             DoubleBinaryOperator reducer) {
5273 >            super(p, b, i, f, t); this.nextRight = nextRight;
5274 >            this.transformer = transformer;
5275 >            this.basis = basis; this.reducer = reducer;
5276 >        }
5277 >        public final Double getRawResult() { return result; }
5278 >        public final void compute() {
5279 >            final ToDoubleFunction<Map.Entry<K,V>> transformer;
5280 >            final DoubleBinaryOperator reducer;
5281 >            if ((transformer = this.transformer) != null &&
5282 >                (reducer = this.reducer) != null) {
5283 >                double r = this.basis;
5284 >                for (int i = baseIndex, f, h; batch > 0 &&
5285 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5286 >                    addToPendingCount(1);
5287 >                    (rights = new MapReduceEntriesToDoubleTask<K,V>
5288 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5289 >                      rights, transformer, r, reducer)).fork();
5290 >                }
5291 >                for (Node<K,V> p; (p = advance()) != null; )
5292 >                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p));
5293 >                result = r;
5294 >                CountedCompleter<?> c;
5295 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5296 >                    MapReduceEntriesToDoubleTask<K,V>
5297 >                        t = (MapReduceEntriesToDoubleTask<K,V>)c,
5298 >                        s = t.rights;
5299 >                    while (s != null) {
5300 >                        t.result = reducer.applyAsDouble(t.result, s.result);
5301 >                        s = t.rights = s.nextRight;
5302 >                    }
5303 >                }
5304 >            }
5305 >        }
5306 >    }
5307 >
5308 >    static final class MapReduceMappingsToDoubleTask<K,V>
5309 >        extends BulkTask<K,V,Double> {
5310 >        final ToDoubleBiFunction<? super K, ? super V> transformer;
5311 >        final DoubleBinaryOperator reducer;
5312 >        final double basis;
5313 >        double result;
5314 >        MapReduceMappingsToDoubleTask<K,V> rights, nextRight;
5315 >        MapReduceMappingsToDoubleTask
5316 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5317 >             MapReduceMappingsToDoubleTask<K,V> nextRight,
5318 >             ToDoubleBiFunction<? super K, ? super V> transformer,
5319 >             double basis,
5320 >             DoubleBinaryOperator reducer) {
5321 >            super(p, b, i, f, t); this.nextRight = nextRight;
5322 >            this.transformer = transformer;
5323 >            this.basis = basis; this.reducer = reducer;
5324 >        }
5325 >        public final Double getRawResult() { return result; }
5326 >        public final void compute() {
5327 >            final ToDoubleBiFunction<? super K, ? super V> transformer;
5328 >            final DoubleBinaryOperator reducer;
5329 >            if ((transformer = this.transformer) != null &&
5330 >                (reducer = this.reducer) != null) {
5331 >                double r = this.basis;
5332 >                for (int i = baseIndex, f, h; batch > 0 &&
5333 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5334 >                    addToPendingCount(1);
5335 >                    (rights = new MapReduceMappingsToDoubleTask<K,V>
5336 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5337 >                      rights, transformer, r, reducer)).fork();
5338 >                }
5339 >                for (Node<K,V> p; (p = advance()) != null; )
5340 >                    r = reducer.applyAsDouble(r, transformer.applyAsDouble((K)p.key, p.val));
5341 >                result = r;
5342 >                CountedCompleter<?> c;
5343 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5344 >                    MapReduceMappingsToDoubleTask<K,V>
5345 >                        t = (MapReduceMappingsToDoubleTask<K,V>)c,
5346 >                        s = t.rights;
5347 >                    while (s != null) {
5348 >                        t.result = reducer.applyAsDouble(t.result, s.result);
5349 >                        s = t.rights = s.nextRight;
5350 >                    }
5351 >                }
5352 >            }
5353 >        }
5354 >    }
5355 >
5356 >    static final class MapReduceKeysToLongTask<K,V>
5357 >        extends BulkTask<K,V,Long> {
5358 >        final ToLongFunction<? super K> transformer;
5359 >        final LongBinaryOperator reducer;
5360 >        final long basis;
5361 >        long result;
5362 >        MapReduceKeysToLongTask<K,V> rights, nextRight;
5363 >        MapReduceKeysToLongTask
5364 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5365 >             MapReduceKeysToLongTask<K,V> nextRight,
5366 >             ToLongFunction<? super K> transformer,
5367 >             long basis,
5368 >             LongBinaryOperator reducer) {
5369 >            super(p, b, i, f, t); this.nextRight = nextRight;
5370 >            this.transformer = transformer;
5371 >            this.basis = basis; this.reducer = reducer;
5372 >        }
5373 >        public final Long getRawResult() { return result; }
5374 >        public final void compute() {
5375 >            final ToLongFunction<? super K> transformer;
5376 >            final LongBinaryOperator reducer;
5377 >            if ((transformer = this.transformer) != null &&
5378 >                (reducer = this.reducer) != null) {
5379 >                long r = this.basis;
5380 >                for (int i = baseIndex, f, h; batch > 0 &&
5381 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5382 >                    addToPendingCount(1);
5383 >                    (rights = new MapReduceKeysToLongTask<K,V>
5384 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5385 >                      rights, transformer, r, reducer)).fork();
5386 >                }
5387 >                for (Node<K,V> p; (p = advance()) != null; )
5388 >                    r = reducer.applyAsLong(r, transformer.applyAsLong((K)p.key));
5389 >                result = r;
5390 >                CountedCompleter<?> c;
5391 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5392 >                    MapReduceKeysToLongTask<K,V>
5393 >                        t = (MapReduceKeysToLongTask<K,V>)c,
5394 >                        s = t.rights;
5395 >                    while (s != null) {
5396 >                        t.result = reducer.applyAsLong(t.result, s.result);
5397 >                        s = t.rights = s.nextRight;
5398 >                    }
5399 >                }
5400 >            }
5401 >        }
5402 >    }
5403 >
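[Editor's note] The Long variants follow the same template; for instance, MapReduceValuesToLongTask backs reduceValuesToLong. A brief sketch (JDK 8 signature assumed):

    import java.util.concurrent.ConcurrentHashMap;

    public class ReduceValuesToLongDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Long> map = new ConcurrentHashMap<>();
            map.put("a", 10L);
            map.put("b", 32L);
            // Total of all values; 0 is the identity basis for Long::sum.
            long sum = map.reduceValuesToLong(1L, v -> v, 0L, Long::sum);
            System.out.println(sum); // 42
        }
    }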
5404 >    static final class MapReduceValuesToLongTask<K,V>
5405 >        extends BulkTask<K,V,Long> {
5406 >        final ToLongFunction<? super V> transformer;
5407 >        final LongBinaryOperator reducer;
5408 >        final long basis;
5409 >        long result;
5410 >        MapReduceValuesToLongTask<K,V> rights, nextRight;
5411 >        MapReduceValuesToLongTask
5412 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5413 >             MapReduceValuesToLongTask<K,V> nextRight,
5414 >             ToLongFunction<? super V> transformer,
5415 >             long basis,
5416 >             LongBinaryOperator reducer) {
5417 >            super(p, b, i, f, t); this.nextRight = nextRight;
5418 >            this.transformer = transformer;
5419 >            this.basis = basis; this.reducer = reducer;
5420 >        }
5421 >        public final Long getRawResult() { return result; }
5422 >        public final void compute() {
5423 >            final ToLongFunction<? super V> transformer;
5424 >            final LongBinaryOperator reducer;
5425 >            if ((transformer = this.transformer) != null &&
5426 >                (reducer = this.reducer) != null) {
5427 >                long r = this.basis;
5428 >                for (int i = baseIndex, f, h; batch > 0 &&
5429 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5430 >                    addToPendingCount(1);
5431 >                    (rights = new MapReduceValuesToLongTask<K,V>
5432 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5433 >                      rights, transformer, r, reducer)).fork();
5434 >                }
5435 >                for (Node<K,V> p; (p = advance()) != null; )
5436 >                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.val));
5437 >                result = r;
5438 >                CountedCompleter<?> c;
5439 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5440 >                    MapReduceValuesToLongTask<K,V>
5441 >                        t = (MapReduceValuesToLongTask<K,V>)c,
5442 >                        s = t.rights;
5443 >                    while (s != null) {
5444 >                        t.result = reducer.applyAsLong(t.result, s.result);
5445 >                        s = t.rights = s.nextRight;
5446 >                    }
5447 >                }
5448 >            }
5449 >        }
5450 >    }
5451 >
5452 >    static final class MapReduceEntriesToLongTask<K,V>
5453 >        extends BulkTask<K,V,Long> {
5454 >        final ToLongFunction<Map.Entry<K,V>> transformer;
5455 >        final LongBinaryOperator reducer;
5456 >        final long basis;
5457 >        long result;
5458 >        MapReduceEntriesToLongTask<K,V> rights, nextRight;
5459 >        MapReduceEntriesToLongTask
5460 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5461 >             MapReduceEntriesToLongTask<K,V> nextRight,
5462 >             ToLongFunction<Map.Entry<K,V>> transformer,
5463 >             long basis,
5464 >             LongBinaryOperator reducer) {
5465 >            super(p, b, i, f, t); this.nextRight = nextRight;
5466 >            this.transformer = transformer;
5467 >            this.basis = basis; this.reducer = reducer;
5468 >        }
5469 >        public final Long getRawResult() { return result; }
5470 >        public final void compute() {
5471 >            final ToLongFunction<Map.Entry<K,V>> transformer;
5472 >            final LongBinaryOperator reducer;
5473 >            if ((transformer = this.transformer) != null &&
5474 >                (reducer = this.reducer) != null) {
5475 >                long r = this.basis;
5476 >                for (int i = baseIndex, f, h; batch > 0 &&
5477 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5478 >                    addToPendingCount(1);
5479 >                    (rights = new MapReduceEntriesToLongTask<K,V>
5480 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5481 >                      rights, transformer, r, reducer)).fork();
5482 >                }
5483 >                for (Node<K,V> p; (p = advance()) != null; )
5484 >                    r = reducer.applyAsLong(r, transformer.applyAsLong(p));
5485 >                result = r;
5486 >                CountedCompleter<?> c;
5487 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5488 >                    MapReduceEntriesToLongTask<K,V>
5489 >                        t = (MapReduceEntriesToLongTask<K,V>)c,
5490 >                        s = t.rights;
5491 >                    while (s != null) {
5492 >                        t.result = reducer.applyAsLong(t.result, s.result);
5493 >                        s = t.rights = s.nextRight;
5494 >                    }
5495 >                }
5496 >            }
5497 >        }
5498 >    }
5499 >
5500 >    static final class MapReduceMappingsToLongTask<K,V>
5501 >        extends BulkTask<K,V,Long> {
5502 >        final ToLongBiFunction<? super K, ? super V> transformer;
5503 >        final LongBinaryOperator reducer;
5504 >        final long basis;
5505 >        long result;
5506 >        MapReduceMappingsToLongTask<K,V> rights, nextRight;
5507 >        MapReduceMappingsToLongTask
5508 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5509 >             MapReduceMappingsToLongTask<K,V> nextRight,
5510 >             ToLongBiFunction<? super K, ? super V> transformer,
5511 >             long basis,
5512 >             LongBinaryOperator reducer) {
5513 >            super(p, b, i, f, t); this.nextRight = nextRight;
5514 >            this.transformer = transformer;
5515 >            this.basis = basis; this.reducer = reducer;
5516 >        }
5517 >        public final Long getRawResult() { return result; }
5518 >        public final void compute() {
5519 >            final ToLongBiFunction<? super K, ? super V> transformer;
5520 >            final LongBinaryOperator reducer;
5521 >            if ((transformer = this.transformer) != null &&
5522 >                (reducer = this.reducer) != null) {
5523 >                long r = this.basis;
5524 >                for (int i = baseIndex, f, h; batch > 0 &&
5525 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5526 >                    addToPendingCount(1);
5527 >                    (rights = new MapReduceMappingsToLongTask<K,V>
5528 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5529 >                      rights, transformer, r, reducer)).fork();
5530 >                }
5531 >                for (Node<K,V> p; (p = advance()) != null; )
5532 >                    r = reducer.applyAsLong(r, transformer.applyAsLong((K)p.key, p.val));
5533 >                result = r;
5534 >                CountedCompleter<?> c;
5535 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5536 >                    MapReduceMappingsToLongTask<K,V>
5537 >                        t = (MapReduceMappingsToLongTask<K,V>)c,
5538 >                        s = t.rights;
5539 >                    while (s != null) {
5540 >                        t.result = reducer.applyAsLong(t.result, s.result);
5541 >                        s = t.rights = s.nextRight;
5542 >                    }
5543 >                }
5544 >            }
5545 >        }
5546 >    }
5547 >
5548 >    static final class MapReduceKeysToIntTask<K,V>
5549 >        extends BulkTask<K,V,Integer> {
5550 >        final ToIntFunction<? super K> transformer;
5551 >        final IntBinaryOperator reducer;
5552 >        final int basis;
5553 >        int result;
5554 >        MapReduceKeysToIntTask<K,V> rights, nextRight;
5555 >        MapReduceKeysToIntTask
5556 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5557 >             MapReduceKeysToIntTask<K,V> nextRight,
5558 >             ToIntFunction<? super K> transformer,
5559 >             int basis,
5560 >             IntBinaryOperator reducer) {
5561 >            super(p, b, i, f, t); this.nextRight = nextRight;
5562 >            this.transformer = transformer;
5563 >            this.basis = basis; this.reducer = reducer;
5564 >        }
5565 >        public final Integer getRawResult() { return result; }
5566 >        public final void compute() {
5567 >            final ToIntFunction<? super K> transformer;
5568 >            final IntBinaryOperator reducer;
5569 >            if ((transformer = this.transformer) != null &&
5570 >                (reducer = this.reducer) != null) {
5571 >                int r = this.basis;
5572 >                for (int i = baseIndex, f, h; batch > 0 &&
5573 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5574 >                    addToPendingCount(1);
5575 >                    (rights = new MapReduceKeysToIntTask<K,V>
5576 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5577 >                      rights, transformer, r, reducer)).fork();
5578 >                }
5579 >                for (Node<K,V> p; (p = advance()) != null; )
5580 >                    r = reducer.applyAsInt(r, transformer.applyAsInt((K)p.key));
5581 >                result = r;
5582 >                CountedCompleter<?> c;
5583 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5584 >                    MapReduceKeysToIntTask<K,V>
5585 >                        t = (MapReduceKeysToIntTask<K,V>)c,
5586 >                        s = t.rights;
5587 >                    while (s != null) {
5588 >                        t.result = reducer.applyAsInt(t.result, s.result);
5589 >                        s = t.rights = s.nextRight;
5590 >                    }
5591 >                }
5592 >            }
5593 >        }
5594 >    }
5595 >
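[Editor's note] On the Int side the threshold semantics are worth spelling out: passing Long.MAX_VALUE as parallelismThreshold keeps the whole reduction sequential in the calling thread, while small values enable the splitting seen in compute(). A sketch (JDK 8 signature assumed):

    import java.util.concurrent.ConcurrentHashMap;

    public class ReduceKeysToIntDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, String> map = new ConcurrentHashMap<>();
            map.put("one", "1");
            map.put("three", "3");
            // Long.MAX_VALUE: never split, run entirely in the caller.
            int chars = map.reduceKeysToInt(Long.MAX_VALUE,
                String::length, 0, Integer::sum);
            System.out.println(chars); // 8
        }
    }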
5596 >    static final class MapReduceValuesToIntTask<K,V>
5597 >        extends BulkTask<K,V,Integer> {
5598 >        final ToIntFunction<? super V> transformer;
5599 >        final IntBinaryOperator reducer;
5600 >        final int basis;
5601 >        int result;
5602 >        MapReduceValuesToIntTask<K,V> rights, nextRight;
5603 >        MapReduceValuesToIntTask
5604 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5605 >             MapReduceValuesToIntTask<K,V> nextRight,
5606 >             ToIntFunction<? super V> transformer,
5607 >             int basis,
5608 >             IntBinaryOperator reducer) {
5609 >            super(p, b, i, f, t); this.nextRight = nextRight;
5610 >            this.transformer = transformer;
5611 >            this.basis = basis; this.reducer = reducer;
5612 >        }
5613 >        public final Integer getRawResult() { return result; }
5614 >        public final void compute() {
5615 >            final ToIntFunction<? super V> transformer;
5616 >            final IntBinaryOperator reducer;
5617 >            if ((transformer = this.transformer) != null &&
5618 >                (reducer = this.reducer) != null) {
5619 >                int r = this.basis;
5620 >                for (int i = baseIndex, f, h; batch > 0 &&
5621 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5622 >                    addToPendingCount(1);
5623 >                    (rights = new MapReduceValuesToIntTask<K,V>
5624 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5625 >                      rights, transformer, r, reducer)).fork();
5626 >                }
5627 >                for (Node<K,V> p; (p = advance()) != null; )
5628 >                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.val));
5629 >                result = r;
5630 >                CountedCompleter<?> c;
5631 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5632 >                    MapReduceValuesToIntTask<K,V>
5633 >                        t = (MapReduceValuesToIntTask<K,V>)c,
5634 >                        s = t.rights;
5635 >                    while (s != null) {
5636 >                        t.result = reducer.applyAsInt(t.result, s.result);
5637 >                        s = t.rights = s.nextRight;
5638 >                    }
5639 >                }
5640 >            }
5641 >        }
5642 >    }
5643 >
5644 >    static final class MapReduceEntriesToIntTask<K,V>
5645 >        extends BulkTask<K,V,Integer> {
5646 >        final ToIntFunction<Map.Entry<K,V>> transformer;
5647 >        final IntBinaryOperator reducer;
5648 >        final int basis;
5649 >        int result;
5650 >        MapReduceEntriesToIntTask<K,V> rights, nextRight;
5651 >        MapReduceEntriesToIntTask
5652 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5653 >             MapReduceEntriesToIntTask<K,V> nextRight,
5654 >             ToIntFunction<Map.Entry<K,V>> transformer,
5655 >             int basis,
5656 >             IntBinaryOperator reducer) {
5657 >            super(p, b, i, f, t); this.nextRight = nextRight;
5658 >            this.transformer = transformer;
5659 >            this.basis = basis; this.reducer = reducer;
5660 >        }
5661 >        public final Integer getRawResult() { return result; }
5662 >        public final void compute() {
5663 >            final ToIntFunction<Map.Entry<K,V>> transformer;
5664 >            final IntBinaryOperator reducer;
5665 >            if ((transformer = this.transformer) != null &&
5666 >                (reducer = this.reducer) != null) {
5667 >                int r = this.basis;
5668 >                for (int i = baseIndex, f, h; batch > 0 &&
5669 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5670 >                    addToPendingCount(1);
5671 >                    (rights = new MapReduceEntriesToIntTask<K,V>
5672 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5673 >                      rights, transformer, r, reducer)).fork();
5674 >                }
5675 >                for (Node<K,V> p; (p = advance()) != null; )
5676 >                    r = reducer.applyAsInt(r, transformer.applyAsInt(p));
5677 >                result = r;
5678 >                CountedCompleter<?> c;
5679 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5680 >                    MapReduceEntriesToIntTask<K,V>
5681 >                        t = (MapReduceEntriesToIntTask<K,V>)c,
5682 >                        s = t.rights;
5683 >                    while (s != null) {
5684 >                        t.result = reducer.applyAsInt(t.result, s.result);
5685 >                        s = t.rights = s.nextRight;
5686 >                    }
5687 >                }
5688 >            }
5689 >        }
5690 >    }
5691 >
5692 >    static final class MapReduceMappingsToIntTask<K,V>
5693 >        extends BulkTask<K,V,Integer> {
5694 >        final ToIntBiFunction<? super K, ? super V> transformer;
5695 >        final IntBinaryOperator reducer;
5696 >        final int basis;
5697 >        int result;
5698 >        MapReduceMappingsToIntTask<K,V> rights, nextRight;
5699 >        MapReduceMappingsToIntTask
5700 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5701 >             MapReduceMappingsToIntTask<K,V> nextRight,
5702 >             ToIntBiFunction<? super K, ? super V> transformer,
5703 >             int basis,
5704 >             IntBinaryOperator reducer) {
5705 >            super(p, b, i, f, t); this.nextRight = nextRight;
5706 >            this.transformer = transformer;
5707 >            this.basis = basis; this.reducer = reducer;
5708 >        }
5709 >        public final Integer getRawResult() { return result; }
5710 >        public final void compute() {
5711 >            final ToIntBiFunction<? super K, ? super V> transformer;
5712 >            final IntBinaryOperator reducer;
5713 >            if ((transformer = this.transformer) != null &&
5714 >                (reducer = this.reducer) != null) {
5715 >                int r = this.basis;
5716 >                for (int i = baseIndex, f, h; batch > 0 &&
5717 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5718 >                    addToPendingCount(1);
5719 >                    (rights = new MapReduceMappingsToIntTask<K,V>
5720 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5721 >                      rights, transformer, r, reducer)).fork();
5722 >                }
5723 >                for (Node<K,V> p; (p = advance()) != null; )
5724 >                    r = reducer.applyAsInt(r, transformer.applyAsInt((K)p.key, p.val));
5725 >                result = r;
5726 >                CountedCompleter<?> c;
5727 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5728 >                    MapReduceMappingsToIntTask<K,V>
5729 >                        t = (MapReduceMappingsToIntTask<K,V>)c,
5730 >                        s = t.rights;
5731 >                    while (s != null) {
5732 >                        t.result = reducer.applyAsInt(t.result, s.result);
5733 >                        s = t.rights = s.nextRight;
5734 >                    }
5735 >                }
5736 >            }
5737          }
5738      }
5739  
5740      // Unsafe mechanics
5741 <    private static final sun.misc.Unsafe UNSAFE;
5742 <    private static final long SBASE;
5743 <    private static final int SSHIFT;
5744 <    private static final long TBASE;
5745 <    private static final int TSHIFT;
5741 >    private static final sun.misc.Unsafe U;
5742 >    private static final long SIZECTL;
5743 >    private static final long TRANSFERINDEX;
5744 >    private static final long TRANSFERORIGIN;
5745 >    private static final long BASECOUNT;
5746 >    private static final long CELLSBUSY;
5747 >    private static final long CELLVALUE;
5748 >    private static final long ABASE;
5749 >    private static final int ASHIFT;
5750  
5751      static {
1474        int ss, ts;
5752          try {
5753 <            UNSAFE = sun.misc.Unsafe.getUnsafe();
5754 <            Class tc = HashEntry[].class;
5755 <            Class sc = Segment[].class;
5756 <            TBASE = UNSAFE.arrayBaseOffset(tc);
5757 <            SBASE = UNSAFE.arrayBaseOffset(sc);
5758 <            ts = UNSAFE.arrayIndexScale(tc);
5759 <            ss = UNSAFE.arrayIndexScale(sc);
5753 >            U = sun.misc.Unsafe.getUnsafe();
5754 >            Class<?> k = ConcurrentHashMap.class;
5755 >            SIZECTL = U.objectFieldOffset
5756 >                (k.getDeclaredField("sizeCtl"));
5757 >            TRANSFERINDEX = U.objectFieldOffset
5758 >                (k.getDeclaredField("transferIndex"));
5759 >            TRANSFERORIGIN = U.objectFieldOffset
5760 >                (k.getDeclaredField("transferOrigin"));
5761 >            BASECOUNT = U.objectFieldOffset
5762 >                (k.getDeclaredField("baseCount"));
5763 >            CELLSBUSY = U.objectFieldOffset
5764 >                (k.getDeclaredField("cellsBusy"));
5765 >            Class<?> ck = Cell.class;
5766 >            CELLVALUE = U.objectFieldOffset
5767 >                (ck.getDeclaredField("value"));
5768 >            Class<?> sc = Node[].class;
5769 >            ABASE = U.arrayBaseOffset(sc);
5770 >            int scale = U.arrayIndexScale(sc);
5771 >            if ((scale & (scale - 1)) != 0)
5772 >                throw new Error("data type scale not a power of two");
5773 >            ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
5774          } catch (Exception e) {
5775              throw new Error(e);
5776          }
1486        if ((ss & (ss-1)) != 0 || (ts & (ts-1)) != 0)
1487            throw new Error("data type scale not a power of two");
1488        SSHIFT = 31 - Integer.numberOfLeadingZeros(ss);
1489        TSHIFT = 31 - Integer.numberOfLeadingZeros(ts);
5777      }
1491
5778   }
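[Editor's note] For orientation: the ABASE and ASHIFT values computed in the static initializer drive the volatile table accessors defined earlier in this file, roughly as follows; the element offset is the array base plus the index shifted by log2 of the element scale:

    // Volatile read of tab[i]; casTabAt and setTabAt use the same offset.
    static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
        return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
    }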
