root/jsr166/jsr166/src/main/java/util/concurrent/ConcurrentHashMap.java

Comparing jsr166/src/main/java/util/concurrent/ConcurrentHashMap.java (file contents):
Revision 1.110 by jsr166, Wed Apr 27 14:06:30 2011 UTC vs.
Revision 1.222 by dl, Mon Jun 17 18:53:58 2013 UTC

# Line 5 | Line 5
5   */
6  
7   package java.util.concurrent;
8 < import java.util.concurrent.locks.*;
9 < import java.util.*;
8 >
9 > import java.io.ObjectStreamField;
10   import java.io.Serializable;
11 < import java.io.IOException;
12 < import java.io.ObjectInputStream;
13 < import java.io.ObjectOutputStream;
11 > import java.lang.reflect.ParameterizedType;
12 > import java.lang.reflect.Type;
13 > import java.util.Arrays;
14 > import java.util.Collection;
15 > import java.util.Comparator;
16 > import java.util.ConcurrentModificationException;
17 > import java.util.Enumeration;
18 > import java.util.HashMap;
19 > import java.util.Hashtable;
20 > import java.util.Iterator;
21 > import java.util.Map;
22 > import java.util.NoSuchElementException;
23 > import java.util.Set;
24 > import java.util.Spliterator;
25 > import java.util.concurrent.ConcurrentMap;
26 > import java.util.concurrent.ForkJoinPool;
27 > import java.util.concurrent.atomic.AtomicReference;
28 > import java.util.concurrent.locks.LockSupport;
29 > import java.util.concurrent.locks.ReentrantLock;
30 > import java.util.function.BiConsumer;
31 > import java.util.function.BiFunction;
32 > import java.util.function.BinaryOperator;
33 > import java.util.function.Consumer;
34 > import java.util.function.DoubleBinaryOperator;
35 > import java.util.function.Function;
36 > import java.util.function.IntBinaryOperator;
37 > import java.util.function.LongBinaryOperator;
38 > import java.util.function.ToDoubleBiFunction;
39 > import java.util.function.ToDoubleFunction;
40 > import java.util.function.ToIntBiFunction;
41 > import java.util.function.ToIntFunction;
42 > import java.util.function.ToLongBiFunction;
43 > import java.util.function.ToLongFunction;
44 > import java.util.stream.Stream;
45  
46   /**
47   * A hash table supporting full concurrency of retrievals and
48 < * adjustable expected concurrency for updates. This class obeys the
48 > * high expected concurrency for updates. This class obeys the
49   * same functional specification as {@link java.util.Hashtable}, and
50   * includes versions of methods corresponding to each method of
51 < * <tt>Hashtable</tt>. However, even though all operations are
51 > * {@code Hashtable}. However, even though all operations are
52   * thread-safe, retrieval operations do <em>not</em> entail locking,
53   * and there is <em>not</em> any support for locking the entire table
54   * in a way that prevents all access.  This class is fully
55 < * interoperable with <tt>Hashtable</tt> in programs that rely on its
55 > * interoperable with {@code Hashtable} in programs that rely on its
56   * thread safety but not on its synchronization details.
57   *
58 < * <p> Retrieval operations (including <tt>get</tt>) generally do not
59 < * block, so may overlap with update operations (including
60 < * <tt>put</tt> and <tt>remove</tt>). Retrievals reflect the results
61 < * of the most recently <em>completed</em> update operations holding
62 < * upon their onset.  For aggregate operations such as <tt>putAll</tt>
63 < * and <tt>clear</tt>, concurrent retrievals may reflect insertion or
64 < * removal of only some entries.  Similarly, Iterators and
65 < * Enumerations return elements reflecting the state of the hash table
66 < * at some point at or since the creation of the iterator/enumeration.
67 < * They do <em>not</em> throw {@link ConcurrentModificationException}.
68 < * However, iterators are designed to be used by only one thread at a time.
69 < *
70 < * <p> The allowed concurrency among update operations is guided by
71 < * the optional <tt>concurrencyLevel</tt> constructor argument
72 < * (default <tt>16</tt>), which is used as a hint for internal sizing.  The
73 < * table is internally partitioned to try to permit the indicated
74 < * number of concurrent updates without contention. Because placement
75 < * in hash tables is essentially random, the actual concurrency will
76 < * vary.  Ideally, you should choose a value to accommodate as many
77 < * threads as will ever concurrently modify the table. Using a
78 < * significantly higher value than you need can waste space and time,
79 < * and a significantly lower value can lead to thread contention. But
80 < * overestimates and underestimates within an order of magnitude do
81 < * not usually have much noticeable impact. A value of one is
82 < * appropriate when it is known that only one thread will modify and
83 < * all others will only read. Also, resizing this or any other kind of
84 < * hash table is a relatively slow operation, so, when possible, it is
85 < * a good idea to provide estimates of expected table sizes in
86 < * constructors.
58 > * <p>Retrieval operations (including {@code get}) generally do not
59 > * block, so may overlap with update operations (including {@code put}
60 > * and {@code remove}). Retrievals reflect the results of the most
61 > * recently <em>completed</em> update operations holding upon their
62 > * onset. (More formally, an update operation for a given key bears a
63 > * <em>happens-before</em> relation with any (non-null) retrieval for
64 > * that key reporting the updated value.)  For aggregate operations
65 > * such as {@code putAll} and {@code clear}, concurrent retrievals may
66 > * reflect insertion or removal of only some entries.  Similarly,
67 > * Iterators and Enumerations return elements reflecting the state of
68 > * the hash table at some point at or since the creation of the
69 > * iterator/enumeration.  They do <em>not</em> throw {@link
70 > * ConcurrentModificationException}.  However, iterators are designed
71 > * to be used by only one thread at a time.  Bear in mind that the
72 > * results of aggregate status methods including {@code size}, {@code
73 > * isEmpty}, and {@code containsValue} are typically useful only when
74 > * a map is not undergoing concurrent updates in other threads.
75 > * Otherwise the results of these methods reflect transient states
76 > * that may be adequate for monitoring or estimation purposes, but not
77 > * for program control.
78 > *
79 > * <p>The table is dynamically expanded when there are too many
80 > * collisions (i.e., keys that have distinct hash codes but fall into
81 > * the same slot modulo the table size), with the expected average
82 > * effect of maintaining roughly two bins per mapping (corresponding
83 > * to a 0.75 load factor threshold for resizing). There may be much
84 > * variance around this average as mappings are added and removed, but
85 > * overall, this maintains a commonly accepted time/space tradeoff for
86 > * hash tables.  However, resizing this or any other kind of hash
87 > * table may be a relatively slow operation. When possible, it is a
88 > * good idea to provide a size estimate as an optional {@code
89 > * initialCapacity} constructor argument. An additional optional
90 > * {@code loadFactor} constructor argument provides a further means of
91 > * customizing initial table capacity by specifying the table density
92 > * to be used in calculating the amount of space to allocate for the
93 > * given number of elements.  Also, for compatibility with previous
94 > * versions of this class, constructors may optionally specify an
95 > * expected {@code concurrencyLevel} as an additional hint for
96 > * internal sizing.  Note that using many keys with exactly the same
97 > * {@code hashCode()} is a sure way to slow down performance of any
98 > * hash table. To ameliorate impact, when keys are {@link Comparable},
99 > * this class may use comparison order among keys to help break ties.
100 > *
101 > * <p>A {@link Set} projection of a ConcurrentHashMap may be created
102 > * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
103 > * (using {@link #keySet(Object)}) when only keys are of interest, and the
104 > * mapped values are (perhaps transiently) not used or all take the
105 > * same mapping value.
106 > *
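[A minimal sketch of the two Set projections just described; the map contents and the "present" default value are made up, while newKeySet and keySet(V) are the public JDK 8 methods named above.]

    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    public class KeySetSketch {
        public static void main(String[] args) {
            // Standalone concurrent set backed by a ConcurrentHashMap<K,Boolean>.
            Set<String> ids = ConcurrentHashMap.newKeySet();
            ids.add("a");

            // View of an existing map's keys; adding through the view maps
            // each new key to the given default value.
            ConcurrentHashMap<String,String> map = new ConcurrentHashMap<>();
            Set<String> keys = map.keySet("present");
            keys.add("b");                       // same as map.put("b", "present")
            System.out.println(map.get("b"));    // present
        }
    }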
107 > * <p>A ConcurrentHashMap can be used as a scalable frequency map (a
108 > * form of histogram or multiset) by using {@link
109 > * java.util.concurrent.atomic.LongAdder} values and initializing via
110 > * {@link #computeIfAbsent computeIfAbsent}. For example, to add a count
111 > * to a {@code ConcurrentHashMap<String,LongAdder> freqs}, you can use
112 > * {@code freqs.computeIfAbsent(key, k -> new LongAdder()).increment();}
113   *
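[A runnable sketch of the frequency-map idiom above; the sample words are made up.]

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.LongAdder;

    public class FreqSketch {
        public static void main(String[] args) {
            ConcurrentHashMap<String,LongAdder> freqs = new ConcurrentHashMap<>();
            for (String w : new String[] {"to", "be", "or", "not", "to", "be"})
                freqs.computeIfAbsent(w, k -> new LongAdder()).increment();
            System.out.println(freqs.get("to").sum()); // 2
        }
    }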
114   * <p>This class and its views and iterators implement all of the
115   * <em>optional</em> methods of the {@link Map} and {@link Iterator}
116   * interfaces.
117   *
118 < * <p> Like {@link Hashtable} but unlike {@link HashMap}, this class
119 < * does <em>not</em> allow <tt>null</tt> to be used as a key or value.
118 > * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
119 > * does <em>not</em> allow {@code null} to be used as a key or value.
120 > *
121 > * <p>ConcurrentHashMaps support a set of sequential and parallel bulk
122 > * operations that, unlike most {@link Stream} methods, are designed
123 > * to be safely, and often sensibly, applied even with maps that are
124 > * being concurrently updated by other threads; for example, when
125 > * computing a snapshot summary of the values in a shared registry.
126 > * There are three kinds of operation, each with four forms, accepting
127 > * functions with Keys, Values, Entries, and (Key, Value) arguments
128 > * and/or return values. Because the elements of a ConcurrentHashMap
129 > * are not ordered in any particular way, and may be processed in
130 > * different orders in different parallel executions, the correctness
131 > * of supplied functions should not depend on any ordering, or on any
132 > * other objects or values that may transiently change while
133 > * computation is in progress; and except for forEach actions, should
134 > * ideally be side-effect-free. Bulk operations on {@link Map.Entry}
135 > * objects do not support method {@code setValue}.
136 > *
137 > * <ul>
138 > * <li> forEach: Perform a given action on each element.
139 > * A variant form applies a given transformation on each element
140 > * before performing the action.</li>
141 > *
142 > * <li> search: Return the first available non-null result of
143 > * applying a given function on each element; skipping further
144 > * search when a result is found.</li>
145 > *
146 > * <li> reduce: Accumulate each element.  The supplied reduction
147 > * function cannot rely on ordering (more formally, it should be
148 > * both associative and commutative).  There are five variants:
149 > *
150 > * <ul>
151 > *
152 > * <li> Plain reductions. (There is not a form of this method for
153 > * (key, value) function arguments since there is no corresponding
154 > * return type.)</li>
155 > *
156 > * <li> Mapped reductions that accumulate the results of a given
157 > * function applied to each element.</li>
158 > *
159 > * <li> Reductions to scalar doubles, longs, and ints, using a
160 > * given basis value.</li>
161 > *
162 > * </ul>
163 > * </li>
164 > * </ul>
165 > *
166 > * <p>These bulk operations accept a {@code parallelismThreshold}
167 > * argument. Methods proceed sequentially if the current map size is
168 > * estimated to be less than the given threshold. Using a value of
169 > * {@code Long.MAX_VALUE} suppresses all parallelism.  Using a value
170 > * of {@code 1} results in maximal parallelism by partitioning into
171 > * enough subtasks to fully utilize the {@link
172 > * ForkJoinPool#commonPool()} that is used for all parallel
173 > * computations. Normally, you would initially choose one of these
174 > * extreme values, and then measure performance of using in-between
175 > * values that trade off overhead versus throughput.
176 > *
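[To make the threshold semantics concrete, a sketch using one bulk method of each kind; the map contents are made up. Long.MAX_VALUE forces sequential execution, while 1L permits maximal parallelism.]

    import java.util.concurrent.ConcurrentHashMap;

    public class BulkSketch {
        public static void main(String[] args) {
            ConcurrentHashMap<String,Long> m = new ConcurrentHashMap<>();
            m.put("a", 1L); m.put("b", 2L); m.put("c", 3L);

            // forEach, sequentially (threshold Long.MAX_VALUE suppresses parallelism).
            m.forEach(Long.MAX_VALUE, (k, v) -> System.out.println(k + "=" + v));

            // search: first non-null result wins; encounter order is unspecified.
            String big = m.search(1L, (k, v) -> v > 1L ? k : null);

            // reduce values to a scalar long; basis 0 is the identity for sum.
            long sum = m.reduceValuesToLong(1L, v -> v, 0L, Long::sum);
            System.out.println(big + ", sum=" + sum); // e.g. "b, sum=6"
        }
    }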
177 > * <p>The concurrency properties of bulk operations follow
178 > * from those of ConcurrentHashMap: Any non-null result returned
179 > * from {@code get(key)} and related access methods bears a
180 > * happens-before relation with the associated insertion or
181 > * update.  The result of any bulk operation reflects the
182 > * composition of these per-element relations (but is not
183 > * necessarily atomic with respect to the map as a whole unless it
184 > * is somehow known to be quiescent).  Conversely, because keys
185 > * and values in the map are never null, null serves as a reliable
186 > * atomic indicator of the current lack of any result.  To
187 > * maintain this property, null serves as an implicit basis for
188 > * all non-scalar reduction operations. For the double, long, and
189 > * int versions, the basis should be one that, when combined with
190 > * any other value, returns that other value (more formally, it
191 > * should be the identity element for the reduction). Most common
192 > * reductions have these properties; for example, computing a sum
193 > * with basis 0 or a minimum with basis MAX_VALUE.
194 > *
195 > * <p>Search and transformation functions provided as arguments
196 > * should similarly return null to indicate the lack of any result
197 > * (in which case it is not used). In the case of mapped
198 > * reductions, this also enables transformations to serve as
199 > * filters, returning null (or, in the case of primitive
200 > * specializations, the identity basis) if the element should not
201 > * be combined. You can create compound transformations and
202 > * filterings by composing them yourself under this "null means
203 > * there is nothing there now" rule before using them in search or
204 > * reduce operations.
205 > *
206 > * <p>Methods accepting and/or returning Entry arguments maintain
207 > * key-value associations. They may be useful for example when
208 > * finding the key for the greatest value. Note that "plain" Entry
209 > * arguments can be supplied using {@code new
210 > * AbstractMap.SimpleEntry(k,v)}.
211 > *
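[For instance, finding the key for the greatest value can be written as an entry reduction; a sketch, assuming the ConcurrentHashMap<String,Long> m from the earlier example.]

    // Returns null only if the map is empty.
    java.util.Map.Entry<String,Long> max =
        m.reduceEntries(1L, (e1, e2) -> e1.getValue() >= e2.getValue() ? e1 : e2);
    String keyOfMax = (max == null) ? null : max.getKey();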
212 > * <p>Bulk operations may complete abruptly, throwing an
213 > * exception encountered in the application of a supplied
214 > * function. Bear in mind when handling such exceptions that other
215 > * concurrently executing functions could also have thrown
216 > * exceptions, or would have done so if the first exception had
217 > * not occurred.
218 > *
219 > * <p>Speedups for parallel compared to sequential forms are common
220 > * but not guaranteed.  Parallel operations involving brief functions
221 > * on small maps may execute more slowly than sequential forms if the
222 > * underlying work to parallelize the computation is more expensive
223 > * than the computation itself.  Similarly, parallelization may not
224 > * lead to much actual parallelism if all processors are busy
225 > * performing unrelated tasks.
226 > *
227 > * <p>All arguments to all task methods must be non-null.
228   *
229   * <p>This class is a member of the
230   * <a href="{@docRoot}/../technotes/guides/collections/index.html">
# Line 70 | Line 235 | import java.io.ObjectOutputStream;
235   * @param <K> the type of keys maintained by this map
236   * @param <V> the type of mapped values
237   */
238 < public class ConcurrentHashMap<K, V> extends AbstractMap<K, V>
74 <        implements ConcurrentMap<K, V>, Serializable {
238 > public class ConcurrentHashMap<K,V> implements ConcurrentMap<K,V>, Serializable {
239      private static final long serialVersionUID = 7249069246763182397L;
240  
241      /*
242 <     * The basic strategy is to subdivide the table among Segments,
243 <     * each of which itself is a concurrently readable hash table.  To
244 <     * reduce footprint, all but one segments are constructed only
245 <     * when first needed (see ensureSegment). To maintain visibility
246 <     * in the presence of lazy construction, accesses to segments as
247 <     * well as elements of segment's table must use volatile access,
248 <     * which is done via Unsafe within methods segmentAt etc
249 <     * below. These provide the functionality of AtomicReferenceArrays
250 <     * but reduce the levels of indirection. Additionally,
251 <     * volatile-writes of table elements and entry "next" fields
252 <     * within locked operations use the cheaper "lazySet" forms of
253 <     * writes (via putOrderedObject) because these writes are always
254 <     * followed by lock releases that maintain sequential consistency
255 <     * of table updates.
256 <     *
257 <     * Historical note: The previous version of this class relied
258 <     * heavily on "final" fields, which avoided some volatile reads at
259 <     * the expense of a large initial footprint.  Some remnants of
260 <     * that design (including forced construction of segment 0) exist
261 <     * to ensure serialization compatibility.
242 >     * Overview:
243 >     *
244 >     * The primary design goal of this hash table is to maintain
245 >     * concurrent readability (typically method get(), but also
246 >     * iterators and related methods) while minimizing update
247 >     * contention. Secondary goals are to keep space consumption about
248 >     * the same or better than java.util.HashMap, and to support high
249 >     * initial insertion rates on an empty table by many threads.
250 >     *
251 >     * This map usually acts as a binned (bucketed) hash table.
252 >     * Each key-value mapping is held in a Node.  Most nodes are
253 >     * instances of the basic Node class with hash, key, value, and
254 >     * next fields. However, various subclasses exist: TreeNodes are
255 >     * arranged in balanced trees, not lists.  TreeBins hold the roots
256 >     * of sets of TreeNodes. ForwardingNodes are placed at the heads
257 >     * of bins during resizing. ReservationNodes are used as
258 >     * placeholders while establishing values in computeIfAbsent and
259 >     * related methods.  The three types TreeBin, ForwardingNode, and
260 >     * ReservationNode do not hold normal user keys, values, or
261 >     * hashes, and are readily distinguishable during search etc
262 >     * because they have negative hash fields and null key and value
263 >     * fields. (These special nodes are either uncommon or transient,
264 >     * so the impact of carrying around some unused fields is
265 >     * insignificant.)
266 >     *
267 >     * The table is lazily initialized to a power-of-two size upon the
268 >     * first insertion.  Each bin in the table normally contains a
269 >     * list of Nodes (most often, the list has only zero or one Node).
270 >     * Table accesses require volatile/atomic reads, writes, and
271 >     * CASes.  Because there is no other way to arrange this without
272 >     * adding further indirections, we use intrinsics
273 >     * (sun.misc.Unsafe) operations.
274 >     *
275 >     * We use the top (sign) bit of Node hash fields for control
276 >     * purposes -- it is available anyway because of addressing
277 >     * constraints.  Nodes with negative hash fields are specially
278 >     * handled or ignored in map methods.
279 >     *
280 >     * Insertion (via put or its variants) of the first node in an
281 >     * empty bin is performed by just CASing it to the bin.  This is
282 >     * by far the most common case for put operations under most
283 >     * key/hash distributions.  Other update operations (insert,
284 >     * delete, and replace) require locks.  We do not want to waste
285 >     * the space required to associate a distinct lock object with
286 >     * each bin, so instead use the first node of a bin list itself as
287 >     * a lock. Locking support for these locks relies on builtin
288 >     * "synchronized" monitors.
289 >     *
290 >     * Using the first node of a list as a lock does not by itself
291 >     * suffice though: When a node is locked, any update must first
292 >     * validate that it is still the first node after locking it, and
293 >     * retry if not. Because new nodes are always appended to lists,
294 >     * once a node is first in a bin, it remains first until deleted
295 >     * or the bin becomes invalidated (upon resizing).
296 >     *
297 >     * The main disadvantage of per-bin locks is that other update
298 >     * operations on other nodes in a bin list protected by the same
299 >     * lock can stall, for example when user equals() or mapping
300 >     * functions take a long time.  However, statistically, under
301 >     * random hash codes, this is not a common problem.  Ideally, the
302 >     * frequency of nodes in bins follows a Poisson distribution
303 >     * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
304 >     * parameter of about 0.5 on average, given the resizing threshold
305 >     * of 0.75, although with a large variance because of resizing
306 >     * granularity. Ignoring variance, the expected occurrences of
307 >     * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
308 >     * first values are:
309 >     *
310 >     * 0:    0.60653066
311 >     * 1:    0.30326533
312 >     * 2:    0.07581633
313 >     * 3:    0.01263606
314 >     * 4:    0.00157952
315 >     * 5:    0.00015795
316 >     * 6:    0.00001316
317 >     * 7:    0.00000094
318 >     * 8:    0.00000006
319 >     * more: less than 1 in ten million
320 >     *
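[The table above can be reproduced in a few lines; a sketch that computes each Poisson term P(k) = exp(-0.5) * pow(0.5, k) / factorial(k) incrementally.]

    public class PoissonSketch {
        public static void main(String[] args) {
            double p = Math.exp(-0.5);            // P(0)
            for (int k = 0; k <= 8; k++) {
                System.out.printf("%d: %.8f%n", k, p);
                p *= 0.5 / (k + 1);               // P(k+1) = P(k) * 0.5 / (k+1)
            }
        }
    }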
321 >     * Lock contention probability for two threads accessing distinct
322 >     * elements is roughly 1 / (8 * #elements) under random hashes.
323 >     *
324 >     * Actual hash code distributions encountered in practice
325 >     * sometimes deviate significantly from uniform randomness.  This
326 >     * includes the case when N > (1<<30), so some keys MUST collide.
327 >     * Similarly for dumb or hostile usages in which multiple keys are
328 >     * designed to have identical hash codes or ones that differ only
329 >     * in high bits. So we use a secondary strategy that applies when
330 >     * the number of nodes in a bin exceeds a threshold. These
331 >     * TreeBins use a balanced tree to hold nodes (a specialized form
332 >     * of red-black trees), bounding search time to O(log N).  Each
333 >     * search step in a TreeBin is at least twice as slow as in a
334 >     * regular list, but given that N cannot exceed (1<<64) (before
335 >     * running out of addresses) this bounds search steps, lock hold
336 >     * times, etc, to reasonable constants (roughly 100 nodes
337 >     * inspected per operation worst case) so long as keys are
338 >     * Comparable (which is very common -- String, Long, etc).
339 >     * TreeBin nodes (TreeNodes) also maintain the same "next"
340 >     * traversal pointers as regular nodes, so can be traversed in
341 >     * iterators in the same way.
342 >     *
343 >     * The table is resized when occupancy exceeds a percentage
344 >     * threshold (nominally, 0.75, but see below).  Any thread
345 >     * noticing an overfull bin may assist in resizing after the
346 >     * initiating thread allocates and sets up the replacement
347 >     * array. However, rather than stalling, these other threads may
348 >     * proceed with insertions etc.  The use of TreeBins shields us
349 >     * from the worst case effects of overfilling while resizes are in
350 >     * progress.  Resizing proceeds by transferring bins, one by one,
351 >     * from the table to the next table. To enable concurrency, the
352 >     * next table must be (incrementally) prefilled with place-holders
353 >     * serving as reverse forwarders to the old table.  Because we are
354 >     * using power-of-two expansion, the elements from each bin must
355 >     * either stay at same index, or move with a power of two
356 >     * offset. We eliminate unnecessary node creation by catching
357 >     * cases where old nodes can be reused because their next fields
358 >     * won't change.  On average, only about one-sixth of them need
359 >     * cloning when a table doubles. The nodes they replace will be
360 >     * garbage collectable as soon as they are no longer referenced by
361 >     * any reader thread that may be in the midst of concurrently
362 >     * traversing the table.  Upon transfer, the old table bin contains
363 >     * only a special forwarding node (with hash field "MOVED") that
364 >     * contains the next table as its key. On encountering a
365 >     * forwarding node, access and update operations restart, using
366 >     * the new table.
367 >     *
368 >     * Each bin transfer requires its bin lock, which can stall
369 >     * waiting for locks while resizing. However, because other
370 >     * threads can join in and help resize rather than contend for
371 >     * locks, average aggregate waits become shorter as resizing
372 >     * progresses.  The transfer operation must also ensure that all
373 >     * accessible bins in both the old and new table are usable by any
374 >     * traversal.  This is arranged by proceeding from the last bin
375 >     * (table.length - 1) up towards the first.  Upon seeing a
376 >     * forwarding node, traversals (see class Traverser) arrange to
377 >     * move to the new table without revisiting nodes.  However, to
378 >     * ensure that no intervening nodes are skipped, bin splitting can
379 >     * only begin after the associated reverse-forwarders are in
380 >     * place.
381 >     *
382 >     * The traversal scheme also applies to partial traversals of
383 >     * ranges of bins (via an alternate Traverser constructor)
384 >     * to support partitioned aggregate operations.  Also, read-only
385 >     * operations give up if ever forwarded to a null table, which
386 >     * provides support for shutdown-style clearing (not currently
387 >     * implemented).
388 >     *
389 >     * Lazy table initialization minimizes footprint until first use,
390 >     * and also avoids resizings when the first operation is from a
391 >     * putAll, constructor with map argument, or deserialization.
392 >     * These cases attempt to override the initial capacity settings,
393 >     * but harmlessly fail to take effect in cases of races.
394 >     *
395 >     * The element count is maintained using a specialization of
396 >     * LongAdder. We need to incorporate a specialization rather than
397 >     * just use a LongAdder in order to access implicit
398 >     * contention-sensing that leads to creation of multiple
399 >     * CounterCells.  The counter mechanics avoid contention on
400 >     * updates but can encounter cache thrashing if read too
401 >     * frequently during concurrent access. To avoid reading so often,
402 >     * resizing under contention is attempted only upon adding to a
403 >     * bin already holding two or more nodes. Under uniform hash
404 >     * distributions, the probability of this occurring at threshold
405 >     * is around 13%, meaning that only about 1 in 8 puts check
406 >     * threshold (and after resizing, many fewer do so).
407 >     *
408 >     * TreeBins use a special form of comparison for search and
409 >     * related operations (which is the main reason we cannot use
410 >     * existing collections such as TreeMaps). TreeBins contain
411 >     * Comparable elements, but may contain others, as well as
412 >     * elements that are Comparable but not necessarily Comparable
413 >     * for the same T, so we cannot invoke compareTo among them. To
414 >     * handle this, the tree is ordered primarily by hash value, then
415 >     * by Comparable.compareTo order if applicable.  On lookup at a
416 >     * node, if elements are not comparable or compare as 0 then both
417 >     * left and right children may need to be searched in the case of
418 >     * tied hash values. (This corresponds to the full list search
419 >     * that would be necessary if all elements were non-Comparable and
420 >     * had tied hashes.)  The red-black balancing code is updated from
421 >     * pre-jdk-collections
422 >     * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
423 >     * based in turn on Cormen, Leiserson, and Rivest "Introduction to
424 >     * Algorithms" (CLR).
425 >     *
426 >     * TreeBins also require an additional locking mechanism.  While
427 >     * list traversal is always possible by readers even during
428 >     * updates, tree traversal is not, mainly because of tree-rotations
429 >     * that may change the root node and/or its linkages.  TreeBins
430 >     * include a simple read-write lock mechanism parasitic on the
431 >     * main bin-synchronization strategy: Structural adjustments
432 >     * associated with an insertion or removal are already bin-locked
433 >     * (and so cannot conflict with other writers) but must wait for
434 >     * ongoing readers to finish. Since there can be only one such
435 >     * waiter, we use a simple scheme using a single "waiter" field to
436 >     * block writers.  However, readers need never block.  If the root
437 >     * lock is held, they proceed along the slow traversal path (via
438 >     * next-pointers) until the lock becomes available or the list is
439 >     * exhausted, whichever comes first. These cases are not fast, but
440 >     * maximize aggregate expected throughput.
441 >     *
442 >     * Maintaining API and serialization compatibility with previous
443 >     * versions of this class introduces several oddities. Mainly: We
444 >     * leave untouched but unused constructor arguments referring to
445 >     * concurrencyLevel. We accept a loadFactor constructor argument,
446 >     * but apply it only to initial table capacity (which is the only
447 >     * time that we can guarantee to honor it.) We also declare an
448 >     * unused "Segment" class that is instantiated in minimal form
449 >     * only when serializing.
450 >     *
451 >     * This file is organized to make things a little easier to follow
452 >     * while reading than they might otherwise: first the main static
453 >     * declarations and utilities, then fields, then main public
454 >     * methods (with a few factorings of multiple public methods into
455 >     * internal ones), then sizing methods, trees, traversers, and
456 >     * bulk operations.
457       */
458  
459      /* ---------------- Constants -------------- */
460  
461      /**
462 <     * The default initial capacity for this table,
463 <     * used when not otherwise specified in a constructor.
462 >     * The largest possible table capacity.  This value must be
463 >     * exactly 1<<30 to stay within Java array allocation and indexing
464 >     * bounds for power of two table sizes, and is further required
465 >     * because the top two bits of 32bit hash fields are used for
466 >     * control purposes.
467       */
468 <    static final int DEFAULT_INITIAL_CAPACITY = 16;
468 >    private static final int MAXIMUM_CAPACITY = 1 << 30;
469  
470      /**
471 <     * The default load factor for this table, used when not
472 <     * otherwise specified in a constructor.
471 >     * The default initial table capacity.  Must be a power of 2
472 >     * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
473       */
474 <    static final float DEFAULT_LOAD_FACTOR = 0.75f;
474 >    private static final int DEFAULT_CAPACITY = 16;
475  
476      /**
477 <     * The default concurrency level for this table, used when not
478 <     * otherwise specified in a constructor.
477 >     * The largest possible (non-power of two) array size.
478 >     * Needed by toArray and related methods.
479       */
480 <    static final int DEFAULT_CONCURRENCY_LEVEL = 16;
480 >    static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
481  
482      /**
483 <     * The maximum capacity, used if a higher value is implicitly
484 <     * specified by either of the constructors with arguments.  MUST
123 <     * be a power of two <= 1<<30 to ensure that entries are indexable
124 <     * using ints.
483 >     * The default concurrency level for this table. Unused but
484 >     * defined for compatibility with previous versions of this class.
485       */
486 <    static final int MAXIMUM_CAPACITY = 1 << 30;
486 >    private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
487  
488      /**
489 <     * The minimum capacity for per-segment tables.  Must be a power
490 <     * of two, at least two to avoid immediate resizing on next use
491 <     * after lazy construction.
489 >     * The load factor for this table. Overrides of this value in
490 >     * constructors affect only the initial table capacity.  The
491 >     * actual floating point value isn't normally used -- it is
492 >     * simpler to use expressions such as {@code n - (n >>> 2)} for
493 >     * the associated resizing threshold.
494       */
495 <    static final int MIN_SEGMENT_TABLE_CAPACITY = 2;
495 >    private static final float LOAD_FACTOR = 0.75f;
496  
497      /**
498 <     * The maximum number of segments to allow; used to bound
499 <     * constructor arguments. Must be power of two less than 1 << 24.
498 >     * The bin count threshold for using a tree rather than list for a
499 >     * bin.  Bins are converted to trees when adding an element to a
500 >     * bin with at least this many nodes. The value should be at least
501 >     * 8 to mesh with assumptions in tree removal about conversion
502 >     * back to plain bins upon shrinkage.
503       */
504 <    static final int MAX_SEGMENTS = 1 << 16; // slightly conservative
504 >    static final int TREEIFY_THRESHOLD = 8;
505  
506      /**
507 <     * Number of unsynchronized retries in size and containsValue
508 <     * methods before resorting to locking. This is used to avoid
509 <     * unbounded retries if tables undergo continuous modification
145 <     * which would make it impossible to obtain an accurate result.
507 >     * The bin count threshold for untreeifying a (split) bin during a
508 >     * resize operation. Should be less than TREEIFY_THRESHOLD, and at
509 >     * most 6 to mesh with shrinkage detection under removal.
510       */
511 <    static final int RETRIES_BEFORE_LOCK = 2;
148 <
149 <    /* ---------------- Fields -------------- */
511 >    static final int UNTREEIFY_THRESHOLD = 6;
512  
513      /**
514 <     * Mask value for indexing into segments. The upper bits of a
515 <     * key's hash code are used to choose the segment.
514 >     * The smallest table capacity for which bins may be treeified.
515 >     * (Otherwise the table is resized if too many nodes in a bin.)
516 >     * Should be at least 4 * TREEIFY_THRESHOLD to avoid conflicts
517 >     * between resizing and treeification thresholds.
518       */
519 <    final int segmentMask;
519 >    static final int MIN_TREEIFY_CAPACITY = 64;
520  
521      /**
522 <     * Shift value for indexing within segments.
522 >     * Minimum number of rebinnings per transfer step. Ranges are
523 >     * subdivided to allow multiple resizer threads.  This value
524 >     * serves as a lower bound to avoid resizers encountering
525 >     * excessive memory contention.  The value should be at least
526 >     * DEFAULT_CAPACITY.
527       */
528 <    final int segmentShift;
528 >    private static final int MIN_TRANSFER_STRIDE = 16;
529  
530 <    /**
531 <     * The segments, each of which is a specialized hash table.
530 >    /*
531 >     * Encodings for Node hash fields. See above for explanation.
532       */
533 <    final Segment<K,V>[] segments;
534 <
535 <    transient Set<K> keySet;
536 <    transient Set<Map.Entry<K,V>> entrySet;
537 <    transient Collection<V> values;
538 <
539 <    /**
540 <     * ConcurrentHashMap list entry. Note that this is never exported
541 <     * out as a user-visible Map.Entry.
533 >    static final int MOVED     = 0x8fffffff; // hash for forwarding nodes
534 >    static final int TREEBIN   = 0x80000000; // hash for heads of trees
535 >    static final int RESERVED  = 0x80000001; // hash for transient reservations
536 >    static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
537 >
538 >    /** Number of CPUS, to place bounds on some sizings */
539 >    static final int NCPU = Runtime.getRuntime().availableProcessors();
540 >
541 >    /** For serialization compatibility. */
542 >    private static final ObjectStreamField[] serialPersistentFields = {
543 >        new ObjectStreamField("segments", Segment[].class),
544 >        new ObjectStreamField("segmentMask", Integer.TYPE),
545 >        new ObjectStreamField("segmentShift", Integer.TYPE)
546 >    };
547 >
548 >    /* ---------------- Nodes -------------- */
549 >
550 >    /**
551 >     * Key-value entry.  This class is never exported out as a
552 >     * user-mutable Map.Entry (i.e., one supporting setValue; see
553 >     * MapEntry below), but can be used for read-only traversals used
554 >     * in bulk tasks.  Subclasses of Node with negative hash fields are special,
555 >     * and contain null keys and values (but are never exported).
556 >     * Otherwise, keys and vals are never null.
557       */
558 <    static final class HashEntry<K,V> {
558 >    static class Node<K,V> implements Map.Entry<K,V> {
559          final int hash;
560          final K key;
561 <        volatile V value;
562 <        volatile HashEntry<K,V> next;
561 >        volatile V val;
562 >        Node<K,V> next;
563  
564 <        HashEntry(int hash, K key, V value, HashEntry<K,V> next) {
564 >        Node(int hash, K key, V val, Node<K,V> next) {
565              this.hash = hash;
566              this.key = key;
567 <            this.value = value;
567 >            this.val = val;
568              this.next = next;
569          }
570  
571 <        /**
572 <         * Sets next field with volatile write semantics.  (See above
573 <         * about use of putOrderedObject.)
574 <         */
575 <        final void setNext(HashEntry<K,V> n) {
576 <            UNSAFE.putOrderedObject(this, nextOffset, n);
571 >        public final K getKey()       { return key; }
572 >        public final V getValue()     { return val; }
573 >        public final int hashCode()   { return key.hashCode() ^ val.hashCode(); }
574 >        public final String toString(){ return key + "=" + val; }
575 >        public final V setValue(V value) {
576 >            throw new UnsupportedOperationException();
577 >        }
578 >
579 >        public final boolean equals(Object o) {
580 >            Object k, v, u; Map.Entry<?,?> e;
581 >            return ((o instanceof Map.Entry) &&
582 >                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
583 >                    (v = e.getValue()) != null &&
584 >                    (k == key || k.equals(key)) &&
585 >                    (v == (u = val) || v.equals(u)));
586 >        }
587 >
588 >        Node<K,V> find(int h, Object k) {
589 >            Node<K,V> e = this;
590 >            if (k != null) {
591 >                do {
592 >                    K ek;
593 >                    if (e.hash == h &&
594 >                        ((ek = e.key) == k || (ek != null && k.equals(ek))))
595 >                        return e;
596 >                } while ((e = e.next) != null);
597 >            }
598 >            return null;
599          }
600 +    }
601  
602 <        // Unsafe mechanics
603 <        static final sun.misc.Unsafe UNSAFE;
604 <        static final long nextOffset;
605 <        static {
606 <            try {
607 <                UNSAFE = sun.misc.Unsafe.getUnsafe();
608 <                Class k = HashEntry.class;
609 <                nextOffset = UNSAFE.objectFieldOffset
610 <                    (k.getDeclaredField("next"));
611 <            } catch (Exception e) {
612 <                throw new Error(e);
602 >    /* ---------------- Static utilities -------------- */
603 >
604 >    /**
605 >     * Spreads (XORs) higher bits of hash to lower and also forces top
606 >     * bit to 0. Because the table uses power-of-two masking, sets of
607 >     * hashes that vary only in bits above the current mask will
608 >     * always collide. (Among known examples are sets of Float keys
609 >     * holding consecutive whole numbers in small tables.)  So we
610 >     * apply a transform that spreads the impact of higher bits
611 >     * downward. There is a tradeoff between speed, utility, and
612 >     * quality of bit-spreading. Because many common sets of hashes
613 >     * are already reasonably distributed (so don't benefit from
614 >     * spreading), and because we use trees to handle large sets of
615 >     * collisions in bins, we just XOR some shifted bits in the
616 >     * cheapest possible way to reduce systematic lossage, as well as
617 >     * to incorporate impact of the highest bits that would otherwise
618 >     * never be used in index calculations because of table bounds.
619 >     */
620 >    static final int spread(int h) {
621 >        return (h ^ (h >>> 16)) & HASH_BITS;
622 >    }
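[A small sketch of why the XOR matters under power-of-two masking; the sample hashes are made up. Without spreading, hashes that differ only above the mask all land in bin 0.]

    public class SpreadSketch {
        static int spread(int h) { return (h ^ (h >>> 16)) & 0x7fffffff; }

        public static void main(String[] args) {
            int mask = 15;                        // table of 16 bins
            int h1 = 0x10000, h2 = 0x20000;       // differ only in high bits
            System.out.println((h1 & mask) + " " + (h2 & mask));                 // 0 0
            System.out.println((spread(h1) & mask) + " " + (spread(h2) & mask)); // 1 2
        }
    }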
623 >
624 >    /**
625 >     * Returns a power of two table size for the given desired capacity.
626 >     * See Hacker's Delight, sec 3.2
627 >     */
628 >    private static final int tableSizeFor(int c) {
629 >        int n = c - 1;
630 >        n |= n >>> 1;
631 >        n |= n >>> 2;
632 >        n |= n >>> 4;
633 >        n |= n >>> 8;
634 >        n |= n >>> 16;
635 >        return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
636 >    }
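[Worked values for the rounding above; a sketch, with the helper copied out because the real method is private.]

    public class TableSizeSketch {
        static int tableSizeFor(int c) {          // copy of the helper above
            int n = c - 1;
            n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16;
            return (n < 0) ? 1 : (n >= (1 << 30)) ? (1 << 30) : n + 1;
        }
        public static void main(String[] args) {
            for (int c : new int[] {1, 16, 17, 25})
                System.out.println(c + " -> " + tableSizeFor(c)); // 1, 16, 32, 32
        }
    }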
637 >
638 >    /**
639 >     * Returns x's Class if it is of the form "class C implements
640 >     * Comparable<C>", else null.
641 >     */
642 >    static Class<?> comparableClassFor(Object x) {
643 >        if (x instanceof Comparable) {
644 >            Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
645 >            if ((c = x.getClass()) == String.class) // bypass checks
646 >                return c;
647 >            if ((ts = c.getGenericInterfaces()) != null) {
648 >                for (int i = 0; i < ts.length; ++i) {
649 >                    if (((t = ts[i]) instanceof ParameterizedType) &&
650 >                        ((p = (ParameterizedType)t).getRawType() ==
651 >                         Comparable.class) &&
652 >                        (as = p.getActualTypeArguments()) != null &&
653 >                        as.length == 1 && as[0] == c) // type arg is c
654 >                        return c;
655 >                }
656              }
657          }
658 +        return null;
659      }
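[Illustrative expectations for the helper above; shown as comments rather than a runnable test because the method is package-private.]

    // comparableClassFor("abc")              == String.class   // String implements Comparable<String>
    // comparableClassFor(Integer.valueOf(1)) == Integer.class  // Integer implements Comparable<Integer>
    // comparableClassFor(new Object())       == null           // not Comparable at all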
660  
661      /**
662 <     * Gets the ith element of given table (if nonnull) with volatile
663 <     * read semantics. Note: This is manually integrated into a few
664 <     * performance-sensitive methods to reduce call overhead.
662 >     * Returns k.compareTo(x) if x matches kc (k's screened comparable
663 >     * class), else 0.
664 >     */
665 >    @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
666 >    static int compareComparables(Class<?> kc, Object k, Object x) {
667 >        return (x == null || x.getClass() != kc ? 0 :
668 >                ((Comparable)k).compareTo(x));
669 >    }
670 >
671 >    /* ---------------- Table element access -------------- */
672 >
673 >    /*
674 >     * Volatile access methods are used for table elements as well as
675 >     * elements of in-progress next table while resizing.  All uses of
676 >     * the tab arguments must be null checked by callers.  All callers
677 >     * also paranoically precheck that tab's length is not zero (or an
678 >     * equivalent check), thus ensuring that any index argument taking
679 >     * the form of a hash value anded with (length - 1) is a valid
680 >     * index.  Note that, to be correct wrt arbitrary concurrency
681 >     * errors by users, these checks must operate on local variables,
682 >     * which accounts for some odd-looking inline assignments below.
683 >     * Note that calls to setTabAt always occur within locked regions,
684 >     * and so do not need full volatile semantics, but still require
685 >     * ordering to maintain concurrent readability.
686       */
687 +
688      @SuppressWarnings("unchecked")
689 <    static final <K,V> HashEntry<K,V> entryAt(HashEntry<K,V>[] tab, int i) {
690 <        return (tab == null) ? null :
691 <            (HashEntry<K,V>) UNSAFE.getObjectVolatile
692 <            (tab, ((long)i << TSHIFT) + TBASE);
689 >    static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
690 >        return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
691 >    }
692 >
693 >    static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
694 >                                        Node<K,V> c, Node<K,V> v) {
695 >        return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
696      }
697  
698 +    static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
699 +        U.putOrderedObject(tab, ((long)i << ASHIFT) + ABASE, v);
700 +    }
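[The lock-free fast path described in the overview can be modeled with AtomicReferenceArray standing in for the Unsafe-based tabAt/casTabAt helpers; a sketch of the pattern, not the actual implementation.]

    import java.util.concurrent.atomic.AtomicReferenceArray;

    public class CasBinSketch {
        public static void main(String[] args) {
            AtomicReferenceArray<String> tab = new AtomicReferenceArray<>(16);
            // First insertion into an empty bin: one CAS, no lock taken.
            System.out.println(tab.compareAndSet(3, null, "first"));  // true
            // Bin no longer empty: the CAS fails; the real code then locks
            // the head node (synchronized (f)) and appends under that lock.
            System.out.println(tab.compareAndSet(3, null, "second")); // false
        }
    }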
701 +
702 +    /* ---------------- Fields -------------- */
703 +
704 +    /**
705 +     * The array of bins. Lazily initialized upon first insertion.
706 +     * Size is always a power of two. Accessed directly by iterators.
707 +     */
708 +    transient volatile Node<K,V>[] table;
709 +
710      /**
711 <     * Sets the ith element of given table, with volatile write
225 <     * semantics. (See above about use of putOrderedObject.)
711 >     * The next table to use; non-null only while resizing.
712       */
713 <    static final <K,V> void setEntryAt(HashEntry<K,V>[] tab, int i,
714 <                                       HashEntry<K,V> e) {
715 <        UNSAFE.putOrderedObject(tab, ((long)i << TSHIFT) + TBASE, e);
713 >    private transient volatile Node<K,V>[] nextTable;
714 >
715 >    /**
716 >     * Base counter value, used mainly when there is no contention,
717 >     * but also as a fallback during table initialization
718 >     * races. Updated via CAS.
719 >     */
720 >    private transient volatile long baseCount;
721 >
722 >    /**
723 >     * Table initialization and resizing control.  When negative, the
724 >     * table is being initialized or resized: -1 for initialization,
725 >     * else -(1 + the number of active resizing threads).  Otherwise,
726 >     * when table is null, holds the initial table size to use upon
727 >     * creation, or 0 for default. After initialization, holds the
728 >     * next element count value upon which to resize the table.
729 >     */
730 >    private transient volatile int sizeCtl;
731 >
732 >    /**
733 >     * The next table index (plus one) to split while resizing.
734 >     */
735 >    private transient volatile int transferIndex;
736 >
737 >    /**
738 >     * The least available table index to split while resizing.
739 >     */
740 >    private transient volatile int transferOrigin;
741 >
742 >    /**
743 >     * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
744 >     */
745 >    private transient volatile int cellsBusy;
746 >
747 >    /**
748 >     * Table of counter cells. When non-null, size is a power of 2.
749 >     */
750 >    private transient volatile CounterCell[] counterCells;
751 >
752 >    // views
753 >    private transient KeySetView<K,V> keySet;
754 >    private transient ValuesView<K,V> values;
755 >    private transient EntrySetView<K,V> entrySet;
756 >
757 >
758 >    /* ---------------- Public operations -------------- */
759 >
760 >    /**
761 >     * Creates a new, empty map with the default initial table size (16).
762 >     */
763 >    public ConcurrentHashMap() {
764      }
765  
766      /**
767 <     * Applies a supplemental hash function to a given hashCode, which
768 <     * defends against poor quality hash functions.  This is critical
769 <     * because ConcurrentHashMap uses power-of-two length hash tables,
770 <     * that otherwise encounter collisions for hashCodes that do not
771 <     * differ in lower or upper bits.
767 >     * Creates a new, empty map with an initial table size
768 >     * accommodating the specified number of elements without the need
769 >     * to dynamically resize.
770 >     *
771 >     * @param initialCapacity The implementation performs internal
772 >     * sizing to accommodate this many elements.
773 >     * @throws IllegalArgumentException if the initial capacity of
774 >     * elements is negative
775       */
776 <    private static int hash(int h) {
777 <        // Spread bits to regularize both segment and index locations,
778 <        // using variant of single-word Wang/Jenkins hash.
779 <        h += (h <<  15) ^ 0xffffcd7d;
780 <        h ^= (h >>> 10);
781 <        h += (h <<   3);
782 <        h ^= (h >>>  6);
246 <        h += (h <<   2) + (h << 14);
247 <        return h ^ (h >>> 16);
776 >    public ConcurrentHashMap(int initialCapacity) {
777 >        if (initialCapacity < 0)
778 >            throw new IllegalArgumentException();
779 >        int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
780 >                   MAXIMUM_CAPACITY :
781 >                   tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
782 >        this.sizeCtl = cap;
783      }
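[A worked example of the sizing above, as a sketch:]

    // new ConcurrentHashMap<>(16):
    //   16 + (16 >>> 1) + 1 = 25, and tableSizeFor(25) == 32,
    // so the first table has 32 bins; at the 0.75 threshold it resizes
    // only past 24 mappings, comfortably holding the requested 16.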
784  
785      /**
786 <     * Segments are specialized versions of hash tables.  This
787 <     * subclasses from ReentrantLock opportunistically, just to
788 <     * simplify some locking and avoid separate construction.
786 >     * Creates a new map with the same mappings as the given map.
787 >     *
788 >     * @param m the map
789       */
790 <    static final class Segment<K,V> extends ReentrantLock implements Serializable {
791 <        /*
792 <         * Segments maintain a table of entry lists that are always
793 <         * kept in a consistent state, so can be read (via volatile
259 <         * reads of segments and tables) without locking.  This
260 <         * requires replicating nodes when necessary during table
261 <         * resizing, so the old lists can be traversed by readers
262 <         * still using old version of table.
263 <         *
264 <         * This class defines only mutative methods requiring locking.
265 <         * Except as noted, the methods of this class perform the
266 <         * per-segment versions of ConcurrentHashMap methods.  (Other
267 <         * methods are integrated directly into ConcurrentHashMap
268 <         * methods.) These mutative methods use a form of controlled
269 <         * spinning on contention via methods scanAndLock and
270 <         * scanAndLockForPut. These intersperse tryLocks with
271 <         * traversals to locate nodes.  The main benefit is to absorb
272 <         * cache misses (which are very common for hash tables) while
273 <         * obtaining locks so that traversal is faster once
274 <         * acquired. We do not actually use the found nodes since they
275 <         * must be re-acquired under lock anyway to ensure sequential
276 <         * consistency of updates (and in any case may be undetectably
277 <         * stale), but they will normally be much faster to re-locate.
278 <         * Also, scanAndLockForPut speculatively creates a fresh node
279 <         * to use in put if no node is found.
280 <         */
790 >    public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
791 >        this.sizeCtl = DEFAULT_CAPACITY;
792 >        putAll(m);
793 >    }
794  
795 <        private static final long serialVersionUID = 2249069246763182397L;
795 >    /**
796 >     * Creates a new, empty map with an initial table size based on
797 >     * the given number of elements ({@code initialCapacity}) and
798 >     * initial table density ({@code loadFactor}).
799 >     *
800 >     * @param initialCapacity the initial capacity. The implementation
801 >     * performs internal sizing to accommodate this many elements,
802 >     * given the specified load factor.
803 >     * @param loadFactor the load factor (table density) for
804 >     * establishing the initial table size
805 >     * @throws IllegalArgumentException if the initial capacity of
806 >     * elements is negative or the load factor is nonpositive
807 >     *
808 >     * @since 1.6
809 >     */
810 >    public ConcurrentHashMap(int initialCapacity, float loadFactor) {
811 >        this(initialCapacity, loadFactor, 1);
812 >    }
813  
814 <        /**
815 <         * The maximum number of times to tryLock in a prescan before
816 <         * possibly blocking on acquire in preparation for a locked
817 <         * segment operation. On multiprocessors, using a bounded
818 <         * number of retries maintains cache acquired while locating
819 <         * nodes.
820 <         */
821 <        static final int MAX_SCAN_RETRIES =
822 <            Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1;
814 >    /**
815 >     * Creates a new, empty map with an initial table size based on
816 >     * the given number of elements ({@code initialCapacity}), table
817 >     * density ({@code loadFactor}), and number of concurrently
818 >     * updating threads ({@code concurrencyLevel}).
819 >     *
820 >     * @param initialCapacity the initial capacity. The implementation
821 >     * performs internal sizing to accommodate this many elements,
822 >     * given the specified load factor.
823 >     * @param loadFactor the load factor (table density) for
824 >     * establishing the initial table size
825 >     * @param concurrencyLevel the estimated number of concurrently
826 >     * updating threads. The implementation may use this value as
827 >     * a sizing hint.
828 >     * @throws IllegalArgumentException if the initial capacity is
829 >     * negative or the load factor or concurrencyLevel are
830 >     * nonpositive
831 >     */
832 >    public ConcurrentHashMap(int initialCapacity,
833 >                             float loadFactor, int concurrencyLevel) {
834 >        if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
835 >            throw new IllegalArgumentException();
836 >        if (initialCapacity < concurrencyLevel)   // Use at least as many bins
837 >            initialCapacity = concurrencyLevel;   // as estimated threads
838 >        long size = (long)(1.0 + (long)initialCapacity / loadFactor);
839 >        int cap = (size >= (long)MAXIMUM_CAPACITY) ?
840 >            MAXIMUM_CAPACITY : tableSizeFor((int)size);
841 >        this.sizeCtl = cap;
842 >    }
843  
844 <        /**
295 <         * The per-segment table. Elements are accessed via
296 <         * entryAt/setEntryAt providing volatile semantics.
297 <         */
298 <        transient volatile HashEntry<K,V>[] table;
844 >    // Original (since JDK1.2) Map methods
845  
846 <        /**
847 <         * The number of elements. Accessed only either within locks
848 <         * or among other volatile reads that maintain visibility.
849 <         */
850 <        transient int count;
846 >    /**
847 >     * {@inheritDoc}
848 >     */
849 >    public int size() {
850 >        long n = sumCount();
851 >        return ((n < 0L) ? 0 :
852 >                (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
853 >                (int)n);
854 >    }
855  
856 <        /**
857 <         * The total number of mutative operations in this segment.
858 <         * Even though this may overflows 32 bits, it provides
859 <         * sufficient accuracy for stability checks in CHM isEmpty()
860 <         * and size() methods.  Accessed only either within locks or
861 <         * among other volatile reads that maintain visibility.
312 <         */
313 <        transient int modCount;
856 >    /**
857 >     * {@inheritDoc}
858 >     */
859 >    public boolean isEmpty() {
860 >        return sumCount() <= 0L; // ignore transient negative values
861 >    }
862  
863 <        /**
864 <         * The table is rehashed when its size exceeds this threshold.
865 <         * (The value of this field is always <tt>(int)(capacity *
866 <         * loadFactor)</tt>.)
867 <         */
868 <        transient int threshold;
863 >    /**
864 >     * Returns the value to which the specified key is mapped,
865 >     * or {@code null} if this map contains no mapping for the key.
866 >     *
867 >     * <p>More formally, if this map contains a mapping from a key
868 >     * {@code k} to a value {@code v} such that {@code key.equals(k)},
869 >     * then this method returns {@code v}; otherwise it returns
870 >     * {@code null}.  (There can be at most one such mapping.)
871 >     *
872 >     * @throws NullPointerException if the specified key is null
873 >     */
874 >    public V get(Object key) {
875 >        Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
876 >        int h = spread(key.hashCode());
877 >        if ((tab = table) != null && (n = tab.length) > 0 &&
878 >            (e = tabAt(tab, (n - 1) & h)) != null) {
879 >            if ((eh = e.hash) == h) {
880 >                if ((ek = e.key) == key || (ek != null && key.equals(ek)))
881 >                    return e.val;
882 >            }
883 >            else if (eh < 0)
884 >                return (p = e.find(h, key)) != null ? p.val : null;
885 >            while ((e = e.next) != null) {
886 >                if (e.hash == h &&
887 >                    ((ek = e.key) == key || (ek != null && key.equals(ek))))
888 >                    return e.val;
889 >            }
890 >        }
891 >        return null;
892 >    }
893  
894 <        /**
895 <         * The load factor for the hash table.  Even though this value
896 <         * is the same for all segments, it is replicated to avoid needing
897 <         * links to the outer object.
898 <         * @serial
899 <         */
900 <        final float loadFactor;
894 >    /**
895 >     * Tests if the specified object is a key in this table.
896 >     *
897 >     * @param  key possible key
898 >     * @return {@code true} if and only if the specified object
899 >     *         is a key in this table, as determined by the
900 >     *         {@code equals} method; {@code false} otherwise
901 >     * @throws NullPointerException if the specified key is null
902 >     */
903 >    public boolean containsKey(Object key) {
904 >        return get(key) != null;
905 >    }
906  
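A hedged sketch of the null conventions above (names illustrative): since values are never null, a null return from get() always means "no mapping", which is exactly the test containsKey() delegates to.

    ConcurrentHashMap<String, Integer> m = new ConcurrentHashMap<>();
    m.put("a", 1);
    Integer present = m.get("a");     // 1
    Integer absent = m.get("b");      // null: no mapping for "b"
    boolean has = m.containsKey("b"); // false, same as get("b") != null
    // m.get(null) would throw NullPointerException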
907 <        Segment(float lf, int threshold, HashEntry<K,V>[] tab) {
908 <            this.loadFactor = lf;
909 <            this.threshold = threshold;
910 <            this.table = tab;
907 >    /**
908 >     * Returns {@code true} if this map maps one or more keys to the
909 >     * specified value. Note: This method may require a full traversal
910 >     * of the map, and is much slower than method {@code containsKey}.
911 >     *
912 >     * @param value value whose presence in this map is to be tested
913 >     * @return {@code true} if this map maps one or more keys to the
914 >     *         specified value
915 >     * @throws NullPointerException if the specified value is null
916 >     */
917 >    public boolean containsValue(Object value) {
918 >        if (value == null)
919 >            throw new NullPointerException();
920 >        Node<K,V>[] t;
921 >        if ((t = table) != null) {
922 >            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
923 >            for (Node<K,V> p; (p = it.advance()) != null; ) {
924 >                V v;
925 >                if ((v = p.val) == value || (v != null && value.equals(v)))
926 >                    return true;
927 >            }
928          }
929 +        return false;
930 +    }
931  
932 <        final V put(K key, int hash, V value, boolean onlyIfAbsent) {
933 <            HashEntry<K,V> node = tryLock() ? null :
934 <                scanAndLockForPut(key, hash, value);
935 <            V oldValue;
936 <            try {
937 <                HashEntry<K,V>[] tab = table;
938 <                int index = (tab.length - 1) & hash;
939 <                HashEntry<K,V> first = entryAt(tab, index);
940 <                for (HashEntry<K,V> e = first;;) {
941 <                    if (e != null) {
942 <                        K k;
943 <                        if ((k = e.key) == key ||
944 <                            (e.hash == hash && key.equals(k))) {
945 <                            oldValue = e.value;
946 <                            if (!onlyIfAbsent) {
947 <                                e.value = value;
948 <                                ++modCount;
932 >    /**
933 >     * Maps the specified key to the specified value in this table.
934 >     * Neither the key nor the value can be null.
935 >     *
936 >     * <p>The value can be retrieved by calling the {@code get} method
937 >     * with a key that is equal to the original key.
938 >     *
939 >     * @param key key with which the specified value is to be associated
940 >     * @param value value to be associated with the specified key
941 >     * @return the previous value associated with {@code key}, or
942 >     *         {@code null} if there was no mapping for {@code key}
943 >     * @throws NullPointerException if the specified key or value is null
944 >     */
945 >    public V put(K key, V value) {
946 >        return putVal(key, value, false);
947 >    }
948 >
949 >    /** Implementation for put and putIfAbsent */
950 >    final V putVal(K key, V value, boolean onlyIfAbsent) {
951 >        if (key == null || value == null) throw new NullPointerException();
952 >        int hash = spread(key.hashCode());
953 >        int binCount = 0;
954 >        for (Node<K,V>[] tab = table;;) {
955 >            Node<K,V> f; int n, i, fh;
956 >            if (tab == null || (n = tab.length) == 0)
957 >                tab = initTable();
958 >            else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
959 >                if (casTabAt(tab, i, null,
960 >                             new Node<K,V>(hash, key, value, null)))
961 >                    break;                   // no lock when adding to empty bin
962 >            }
963 >            else if ((fh = f.hash) == MOVED)
964 >                tab = helpTransfer(tab, f);
965 >            else {
966 >                V oldVal = null;
967 >                synchronized (f) {
968 >                    if (tabAt(tab, i) == f) {
969 >                        if (fh >= 0) {
970 >                            binCount = 1;
971 >                            for (Node<K,V> e = f;; ++binCount) {
972 >                                K ek;
973 >                                if (e.hash == hash &&
974 >                                    ((ek = e.key) == key ||
975 >                                     (ek != null && key.equals(ek)))) {
976 >                                    oldVal = e.val;
977 >                                    if (!onlyIfAbsent)
978 >                                        e.val = value;
979 >                                    break;
980 >                                }
981 >                                Node<K,V> pred = e;
982 >                                if ((e = e.next) == null) {
983 >                                    pred.next = new Node<K,V>(hash, key,
984 >                                                              value, null);
985 >                                    break;
986 >                                }
987 >                            }
988 >                        }
989 >                        else if (f instanceof TreeBin) {
990 >                            Node<K,V> p;
991 >                            binCount = 2;
992 >                            if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
993 >                                                           value)) != null) {
994 >                                oldVal = p.val;
995 >                                if (!onlyIfAbsent)
996 >                                    p.val = value;
997                              }
354                            break;
998                          }
356                        e = e.next;
357                    }
358                    else {
359                        if (node != null)
360                            node.setNext(first);
361                        else
362                            node = new HashEntry<K,V>(hash, key, value, first);
363                        int c = count + 1;
364                        if (c > threshold && tab.length < MAXIMUM_CAPACITY)
365                            rehash(node);
366                        else
367                            setEntryAt(tab, index, node);
368                        ++modCount;
369                        count = c;
370                        oldValue = null;
371                        break;
372                    }
373                }
374            } finally {
375                unlock();
376            }
377            return oldValue;
378        }
379
380        /**
381         * Doubles size of table and repacks entries, also adding the
382         * given node to new table
383         */
384        @SuppressWarnings("unchecked")
385        private void rehash(HashEntry<K,V> node) {
386            /*
387             * Reclassify nodes in each list to the new table.  Because we
388             * are using power-of-two expansion, the elements from
389             * each bin must either stay at the same index, or move with a
390             * power of two offset. We eliminate unnecessary node
391             * creation by catching cases where old nodes can be
392             * reused because their next fields won't change.
393             * Statistically, at the default threshold, only about
394             * one-sixth of them need cloning when a table
395             * doubles. The nodes they replace will be garbage
396             * collectable as soon as they are no longer referenced by
397             * any reader thread that may be in the midst of
398             * concurrently traversing the table. Entry accesses use plain
399             * array indexing because they are followed by a volatile
400             * table write.
401             */
402            HashEntry<K,V>[] oldTable = table;
403            int oldCapacity = oldTable.length;
404            int newCapacity = oldCapacity << 1;
405            threshold = (int)(newCapacity * loadFactor);
406            HashEntry<K,V>[] newTable =
407                (HashEntry<K,V>[]) new HashEntry[newCapacity];
408            int sizeMask = newCapacity - 1;
409            for (int i = 0; i < oldCapacity ; i++) {
410                HashEntry<K,V> e = oldTable[i];
411                if (e != null) {
412                    HashEntry<K,V> next = e.next;
413                    int idx = e.hash & sizeMask;
414                    if (next == null)   //  Single node on list
415                        newTable[idx] = e;
416                    else { // Reuse consecutive sequence at same slot
417                        HashEntry<K,V> lastRun = e;
418                        int lastIdx = idx;
419                        for (HashEntry<K,V> last = next;
420                             last != null;
421                             last = last.next) {
422                            int k = last.hash & sizeMask;
423                            if (k != lastIdx) {
424                                lastIdx = k;
425                                lastRun = last;
426                            }
427                        }
428                        newTable[lastIdx] = lastRun;
429                        // Clone remaining nodes
430                        for (HashEntry<K,V> p = e; p != lastRun; p = p.next) {
431                            V v = p.value;
432                            int h = p.hash;
433                            int k = h & sizeMask;
434                            HashEntry<K,V> n = newTable[k];
435                            newTable[k] = new HashEntry<K,V>(h, p.key, v, n);
436                        }
437                    }
438                }
439            }
440            int nodeIndex = node.hash & sizeMask; // add the new node
441            node.setNext(newTable[nodeIndex]);
442            newTable[nodeIndex] = node;
443            table = newTable;
444        }
445
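A worked sketch of the power-of-two invariant described in the comment above (a hypothetical helper, not part of this class): after doubling, an entry either keeps its old index or moves by exactly oldCapacity, depending on a single hash bit. This is what makes reusing the trailing run of nodes safe.

    static int indexAfterDoubling(int hash, int oldCapacity) {
        int idx = hash & (oldCapacity - 1);  // index in the old table
        return (hash & oldCapacity) == 0
            ? idx                            // bit clear: same slot
            : idx + oldCapacity;             // bit set: offset by oldCapacity
    }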
446        /**
447         * Scans for a node containing the given key while trying to
448         * acquire the lock, creating and returning one if not found. Upon
449         * return, guarantees that the lock is held. Unlike in most
450         * methods, calls to method equals are not screened: since
451         * traversal speed doesn't matter, we might as well help warm
452         * up the associated code and accesses.
453         *
454         * @return a new node if key not found, else null
455         */
456        private HashEntry<K,V> scanAndLockForPut(K key, int hash, V value) {
457            HashEntry<K,V> first = entryForHash(this, hash);
458            HashEntry<K,V> e = first;
459            HashEntry<K,V> node = null;
460            int retries = -1; // negative while locating node
461            while (!tryLock()) {
462                HashEntry<K,V> f; // to recheck first below
463                if (retries < 0) {
464                    if (e == null) {
465                        if (node == null) // speculatively create node
466                            node = new HashEntry<K,V>(hash, key, value, null);
467                        retries = 0;
999                      }
469                    else if (key.equals(e.key))
470                        retries = 0;
471                    else
472                        e = e.next;
1000                  }
1001 <                else if (++retries > MAX_SCAN_RETRIES) {
1002 <                    lock();
1001 >                if (binCount != 0) {
1002 >                    if (binCount >= TREEIFY_THRESHOLD)
1003 >                        treeifyBin(tab, i);
1004 >                    if (oldVal != null)
1005 >                        return oldVal;
1006                      break;
1007                  }
478                else if ((retries & 1) == 0 &&
479                         (f = entryForHash(this, hash)) != first) {
480                    e = first = f; // re-traverse if entry changed
481                    retries = -1;
482                }
483            }
484            return node;
485        }
486
487        /**
488         * Scans for a node containing the given key while trying to
489         * acquire the lock for a remove or replace operation. Upon
490         * return, guarantees that the lock is held.  Note that we must
491         * lock even if the key is not found, to ensure sequential
492         * consistency of updates.
493         */
494        private void scanAndLock(Object key, int hash) {
495            // similar to but simpler than scanAndLockForPut
496            HashEntry<K,V> first = entryForHash(this, hash);
497            HashEntry<K,V> e = first;
498            int retries = -1;
499            while (!tryLock()) {
500                HashEntry<K,V> f;
501                if (retries < 0) {
502                    if (e == null || key.equals(e.key))
503                        retries = 0;
504                    else
505                        e = e.next;
506                }
507                else if (++retries > MAX_SCAN_RETRIES) {
508                    lock();
509                    break;
510                }
511                else if ((retries & 1) == 0 &&
512                         (f = entryForHash(this, hash)) != first) {
513                    e = first = f;
514                    retries = -1;
515                }
1008              }
1009          }
1010 +        addCount(1L, binCount);
1011 +        return null;
1012 +    }
1013  
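A hedged usage sketch of the two entry points served by putVal above (names illustrative): put always overwrites, putIfAbsent only installs when absent, and both return the previous value or null.

    ConcurrentHashMap<String, Integer> m = new ConcurrentHashMap<>();
    m.put("k", 1);          // returns null: no prior mapping
    m.put("k", 2);          // returns 1 and overwrites
    m.putIfAbsent("k", 3);  // returns 2 and leaves the value at 2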
1014 <        /**
1015 <         * Remove; match on key only if value null, else match both.
1016 <         */
1017 <        final V remove(Object key, int hash, Object value) {
1018 <            if (!tryLock())
1019 <                scanAndLock(key, hash);
1020 <            V oldValue = null;
1021 <            try {
1022 <                HashEntry<K,V>[] tab = table;
1023 <                int index = (tab.length - 1) & hash;
1024 <                HashEntry<K,V> e = entryAt(tab, index);
1025 <                HashEntry<K,V> pred = null;
1026 <                while (e != null) {
1027 <                    K k;
1028 <                    HashEntry<K,V> next = e.next;
1029 <                    if ((k = e.key) == key ||
1030 <                        (e.hash == hash && key.equals(k))) {
1031 <                        V v = e.value;
1032 <                        if (value == null || value == v || value.equals(v)) {
1033 <                            if (pred == null)
1034 <                                setEntryAt(tab, index, next);
1035 <                            else
1036 <                                pred.setNext(next);
1037 <                            ++modCount;
1038 <                            --count;
1039 <                            oldValue = v;
1014 >    /**
1015 >     * Copies all of the mappings from the specified map to this one.
1016 >     * These mappings replace any mappings that this map had for any of the
1017 >     * keys currently in the specified map.
1018 >     *
1019 >     * @param m mappings to be stored in this map
1020 >     */
1021 >    public void putAll(Map<? extends K, ? extends V> m) {
1022 >        tryPresize(m.size());
1023 >        for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
1024 >            putVal(e.getKey(), e.getValue(), false);
1025 >    }
1026 >
1027 >    /**
1028 >     * Removes the key (and its corresponding value) from this map.
1029 >     * This method does nothing if the key is not in the map.
1030 >     *
1031 >     * @param  key the key that needs to be removed
1032 >     * @return the previous value associated with {@code key}, or
1033 >     *         {@code null} if there was no mapping for {@code key}
1034 >     * @throws NullPointerException if the specified key is null
1035 >     */
1036 >    public V remove(Object key) {
1037 >        return replaceNode(key, null, null);
1038 >    }
1039 >
1040 >    /**
1041 >     * Implementation for the four public remove/replace methods:
1042 >     * Replaces the node value with v, conditional upon a match of cv
1043 >     * if cv is non-null.  If the resulting value is null, the node is deleted.
1044 >     */
1045 >    final V replaceNode(Object key, V value, Object cv) {
1046 >        int hash = spread(key.hashCode());
1047 >        for (Node<K,V>[] tab = table;;) {
1048 >            Node<K,V> f; int n, i, fh;
1049 >            if (tab == null || (n = tab.length) == 0 ||
1050 >                (f = tabAt(tab, i = (n - 1) & hash)) == null)
1051 >                break;
1052 >            else if ((fh = f.hash) == MOVED)
1053 >                tab = helpTransfer(tab, f);
1054 >            else {
1055 >                V oldVal = null;
1056 >                boolean validated = false;
1057 >                synchronized (f) {
1058 >                    if (tabAt(tab, i) == f) {
1059 >                        if (fh >= 0) {
1060 >                            validated = true;
1061 >                            for (Node<K,V> e = f, pred = null;;) {
1062 >                                K ek;
1063 >                                if (e.hash == hash &&
1064 >                                    ((ek = e.key) == key ||
1065 >                                     (ek != null && key.equals(ek)))) {
1066 >                                    V ev = e.val;
1067 >                                    if (cv == null || cv == ev ||
1068 >                                        (ev != null && cv.equals(ev))) {
1069 >                                        oldVal = ev;
1070 >                                        if (value != null)
1071 >                                            e.val = value;
1072 >                                        else if (pred != null)
1073 >                                            pred.next = e.next;
1074 >                                        else
1075 >                                            setTabAt(tab, i, e.next);
1076 >                                    }
1077 >                                    break;
1078 >                                }
1079 >                                pred = e;
1080 >                                if ((e = e.next) == null)
1081 >                                    break;
1082 >                            }
1083 >                        }
1084 >                        else if (f instanceof TreeBin) {
1085 >                            validated = true;
1086 >                            TreeBin<K,V> t = (TreeBin<K,V>)f;
1087 >                            TreeNode<K,V> r, p;
1088 >                            if ((r = t.root) != null &&
1089 >                                (p = r.findTreeNode(hash, key, null)) != null) {
1090 >                                V pv = p.val;
1091 >                                if (cv == null || cv == pv ||
1092 >                                    (pv != null && cv.equals(pv))) {
1093 >                                    oldVal = pv;
1094 >                                    if (value != null)
1095 >                                        p.val = value;
1096 >                                    else if (t.removeTreeNode(p))
1097 >                                        setTabAt(tab, i, untreeify(t.first));
1098 >                                }
1099 >                            }
1100                          }
546                        break;
1101                      }
548                    pred = e;
549                    e = next;
1102                  }
1103 <            } finally {
1104 <                unlock();
1105 <            }
1106 <            return oldValue;
1107 <        }
556 <
557 <        final boolean replace(K key, int hash, V oldValue, V newValue) {
558 <            if (!tryLock())
559 <                scanAndLock(key, hash);
560 <            boolean replaced = false;
561 <            try {
562 <                HashEntry<K,V> e;
563 <                for (e = entryForHash(this, hash); e != null; e = e.next) {
564 <                    K k;
565 <                    if ((k = e.key) == key ||
566 <                        (e.hash == hash && key.equals(k))) {
567 <                        if (oldValue.equals(e.value)) {
568 <                            e.value = newValue;
569 <                            ++modCount;
570 <                            replaced = true;
571 <                        }
572 <                        break;
1103 >                if (validated) {
1104 >                    if (oldVal != null) {
1105 >                        if (value == null)
1106 >                            addCount(-1L, -1);
1107 >                        return oldVal;
1108                      }
1109 +                    break;
1110                  }
575            } finally {
576                unlock();
1111              }
578            return replaced;
1112          }
1113 +        return null;
1114 +    }
1115  
1116 <        final V replace(K key, int hash, V value) {
1117 <            if (!tryLock())
1118 <                scanAndLock(key, hash);
1119 <            V oldValue = null;
1120 <            try {
1121 <                HashEntry<K,V> e;
1122 <                for (e = entryForHash(this, hash); e != null; e = e.next) {
1123 <                    K k;
1124 <                    if ((k = e.key) == key ||
1125 <                        (e.hash == hash && key.equals(k))) {
1126 <                        oldValue = e.value;
1127 <                        e.value = value;
1128 <                        ++modCount;
1129 <                        break;
1116 >    /**
1117 >     * Removes all of the mappings from this map.
1118 >     */
1119 >    public void clear() {
1120 >        long delta = 0L; // negated number of deletions
1121 >        int i = 0;
1122 >        Node<K,V>[] tab = table;
1123 >        while (tab != null && i < tab.length) {
1124 >            int fh;
1125 >            Node<K,V> f = tabAt(tab, i);
1126 >            if (f == null)
1127 >                ++i;
1128 >            else if ((fh = f.hash) == MOVED) {
1129 >                tab = helpTransfer(tab, f);
1130 >                i = 0; // restart
1131 >            }
1132 >            else {
1133 >                synchronized (f) {
1134 >                    if (tabAt(tab, i) == f) {
1135 >                        Node<K,V> p = (fh >= 0 ? f :
1136 >                                       (f instanceof TreeBin) ?
1137 >                                       ((TreeBin<K,V>)f).first : null);
1138 >                        while (p != null) {
1139 >                            --delta;
1140 >                            p = p.next;
1141 >                        }
1142 >                        setTabAt(tab, i++, null);
1143                      }
1144                  }
597            } finally {
598                unlock();
1145              }
600            return oldValue;
1146          }
1147 +        if (delta != 0L)
1148 +            addCount(delta, -1);
1149 +    }
1150  
1151 <        final void clear() {
1152 <            lock();
1153 <            try {
1154 <                HashEntry<K,V>[] tab = table;
1155 <                for (int i = 0; i < tab.length ; i++)
1156 <                    setEntryAt(tab, i, null);
1157 <                ++modCount;
1158 <                count = 0;
1159 <            } finally {
1160 <                unlock();
1161 <            }
1162 <        }
1151 >    /**
1152 >     * Returns a {@link Set} view of the keys contained in this map.
1153 >     * The set is backed by the map, so changes to the map are
1154 >     * reflected in the set, and vice-versa. The set supports element
1155 >     * removal, which removes the corresponding mapping from this map,
1156 >     * via the {@code Iterator.remove}, {@code Set.remove},
1157 >     * {@code removeAll}, {@code retainAll}, and {@code clear}
1158 >     * operations.  It does not support the {@code add} or
1159 >     * {@code addAll} operations.
1160 >     *
1161 >     * <p>The view's {@code iterator} is a "weakly consistent" iterator
1162 >     * that will never throw {@link ConcurrentModificationException},
1163 >     * and guarantees to traverse elements as they existed upon
1164 >     * construction of the iterator, and may (but is not guaranteed to)
1165 >     * reflect any modifications subsequent to construction.
1166 >     *
1167 >     * @return the set view
1168 >     */
1169 >    public KeySetView<K,V> keySet() {
1170 >        KeySetView<K,V> ks;
1171 >        return (ks = keySet) != null ? ks : (keySet = new KeySetView<K,V>(this, null));
1172 >    }
1173 >
1174 >    /**
1175 >     * Returns a {@link Collection} view of the values contained in this map.
1176 >     * The collection is backed by the map, so changes to the map are
1177 >     * reflected in the collection, and vice-versa.  The collection
1178 >     * supports element removal, which removes the corresponding
1179 >     * mapping from this map, via the {@code Iterator.remove},
1180 >     * {@code Collection.remove}, {@code removeAll},
1181 >     * {@code retainAll}, and {@code clear} operations.  It does not
1182 >     * support the {@code add} or {@code addAll} operations.
1183 >     *
1184 >     * <p>The view's {@code iterator} is a "weakly consistent" iterator
1185 >     * that will never throw {@link ConcurrentModificationException},
1186 >     * and guarantees to traverse elements as they existed upon
1187 >     * construction of the iterator, and may (but is not guaranteed to)
1188 >     * reflect any modifications subsequent to construction.
1189 >     *
1190 >     * @return the collection view
1191 >     */
1192 >    public Collection<V> values() {
1193 >        ValuesView<K,V> vs;
1194 >        return (vs = values) != null ? vs : (values = new ValuesView<K,V>(this));
1195      }
1196  
1197 <    // Accessing segments
1197 >    /**
1198 >     * Returns a {@link Set} view of the mappings contained in this map.
1199 >     * The set is backed by the map, so changes to the map are
1200 >     * reflected in the set, and vice-versa.  The set supports element
1201 >     * removal, which removes the corresponding mapping from the map,
1202 >     * via the {@code Iterator.remove}, {@code Set.remove},
1203 >     * {@code removeAll}, {@code retainAll}, and {@code clear}
1204 >     * operations.
1205 >     *
1206 >     * <p>The view's {@code iterator} is a "weakly consistent" iterator
1207 >     * that will never throw {@link ConcurrentModificationException},
1208 >     * and guarantees to traverse elements as they existed upon
1209 >     * construction of the iterator, and may (but is not guaranteed to)
1210 >     * reflect any modifications subsequent to construction.
1211 >     *
1212 >     * @return the set view
1213 >     */
1214 >    public Set<Map.Entry<K,V>> entrySet() {
1215 >        EntrySetView<K,V> es;
1216 >        return (es = entrySet) != null ? es : (entrySet = new EntrySetView<K,V>(this));
1217 >    }
1218  
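A hedged sketch of the weak consistency promised by all three views above (names illustrative): traversal tolerates concurrent mutation, even by the iterating thread itself, and never throws ConcurrentModificationException.

    ConcurrentHashMap<String, Integer> m = new ConcurrentHashMap<>();
    m.put("a", 1);
    m.put("b", 2);
    for (Map.Entry<String, Integer> e : m.entrySet()) {
        m.remove(e.getKey()); // safe mid-iteration
        m.put("c", 3);        // may or may not be seen by this traversal
    }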
1219      /**
1220 <     * Gets the jth element of given segment array (if nonnull) with
1221 <     * volatile element access semantics via Unsafe. (The null check
1222 <     * can trigger harmlessly only during deserialization.) Note:
1223 <     * because each element of segments array is set only once (using
1224 <     * fully ordered writes), some performance-sensitive methods rely
625 <     * on this method only as a recheck upon null reads.
1220 >     * Returns the hash code value for this {@link Map}, i.e.,
1221 >     * the sum of, for each key-value pair in the map,
1222 >     * {@code key.hashCode() ^ value.hashCode()}.
1223 >     *
1224 >     * @return the hash code value for this map
1225       */
1226 <    @SuppressWarnings("unchecked")
1227 <    static final <K,V> Segment<K,V> segmentAt(Segment<K,V>[] ss, int j) {
1228 <        long u = (j << SSHIFT) + SBASE;
1229 <        return ss == null ? null :
1230 <            (Segment<K,V>) UNSAFE.getObjectVolatile(ss, u);
1226 >    public int hashCode() {
1227 >        int h = 0;
1228 >        Node<K,V>[] t;
1229 >        if ((t = table) != null) {
1230 >            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1231 >            for (Node<K,V> p; (p = it.advance()) != null; )
1232 >                h += p.key.hashCode() ^ p.val.hashCode();
1233 >        }
1234 >        return h;
1235      }
1236  
1237      /**
1238 <     * Returns the segment for the given index, creating it and
1239 <     * recording in segment table (via CAS) if not already present.
1238 >     * Returns a string representation of this map.  The string
1239 >     * representation consists of a list of key-value mappings (in no
1240 >     * particular order) enclosed in braces ("{@code {}}").  Adjacent
1241 >     * mappings are separated by the characters {@code ", "} (comma
1242 >     * and space).  Each key-value mapping is rendered as the key
1243 >     * followed by an equals sign ("{@code =}") followed by the
1244 >     * associated value.
1245       *
1246 <     * @param k the index
639 <     * @return the segment
1246 >     * @return a string representation of this map
1247       */
1248 <    @SuppressWarnings("unchecked")
1249 <    private Segment<K,V> ensureSegment(int k) {
1250 <        final Segment<K,V>[] ss = this.segments;
1251 <        long u = (k << SSHIFT) + SBASE; // raw offset
1252 <        Segment<K,V> seg;
1253 <        if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u)) == null) {
1254 <            Segment<K,V> proto = ss[0]; // use segment 0 as prototype
1255 <            int cap = proto.table.length;
1256 <            float lf = proto.loadFactor;
1257 <            int threshold = (int)(cap * lf);
1258 <            HashEntry<K,V>[] tab = (HashEntry<K,V>[])new HashEntry[cap];
1259 <            if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
1260 <                == null) { // recheck
1261 <                Segment<K,V> s = new Segment<K,V>(lf, threshold, tab);
1262 <                while ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
1263 <                       == null) {
1264 <                    if (UNSAFE.compareAndSwapObject(ss, u, null, seg = s))
658 <                        break;
659 <                }
1248 >    public String toString() {
1249 >        Node<K,V>[] t;
1250 >        int f = (t = table) == null ? 0 : t.length;
1251 >        Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1252 >        StringBuilder sb = new StringBuilder();
1253 >        sb.append('{');
1254 >        Node<K,V> p;
1255 >        if ((p = it.advance()) != null) {
1256 >            for (;;) {
1257 >                K k = p.key;
1258 >                V v = p.val;
1259 >                sb.append(k == this ? "(this Map)" : k);
1260 >                sb.append('=');
1261 >                sb.append(v == this ? "(this Map)" : v);
1262 >                if ((p = it.advance()) == null)
1263 >                    break;
1264 >                sb.append(',').append(' ');
1265              }
1266          }
1267 <        return seg;
1267 >        return sb.append('}').toString();
1268      }
1269  
665    // Hash-based segment and entry accesses
666
1270      /**
1271 <     * Gets the segment for the given hash code.
1271 >     * Compares the specified object with this map for equality.
1272 >     * Returns {@code true} if the given object is a map with the same
1273 >     * mappings as this map.  This operation may return misleading
1274 >     * results if either map is concurrently modified during execution
1275 >     * of this method.
1276 >     *
1277 >     * @param o object to be compared for equality with this map
1278 >     * @return {@code true} if the specified object is equal to this map
1279       */
1280 <    @SuppressWarnings("unchecked")
1281 <    private Segment<K,V> segmentForHash(int h) {
1282 <        long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
1283 <        return (Segment<K,V>) UNSAFE.getObjectVolatile(segments, u);
1280 >    public boolean equals(Object o) {
1281 >        if (o != this) {
1282 >            if (!(o instanceof Map))
1283 >                return false;
1284 >            Map<?,?> m = (Map<?,?>) o;
1285 >            Node<K,V>[] t;
1286 >            int f = (t = table) == null ? 0 : t.length;
1287 >            Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1288 >            for (Node<K,V> p; (p = it.advance()) != null; ) {
1289 >                V val = p.val;
1290 >                Object v = m.get(p.key);
1291 >                if (v == null || (v != val && !v.equals(val)))
1292 >                    return false;
1293 >            }
1294 >            for (Map.Entry<?,?> e : m.entrySet()) {
1295 >                Object mk, mv, v;
1296 >                if ((mk = e.getKey()) == null ||
1297 >                    (mv = e.getValue()) == null ||
1298 >                    (v = get(mk)) == null ||
1299 >                    (mv != v && !mv.equals(v)))
1300 >                    return false;
1301 >            }
1302 >        }
1303 >        return true;
1304      }
1305  
1306      /**
1307 <     * Gets the table entry for the given segment and hash code.
1307 >     * Stripped-down version of helper class used in previous version,
1308 >     * declared for the sake of serialization compatibility
1309       */
1310 <    @SuppressWarnings("unchecked")
1311 <    static final <K,V> HashEntry<K,V> entryForHash(Segment<K,V> seg, int h) {
1312 <        HashEntry<K,V>[] tab;
1313 <        return (seg == null || (tab = seg.table) == null) ? null :
683 <            (HashEntry<K,V>) UNSAFE.getObjectVolatile
684 <            (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
1310 >    static class Segment<K,V> extends ReentrantLock implements Serializable {
1311 >        private static final long serialVersionUID = 2249069246763182397L;
1312 >        final float loadFactor;
1313 >        Segment(float lf) { this.loadFactor = lf; }
1314      }
1315  
687    /* ---------------- Public operations -------------- */
688
1316      /**
1317 <     * Creates a new, empty map with the specified initial
1318 <     * capacity, load factor and concurrency level.
1319 <     *
1320 <     * @param initialCapacity the initial capacity. The implementation
1321 <     * performs internal sizing to accommodate this many elements.
1322 <     * @param loadFactor  the load factor threshold, used to control resizing.
1323 <     * Resizing may be performed when the average number of elements per
697 <     * bin exceeds this threshold.
698 <     * @param concurrencyLevel the estimated number of concurrently
699 <     * updating threads. The implementation performs internal sizing
700 <     * to try to accommodate this many threads.
701 <     * @throws IllegalArgumentException if the initial capacity is
702 <     * negative or the load factor or concurrencyLevel are
703 <     * nonpositive.
1317 >     * Saves the state of the {@code ConcurrentHashMap} instance to a
1318 >     * stream (i.e., serializes it).
1319 >     * @param s the stream
1320 >     * @serialData
1321 >     * the key (Object) and value (Object)
1322 >     * for each key-value mapping, followed by a null pair.
1323 >     * The key-value mappings are emitted in no particular order.
1324       */
1325 <    @SuppressWarnings("unchecked")
1326 <    public ConcurrentHashMap(int initialCapacity,
1327 <                             float loadFactor, int concurrencyLevel) {
1328 <        if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0)
709 <            throw new IllegalArgumentException();
710 <        if (concurrencyLevel > MAX_SEGMENTS)
711 <            concurrencyLevel = MAX_SEGMENTS;
712 <        // Find power-of-two sizes best matching arguments
1325 >    private void writeObject(java.io.ObjectOutputStream s)
1326 >        throws java.io.IOException {
1327 >        // For serialization compatibility
1328 >        // Emulate segment calculation from previous version of this class
1329          int sshift = 0;
1330          int ssize = 1;
1331 <        while (ssize < concurrencyLevel) {
1331 >        while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
1332              ++sshift;
1333              ssize <<= 1;
1334          }
1335 <        this.segmentShift = 32 - sshift;
1336 <        this.segmentMask = ssize - 1;
1337 <        if (initialCapacity > MAXIMUM_CAPACITY)
1338 <            initialCapacity = MAXIMUM_CAPACITY;
1339 <        int c = initialCapacity / ssize;
1340 <        if (c * ssize < initialCapacity)
1341 <            ++c;
1342 <        int cap = MIN_SEGMENT_TABLE_CAPACITY;
1343 <        while (cap < c)
1344 <            cap <<= 1;
1345 <        // create segments and segments[0]
1346 <        Segment<K,V> s0 =
1347 <            new Segment<K,V>(loadFactor, (int)(cap * loadFactor),
1348 <                             (HashEntry<K,V>[])new HashEntry[cap]);
1349 <        Segment<K,V>[] ss = (Segment<K,V>[])new Segment[ssize];
1350 <        UNSAFE.putOrderedObject(ss, SBASE, s0); // ordered write of segments[0]
1351 <        this.segments = ss;
1335 >        int segmentShift = 32 - sshift;
1336 >        int segmentMask = ssize - 1;
1337 >        @SuppressWarnings("unchecked") Segment<K,V>[] segments = (Segment<K,V>[])
1338 >            new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
1339 >        for (int i = 0; i < segments.length; ++i)
1340 >            segments[i] = new Segment<K,V>(LOAD_FACTOR);
1341 >        s.putFields().put("segments", segments);
1342 >        s.putFields().put("segmentShift", segmentShift);
1343 >        s.putFields().put("segmentMask", segmentMask);
1344 >        s.writeFields();
1345 >
1346 >        Node<K,V>[] t;
1347 >        if ((t = table) != null) {
1348 >            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1349 >            for (Node<K,V> p; (p = it.advance()) != null; ) {
1350 >                s.writeObject(p.key);
1351 >                s.writeObject(p.val);
1352 >            }
1353 >        }
1354 >        s.writeObject(null);
1355 >        s.writeObject(null);
1356 >        segments = null; // throw away
1357      }
1358  
1359      /**
1360 <     * Creates a new, empty map with the specified initial capacity
1361 <     * and load factor and with the default concurrencyLevel (16).
1362 <     *
1363 <     * @param initialCapacity The implementation performs internal
1364 <     * sizing to accommodate this many elements.
1365 <     * @param loadFactor  the load factor threshold, used to control resizing.
1366 <     * Resizing may be performed when the average number of elements per
1367 <     * bin exceeds this threshold.
1368 <     * @throws IllegalArgumentException if the initial capacity of
1369 <     * elements is negative or the load factor is nonpositive
1360 >     * Reconstitutes the instance from a stream (that is, deserializes it).
1361 >     * @param s the stream
1362 >     */
1363 >    private void readObject(java.io.ObjectInputStream s)
1364 >        throws java.io.IOException, ClassNotFoundException {
1365 >        /*
1366 >         * To improve performance in typical cases, we create nodes
1367 >         * while reading, then place in table once size is known.
1368 >         * However, we must also validate uniqueness and deal with
1369 >         * overpopulated bins while doing so, which requires
1370 >         * specialized versions of putVal mechanics.
1371 >         */
1372 >        sizeCtl = -1; // force exclusion for table construction
1373 >        s.defaultReadObject();
1374 >        long size = 0L;
1375 >        Node<K,V> p = null;
1376 >        for (;;) {
1377 >            @SuppressWarnings("unchecked") K k = (K) s.readObject();
1378 >            @SuppressWarnings("unchecked") V v = (V) s.readObject();
1379 >            if (k != null && v != null) {
1380 >                p = new Node<K,V>(spread(k.hashCode()), k, v, p);
1381 >                ++size;
1382 >            }
1383 >            else
1384 >                break;
1385 >        }
1386 >        if (size == 0L)
1387 >            sizeCtl = 0;
1388 >        else {
1389 >            int n;
1390 >            if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
1391 >                n = MAXIMUM_CAPACITY;
1392 >            else {
1393 >                int sz = (int)size;
1394 >                n = tableSizeFor(sz + (sz >>> 1) + 1);
1395 >            }
1396 >            @SuppressWarnings({"rawtypes","unchecked"})
1397 >                Node<K,V>[] tab = (Node<K,V>[])new Node[n];
1398 >            int mask = n - 1;
1399 >            long added = 0L;
1400 >            while (p != null) {
1401 >                boolean insertAtFront;
1402 >                Node<K,V> next = p.next, first;
1403 >                int h = p.hash, j = h & mask;
1404 >                if ((first = tabAt(tab, j)) == null)
1405 >                    insertAtFront = true;
1406 >                else {
1407 >                    K k = p.key;
1408 >                    if (first.hash < 0) {
1409 >                        TreeBin<K,V> t = (TreeBin<K,V>)first;
1410 >                        if (t.putTreeVal(h, k, p.val) == null)
1411 >                            ++added;
1412 >                        insertAtFront = false;
1413 >                    }
1414 >                    else {
1415 >                        int binCount = 0;
1416 >                        insertAtFront = true;
1417 >                        Node<K,V> q; K qk;
1418 >                        for (q = first; q != null; q = q.next) {
1419 >                            if (q.hash == h &&
1420 >                                ((qk = q.key) == k ||
1421 >                                 (qk != null && k.equals(qk)))) {
1422 >                                insertAtFront = false;
1423 >                                break;
1424 >                            }
1425 >                            ++binCount;
1426 >                        }
1427 >                        if (insertAtFront && binCount >= TREEIFY_THRESHOLD) {
1428 >                            insertAtFront = false;
1429 >                            ++added;
1430 >                            p.next = first;
1431 >                            TreeNode<K,V> hd = null, tl = null;
1432 >                            for (q = p; q != null; q = q.next) {
1433 >                                TreeNode<K,V> t = new TreeNode<K,V>
1434 >                                    (q.hash, q.key, q.val, null, null);
1435 >                                if ((t.prev = tl) == null)
1436 >                                    hd = t;
1437 >                                else
1438 >                                    tl.next = t;
1439 >                                tl = t;
1440 >                            }
1441 >                            setTabAt(tab, j, new TreeBin<K,V>(hd));
1442 >                        }
1443 >                    }
1444 >                }
1445 >                if (insertAtFront) {
1446 >                    ++added;
1447 >                    p.next = first;
1448 >                    setTabAt(tab, j, p);
1449 >                }
1450 >                p = next;
1451 >            }
1452 >            table = tab;
1453 >            sizeCtl = n - (n >>> 2);
1454 >            baseCount = added;
1455 >        }
1456 >    }
1457 >
1458 >    // ConcurrentMap methods
1459 >
1460 >    /**
1461 >     * {@inheritDoc}
1462       *
1463 <     * @since 1.6
1463 >     * @return the previous value associated with the specified key,
1464 >     *         or {@code null} if there was no mapping for the key
1465 >     * @throws NullPointerException if the specified key or value is null
1466       */
1467 <    public ConcurrentHashMap(int initialCapacity, float loadFactor) {
1468 <        this(initialCapacity, loadFactor, DEFAULT_CONCURRENCY_LEVEL);
1467 >    public V putIfAbsent(K key, V value) {
1468 >        return putVal(key, value, true);
1469      }
1470  
1471      /**
1472 <     * Creates a new, empty map with the specified initial capacity,
758 <     * and with default load factor (0.75) and concurrencyLevel (16).
1472 >     * {@inheritDoc}
1473       *
1474 <     * @param initialCapacity the initial capacity. The implementation
761 <     * performs internal sizing to accommodate this many elements.
762 <     * @throws IllegalArgumentException if the initial capacity of
763 <     * elements is negative.
1474 >     * @throws NullPointerException if the specified key is null
1475       */
1476 <    public ConcurrentHashMap(int initialCapacity) {
1477 <        this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
1476 >    public boolean remove(Object key, Object value) {
1477 >        if (key == null)
1478 >            throw new NullPointerException();
1479 >        return value != null && replaceNode(key, null, value) != null;
1480      }
1481  
1482      /**
1483 <     * Creates a new, empty map with a default initial capacity (16),
1484 <     * load factor (0.75) and concurrencyLevel (16).
1483 >     * {@inheritDoc}
1484 >     *
1485 >     * @throws NullPointerException if any of the arguments are null
1486       */
1487 <    public ConcurrentHashMap() {
1488 <        this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
1487 >    public boolean replace(K key, V oldValue, V newValue) {
1488 >        if (key == null || oldValue == null || newValue == null)
1489 >            throw new NullPointerException();
1490 >        return replaceNode(key, newValue, oldValue) != null;
1491      }
1492  
1493      /**
1494 <     * Creates a new map with the same mappings as the given map.
779 <     * The map is created with a capacity of 1.5 times the number
780 <     * of mappings in the given map or 16 (whichever is greater),
781 <     * and a default load factor (0.75) and concurrencyLevel (16).
1494 >     * {@inheritDoc}
1495       *
1496 <     * @param m the map
1496 >     * @return the previous value associated with the specified key,
1497 >     *         or {@code null} if there was no mapping for the key
1498 >     * @throws NullPointerException if the specified key or value is null
1499       */
1500 <    public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
1501 <        this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1,
1502 <                      DEFAULT_INITIAL_CAPACITY),
1503 <             DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
789 <        putAll(m);
1500 >    public V replace(K key, V value) {
1501 >        if (key == null || value == null)
1502 >            throw new NullPointerException();
1503 >        return replaceNode(key, value, null);
1504      }
1505  
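A hedged sketch combining the atomic ConcurrentMap methods above into the classic optimistic-update loop (map and key names are illustrative):

    ConcurrentHashMap<String, Integer> counts = new ConcurrentHashMap<>();
    for (;;) {
        Integer old = counts.get("hits");
        if (old == null) {
            if (counts.putIfAbsent("hits", 1) == null)
                break;                          // installed the first value
        } else if (counts.replace("hits", old, old + 1)) {
            break;                              // atomically bumped old -> old+1
        }                                       // else another thread won; retry
    }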
1506 +    // Overrides of JDK8+ Map extension method defaults
1507 +
1508      /**
1509 <     * Returns <tt>true</tt> if this map contains no key-value mappings.
1509 >     * Returns the value to which the specified key is mapped, or the
1510 >     * given default value if this map contains no mapping for the
1511 >     * key.
1512       *
1513 <     * @return <tt>true</tt> if this map contains no key-value mappings
1513 >     * @param key the key whose associated value is to be returned
1514 >     * @param defaultValue the value to return if this map contains
1515 >     * no mapping for the given key
1516 >     * @return the mapping for the key, if present; else the default value
1517 >     * @throws NullPointerException if the specified key is null
1518       */
1519 <    public boolean isEmpty() {
1520 <        /*
1521 <         * Sum per-segment modCounts to avoid mis-reporting when
1522 <         * elements are concurrently added and removed in one segment
1523 <         * while checking another, in which case the table was never
1524 <         * actually empty at any point. (The sum ensures accuracy up
1525 <         * through at least 1<<31 per-segment modifications before
1526 <         * recheck.)  Methods size() and containsValue() use similar
1527 <         * constructions for stability checks.
1528 <         */
1529 <        long sum = 0L;
1530 <        final Segment<K,V>[] segments = this.segments;
809 <        for (int j = 0; j < segments.length; ++j) {
810 <            Segment<K,V> seg = segmentAt(segments, j);
811 <            if (seg != null) {
812 <                if (seg.count != 0)
813 <                    return false;
814 <                sum += seg.modCount;
1519 >    public V getOrDefault(Object key, V defaultValue) {
1520 >        V v;
1521 >        return (v = get(key)) == null ? defaultValue : v;
1522 >    }
1523 >
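A one-line usage sketch (names illustrative): getOrDefault avoids the two-step get-then-null-check when a fallback value exists.

    int misses = counts.getOrDefault("misses", 0); // 0 when no mapping exists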
1524 >    public void forEach(BiConsumer<? super K, ? super V> action) {
1525 >        if (action == null) throw new NullPointerException();
1526 >        Node<K,V>[] t;
1527 >        if ((t = table) != null) {
1528 >            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1529 >            for (Node<K,V> p; (p = it.advance()) != null; ) {
1530 >                action.accept(p.key, p.val);
1531              }
1532          }
1533 <        if (sum != 0L) { // recheck unless no modifications
1534 <            for (int j = 0; j < segments.length; ++j) {
1535 <                Segment<K,V> seg = segmentAt(segments, j);
1536 <                if (seg != null) {
1537 <                    if (seg.count != 0)
1538 <                        return false;
1539 <                    sum -= seg.modCount;
1533 >    }
1534 >
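A hedged usage sketch (names illustrative): forEach traverses with the same weakly consistent semantics as the views, applying the action to each mapping it encounters.

    counts.forEach((k, v) -> System.out.println(k + " -> " + v));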
1535 >    public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
1536 >        if (function == null) throw new NullPointerException();
1537 >        Node<K,V>[] t;
1538 >        if ((t = table) != null) {
1539 >            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1540 >            for (Node<K,V> p; (p = it.advance()) != null; ) {
1541 >                V oldValue = p.val;
1542 >                for (K key = p.key;;) {
1543 >                    V newValue = function.apply(key, oldValue);
1544 >                    if (newValue == null)
1545 >                        throw new NullPointerException();
1546 >                    if (replaceNode(key, newValue, oldValue) != null ||
1547 >                        (oldValue = get(key)) == null)
1548 >                        break;
1549                  }
1550              }
826            if (sum != 0L)
827                return false;
1551          }
829        return true;
1552      }
1553  
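A hedged usage sketch (names illustrative): replaceAll transforms every value in place, retrying a key if another thread updates it concurrently; the function must not return null.

    counts.replaceAll((k, v) -> v + 1); // bump every current value by one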
1554      /**
1555 <     * Returns the number of key-value mappings in this map.  If the
1556 <     * map contains more than <tt>Integer.MAX_VALUE</tt> elements, returns
1557 <     * <tt>Integer.MAX_VALUE</tt>.
1555 >     * If the specified key is not already associated with a value,
1556 >     * attempts to compute its value using the given mapping function
1557 >     * and enters it into this map unless {@code null}.  The entire
1558 >     * method invocation is performed atomically, so the function is
1559 >     * applied at most once per key.  Some attempted update operations
1560 >     * on this map by other threads may be blocked while computation
1561 >     * is in progress, so the computation should be short and simple,
1562 >     * and must not attempt to update any other mappings of this map.
1563       *
1564 <     * @return the number of key-value mappings in this map
1564 >     * @param key key with which the specified value is to be associated
1565 >     * @param mappingFunction the function to compute a value
1566 >     * @return the current (existing or computed) value associated with
1567 >     *         the specified key, or null if the computed value is null
1568 >     * @throws NullPointerException if the specified key or mappingFunction
1569 >     *         is null
1570 >     * @throws IllegalStateException if the computation detectably
1571 >     *         attempts a recursive update to this map that would
1572 >     *         otherwise never complete
1573 >     * @throws RuntimeException or Error if the mappingFunction does so,
1574 >     *         in which case the mapping is left unestablished
1575       */
1576 <    public int size() {
1577 <        // Try a few times to get accurate count. On failure due to
1578 <        // continuous async changes in table, resort to locking.
1579 <        final Segment<K,V>[] segments = this.segments;
1580 <        int size;
1581 <        boolean overflow; // true if size overflows 32 bits
1582 <        long sum;         // sum of modCounts
1583 <        long last = 0L;   // previous sum
1584 <        int retries = -1; // first iteration isn't retry
1585 <        try {
1586 <            for (;;) {
1587 <                if (retries++ == RETRIES_BEFORE_LOCK) {
1588 <                    for (int j = 0; j < segments.length; ++j)
1589 <                        ensureSegment(j).lock(); // force creation
1590 <                }
1591 <                sum = 0L;
1592 <                size = 0;
1593 <                overflow = false;
1594 <                for (int j = 0; j < segments.length; ++j) {
1595 <                    Segment<K,V> seg = segmentAt(segments, j);
1596 <                    if (seg != null) {
1597 <                        sum += seg.modCount;
861 <                        int c = seg.count;
862 <                        if (c < 0 || (size += c) < 0)
863 <                            overflow = true;
1576 >    public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
1577 >        if (key == null || mappingFunction == null)
1578 >            throw new NullPointerException();
1579 >        int h = spread(key.hashCode());
1580 >        V val = null;
1581 >        int binCount = 0;
1582 >        for (Node<K,V>[] tab = table;;) {
1583 >            Node<K,V> f; int n, i, fh;
1584 >            if (tab == null || (n = tab.length) == 0)
1585 >                tab = initTable();
1586 >            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1587 >                Node<K,V> r = new ReservationNode<K,V>();
1588 >                synchronized (r) {
1589 >                    if (casTabAt(tab, i, null, r)) {
1590 >                        binCount = 1;
1591 >                        Node<K,V> node = null;
1592 >                        try {
1593 >                            if ((val = mappingFunction.apply(key)) != null)
1594 >                                node = new Node<K,V>(h, key, val, null);
1595 >                        } finally {
1596 >                            setTabAt(tab, i, node);
1597 >                        }
1598                      }
1599                  }
1600 <                if (sum == last)
1600 >                if (binCount != 0)
1601                      break;
868                last = sum;
1602              }
1603 <        } finally {
1604 <            if (retries > RETRIES_BEFORE_LOCK) {
1605 <                for (int j = 0; j < segments.length; ++j)
1606 <                    segmentAt(segments, j).unlock();
1603 >            else if ((fh = f.hash) == MOVED)
1604 >                tab = helpTransfer(tab, f);
1605 >            else {
1606 >                boolean added = false;
1607 >                synchronized (f) {
1608 >                    if (tabAt(tab, i) == f) {
1609 >                        if (fh >= 0) {
1610 >                            binCount = 1;
1611 >                            for (Node<K,V> e = f;; ++binCount) {
1612 >                                K ek; V ev;
1613 >                                if (e.hash == h &&
1614 >                                    ((ek = e.key) == key ||
1615 >                                     (ek != null && key.equals(ek)))) {
1616 >                                    val = e.val;
1617 >                                    break;
1618 >                                }
1619 >                                Node<K,V> pred = e;
1620 >                                if ((e = e.next) == null) {
1621 >                                    if ((val = mappingFunction.apply(key)) != null) {
1622 >                                        added = true;
1623 >                                        pred.next = new Node<K,V>(h, key, val, null);
1624 >                                    }
1625 >                                    break;
1626 >                                }
1627 >                            }
1628 >                        }
1629 >                        else if (f instanceof TreeBin) {
1630 >                            binCount = 2;
1631 >                            TreeBin<K,V> t = (TreeBin<K,V>)f;
1632 >                            TreeNode<K,V> r, p;
1633 >                            if ((r = t.root) != null &&
1634 >                                (p = r.findTreeNode(h, key, null)) != null)
1635 >                                val = p.val;
1636 >                            else if ((val = mappingFunction.apply(key)) != null) {
1637 >                                added = true;
1638 >                                t.putTreeVal(h, key, val);
1639 >                            }
1640 >                        }
1641 >                    }
1642 >                }
1643 >                if (binCount != 0) {
1644 >                    if (binCount >= TREEIFY_THRESHOLD)
1645 >                        treeifyBin(tab, i);
1646 >                    if (!added)
1647 >                        return val;
1648 >                    break;
1649 >                }
1650              }
1651          }
1652 <        return overflow ? Integer.MAX_VALUE : size;
1652 >        if (val != null)
1653 >            addCount(1L, binCount);
1654 >        return val;
1655      }
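A quick usage sketch of the method added above (the class and names below are invented for illustration, not part of this file): the mapping function is applied at most once per absent key, even when several threads race on the same key.

    import java.util.concurrent.ConcurrentHashMap;

    public class MemoizeDemo {
        static final ConcurrentHashMap<Integer, Long> CACHE = new ConcurrentHashMap<>();

        // Computes k*k at most once per key; concurrent callers for the
        // same absent key wait on the bin until one computation wins.
        static long square(int k) {
            return CACHE.computeIfAbsent(k, x -> (long) x * x);
        }

        public static void main(String[] args) {
            System.out.println(square(12)); // computes and caches 144
            System.out.println(square(12)); // served from the cache
        }
    }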
1656  
1657      /**
1658 <     * Returns the value to which the specified key is mapped,
1659 <     * or {@code null} if this map contains no mapping for the key.
1660 <     *
1661 <     * <p>More formally, if this map contains a mapping from a key
1662 <     * {@code k} to a value {@code v} such that {@code key.equals(k)},
1663 <     * then this method returns {@code v}; otherwise it returns
1664 <     * {@code null}.  (There can be at most one such mapping.)
1658 >     * If the value for the specified key is present, attempts to
1659 >     * compute a new mapping given the key and its current mapped
1660 >     * value.  The entire method invocation is performed atomically.
1661 >     * Some attempted update operations on this map by other threads
1662 >     * may be blocked while computation is in progress, so the
1663 >     * computation should be short and simple, and must not attempt to
1664 >     * update any other mappings of this map.
1665       *
1666 <     * @throws NullPointerException if the specified key is null
1666 >     * @param key key with which a value may be associated
1667 >     * @param remappingFunction the function to compute a value
1668 >     * @return the new value associated with the specified key, or null if none
1669 >     * @throws NullPointerException if the specified key or remappingFunction
1670 >     *         is null
1671 >     * @throws IllegalStateException if the computation detectably
1672 >     *         attempts a recursive update to this map that would
1673 >     *         otherwise never complete
1674 >     * @throws RuntimeException or Error if the remappingFunction does so,
1675 >     *         in which case the mapping is unchanged
1676       */
1677 <    public V get(Object key) {
1678 <        Segment<K,V> s; // manually integrate access methods to reduce overhead
1679 <        HashEntry<K,V>[] tab;
1680 <        int h = hash(key.hashCode());
1681 <        long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
1682 <        if ((s = (Segment<K,V>)UNSAFE.getObjectVolatile(segments, u)) != null &&
1683 <            (tab = s.table) != null) {
1684 <            for (HashEntry<K,V> e = (HashEntry<K,V>) UNSAFE.getObjectVolatile
1685 <                     (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
1686 <                 e != null; e = e.next) {
1687 <                K k;
1688 <                if ((k = e.key) == key || (e.hash == h && key.equals(k)))
1689 <                    return e.value;
1677 >    public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
1678 >        if (key == null || remappingFunction == null)
1679 >            throw new NullPointerException();
1680 >        int h = spread(key.hashCode());
1681 >        V val = null;
1682 >        int delta = 0;
1683 >        int binCount = 0;
1684 >        for (Node<K,V>[] tab = table;;) {
1685 >            Node<K,V> f; int n, i, fh;
1686 >            if (tab == null || (n = tab.length) == 0)
1687 >                tab = initTable();
1688 >            else if ((f = tabAt(tab, i = (n - 1) & h)) == null)
1689 >                break;
1690 >            else if ((fh = f.hash) == MOVED)
1691 >                tab = helpTransfer(tab, f);
1692 >            else {
1693 >                synchronized (f) {
1694 >                    if (tabAt(tab, i) == f) {
1695 >                        if (fh >= 0) {
1696 >                            binCount = 1;
1697 >                            for (Node<K,V> e = f, pred = null;; ++binCount) {
1698 >                                K ek;
1699 >                                if (e.hash == h &&
1700 >                                    ((ek = e.key) == key ||
1701 >                                     (ek != null && key.equals(ek)))) {
1702 >                                    val = remappingFunction.apply(key, e.val);
1703 >                                    if (val != null)
1704 >                                        e.val = val;
1705 >                                    else {
1706 >                                        delta = -1;
1707 >                                        Node<K,V> en = e.next;
1708 >                                        if (pred != null)
1709 >                                            pred.next = en;
1710 >                                        else
1711 >                                            setTabAt(tab, i, en);
1712 >                                    }
1713 >                                    break;
1714 >                                }
1715 >                                pred = e;
1716 >                                if ((e = e.next) == null)
1717 >                                    break;
1718 >                            }
1719 >                        }
1720 >                        else if (f instanceof TreeBin) {
1721 >                            binCount = 2;
1722 >                            TreeBin<K,V> t = (TreeBin<K,V>)f;
1723 >                            TreeNode<K,V> r, p;
1724 >                            if ((r = t.root) != null &&
1725 >                                (p = r.findTreeNode(h, key, null)) != null) {
1726 >                                val = remappingFunction.apply(key, p.val);
1727 >                                if (val != null)
1728 >                                    p.val = val;
1729 >                                else {
1730 >                                    delta = -1;
1731 >                                    if (t.removeTreeNode(p))
1732 >                                        setTabAt(tab, i, untreeify(t.first));
1733 >                                }
1734 >                            }
1735 >                        }
1736 >                    }
1737 >                }
1738 >                if (binCount != 0)
1739 >                    break;
1740              }
1741          }
1742 <        return null;
1742 >        if (delta != 0)
1743 >            addCount((long)delta, binCount);
1744 >        return val;
1745      }
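A minimal sketch of the remove-on-null contract documented above (illustrative names, not part of this file): returning null from the remapping function deletes the entry, so a reference count can disappear when it reaches zero.

    import java.util.concurrent.ConcurrentHashMap;

    public class ReleaseDemo {
        static final ConcurrentHashMap<String, Integer> HOLDS = new ConcurrentHashMap<>();

        // Atomically decrements a count, removing the entry at zero;
        // for absent keys, computeIfPresent is a no-op.
        static void release(String key) {
            HOLDS.computeIfPresent(key, (k, v) -> (v > 1) ? v - 1 : null);
        }

        public static void main(String[] args) {
            HOLDS.put("res", 2);
            release("res");                               // res -> 1
            release("res");                               // entry removed
            System.out.println(HOLDS.containsKey("res")); // false
        }
    }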
1746  
1747      /**
1748 <     * Tests if the specified object is a key in this table.
1748 >     * Attempts to compute a mapping for the specified key and its
1749 >     * current mapped value (or {@code null} if there is no current
1750 >     * mapping). The entire method invocation is performed atomically.
1751 >     * Some attempted update operations on this map by other threads
1752 >     * may be blocked while computation is in progress, so the
1753 >     * computation should be short and simple, and must not attempt to
1754 >     * update any other mappings of this map.
1755       *
1756 <     * @param  key   possible key
1757 <     * @return <tt>true</tt> if and only if the specified object
1758 <     *         is a key in this table, as determined by the
1759 <     *         <tt>equals</tt> method; <tt>false</tt> otherwise.
1760 <     * @throws NullPointerException if the specified key is null
1761 <     */
1762 <    @SuppressWarnings("unchecked")
1763 <    public boolean containsKey(Object key) {
1764 <        Segment<K,V> s; // same as get() except no need for volatile value read
1765 <        HashEntry<K,V>[] tab;
1766 <        int h = hash(key.hashCode());
1767 <        long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
1768 <        if ((s = (Segment<K,V>)UNSAFE.getObjectVolatile(segments, u)) != null &&
1769 <            (tab = s.table) != null) {
1770 <            for (HashEntry<K,V> e = (HashEntry<K,V>) UNSAFE.getObjectVolatile
1771 <                     (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
1772 <                 e != null; e = e.next) {
1773 <                K k;
1774 <                if ((k = e.key) == key || (e.hash == h && key.equals(k)))
1775 <                    return true;
1756 >     * @param key key with which the specified value is to be associated
1757 >     * @param remappingFunction the function to compute a value
1758 >     * @return the new value associated with the specified key, or null if none
1759 >     * @throws NullPointerException if the specified key or remappingFunction
1760 >     *         is null
1761 >     * @throws IllegalStateException if the computation detectably
1762 >     *         attempts a recursive update to this map that would
1763 >     *         otherwise never complete
1764 >     * @throws RuntimeException or Error if the remappingFunction does so,
1765 >     *         in which case the mapping is unchanged
1766 >     */
1767 >    public V compute(K key,
1768 >                     BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
1769 >        if (key == null || remappingFunction == null)
1770 >            throw new NullPointerException();
1771 >        int h = spread(key.hashCode());
1772 >        V val = null;
1773 >        int delta = 0;
1774 >        int binCount = 0;
1775 >        for (Node<K,V>[] tab = table;;) {
1776 >            Node<K,V> f; int n, i, fh;
1777 >            if (tab == null || (n = tab.length) == 0)
1778 >                tab = initTable();
1779 >            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1780 >                Node<K,V> r = new ReservationNode<K,V>();
1781 >                synchronized (r) {
1782 >                    if (casTabAt(tab, i, null, r)) {
1783 >                        binCount = 1;
1784 >                        Node<K,V> node = null;
1785 >                        try {
1786 >                            if ((val = remappingFunction.apply(key, null)) != null) {
1787 >                                delta = 1;
1788 >                                node = new Node<K,V>(h, key, val, null);
1789 >                            }
1790 >                        } finally {
1791 >                            setTabAt(tab, i, node);
1792 >                        }
1793 >                    }
1794 >                }
1795 >                if (binCount != 0)
1796 >                    break;
1797 >            }
1798 >            else if ((fh = f.hash) == MOVED)
1799 >                tab = helpTransfer(tab, f);
1800 >            else {
1801 >                synchronized (f) {
1802 >                    if (tabAt(tab, i) == f) {
1803 >                        if (fh >= 0) {
1804 >                            binCount = 1;
1805 >                            for (Node<K,V> e = f, pred = null;; ++binCount) {
1806 >                                K ek;
1807 >                                if (e.hash == h &&
1808 >                                    ((ek = e.key) == key ||
1809 >                                     (ek != null && key.equals(ek)))) {
1810 >                                    val = remappingFunction.apply(key, e.val);
1811 >                                    if (val != null)
1812 >                                        e.val = val;
1813 >                                    else {
1814 >                                        delta = -1;
1815 >                                        Node<K,V> en = e.next;
1816 >                                        if (pred != null)
1817 >                                            pred.next = en;
1818 >                                        else
1819 >                                            setTabAt(tab, i, en);
1820 >                                    }
1821 >                                    break;
1822 >                                }
1823 >                                pred = e;
1824 >                                if ((e = e.next) == null) {
1825 >                                    val = remappingFunction.apply(key, null);
1826 >                                    if (val != null) {
1827 >                                        delta = 1;
1828 >                                        pred.next =
1829 >                                            new Node<K,V>(h, key, val, null);
1830 >                                    }
1831 >                                    break;
1832 >                                }
1833 >                            }
1834 >                        }
1835 >                        else if (f instanceof TreeBin) {
1836 >                            binCount = 1;
1837 >                            TreeBin<K,V> t = (TreeBin<K,V>)f;
1838 >                            TreeNode<K,V> r, p;
1839 >                            if ((r = t.root) != null)
1840 >                                p = r.findTreeNode(h, key, null);
1841 >                            else
1842 >                                p = null;
1843 >                            V pv = (p == null) ? null : p.val;
1844 >                            val = remappingFunction.apply(key, pv);
1845 >                            if (val != null) {
1846 >                                if (p != null)
1847 >                                    p.val = val;
1848 >                                else {
1849 >                                    delta = 1;
1850 >                                    t.putTreeVal(h, key, val);
1851 >                                }
1852 >                            }
1853 >                            else if (p != null) {
1854 >                                delta = -1;
1855 >                                if (t.removeTreeNode(p))
1856 >                                    setTabAt(tab, i, untreeify(t.first));
1857 >                            }
1858 >                        }
1859 >                    }
1860 >                }
1861 >                if (binCount != 0) {
1862 >                    if (binCount >= TREEIFY_THRESHOLD)
1863 >                        treeifyBin(tab, i);
1864 >                    break;
1865 >                }
1866              }
1867          }
1868 <        return false;
1868 >        if (delta != 0)
1869 >            addCount((long)delta, binCount);
1870 >        return val;
1871      }
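compute handles both the absent and present cases in one atomic step; a hedged sketch (invented names) of an adjust-or-remove update:

    import java.util.concurrent.ConcurrentHashMap;

    public class BalanceDemo {
        static final ConcurrentHashMap<String, Long> BALANCES = new ConcurrentHashMap<>();

        // Adds delta to a balance, treating an absent key as zero and
        // dropping the entry when the balance returns to exactly zero.
        static void adjust(String account, long delta) {
            BALANCES.compute(account, (k, v) -> {
                long next = (v == null ? 0L : v) + delta;
                return (next == 0L) ? null : next;
            });
        }

        public static void main(String[] args) {
            adjust("a", 5);               // a -> 5
            adjust("a", -5);              // entry removed
            System.out.println(BALANCES); // {}
        }
    }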
1872  
1873      /**
1874 <     * Returns <tt>true</tt> if this map maps one or more keys to the
1875 <     * specified value. Note: This method requires a full internal
1876 <     * traversal of the hash table, and so is much slower than
1877 <     * method <tt>containsKey</tt>.
1874 >     * If the specified key is not already associated with a
1875 >     * (non-null) value, associates it with the given value.
1876 >     * Otherwise, replaces the value with the results of the given
1877 >     * remapping function, or removes if {@code null}. The entire
1878 >     * method invocation is performed atomically.  Some attempted
1879 >     * update operations on this map by other threads may be blocked
1880 >     * while computation is in progress, so the computation should be
1881 >     * short and simple, and must not attempt to update any other
1882 >     * mappings of this map.
1883       *
1884 <     * @param value value whose presence in this map is to be tested
1885 <     * @return <tt>true</tt> if this map maps one or more keys to the
1886 <     *         specified value
1887 <     * @throws NullPointerException if the specified value is null
1884 >     * @param key key with which the specified value is to be associated
1885 >     * @param value the value to use if absent
1886 >     * @param remappingFunction the function to recompute a value if present
1887 >     * @return the new value associated with the specified key, or null if none
1888 >     * @throws NullPointerException if the specified key or the
1889 >     *         remappingFunction is null
1890 >     * @throws RuntimeException or Error if the remappingFunction does so,
1891 >     *         in which case the mapping is unchanged
1892       */
1893 <    public boolean containsValue(Object value) {
1894 <        // Same idea as size()
949 <        if (value == null)
1893 >    public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
1894 >        if (key == null || value == null || remappingFunction == null)
1895              throw new NullPointerException();
1896 <        final Segment<K,V>[] segments = this.segments;
1897 <        boolean found = false;
1898 <        long last = 0L;   // previous sum
1899 <        int retries = -1;
1900 <        try {
1901 <            outer: for (;;) {
1902 <                if (retries++ == RETRIES_BEFORE_LOCK) {
1903 <                    for (int j = 0; j < segments.length; ++j)
1904 <                        ensureSegment(j).lock(); // force creation
1905 <                }
1906 <                long sum = 0L;
1907 <                for (int j = 0; j < segments.length; ++j) {
1908 <                    HashEntry<K,V>[] tab;
1909 <                    Segment<K,V> seg = segmentAt(segments, j);
1910 <                    if (seg != null && (tab = seg.table) != null) {
1911 <                        for (int i = 0 ; i < tab.length; i++) {
1912 <                            HashEntry<K,V> e;
1913 <                            for (e = entryAt(tab, i); e != null; e = e.next) {
1914 <                                V v = e.value;
1915 <                                if (v != null && value.equals(v)) {
1916 <                                    found = true;
1917 <                                    break outer;
1896 >        int h = spread(key.hashCode());
1897 >        V val = null;
1898 >        int delta = 0;
1899 >        int binCount = 0;
1900 >        for (Node<K,V>[] tab = table;;) {
1901 >            Node<K,V> f; int n, i, fh;
1902 >            if (tab == null || (n = tab.length) == 0)
1903 >                tab = initTable();
1904 >            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1905 >                if (casTabAt(tab, i, null, new Node<K,V>(h, key, value, null))) {
1906 >                    delta = 1;
1907 >                    val = value;
1908 >                    break;
1909 >                }
1910 >            }
1911 >            else if ((fh = f.hash) == MOVED)
1912 >                tab = helpTransfer(tab, f);
1913 >            else {
1914 >                synchronized (f) {
1915 >                    if (tabAt(tab, i) == f) {
1916 >                        if (fh >= 0) {
1917 >                            binCount = 1;
1918 >                            for (Node<K,V> e = f, pred = null;; ++binCount) {
1919 >                                K ek;
1920 >                                if (e.hash == h &&
1921 >                                    ((ek = e.key) == key ||
1922 >                                     (ek != null && key.equals(ek)))) {
1923 >                                    val = remappingFunction.apply(e.val, value);
1924 >                                    if (val != null)
1925 >                                        e.val = val;
1926 >                                    else {
1927 >                                        delta = -1;
1928 >                                        Node<K,V> en = e.next;
1929 >                                        if (pred != null)
1930 >                                            pred.next = en;
1931 >                                        else
1932 >                                            setTabAt(tab, i, en);
1933 >                                    }
1934 >                                    break;
1935 >                                }
1936 >                                pred = e;
1937 >                                if ((e = e.next) == null) {
1938 >                                    delta = 1;
1939 >                                    val = value;
1940 >                                    pred.next =
1941 >                                        new Node<K,V>(h, key, val, null);
1942 >                                    break;
1943                                  }
1944                              }
1945                          }
1946 <                        sum += seg.modCount;
1946 >                        else if (f instanceof TreeBin) {
1947 >                            binCount = 2;
1948 >                            TreeBin<K,V> t = (TreeBin<K,V>)f;
1949 >                            TreeNode<K,V> r = t.root;
1950 >                            TreeNode<K,V> p = (r == null) ? null :
1951 >                                r.findTreeNode(h, key, null);
1952 >                            val = (p == null) ? value :
1953 >                                remappingFunction.apply(p.val, value);
1954 >                            if (val != null) {
1955 >                                if (p != null)
1956 >                                    p.val = val;
1957 >                                else {
1958 >                                    delta = 1;
1959 >                                    t.putTreeVal(h, key, val);
1960 >                                }
1961 >                            }
1962 >                            else if (p != null) {
1963 >                                delta = -1;
1964 >                                if (t.removeTreeNode(p))
1965 >                                    setTabAt(tab, i, untreeify(t.first));
1966 >                            }
1967 >                        }
1968                      }
1969                  }
1970 <                if (retries > 0 && sum == last)
1970 >                if (binCount != 0) {
1971 >                    if (binCount >= TREEIFY_THRESHOLD)
1972 >                        treeifyBin(tab, i);
1973                      break;
1974 <                last = sum;
982 <            }
983 <        } finally {
984 <            if (retries > RETRIES_BEFORE_LOCK) {
985 <                for (int j = 0; j < segments.length; ++j)
986 <                    segmentAt(segments, j).unlock();
1974 >                }
1975              }
1976          }
1977 <        return found;
1977 >        if (delta != 0)
1978 >            addCount((long)delta, binCount);
1979 >        return val;
1980      }
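merge is the idiomatic way to accumulate into a value without a read-modify-write race; a minimal sketch (illustrative class name):

    import java.util.concurrent.ConcurrentHashMap;

    public class WordCountDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Long> counts = new ConcurrentHashMap<>();
            for (String w : "to be or not to be".split(" "))
                counts.merge(w, 1L, Long::sum); // insert 1, or add to the existing count
            System.out.println(counts.get("to")); // 2
        }
    }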
1981  
1982 >    // Hashtable legacy methods
1983 >
1984      /**
1985       * Legacy method testing if some key maps into the specified value
1986       * in this table.  This method is identical in functionality to
1987 <     * {@link #containsValue}, and exists solely to ensure
1987 >     * {@link #containsValue(Object)}, and exists solely to ensure
1988       * full compatibility with class {@link java.util.Hashtable},
1989       * which supported this method prior to introduction of the
1990       * Java Collections framework.
1991       *
1992       * @param  value a value to search for
1993 <     * @return <tt>true</tt> if and only if some key maps to the
1994 <     *         <tt>value</tt> argument in this table as
1995 <     *         determined by the <tt>equals</tt> method;
1996 <     *         <tt>false</tt> otherwise
1993 >     * @return {@code true} if and only if some key maps to the
1994 >     *         {@code value} argument in this table as
1995 >     *         determined by the {@code equals} method;
1996 >     *         {@code false} otherwise
1997       * @throws NullPointerException if the specified value is null
1998       */
1999 <    public boolean contains(Object value) {
1999 >    @Deprecated public boolean contains(Object value) {
2000          return containsValue(value);
2001      }
2002  
2003      /**
2004 <     * Maps the specified key to the specified value in this table.
1013 <     * Neither the key nor the value can be null.
1014 <     *
1015 <     * <p> The value can be retrieved by calling the <tt>get</tt> method
1016 <     * with a key that is equal to the original key.
2004 >     * Returns an enumeration of the keys in this table.
2005       *
2006 <     * @param key key with which the specified value is to be associated
2007 <     * @param value value to be associated with the specified key
1020 <     * @return the previous value associated with <tt>key</tt>, or
1021 <     *         <tt>null</tt> if there was no mapping for <tt>key</tt>
1022 <     * @throws NullPointerException if the specified key or value is null
2006 >     * @return an enumeration of the keys in this table
2007 >     * @see #keySet()
2008       */
2009 <    @SuppressWarnings("unchecked")
2010 <    public V put(K key, V value) {
2011 <        Segment<K,V> s;
2012 <        if (value == null)
1028 <            throw new NullPointerException();
1029 <        int hash = hash(key.hashCode());
1030 <        int j = (hash >>> segmentShift) & segmentMask;
1031 <        if ((s = (Segment<K,V>)UNSAFE.getObject          // nonvolatile; recheck
1032 <             (segments, (j << SSHIFT) + SBASE)) == null) //  in ensureSegment
1033 <            s = ensureSegment(j);
1034 <        return s.put(key, hash, value, false);
2009 >    public Enumeration<K> keys() {
2010 >        Node<K,V>[] t;
2011 >        int f = (t = table) == null ? 0 : t.length;
2012 >        return new KeyIterator<K,V>(t, f, 0, f, this);
2013      }
2014  
2015      /**
2016 <     * {@inheritDoc}
2016 >     * Returns an enumeration of the values in this table.
2017       *
2018 <     * @return the previous value associated with the specified key,
2019 <     *         or <tt>null</tt> if there was no mapping for the key
1042 <     * @throws NullPointerException if the specified key or value is null
2018 >     * @return an enumeration of the values in this table
2019 >     * @see #values()
2020       */
2021 <    @SuppressWarnings("unchecked")
2022 <    public V putIfAbsent(K key, V value) {
2023 <        Segment<K,V> s;
2024 <        if (value == null)
1048 <            throw new NullPointerException();
1049 <        int hash = hash(key.hashCode());
1050 <        int j = (hash >>> segmentShift) & segmentMask;
1051 <        if ((s = (Segment<K,V>)UNSAFE.getObject
1052 <             (segments, (j << SSHIFT) + SBASE)) == null)
1053 <            s = ensureSegment(j);
1054 <        return s.put(key, hash, value, true);
2021 >    public Enumeration<V> elements() {
2022 >        Node<K,V>[] t;
2023 >        int f = (t = table) == null ? 0 : t.length;
2024 >        return new ValueIterator<K,V>(t, f, 0, f, this);
2025      }
2026  
2027 >    // ConcurrentHashMap-only methods
2028 >
2029      /**
2030 <     * Copies all of the mappings from the specified map to this one.
2031 <     * These mappings replace any mappings that this map had for any of the
2032 <     * keys currently in the specified map.
2030 >     * Returns the number of mappings. This method should be used
2031 >     * instead of {@link #size} because a ConcurrentHashMap may
2032 >     * contain more mappings than can be represented as an int. The
2033 >     * value returned is an estimate; the actual count may differ if
2034 >     * there are concurrent insertions or removals.
2035       *
2036 <     * @param m mappings to be stored in this map
2036 >     * @return the number of mappings
2037 >     * @since 1.8
2038       */
2039 <    public void putAll(Map<? extends K, ? extends V> m) {
2040 <        for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
2041 <            put(e.getKey(), e.getValue());
2039 >    public long mappingCount() {
2040 >        long n = sumCount();
2041 >        return (n < 0L) ? 0L : n; // ignore transient negative values
2042      }
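A short sketch of the intended call pattern (illustrative): prefer the long-valued estimate to the int-truncating size().

    import java.util.concurrent.ConcurrentHashMap;

    public class CountDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, String> map = new ConcurrentHashMap<>();
            map.put("k", "v");
            long n = map.mappingCount(); // estimate; concurrent updates may not be reflected
            System.out.println(n);       // 1
        }
    }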
2043  
2044      /**
2045 <     * Removes the key (and its corresponding value) from this map.
2046 <     * This method does nothing if the key is not in the map.
2045 >     * Creates a new {@link Set} backed by a ConcurrentHashMap
2046 >     * from the given type to {@code Boolean.TRUE}.
2047       *
2048 <     * @param  key the key that needs to be removed
2049 <     * @return the previous value associated with <tt>key</tt>, or
1075 <     *         <tt>null</tt> if there was no mapping for <tt>key</tt>
1076 <     * @throws NullPointerException if the specified key is null
2048 >     * @return the new set
2049 >     * @since 1.8
2050       */
2051 <    public V remove(Object key) {
2052 <        int hash = hash(key.hashCode());
2053 <        Segment<K,V> s = segmentForHash(hash);
1081 <        return s == null ? null : s.remove(key, hash, null);
2051 >    public static <K> KeySetView<K,Boolean> newKeySet() {
2052 >        return new KeySetView<K,Boolean>
2053 >            (new ConcurrentHashMap<K,Boolean>(), Boolean.TRUE);
2054      }
2055  
2056      /**
2057 <     * {@inheritDoc}
2057 >     * Creates a new {@link Set} backed by a ConcurrentHashMap
2058 >     * from the given type to {@code Boolean.TRUE}.
2059       *
2060 <     * @throws NullPointerException if the specified key is null
2060 >     * @param initialCapacity The implementation performs internal
2061 >     * sizing to accommodate this many elements.
2062 >     * @throws IllegalArgumentException if the initial capacity of
2063 >     * elements is negative
2064 >     * @return the new set
2065 >     * @since 1.8
2066       */
2067 <    public boolean remove(Object key, Object value) {
2068 <        int hash = hash(key.hashCode());
2069 <        Segment<K,V> s;
1092 <        return value != null && (s = segmentForHash(hash)) != null &&
1093 <            s.remove(key, hash, value) != null;
2067 >    public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
2068 >        return new KeySetView<K,Boolean>
2069 >            (new ConcurrentHashMap<K,Boolean>(initialCapacity), Boolean.TRUE);
2070      }
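Both overloads above produce a full-featured concurrent Set; a minimal usage sketch (invented names):

    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    public class NewKeySetDemo {
        public static void main(String[] args) {
            Set<String> seen = ConcurrentHashMap.newKeySet(64); // presized variant
            System.out.println(seen.add("x")); // true: first insertion
            System.out.println(seen.add("x")); // false: already present
        }
    }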
2071  
2072      /**
2073 <     * {@inheritDoc}
2073 >     * Returns a {@link Set} view of the keys in this map, using the
2074 >     * given common mapped value for any additions (i.e., {@link
2075 >     * Collection#add} and {@link Collection#addAll(Collection)}).
2076 >     * This is of course only appropriate if it is acceptable to use
2077 >     * the same value for all additions from this view.
2078       *
2079 <     * @throws NullPointerException if any of the arguments are null
2079 >     * @param mappedValue the mapped value to use for any additions
2080 >     * @return the set view
2081 >     * @throws NullPointerException if the mappedValue is null
2082       */
2083 <    public boolean replace(K key, V oldValue, V newValue) {
2084 <        int hash = hash(key.hashCode());
1103 <        if (oldValue == null || newValue == null)
2083 >    public KeySetView<K,V> keySet(V mappedValue) {
2084 >        if (mappedValue == null)
2085              throw new NullPointerException();
2086 <        Segment<K,V> s = segmentForHash(hash);
1106 <        return s != null && s.replace(key, hash, oldValue, newValue);
2086 >        return new KeySetView<K,V>(this, mappedValue);
2087      }
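A sketch of the add-through behavior of the view returned above (illustrative names): additions write the common mapped value into the backing map.

    import java.util.concurrent.ConcurrentHashMap;

    public class KeySetViewDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Boolean> map = new ConcurrentHashMap<>();
            ConcurrentHashMap.KeySetView<String, Boolean> live = map.keySet(Boolean.TRUE);
            live.add("k");                    // writes k -> TRUE into the map
            System.out.println(map.get("k")); // true
        }
    }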
2088  
2089 >    /* ---------------- Special Nodes -------------- */
2090 >
2091      /**
2092 <     * {@inheritDoc}
1111 <     *
1112 <     * @return the previous value associated with the specified key,
1113 <     *         or <tt>null</tt> if there was no mapping for the key
1114 <     * @throws NullPointerException if the specified key or value is null
2092 >     * A node inserted at head of bins during transfer operations.
2093       */
2094 <    public V replace(K key, V value) {
2095 <        int hash = hash(key.hashCode());
2096 <        if (value == null)
2097 <            throw new NullPointerException();
2098 <        Segment<K,V> s = segmentForHash(hash);
2099 <        return s == null ? null : s.replace(key, hash, value);
2094 >    static final class ForwardingNode<K,V> extends Node<K,V> {
2095 >        final Node<K,V>[] nextTable;
2096 >        ForwardingNode(Node<K,V>[] tab) {
2097 >            super(MOVED, null, null, null);
2098 >            this.nextTable = tab;
2099 >        }
2100 >
2101 >        Node<K,V> find(int h, Object k) {
2102 >            Node<K,V> e; int n;
2103 >            Node<K,V>[] tab = nextTable;
2104 >            if (k != null && tab != null && (n = tab.length) > 0 &&
2105 >                (e = tabAt(tab, (n - 1) & h)) != null) {
2106 >                do {
2107 >                    int eh; K ek;
2108 >                    if ((eh = e.hash) == h &&
2109 >                        ((ek = e.key) == k || (ek != null && k.equals(ek))))
2110 >                        return e;
2111 >                    if (eh < 0)
2112 >                        return e.find(h, k);
2113 >                } while ((e = e.next) != null);
2114 >            }
2115 >            return null;
2116 >        }
2117      }
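A simplified standalone model of the forwarding idea (a sketch only, not the real internals; all names invented): once a bin has been transferred, its old slot holds a marker that redirects readers to the new table.

    import java.util.concurrent.atomic.AtomicReferenceArray;

    class ForwardingSketch {
        static class Node {
            final int hash; final Object key; final Object val; final Node next;
            Node(int h, Object k, Object v, Node n) { hash = h; key = k; val = v; next = n; }
        }
        // Marker occupying an already-moved slot; the hash field is unused here.
        static final class Forward extends Node {
            final AtomicReferenceArray<Node> nextTab;
            Forward(AtomicReferenceArray<Node> nt) { super(-1, null, null, null); nextTab = nt; }
        }

        static Object get(AtomicReferenceArray<Node> tab, int h, Object key) {
            Node e = tab.get(h & (tab.length() - 1));
            if (e instanceof Forward)                      // slot moved during resize:
                return get(((Forward) e).nextTab, h, key); // restart in the new table
            for (; e != null; e = e.next)
                if (e.hash == h && key.equals(e.key))
                    return e.val;
            return null;
        }
    }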
2118  
2119      /**
2120 <     * Removes all of the mappings from this map.
2120 >     * A place-holder node used in computeIfAbsent and compute
2121       */
2122 <    public void clear() {
2123 <        final Segment<K,V>[] segments = this.segments;
2124 <        for (int j = 0; j < segments.length; ++j) {
2125 <            Segment<K,V> s = segmentAt(segments, j);
2126 <            if (s != null)
2127 <                s.clear();
2122 >    static final class ReservationNode<K,V> extends Node<K,V> {
2123 >        ReservationNode() {
2124 >            super(RESERVED, null, null, null);
2125 >        }
2126 >
2127 >        Node<K,V> find(int h, Object k) {
2128 >            return null;
2129          }
2130      }
2131  
2132 >    /* ---------------- Table Initialization and Resizing -------------- */
2133 >
2134      /**
2135 <     * Returns a {@link Set} view of the keys contained in this map.
1138 <     * The set is backed by the map, so changes to the map are
1139 <     * reflected in the set, and vice-versa.  The set supports element
1140 <     * removal, which removes the corresponding mapping from this map,
1141 <     * via the <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
1142 <     * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
1143 <     * operations.  It does not support the <tt>add</tt> or
1144 <     * <tt>addAll</tt> operations.
1145 <     *
1146 <     * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
1147 <     * that will never throw {@link ConcurrentModificationException},
1148 <     * and guarantees to traverse elements as they existed upon
1149 <     * construction of the iterator, and may (but is not guaranteed to)
1150 <     * reflect any modifications subsequent to construction.
2135 >     * Initializes table, using the size recorded in sizeCtl.
2136       */
2137 <    public Set<K> keySet() {
2138 <        Set<K> ks = keySet;
2139 <        return (ks != null) ? ks : (keySet = new KeySet());
2137 >    private final Node<K,V>[] initTable() {
2138 >        Node<K,V>[] tab; int sc;
2139 >        while ((tab = table) == null || tab.length == 0) {
2140 >            if ((sc = sizeCtl) < 0)
2141 >                Thread.yield(); // lost initialization race; just spin
2142 >            else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
2143 >                try {
2144 >                    if ((tab = table) == null || tab.length == 0) {
2145 >                        int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
2146 >                        @SuppressWarnings({"rawtypes","unchecked"})
2147 >                            Node<K,V>[] nt = (Node<K,V>[])new Node[n];
2148 >                        table = tab = nt;
2149 >                        sc = n - (n >>> 2);
2150 >                    }
2151 >                } finally {
2152 >                    sizeCtl = sc;
2153 >                }
2154 >                break;
2155 >            }
2156 >        }
2157 >        return tab;
2158      }
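The sizeCtl protocol above can be sketched in isolation (a simplified model with AtomicInteger standing in for the Unsafe CAS; names invented): one thread claims initialization by CASing the control word to -1, losers spin-yield, and the winner publishes the 0.75*n resize threshold on exit.

    import java.util.concurrent.atomic.AtomicInteger;

    class LazyTableSketch {
        private volatile Object[] table;
        private final AtomicInteger sizeCtl = new AtomicInteger(16); // requested capacity

        Object[] initTable() {
            Object[] tab;
            while ((tab = table) == null) {
                int sc = sizeCtl.get();
                if (sc < 0)
                    Thread.yield();                  // another thread owns initialization
                else if (sizeCtl.compareAndSet(sc, -1)) {
                    try {
                        if ((tab = table) == null) { // recheck while holding "ownership"
                            int n = (sc > 0) ? sc : 16;
                            table = tab = new Object[n];
                            sc = n - (n >>> 2);      // next resize threshold: 0.75 * n
                        }
                    } finally {
                        sizeCtl.set(sc);
                    }
                    break;
                }
            }
            return tab;
        }
    }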
2159  
2160      /**
2161 <     * Returns a {@link Collection} view of the values contained in this map.
2162 <     * The collection is backed by the map, so changes to the map are
2163 <     * reflected in the collection, and vice-versa.  The collection
2164 <     * supports element removal, which removes the corresponding
2165 <     * mapping from this map, via the <tt>Iterator.remove</tt>,
1163 <     * <tt>Collection.remove</tt>, <tt>removeAll</tt>,
1164 <     * <tt>retainAll</tt>, and <tt>clear</tt> operations.  It does not
1165 <     * support the <tt>add</tt> or <tt>addAll</tt> operations.
2161 >     * Adds to count, and if table is too small and not already
2162 >     * resizing, initiates transfer. If already resizing, helps
2163 >     * perform transfer if work is available.  Rechecks occupancy
2164 >     * after a transfer to see if another resize is already needed
2165 >     * because resizings are lagging additions.
2166       *
2167 <     * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
2168 <     * that will never throw {@link ConcurrentModificationException},
1169 <     * and guarantees to traverse elements as they existed upon
1170 <     * construction of the iterator, and may (but is not guaranteed to)
1171 <     * reflect any modifications subsequent to construction.
2167 >     * @param x the count to add
2168 >     * @param check if {@code <0}, don't check resize, if {@code <=1} only check if uncontended
2169       */
2170 <    public Collection<V> values() {
2171 <        Collection<V> vs = values;
2172 <        return (vs != null) ? vs : (values = new Values());
2170 >    private final void addCount(long x, int check) {
2171 >        CounterCell[] as; long b, s;
2172 >        if ((as = counterCells) != null ||
2173 >            !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
2174 >            CounterCell a; long v; int m;
2175 >            boolean uncontended = true;
2176 >            if (as == null || (m = as.length - 1) < 0 ||
2177 >                (a = as[ThreadLocalRandom.getProbe() & m]) == null ||
2178 >                !(uncontended =
2179 >                  U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
2180 >                fullAddCount(x, uncontended);
2181 >                return;
2182 >            }
2183 >            if (check <= 1)
2184 >                return;
2185 >            s = sumCount();
2186 >        }
2187 >        if (check >= 0) {
2188 >            Node<K,V>[] tab, nt; int sc;
2189 >            while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
2190 >                   tab.length < MAXIMUM_CAPACITY) {
2191 >                if (sc < 0) {
2192 >                    if (sc == -1 || transferIndex <= transferOrigin ||
2193 >                        (nt = nextTable) == null)
2194 >                        break;
2195 >                    if (U.compareAndSwapInt(this, SIZECTL, sc, sc - 1))
2196 >                        transfer(tab, nt);
2197 >                }
2198 >                else if (U.compareAndSwapInt(this, SIZECTL, sc, -2))
2199 >                    transfer(tab, null);
2200 >                s = sumCount();
2201 >            }
2202 >        }
2203      }
2204  
2205      /**
2206 <     * Returns a {@link Set} view of the mappings contained in this map.
1180 <     * The set is backed by the map, so changes to the map are
1181 <     * reflected in the set, and vice-versa.  The set supports element
1182 <     * removal, which removes the corresponding mapping from the map,
1183 <     * via the <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
1184 <     * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
1185 <     * operations.  It does not support the <tt>add</tt> or
1186 <     * <tt>addAll</tt> operations.
1187 <     *
1188 <     * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
1189 <     * that will never throw {@link ConcurrentModificationException},
1190 <     * and guarantees to traverse elements as they existed upon
1191 <     * construction of the iterator, and may (but is not guaranteed to)
1192 <     * reflect any modifications subsequent to construction.
2206 >     * Helps transfer if a resize is in progress.
2207       */
2208 <    public Set<Map.Entry<K,V>> entrySet() {
2209 <        Set<Map.Entry<K,V>> es = entrySet;
2210 <        return (es != null) ? es : (entrySet = new EntrySet());
2208 >    final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
2209 >        Node<K,V>[] nextTab; int sc;
2210 >        if ((f instanceof ForwardingNode) &&
2211 >            (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
2212 >            if (nextTab == nextTable && tab == table &&
2213 >                transferIndex > transferOrigin && (sc = sizeCtl) < -1 &&
2214 >                U.compareAndSwapInt(this, SIZECTL, sc, sc - 1))
2215 >                transfer(tab, nextTab);
2216 >            return nextTab;
2217 >        }
2218 >        return table;
2219      }
2220  
2221      /**
2222 <     * Returns an enumeration of the keys in this table.
2222 >     * Tries to presize table to accommodate the given number of elements.
2223       *
2224 <     * @return an enumeration of the keys in this table
1203 <     * @see #keySet()
2224 >     * @param size number of elements (doesn't need to be perfectly accurate)
2225       */
2226 <    public Enumeration<K> keys() {
2227 <        return new KeyIterator();
2226 >    private final void tryPresize(int size) {
2227 >        int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
2228 >            tableSizeFor(size + (size >>> 1) + 1);
2229 >        int sc;
2230 >        while ((sc = sizeCtl) >= 0) {
2231 >            Node<K,V>[] tab = table; int n;
2232 >            if (tab == null || (n = tab.length) == 0) {
2233 >                n = (sc > c) ? sc : c;
2234 >                if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
2235 >                    try {
2236 >                        if (table == tab) {
2237 >                            @SuppressWarnings({"rawtypes","unchecked"})
2238 >                                Node<K,V>[] nt = (Node<K,V>[])new Node[n];
2239 >                            table = nt;
2240 >                            sc = n - (n >>> 2);
2241 >                        }
2242 >                    } finally {
2243 >                        sizeCtl = sc;
2244 >                    }
2245 >                }
2246 >            }
2247 >            else if (c <= sc || n >= MAXIMUM_CAPACITY)
2248 >                break;
2249 >            else if (tab == table &&
2250 >                     U.compareAndSwapInt(this, SIZECTL, sc, -2))
2251 >                transfer(tab, null);
2252 >        }
2253      }
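The presizing arithmetic above relies on tableSizeFor (defined earlier in this file); a sketch of its effect, with the MAXIMUM_CAPACITY clamp omitted:

    public class PresizeSketch {
        // Rounds up to the next power of two by smearing the high bit
        // (the real code additionally clamps at MAXIMUM_CAPACITY).
        static int tableSizeFor(int c) {
            int n = c - 1;
            n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16;
            return (n < 0) ? 1 : n + 1;
        }

        public static void main(String[] args) {
            int size = 100; // expected number of mappings
            // tryPresize requests capacity for size * 1.5 + 1, rounded up:
            System.out.println(tableSizeFor(size + (size >>> 1) + 1)); // 256
        }
    }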
2254  
2255      /**
2256 <     * Returns an enumeration of the values in this table.
2257 <     *
1212 <     * @return an enumeration of the values in this table
1213 <     * @see #values()
2256 >     * Moves and/or copies the nodes in each bin to new table. See
2257 >     * above for explanation.
2258       */
2259 <    public Enumeration<V> elements() {
2260 <        return new ValueIterator();
2259 >    private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
2260 >        int n = tab.length, stride;
2261 >        if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
2262 >            stride = MIN_TRANSFER_STRIDE; // subdivide range
2263 >        if (nextTab == null) {            // initiating
2264 >            try {
2265 >                @SuppressWarnings({"rawtypes","unchecked"})
2266 >                    Node<K,V>[] nt = (Node<K,V>[])new Node[n << 1];
2267 >                nextTab = nt;
2268 >            } catch (Throwable ex) {      // try to cope with OOME
2269 >                sizeCtl = Integer.MAX_VALUE;
2270 >                return;
2271 >            }
2272 >            nextTable = nextTab;
2273 >            transferOrigin = n;
2274 >            transferIndex = n;
2275 >            ForwardingNode<K,V> rev = new ForwardingNode<K,V>(tab);
2276 >            for (int k = n; k > 0;) {    // progressively reveal ready slots
2277 >                int nextk = (k > stride) ? k - stride : 0;
2278 >                for (int m = nextk; m < k; ++m)
2279 >                    nextTab[m] = rev;
2280 >                for (int m = n + nextk; m < n + k; ++m)
2281 >                    nextTab[m] = rev;
2282 >                U.putOrderedInt(this, TRANSFERORIGIN, k = nextk);
2283 >            }
2284 >        }
2285 >        int nextn = nextTab.length;
2286 >        ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
2287 >        boolean advance = true;
2288 >        for (int i = 0, bound = 0;;) {
2289 >            int nextIndex, nextBound, fh; Node<K,V> f;
2290 >            while (advance) {
2291 >                if (--i >= bound)
2292 >                    advance = false;
2293 >                else if ((nextIndex = transferIndex) <= transferOrigin) {
2294 >                    i = -1;
2295 >                    advance = false;
2296 >                }
2297 >                else if (U.compareAndSwapInt
2298 >                         (this, TRANSFERINDEX, nextIndex,
2299 >                          nextBound = (nextIndex > stride ?
2300 >                                       nextIndex - stride : 0))) {
2301 >                    bound = nextBound;
2302 >                    i = nextIndex - 1;
2303 >                    advance = false;
2304 >                }
2305 >            }
2306 >            if (i < 0 || i >= n || i + n >= nextn) {
2307 >                for (int sc;;) {
2308 >                    if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, ++sc)) {
2309 >                        if (sc == -1) {
2310 >                            nextTable = null;
2311 >                            table = nextTab;
2312 >                            sizeCtl = (n << 1) - (n >>> 1);
2313 >                        }
2314 >                        return;
2315 >                    }
2316 >                }
2317 >            }
2318 >            else if ((f = tabAt(tab, i)) == null) {
2319 >                if (casTabAt(tab, i, null, fwd)) {
2320 >                    setTabAt(nextTab, i, null);
2321 >                    setTabAt(nextTab, i + n, null);
2322 >                    advance = true;
2323 >                }
2324 >            }
2325 >            else if ((fh = f.hash) == MOVED)
2326 >                advance = true; // already processed
2327 >            else {
2328 >                synchronized (f) {
2329 >                    if (tabAt(tab, i) == f) {
2330 >                        Node<K,V> ln, hn;
2331 >                        if (fh >= 0) {
2332 >                            int runBit = fh & n;
2333 >                            Node<K,V> lastRun = f;
2334 >                            for (Node<K,V> p = f.next; p != null; p = p.next) {
2335 >                                int b = p.hash & n;
2336 >                                if (b != runBit) {
2337 >                                    runBit = b;
2338 >                                    lastRun = p;
2339 >                                }
2340 >                            }
2341 >                            if (runBit == 0) {
2342 >                                ln = lastRun;
2343 >                                hn = null;
2344 >                            }
2345 >                            else {
2346 >                                hn = lastRun;
2347 >                                ln = null;
2348 >                            }
2349 >                            for (Node<K,V> p = f; p != lastRun; p = p.next) {
2350 >                                int ph = p.hash; K pk = p.key; V pv = p.val;
2351 >                                if ((ph & n) == 0)
2352 >                                    ln = new Node<K,V>(ph, pk, pv, ln);
2353 >                                else
2354 >                                    hn = new Node<K,V>(ph, pk, pv, hn);
2355 >                            }
2356 >                        }
2357 >                        else if (f instanceof TreeBin) {
2358 >                            TreeBin<K,V> t = (TreeBin<K,V>)f;
2359 >                            TreeNode<K,V> lo = null, loTail = null;
2360 >                            TreeNode<K,V> hi = null, hiTail = null;
2361 >                            int lc = 0, hc = 0;
2362 >                            for (Node<K,V> e = t.first; e != null; e = e.next) {
2363 >                                int h = e.hash;
2364 >                                TreeNode<K,V> p = new TreeNode<K,V>
2365 >                                    (h, e.key, e.val, null, null);
2366 >                                if ((h & n) == 0) {
2367 >                                    if ((p.prev = loTail) == null)
2368 >                                        lo = p;
2369 >                                    else
2370 >                                        loTail.next = p;
2371 >                                    loTail = p;
2372 >                                    ++lc;
2373 >                                }
2374 >                                else {
2375 >                                    if ((p.prev = hiTail) == null)
2376 >                                        hi = p;
2377 >                                    else
2378 >                                        hiTail.next = p;
2379 >                                    hiTail = p;
2380 >                                    ++hc;
2381 >                                }
2382 >                            }
2383 >                            ln = (lc <= UNTREEIFY_THRESHOLD ?  untreeify(lo) :
2384 >                                  (hc != 0) ? new TreeBin<K,V>(lo) : t);
2385 >                            hn = (hc <= UNTREEIFY_THRESHOLD ? untreeify(hi) :
2386 >                                  (lc != 0) ? new TreeBin<K,V>(hi) : t);
2387 >                        }
2388 >                        else
2389 >                            ln = hn = null;
2390 >                        setTabAt(nextTab, i, ln);
2391 >                        setTabAt(nextTab, i + n, hn);
2392 >                        setTabAt(tab, i, fwd);
2393 >                        advance = true;
2394 >                    }
2395 >                }
2396 >            }
2397 >        }
2398 >    }
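The lo/hi split above rests on a single-bit invariant worth stating directly: with old length n (a power of two), a node in bin i rehashes to either i or i + n in the doubled table, selected by (hash & n). A minimal check (illustrative values):

    public class SplitSketch {
        public static void main(String[] args) {
            int n = 16;                         // old table length (power of two)
            for (int h : new int[]{5, 21, 37, 53}) {
                int oldIndex = h & (n - 1);
                int newIndex = h & (2 * n - 1); // bucket in the doubled table
                String half = ((h & n) == 0) ? "stays at i" : "moves to i + n";
                System.out.println("h=" + h + " old=" + oldIndex
                                   + " new=" + newIndex + " (" + half + ")");
            }
        }
    }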
2399 >
2400 >    /* ---------------- Counter support -------------- */
2401 >
2402 >    /**
2403 >     * A padded cell for distributing counts.  Adapted from LongAdder
2404 >     * and Striped64.  See their internal docs for explanation.
2405 >     */
2406 >    @sun.misc.Contended static final class CounterCell {
2407 >        volatile long value;
2408 >        CounterCell(long x) { value = x; }
2409 >    }
2410 >
2411 >    final long sumCount() {
2412 >        CounterCell[] as = counterCells; CounterCell a;
2413 >        long sum = baseCount;
2414 >        if (as != null) {
2415 >            for (int i = 0; i < as.length; ++i) {
2416 >                if ((a = as[i]) != null)
2417 >                    sum += a.value;
2418 >            }
2419 >        }
2420 >        return sum;
2421 >    }
2422 >
2423 >    // See LongAdder version for explanation
2424 >    private final void fullAddCount(long x, boolean wasUncontended) {
2425 >        int h;
2426 >        if ((h = ThreadLocalRandom.getProbe()) == 0) {
2427 >            ThreadLocalRandom.localInit();      // force initialization
2428 >            h = ThreadLocalRandom.getProbe();
2429 >            wasUncontended = true;
2430 >        }
2431 >        boolean collide = false;                // True if last slot nonempty
2432 >        for (;;) {
2433 >            CounterCell[] as; CounterCell a; int n; long v;
2434 >            if ((as = counterCells) != null && (n = as.length) > 0) {
2435 >                if ((a = as[(n - 1) & h]) == null) {
2436 >                    if (cellsBusy == 0) {            // Try to attach new Cell
2437 >                        CounterCell r = new CounterCell(x); // Optimistic create
2438 >                        if (cellsBusy == 0 &&
2439 >                            U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
2440 >                            boolean created = false;
2441 >                            try {               // Recheck under lock
2442 >                                CounterCell[] rs; int m, j;
2443 >                                if ((rs = counterCells) != null &&
2444 >                                    (m = rs.length) > 0 &&
2445 >                                    rs[j = (m - 1) & h] == null) {
2446 >                                    rs[j] = r;
2447 >                                    created = true;
2448 >                                }
2449 >                            } finally {
2450 >                                cellsBusy = 0;
2451 >                            }
2452 >                            if (created)
2453 >                                break;
2454 >                            continue;           // Slot is now non-empty
2455 >                        }
2456 >                    }
2457 >                    collide = false;
2458 >                }
2459 >                else if (!wasUncontended)       // CAS already known to fail
2460 >                    wasUncontended = true;      // Continue after rehash
2461 >                else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))
2462 >                    break;
2463 >                else if (counterCells != as || n >= NCPU)
2464 >                    collide = false;            // At max size or stale
2465 >                else if (!collide)
2466 >                    collide = true;
2467 >                else if (cellsBusy == 0 &&
2468 >                         U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
2469 >                    try {
2470 >                        if (counterCells == as) {// Expand table unless stale
2471 >                            CounterCell[] rs = new CounterCell[n << 1];
2472 >                            for (int i = 0; i < n; ++i)
2473 >                                rs[i] = as[i];
2474 >                            counterCells = rs;
2475 >                        }
2476 >                    } finally {
2477 >                        cellsBusy = 0;
2478 >                    }
2479 >                    collide = false;
2480 >                    continue;                   // Retry with expanded table
2481 >                }
2482 >                h = ThreadLocalRandom.advanceProbe(h);
2483 >            }
2484 >            else if (cellsBusy == 0 && counterCells == as &&
2485 >                     U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
2486 >                boolean init = false;
2487 >                try {                           // Initialize table
2488 >                    if (counterCells == as) {
2489 >                        CounterCell[] rs = new CounterCell[2];
2490 >                        rs[h & 1] = new CounterCell(x);
2491 >                        counterCells = rs;
2492 >                        init = true;
2493 >                    }
2494 >                } finally {
2495 >                    cellsBusy = 0;
2496 >                }
2497 >                if (init)
2498 >                    break;
2499 >            }
2500 >            else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x))
2501 >                break;                          // Fall back on using base
2502 >        }
2503      }
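The cell machinery above is the same idea exposed publicly as java.util.concurrent.atomic.LongAdder; code that only needs a contention-friendly counter can use it directly:

    import java.util.concurrent.atomic.LongAdder;

    public class AdderSketch {
        public static void main(String[] args) {
            LongAdder hits = new LongAdder();
            hits.increment();               // contended increments spread across cells
            hits.add(2L);
            System.out.println(hits.sum()); // 3; a moment-in-time sum, like sumCount
        }
    }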
2504  
2505 <    /* ---------------- Iterator Support -------------- */
2505 >    /* ---------------- Conversion from/to TreeBins -------------- */
2506  
2507 <    abstract class HashIterator {
2508 <        int nextSegmentIndex;
2509 <        int nextTableIndex;
2510 <        HashEntry<K,V>[] currentTable;
2511 <        HashEntry<K, V> nextEntry;
2512 <        HashEntry<K, V> lastReturned;
2507 >    /**
2508 >     * Replaces all linked nodes in bin at given index unless table is
2509 >     * too small, in which case resizes instead.
2510 >     */
2511 >    private final void treeifyBin(Node<K,V>[] tab, int index) {
2512 >        Node<K,V> b; int n, sc;
2513 >        if (tab != null) {
2514 >            if ((n = tab.length) < MIN_TREEIFY_CAPACITY &&
2515 >                tab == table && (sc = sizeCtl) >= 0 &&
2516 >                U.compareAndSwapInt(this, SIZECTL, sc, -2))
2517 >                transfer(tab, null);
2518 >            else if ((b = tabAt(tab, index)) != null) {
2519 >                synchronized (b) {
2520 >                    if (tabAt(tab, index) == b) {
2521 >                        TreeNode<K,V> hd = null, tl = null;
2522 >                        for (Node<K,V> e = b; e != null; e = e.next) {
2523 >                            TreeNode<K,V> p =
2524 >                                new TreeNode<K,V>(e.hash, e.key, e.val,
2525 >                                                  null, null);
2526 >                            if ((p.prev = tl) == null)
2527 >                                hd = p;
2528 >                            else
2529 >                                tl.next = p;
2530 >                            tl = p;
2531 >                        }
2532 >                        setTabAt(tab, index, new TreeBin<K,V>(hd));
2533 >                    }
2534 >                }
2535 >            }
2536 >        }
2537 >    }
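A worked aside (ours, under the usual assumption of well-spread hash codes): bin sizes follow a roughly Poisson(0.5) distribution at the default load factor, so a bin long enough to treeify is vanishingly rare; trees mainly defend against poor or adversarial hashCodes.

class TreeifyOddsDemo {
    public static void main(String[] args) {
        // P[k] = e^-lambda * lambda^k / k!, with lambda ~ 0.5 at default load factor
        double lambda = 0.5, term = Math.exp(-lambda), cum = term;
        for (int k = 1; k < 8; ++k) {
            term *= lambda / k;
            cum += term;
        }
        System.out.printf("P[bin length >= 8] ~ %.1e%n", 1.0 - cum); // ~6e-8
    }
}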
2538  
2539 <        HashIterator() {
2540 <            nextSegmentIndex = segments.length - 1;
2541 <            nextTableIndex = -1;
2542 <            advance();
2539 >    /**
2540 >     * Returns a list of non-TreeNodes replacing those in given list.
2541 >     */
2542 >    static <K,V> Node<K,V> untreeify(Node<K,V> b) {
2543 >        Node<K,V> hd = null, tl = null;
2544 >        for (Node<K,V> q = b; q != null; q = q.next) {
2545 >            Node<K,V> p = new Node<K,V>(q.hash, q.key, q.val, null);
2546 >            if (tl == null)
2547 >                hd = p;
2548 >            else
2549 >                tl.next = p;
2550 >            tl = p;
2551 >        }
2552 >        return hd;
2553 >    }
2554 >
2555 >    /* ---------------- TreeNodes -------------- */
2556 >
2557 >    /**
2558 >     * Nodes for use in TreeBins
2559 >     */
2560 >    static final class TreeNode<K,V> extends Node<K,V> {
2561 >        TreeNode<K,V> parent;  // red-black tree links
2562 >        TreeNode<K,V> left;
2563 >        TreeNode<K,V> right;
2564 >        TreeNode<K,V> prev;    // needed to unlink next upon deletion
2565 >        boolean red;
2566 >
2567 >        TreeNode(int hash, K key, V val, Node<K,V> next,
2568 >                 TreeNode<K,V> parent) {
2569 >            super(hash, key, val, next);
2570 >            this.parent = parent;
2571 >        }
2572 >
2573 >        Node<K,V> find(int h, Object k) {
2574 >            return findTreeNode(h, k, null);
2575 >        }
2576 >
2577 >        /**
2578 >         * Returns the TreeNode (or null if not found) for the given key
2579 >         * starting at given root.
2580 >         */
2581 >        final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) {
2582 >            if (k == null)
2583 >                return null;
2584 >            TreeNode<K,V> p = this;
2585 >            do {
2586 >                int ph, dir; K pk; TreeNode<K,V> q;
2587 >                TreeNode<K,V> pl = p.left, pr = p.right;
2588 >                if ((ph = p.hash) > h)
2589 >                    p = pl;
2590 >                else if (ph < h)
2591 >                    p = pr;
2592 >                else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
2593 >                    return p;
2594 >                else if (pl == null && pr == null)
2595 >                    break;
2596 >                else if ((kc != null || (kc = comparableClassFor(k)) != null) &&
2597 >                         (dir = compareComparables(kc, k, pk)) != 0)
2598 >                    p = (dir < 0) ? pl : pr;
2599 >                else if (pl == null)
2600 >                    p = pr;
2601 >                else if (pr == null || (q = pr.findTreeNode(h, k, kc)) == null)
2602 >                    p = pl;
2603 >                else
2604 >                    return q;
2605 >            } while (p != null);
2606 >            return null;
2607 >        }
2608 >    }
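A simplified sketch (ours) of the comparableClassFor helper that the tie-break above relies on; the real method, defined earlier in this file, handles more cases. It yields k's class only when that class implements Comparable of itself, so unrelated Comparable types are never compared to each other.

import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;

class ComparableClassForSketch {
    static Class<?> comparableClassFor(Object x) {
        if (x instanceof Comparable) {
            Class<?> c = x.getClass();
            if (c == String.class)
                return c;                       // common case, short-circuited
            for (Type t : c.getGenericInterfaces()) {
                if (t instanceof ParameterizedType) {
                    ParameterizedType p = (ParameterizedType) t;
                    Type[] args;
                    if (p.getRawType() == Comparable.class &&
                        (args = p.getActualTypeArguments()).length == 1 &&
                        args[0] == c)           // class C implements Comparable<C>
                        return c;
                }
            }
        }
        return null;
    }
}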
2609 >
2610 >    /* ---------------- TreeBins -------------- */
2611 >
2612 >    /**
2613 >     * TreeNodes used at the heads of bins. TreeBins do not hold user
2614 >     * keys or values, but instead point to a list of TreeNodes and
2615 >     * their root. They also maintain a parasitic read-write lock
2616 >     * forcing writers (who hold bin lock) to wait for readers (who do
2617 >     * not) to complete before tree restructuring operations.
2618 >     */
2619 >    static final class TreeBin<K,V> extends Node<K,V> {
2620 >        TreeNode<K,V> root;
2621 >        volatile TreeNode<K,V> first;
2622 >        volatile Thread waiter;
2623 >        static final int WRITER = 1; // values for lockState
2624 >        static final int WAITER = 2;
2625 >        static final int READER = 4;
2626 >        volatile int lockState;
2627 >
2628 >        /**
2629 >         * Creates bin with initial set of nodes headed by b.
2630 >         */
2631 >        TreeBin(TreeNode<K,V> b) {
2632 >            super(TREEBIN, null, null, null);
2633 >            first = b;
2634 >            TreeNode<K,V> r = null;
2635 >            for (TreeNode<K,V> x = b, next; x != null; x = next) {
2636 >                next = (TreeNode<K,V>)x.next;
2637 >                x.left = x.right = null;
2638 >                if (r == null) {
2639 >                    x.parent = null;
2640 >                    x.red = false;
2641 >                    r = x;
2642 >                }
2643 >                else {
2644 >                    Object key = x.key;
2645 >                    int hash = x.hash;
2646 >                    Class<?> kc = null;
2647 >                    for (TreeNode<K,V> p = r;;) {
2648 >                        int dir, ph;
2649 >                        if ((ph = p.hash) > hash)
2650 >                            dir = -1;
2651 >                        else if (ph < hash)
2652 >                            dir = 1;
2653 >                        else if ((kc != null ||
2654 >                                  (kc = comparableClassFor(key)) != null))
2655 >                            dir = compareComparables(kc, key, p.key);
2656 >                        else
2657 >                            dir = 0;
2658 >                        TreeNode<K,V> xp = p;
2659 >                        if ((p = (dir <= 0) ? p.left : p.right) == null) {
2660 >                            x.parent = xp;
2661 >                            if (dir <= 0)
2662 >                                xp.left = x;
2663 >                            else
2664 >                                xp.right = x;
2665 >                            r = balanceInsertion(r, x);
2666 >                            break;
2667 >                        }
2668 >                    }
2669 >                }
2670 >            }
2671 >            root = r;
2672          }
2673  
2674          /**
2675 <         * Sets nextEntry to first node of next non-empty table
1236 <         * (in backwards order, to simplify checks).
2675 >         * Acquires write lock for tree restructuring
2676           */
2677 <        final void advance() {
2677 >        private final void lockRoot() {
2678 >            if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER))
2679 >                contendedLock(); // offload to separate method
2680 >        }
2681 >
2682 >        /**
2683 >         * Releases write lock for tree restructuring
2684 >         */
2685 >        private final void unlockRoot() {
2686 >            lockState = 0;
2687 >        }
2688 >
2689 >        /**
2690 >         * Possibly blocks awaiting root lock
2691 >         */
2692 >        private final void contendedLock() {
2693 >            boolean waiting = false;
2694 >            for (int s;;) {
2695 >                if (((s = lockState) & ~WAITER) == 0) {
2696 >                    if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) {
2697 >                        if (waiting)
2698 >                            waiter = null;
2699 >                        return;
2700 >                    }
2701 >                }
2702 >                else if ((s & WAITER) == 0) {
2703 >                    if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) {
2704 >                        waiting = true;
2705 >                        waiter = Thread.currentThread();
2706 >                    }
2707 >                }
2708 >                else if (waiting)
2709 >                    LockSupport.park(this);
2710 >            }
2711 >        }
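A hedged sketch (ours; names invented) of the lockState protocol used by lockRoot, contendedLock, and find above, re-expressed with AtomicInteger instead of Unsafe: readers arrive in units of READER on the fast path, while a writer, which already holds the bin's monitor, spins here rather than setting WAITER and parking as the real code does.

import java.util.concurrent.atomic.AtomicInteger;

class ParasiticRWLockSketch {
    static final int WRITER = 1, WAITER = 2, READER = 4;
    final AtomicInteger state = new AtomicInteger();

    boolean tryReadLock() {                 // fast path used by find()
        int s = state.get();
        return (s & (WAITER | WRITER)) == 0 && state.compareAndSet(s, s + READER);
    }

    void readUnlock() {                     // real code also unparks a waiter
        state.addAndGet(-READER);
    }

    void writeLock() {                      // caller already holds the bin lock
        for (;;) {
            int s = state.get();
            if ((s & ~WAITER) == 0 && state.compareAndSet(s, WRITER))
                return;                     // no readers and no writer: acquired
            Thread.yield();                 // real code sets WAITER and parks
        }
    }

    void writeUnlock() {
        state.set(0);
    }
}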
2712 >
2713 >        /**
2714 >         * Returns matching node or null if none. Tries to search
2715 >         * using tree comparisons from root, but continues linear
2716 >         * search when lock not available.
2717 >         */
2718 >        final Node<K,V> find(int h, Object k) {
2719 >            if (k != null) {
2720 >                for (Node<K,V> e = first; e != null; e = e.next) {
2721 >                    int s; K ek;
2722 >                    if (((s = lockState) & (WAITER|WRITER)) != 0) {
2723 >                        if (e.hash == h &&
2724 >                            ((ek = e.key) == k || (ek != null && k.equals(ek))))
2725 >                            return e;
2726 >                    }
2727 >                    else if (U.compareAndSwapInt(this, LOCKSTATE, s,
2728 >                                                 s + READER)) {
2729 >                        TreeNode<K,V> r, p;
2730 >                        try {
2731 >                            p = ((r = root) == null ? null :
2732 >                                 r.findTreeNode(h, k, null));
2733 >                        } finally {
2734 >                            Thread w;
2735 >                            if (U.getAndAddInt(this, LOCKSTATE, -READER) ==
2736 >                                (READER|WAITER) && (w = waiter) != null)
2737 >                                LockSupport.unpark(w);
2738 >                        }
2739 >                        return p;
2740 >                    }
2741 >                }
2742 >            }
2743 >            return null;
2744 >        }
2745 >
2746 >        /**
2747 >         * Finds or adds a node.
2748 >         * @return null if added
2749 >         */
2750 >        final TreeNode<K,V> putTreeVal(int h, K k, V v) {
2751 >            TreeNode<K,V> p;
2752 >            if ((p = root) == null) {
2753 >                first = root = new TreeNode<K,V>(h, k, v, null, null);
2754 >                return null;
2755 >            }
2756 >            Class<?> kc = null;
2757              for (;;) {
2758 <                if (nextTableIndex >= 0) {
2759 <                    if ((nextEntry = entryAt(currentTable,
2760 <                                             nextTableIndex--)) != null)
2761 <                        break;
2758 >                int dir, ph; K pk; TreeNode<K,V> q, pr;
2759 >                if ((ph = p.hash) > h)
2760 >                    dir = -1;
2761 >                else if (ph < h)
2762 >                    dir = 1;
2763 >                else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
2764 >                    return p;
2765 >                else if ((kc == null &&
2766 >                          (kc = comparableClassFor(k)) == null) ||
2767 >                         (dir = compareComparables(kc, k, pk)) == 0) {
2768 >                    if (p.left == null)
2769 >                        dir = 1;
2770 >                    else if ((pr = p.right) == null ||
2771 >                             (q = pr.findTreeNode(h, k, kc)) == null)
2772 >                        dir = -1;
2773 >                    else
2774 >                        return q;
2775                  }
2776 <                else if (nextSegmentIndex >= 0) {
2777 <                    Segment<K,V> seg = segmentAt(segments, nextSegmentIndex--);
2778 <                    if (seg != null && (currentTable = seg.table) != null)
2779 <                        nextTableIndex = currentTable.length - 1;
2776 >                TreeNode<K,V> xp = p;
2777 >                if ((p = (dir < 0) ? p.left : p.right) == null) {
2778 >                    TreeNode<K,V> x, f = first;
2779 >                    first = x = new TreeNode<K,V>(h, k, v, f, xp);
2780 >                    if (f != null)
2781 >                        f.prev = x;
2782 >                    if (dir < 0)
2783 >                        xp.left = x;
2784 >                    else
2785 >                        xp.right = x;
2786 >                    if (!xp.red)
2787 >                        x.red = true;
2788 >                    else {
2789 >                        lockRoot();
2790 >                        try {
2791 >                            root = balanceInsertion(root, x);
2792 >                        } finally {
2793 >                            unlockRoot();
2794 >                        }
2795 >                    }
2796 >                    //                    assert checkInvariants(root);
2797 >                    return null;
2798 >                }
2799 >            }
2800 >        }
2801 >
2802 >        /**
2803 >         * Removes the given node, which must be present before this
2804 >         * call.  This is messier than typical red-black deletion code
2805 >         * because we cannot swap the contents of an interior node
2806 >         * with a leaf successor that is pinned by "next" pointers
2807 >         * that are accessible independently of lock. So instead we
2808 >         * swap the tree linkages.
2809 >         *
2810 >         * @return true if now too small so should be untreeified.
2811 >         */
2812 >        final boolean removeTreeNode(TreeNode<K,V> p) {
2813 >            TreeNode<K,V> next = (TreeNode<K,V>)p.next;
2814 >            TreeNode<K,V> pred = p.prev;  // unlink traversal pointers
2815 >            TreeNode<K,V> r, rl;
2816 >            if (pred == null)
2817 >                first = next;
2818 >            else
2819 >                pred.next = next;
2820 >            if (next != null)
2821 >                next.prev = pred;
2822 >            if (first == null) {
2823 >                root = null;
2824 >                return true;
2825 >            }
2826 >            if ((r = root) == null || r.right == null ||
2827 >                (rl = r.left) == null || rl.left == null)
2828 >                return true;
2829 >            lockRoot();
2830 >            try {
2831 >                TreeNode<K,V> replacement;
2832 >                TreeNode<K,V> pl = p.left;
2833 >                TreeNode<K,V> pr = p.right;
2834 >                if (pl != null && pr != null) {
2835 >                    TreeNode<K,V> s = pr, sl;
2836 >                    while ((sl = s.left) != null) // find successor
2837 >                        s = sl;
2838 >                    boolean c = s.red; s.red = p.red; p.red = c; // swap colors
2839 >                    TreeNode<K,V> sr = s.right;
2840 >                    TreeNode<K,V> pp = p.parent;
2841 >                    if (s == pr) { // p was s's direct parent
2842 >                        p.parent = s;
2843 >                        s.right = p;
2844 >                    }
2845 >                    else {
2846 >                        TreeNode<K,V> sp = s.parent;
2847 >                        if ((p.parent = sp) != null) {
2848 >                            if (s == sp.left)
2849 >                                sp.left = p;
2850 >                            else
2851 >                                sp.right = p;
2852 >                        }
2853 >                        if ((s.right = pr) != null)
2854 >                            pr.parent = s;
2855 >                    }
2856 >                    p.left = null;
2857 >                    if ((p.right = sr) != null)
2858 >                        sr.parent = p;
2859 >                    if ((s.left = pl) != null)
2860 >                        pl.parent = s;
2861 >                    if ((s.parent = pp) == null)
2862 >                        r = s;
2863 >                    else if (p == pp.left)
2864 >                        pp.left = s;
2865 >                    else
2866 >                        pp.right = s;
2867 >                    if (sr != null)
2868 >                        replacement = sr;
2869 >                    else
2870 >                        replacement = p;
2871                  }
2872 +                else if (pl != null)
2873 +                    replacement = pl;
2874 +                else if (pr != null)
2875 +                    replacement = pr;
2876                  else
2877 <                    break;
2877 >                    replacement = p;
2878 >                if (replacement != p) {
2879 >                    TreeNode<K,V> pp = replacement.parent = p.parent;
2880 >                    if (pp == null)
2881 >                        r = replacement;
2882 >                    else if (p == pp.left)
2883 >                        pp.left = replacement;
2884 >                    else
2885 >                        pp.right = replacement;
2886 >                    p.left = p.right = p.parent = null;
2887 >                }
2888 >
2889 >                root = (p.red) ? r : balanceDeletion(r, replacement);
2890 >
2891 >                if (p == replacement) {  // detach pointers
2892 >                    TreeNode<K,V> pp;
2893 >                    if ((pp = p.parent) != null) {
2894 >                        if (p == pp.left)
2895 >                            pp.left = null;
2896 >                        else if (p == pp.right)
2897 >                            pp.right = null;
2898 >                        p.parent = null;
2899 >                    }
2900 >                }
2901 >            } finally {
2902 >                unlockRoot();
2903              }
2904 +            //            assert checkInvariants(root);
2905 +            return false;
2906          }
2907  
2908 <        final HashEntry<K,V> nextEntry() {
2909 <            HashEntry<K,V> e = nextEntry;
2910 <            if (e == null)
2911 <                throw new NoSuchElementException();
2912 <            lastReturned = e; // cannot assign until after null check
2913 <            if ((nextEntry = e.next) == null)
2914 <                advance();
2915 <            return e;
2908 >        /* ------------------------------------------------------------ */
2909 >        // Red-black tree methods, all adapted from CLR
2910 >
2911 >        static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
2912 >                                              TreeNode<K,V> p) {
2913 >            if (p != null) {
2914 >                TreeNode<K,V> r = p.right, pp, rl;
2915 >                if ((rl = p.right = r.left) != null)
2916 >                    rl.parent = p;
2917 >                if ((pp = r.parent = p.parent) == null)
2918 >                    (root = r).red = false;
2919 >                else if (pp.left == p)
2920 >                    pp.left = r;
2921 >                else
2922 >                    pp.right = r;
2923 >                r.left = p;
2924 >                p.parent = r;
2925 >            }
2926 >            return root;
2927          }
2928  
2929 <        public final boolean hasNext() { return nextEntry != null; }
2930 <        public final boolean hasMoreElements() { return nextEntry != null; }
2929 >        static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
2930 >                                               TreeNode<K,V> p) {
2931 >            if (p != null) {
2932 >                TreeNode<K,V> l = p.left, pp, lr;
2933 >                if ((lr = p.left = l.right) != null)
2934 >                    lr.parent = p;
2935 >                if ((pp = l.parent = p.parent) == null)
2936 >                    (root = l).red = false;
2937 >                else if (pp.right == p)
2938 >                    pp.right = l;
2939 >                else
2940 >                    pp.left = l;
2941 >                l.right = p;
2942 >                p.parent = l;
2943 >            }
2944 >            return root;
2945 >        }
2946 >
2947 >        static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
2948 >                                                    TreeNode<K,V> x) {
2949 >            x.red = true;
2950 >            for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
2951 >                if ((xp = x.parent) == null) {
2952 >                    x.red = false;
2953 >                    return x;
2954 >                }
2955 >                else if (!xp.red || (xpp = xp.parent) == null)
2956 >                    return root;
2957 >                if (xp == (xppl = xpp.left)) {
2958 >                    if ((xppr = xpp.right) != null && xppr.red) {
2959 >                        xppr.red = false;
2960 >                        xp.red = false;
2961 >                        xpp.red = true;
2962 >                        x = xpp;
2963 >                    }
2964 >                    else {
2965 >                        if (x == xp.right) {
2966 >                            root = rotateLeft(root, x = xp);
2967 >                            xpp = (xp = x.parent) == null ? null : xp.parent;
2968 >                        }
2969 >                        if (xp != null) {
2970 >                            xp.red = false;
2971 >                            if (xpp != null) {
2972 >                                xpp.red = true;
2973 >                                root = rotateRight(root, xpp);
2974 >                            }
2975 >                        }
2976 >                    }
2977 >                }
2978 >                else {
2979 >                    if (xppl != null && xppl.red) {
2980 >                        xppl.red = false;
2981 >                        xp.red = false;
2982 >                        xpp.red = true;
2983 >                        x = xpp;
2984 >                    }
2985 >                    else {
2986 >                        if (x == xp.left) {
2987 >                            root = rotateRight(root, x = xp);
2988 >                            xpp = (xp = x.parent) == null ? null : xp.parent;
2989 >                        }
2990 >                        if (xp != null) {
2991 >                            xp.red = false;
2992 >                            if (xpp != null) {
2993 >                                xpp.red = true;
2994 >                                root = rotateLeft(root, xpp);
2995 >                            }
2996 >                        }
2997 >                    }
2998 >                }
2999 >            }
3000 >        }
3001 >
3002 >        static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
3003 >                                                   TreeNode<K,V> x) {
3004 >            for (TreeNode<K,V> xp, xpl, xpr;;)  {
3005 >                if (x == null || x == root)
3006 >                    return root;
3007 >                else if ((xp = x.parent) == null) {
3008 >                    x.red = false;
3009 >                    return x;
3010 >                }
3011 >                else if (x.red) {
3012 >                    x.red = false;
3013 >                    return root;
3014 >                }
3015 >                else if ((xpl = xp.left) == x) {
3016 >                    if ((xpr = xp.right) != null && xpr.red) {
3017 >                        xpr.red = false;
3018 >                        xp.red = true;
3019 >                        root = rotateLeft(root, xp);
3020 >                        xpr = (xp = x.parent) == null ? null : xp.right;
3021 >                    }
3022 >                    if (xpr == null)
3023 >                        x = xp;
3024 >                    else {
3025 >                        TreeNode<K,V> sl = xpr.left, sr = xpr.right;
3026 >                        if ((sr == null || !sr.red) &&
3027 >                            (sl == null || !sl.red)) {
3028 >                            xpr.red = true;
3029 >                            x = xp;
3030 >                        }
3031 >                        else {
3032 >                            if (sr == null || !sr.red) {
3033 >                                if (sl != null)
3034 >                                    sl.red = false;
3035 >                                xpr.red = true;
3036 >                                root = rotateRight(root, xpr);
3037 >                                xpr = (xp = x.parent) == null ?
3038 >                                    null : xp.right;
3039 >                            }
3040 >                            if (xpr != null) {
3041 >                                xpr.red = (xp == null) ? false : xp.red;
3042 >                                if ((sr = xpr.right) != null)
3043 >                                    sr.red = false;
3044 >                            }
3045 >                            if (xp != null) {
3046 >                                xp.red = false;
3047 >                                root = rotateLeft(root, xp);
3048 >                            }
3049 >                            x = root;
3050 >                        }
3051 >                    }
3052 >                }
3053 >                else { // symmetric
3054 >                    if (xpl != null && xpl.red) {
3055 >                        xpl.red = false;
3056 >                        xp.red = true;
3057 >                        root = rotateRight(root, xp);
3058 >                        xpl = (xp = x.parent) == null ? null : xp.left;
3059 >                    }
3060 >                    if (xpl == null)
3061 >                        x = xp;
3062 >                    else {
3063 >                        TreeNode<K,V> sl = xpl.left, sr = xpl.right;
3064 >                        if ((sl == null || !sl.red) &&
3065 >                            (sr == null || !sr.red)) {
3066 >                            xpl.red = true;
3067 >                            x = xp;
3068 >                        }
3069 >                        else {
3070 >                            if (sl == null || !sl.red) {
3071 >                                if (sr != null)
3072 >                                    sr.red = false;
3073 >                                xpl.red = true;
3074 >                                root = rotateLeft(root, xpl);
3075 >                                xpl = (xp = x.parent) == null ?
3076 >                                    null : xp.left;
3077 >                            }
3078 >                            if (xpl != null) {
3079 >                                xpl.red = (xp == null) ? false : xp.red;
3080 >                                if ((sl = xpl.left) != null)
3081 >                                    sl.red = false;
3082 >                            }
3083 >                            if (xp != null) {
3084 >                                xp.red = false;
3085 >                                root = rotateRight(root, xp);
3086 >                            }
3087 >                            x = root;
3088 >                        }
3089 >                    }
3090 >                }
3091 >            }
3092 >        }
3093 >
3094 >        /**
3095 >         * Recursive invariant check
3096 >         */
3097 >        static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
3098 >            TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
3099 >                tb = t.prev, tn = (TreeNode<K,V>)t.next;
3100 >            if (tb != null && tb.next != t)
3101 >                return false;
3102 >            if (tn != null && tn.prev != t)
3103 >                return false;
3104 >            if (tp != null && t != tp.left && t != tp.right)
3105 >                return false;
3106 >            if (tl != null && (tl.parent != t || tl.hash > t.hash))
3107 >                return false;
3108 >            if (tr != null && (tr.parent != t || tr.hash < t.hash))
3109 >                return false;
3110 >            if (t.red && tl != null && tl.red && tr != null && tr.red)
3111 >                return false;
3112 >            if (tl != null && !checkInvariants(tl))
3113 >                return false;
3114 >            if (tr != null && !checkInvariants(tr))
3115 >                return false;
3116 >            return true;
3117 >        }
3118 >
3119 >        private static final sun.misc.Unsafe U;
3120 >        private static final long LOCKSTATE;
3121 >        static {
3122 >            try {
3123 >                U = sun.misc.Unsafe.getUnsafe();
3124 >                Class<?> k = TreeBin.class;
3125 >                LOCKSTATE = U.objectFieldOffset
3126 >                    (k.getDeclaredField("lockState"));
3127 >            } catch (Exception e) {
3128 >                throw new Error(e);
3129 >            }
3130 >        }
3131 >    }
3132 >
3133 >    /* ----------------Table Traversal -------------- */
3134 >
3135 >    /**
3136 >     * Encapsulates traversal for methods such as containsValue; also
3137 >     * serves as a base class for other iterators and spliterators.
3138 >     *
3139 >     * Method advance visits each still-valid node that was reachable
3140 >     * upon iterator construction exactly once. It might miss some that
3141 >     * were added to a bin after the bin was visited, which is OK wrt
3142 >     * consistency guarantees. Maintaining this property in the face
3143 >     * of possible ongoing resizes requires a fair amount of
3144 >     * bookkeeping state that is difficult to optimize away amidst
3145 >     * volatile accesses.  Even so, traversal maintains reasonable
3146 >     * throughput.
3147 >     *
3148 >     * Normally, iteration proceeds bin-by-bin traversing lists.
3149 >     * However, if the table has been resized, then all future steps
3150 >     * must traverse both the bin at the current index and the bin at
3151 >     * (index + baseSize); and so on for further resizings. To
3152 >     * paranoically cope with potential sharing by users of iterators
3153 >     * across threads, iteration terminates if a bounds check fails
3154 >     * for a table read.
3155 >     */
3156 >    static class Traverser<K,V> {
3157 >        Node<K,V>[] tab;        // current table; updated if resized
3158 >        Node<K,V> next;         // the next entry to use
3159 >        int index;              // index of bin to use next
3160 >        int baseIndex;          // current index of initial table
3161 >        int baseLimit;          // index bound for initial table
3162 >        final int baseSize;     // initial table size
3163 >
3164 >        Traverser(Node<K,V>[] tab, int size, int index, int limit) {
3165 >            this.tab = tab;
3166 >            this.baseSize = size;
3167 >            this.baseIndex = this.index = index;
3168 >            this.baseLimit = limit;
3169 >            this.next = null;
3170 >        }
3171 >
3172 >        /**
3173 >         * Advances if possible, returning next valid node, or null if none.
3174 >         */
3175 >        final Node<K,V> advance() {
3176 >            Node<K,V> e;
3177 >            if ((e = next) != null)
3178 >                e = e.next;
3179 >            for (;;) {
3180 >                Node<K,V>[] t; int i, n; K ek;  // must use locals in checks
3181 >                if (e != null)
3182 >                    return next = e;
3183 >                if (baseIndex >= baseLimit || (t = tab) == null ||
3184 >                    (n = t.length) <= (i = index) || i < 0)
3185 >                    return next = null;
3186 >                if ((e = tabAt(t, index)) != null && e.key == null) {
3187 >                    if (e instanceof ForwardingNode) {
3188 >                        tab = ((ForwardingNode<K,V>)e).nextTable;
3189 >                        e = null;
3190 >                        continue;
3191 >                    }
3192 >                    else if (e instanceof TreeBin)
3193 >                        e = ((TreeBin<K,V>)e).first;
3194 >                    else
3195 >                        e = null;
3196 >                }
3197 >                if ((index += baseSize) >= n)
3198 >                    index = ++baseIndex;    // visit upper slots if present
3199 >            }
3200 >        }
3201 >    }
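A worked example (ours) of the index arithmetic in advance(): a resize splits old bin i into new bins i and i + oldLength, so a traverser started over a base of size 4 against a table doubled to length 8 must also visit the upper slots.

class TraverserIndexDemo {
    public static void main(String[] args) {
        int baseSize = 4, resizedLength = 8;    // table doubled once mid-traversal
        for (int baseIndex = 0; baseIndex < baseSize; ++baseIndex) {
            StringBuilder sb = new StringBuilder("base bin " + baseIndex + " -> bins");
            for (int i = baseIndex; i < resizedLength; i += baseSize)
                sb.append(' ').append(i);       // mirrors "index += baseSize"
            System.out.println(sb);             // e.g. "base bin 1 -> bins 1 5"
        }
    }
}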
3202 >
3203 >    /**
3204 >     * Base of key, value, and entry Iterators. Adds fields to
3205 >     * Traverser to support iterator.remove
3206 >     */
3207 >    static class BaseIterator<K,V> extends Traverser<K,V> {
3208 >        final ConcurrentHashMap<K,V> map;
3209 >        Node<K,V> lastReturned;
3210 >        BaseIterator(Node<K,V>[] tab, int size, int index, int limit,
3211 >                    ConcurrentHashMap<K,V> map) {
3212 >            super(tab, size, index, limit);
3213 >            this.map = map;
3214 >            advance();
3215 >        }
3216 >
3217 >        public final boolean hasNext() { return next != null; }
3218 >        public final boolean hasMoreElements() { return next != null; }
3219  
3220          public final void remove() {
3221 <            if (lastReturned == null)
3221 >            Node<K,V> p;
3222 >            if ((p = lastReturned) == null)
3223                  throw new IllegalStateException();
1271            ConcurrentHashMap.this.remove(lastReturned.key);
3224              lastReturned = null;
3225 +            map.replaceNode(p.key, null, null);
3226          }
3227      }
3228  
3229 <    final class KeyIterator
3230 <        extends HashIterator
3231 <        implements Iterator<K>, Enumeration<K>
3232 <    {
3233 <        public final K next()        { return super.nextEntry().key; }
3234 <        public final K nextElement() { return super.nextEntry().key; }
3235 <    }
3236 <
3237 <    final class ValueIterator
3238 <        extends HashIterator
3239 <        implements Iterator<V>, Enumeration<V>
3240 <    {
3241 <        public final V next()        { return super.nextEntry().value; }
3242 <        public final V nextElement() { return super.nextEntry().value; }
3229 >    static final class KeyIterator<K,V> extends BaseIterator<K,V>
3230 >        implements Iterator<K>, Enumeration<K> {
3231 >        KeyIterator(Node<K,V>[] tab, int index, int size, int limit,
3232 >                    ConcurrentHashMap<K,V> map) {
3233 >            super(tab, index, size, limit, map);
3234 >        }
3235 >
3236 >        public final K next() {
3237 >            Node<K,V> p;
3238 >            if ((p = next) == null)
3239 >                throw new NoSuchElementException();
3240 >            K k = p.key;
3241 >            lastReturned = p;
3242 >            advance();
3243 >            return k;
3244 >        }
3245 >
3246 >        public final K nextElement() { return next(); }
3247 >    }
3248 >
3249 >    static final class ValueIterator<K,V> extends BaseIterator<K,V>
3250 >        implements Iterator<V>, Enumeration<V> {
3251 >        ValueIterator(Node<K,V>[] tab, int index, int size, int limit,
3252 >                      ConcurrentHashMap<K,V> map) {
3253 >            super(tab, index, size, limit, map);
3254 >        }
3255 >
3256 >        public final V next() {
3257 >            Node<K,V> p;
3258 >            if ((p = next) == null)
3259 >                throw new NoSuchElementException();
3260 >            V v = p.val;
3261 >            lastReturned = p;
3262 >            advance();
3263 >            return v;
3264 >        }
3265 >
3266 >        public final V nextElement() { return next(); }
3267 >    }
3268 >
3269 >    static final class EntryIterator<K,V> extends BaseIterator<K,V>
3270 >        implements Iterator<Map.Entry<K,V>> {
3271 >        EntryIterator(Node<K,V>[] tab, int index, int size, int limit,
3272 >                      ConcurrentHashMap<K,V> map) {
3273 >            super(tab, index, size, limit, map);
3274 >        }
3275 >
3276 >        public final Map.Entry<K,V> next() {
3277 >            Node<K,V> p;
3278 >            if ((p = next) == null)
3279 >                throw new NoSuchElementException();
3280 >            K k = p.key;
3281 >            V v = p.val;
3282 >            lastReturned = p;
3283 >            advance();
3284 >            return new MapEntry<K,V>(k, v, map);
3285 >        }
3286      }
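A usage sketch (ours) of the iterators above: they are weakly consistent, never throw ConcurrentModificationException, and remove() writes through to the map via replaceNode.

import java.util.Iterator;
import java.util.concurrent.ConcurrentHashMap;

class IteratorRemoveDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> m = new ConcurrentHashMap<>();
        m.put("keep", 1); m.put("tmp-a", 2); m.put("tmp-b", 3);
        for (Iterator<String> it = m.keySet().iterator(); it.hasNext(); ) {
            if (it.next().startsWith("tmp-"))
                it.remove();                    // removes the mapping from m
        }
        System.out.println(m);                  // {keep=1}
    }
}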
3287  
3288      /**
3289 <     * Custom Entry class used by EntryIterator.next(), that relays
3290 <     * setValue changes to the underlying map.
3291 <     */
3292 <    final class WriteThroughEntry
3293 <        extends AbstractMap.SimpleEntry<K,V>
3294 <    {
3295 <        WriteThroughEntry(K k, V v) {
3296 <            super(k,v);
3289 >     * Exported Entry for EntryIterator
3290 >     */
3291 >    static final class MapEntry<K,V> implements Map.Entry<K,V> {
3292 >        final K key; // non-null
3293 >        V val;       // non-null
3294 >        final ConcurrentHashMap<K,V> map;
3295 >        MapEntry(K key, V val, ConcurrentHashMap<K,V> map) {
3296 >            this.key = key;
3297 >            this.val = val;
3298 >            this.map = map;
3299 >        }
3300 >        public K getKey()        { return key; }
3301 >        public V getValue()      { return val; }
3302 >        public int hashCode()    { return key.hashCode() ^ val.hashCode(); }
3303 >        public String toString() { return key + "=" + val; }
3304 >
3305 >        public boolean equals(Object o) {
3306 >            Object k, v; Map.Entry<?,?> e;
3307 >            return ((o instanceof Map.Entry) &&
3308 >                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3309 >                    (v = e.getValue()) != null &&
3310 >                    (k == key || k.equals(key)) &&
3311 >                    (v == val || v.equals(val)));
3312          }
3313  
3314          /**
3315           * Sets our entry's value and writes through to the map. The
3316 <         * value to return is somewhat arbitrary here. Since a
3317 <         * WriteThroughEntry does not necessarily track asynchronous
3318 <         * changes, the most recent "previous" value could be
3319 <         * different from what we return (or could even have been
3320 <         * removed in which case the put will re-establish). We do not
1310 <         * and cannot guarantee more.
3316 >         * value to return is somewhat arbitrary here. Since we do not
3317 >         * necessarily track asynchronous changes, the most recent
3318 >         * "previous" value could be different from what we return (or
3319 >         * could even have been removed, in which case the put will
3320 >         * re-establish). We do not and cannot guarantee more.
3321           */
3322          public V setValue(V value) {
3323              if (value == null) throw new NullPointerException();
3324 <            V v = super.setValue(value);
3325 <            ConcurrentHashMap.this.put(getKey(), value);
3324 >            V v = val;
3325 >            val = value;
3326 >            map.put(key, value);
3327              return v;
3328          }
3329      }
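A usage sketch (ours) of the write-through behavior documented above: entries handed out during entrySet() iteration update the map on setValue, with the caveats about concurrent changes noted in the javadoc.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class WriteThroughDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> m = new ConcurrentHashMap<>();
        m.put("a", 1); m.put("b", 2);
        for (Map.Entry<String, Integer> e : m.entrySet())
            e.setValue(e.getValue() + 1);       // writes through via map.put
        System.out.println(m);                  // {a=2, b=3}
    }
}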
3330  
3331 <    final class EntryIterator
3332 <        extends HashIterator
3333 <        implements Iterator<Entry<K,V>>
3334 <    {
3335 <        public Map.Entry<K,V> next() {
3336 <            HashEntry<K,V> e = super.nextEntry();
3337 <            return new WriteThroughEntry(e.key, e.value);
3331 >    static final class KeySpliterator<K,V> extends Traverser<K,V>
3332 >        implements Spliterator<K> {
3333 >        long est;               // size estimate
3334 >        KeySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3335 >                       long est) {
3336 >            super(tab, size, index, limit);
3337 >            this.est = est;
3338 >        }
3339 >
3340 >        public Spliterator<K> trySplit() {
3341 >            int i, f, h;
3342 >            return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3343 >                new KeySpliterator<K,V>(tab, baseSize, baseLimit = h,
3344 >                                        f, est >>>= 1);
3345 >        }
3346 >
3347 >        public void forEachRemaining(Consumer<? super K> action) {
3348 >            if (action == null) throw new NullPointerException();
3349 >            for (Node<K,V> p; (p = advance()) != null;)
3350 >                action.accept(p.key);
3351 >        }
3352 >
3353 >        public boolean tryAdvance(Consumer<? super K> action) {
3354 >            if (action == null) throw new NullPointerException();
3355 >            Node<K,V> p;
3356 >            if ((p = advance()) == null)
3357 >                return false;
3358 >            action.accept(p.key);
3359 >            return true;
3360 >        }
3361 >
3362 >        public long estimateSize() { return est; }
3363 >
3364 >        public int characteristics() {
3365 >            return Spliterator.DISTINCT | Spliterator.CONCURRENT |
3366 >                Spliterator.NONNULL;
3367          }
3368      }
3369  
3370 <    final class KeySet extends AbstractSet<K> {
3371 <        public Iterator<K> iterator() {
3372 <            return new KeyIterator();
3370 >    static final class ValueSpliterator<K,V> extends Traverser<K,V>
3371 >        implements Spliterator<V> {
3372 >        long est;               // size estimate
3373 >        ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit,
3374 >                         long est) {
3375 >            super(tab, size, index, limit);
3376 >            this.est = est;
3377          }
3378 <        public int size() {
3379 <            return ConcurrentHashMap.this.size();
3378 >
3379 >        public Spliterator<V> trySplit() {
3380 >            int i, f, h;
3381 >            return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3382 >                new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h,
3383 >                                          f, est >>>= 1);
3384          }
3385 <        public boolean isEmpty() {
3386 <            return ConcurrentHashMap.this.isEmpty();
3385 >
3386 >        public void forEachRemaining(Consumer<? super V> action) {
3387 >            if (action == null) throw new NullPointerException();
3388 >            for (Node<K,V> p; (p = advance()) != null;)
3389 >                action.accept(p.val);
3390          }
3391 <        public boolean contains(Object o) {
3392 <            return ConcurrentHashMap.this.containsKey(o);
3391 >
3392 >        public boolean tryAdvance(Consumer<? super V> action) {
3393 >            if (action == null) throw new NullPointerException();
3394 >            Node<K,V> p;
3395 >            if ((p = advance()) == null)
3396 >                return false;
3397 >            action.accept(p.val);
3398 >            return true;
3399          }
3400 <        public boolean remove(Object o) {
3401 <            return ConcurrentHashMap.this.remove(o) != null;
3400 >
3401 >        public long estimateSize() { return est; }
3402 >
3403 >        public int characteristics() {
3404 >            return Spliterator.CONCURRENT | Spliterator.NONNULL;
3405 >        }
3406 >    }
3407 >
3408 >    static final class EntrySpliterator<K,V> extends Traverser<K,V>
3409 >        implements Spliterator<Map.Entry<K,V>> {
3410 >        final ConcurrentHashMap<K,V> map; // To export MapEntry
3411 >        long est;               // size estimate
3412 >        EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3413 >                         long est, ConcurrentHashMap<K,V> map) {
3414 >            super(tab, size, index, limit);
3415 >            this.map = map;
3416 >            this.est = est;
3417 >        }
3418 >
3419 >        public Spliterator<Map.Entry<K,V>> trySplit() {
3420 >            int i, f, h;
3421 >            return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3422 >                new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h,
3423 >                                          f, est >>>= 1, map);
3424          }
3425 <        public void clear() {
3426 <            ConcurrentHashMap.this.clear();
3425 >
3426 >        public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
3427 >            if (action == null) throw new NullPointerException();
3428 >            for (Node<K,V> p; (p = advance()) != null; )
3429 >                action.accept(new MapEntry<K,V>(p.key, p.val, map));
3430 >        }
3431 >
3432 >        public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
3433 >            if (action == null) throw new NullPointerException();
3434 >            Node<K,V> p;
3435 >            if ((p = advance()) == null)
3436 >                return false;
3437 >            action.accept(new MapEntry<K,V>(p.key, p.val, map));
3438 >            return true;
3439          }
3440 +
3441 +        public long estimateSize() { return est; }
3442 +
3443 +        public int characteristics() {
3444 +            return Spliterator.DISTINCT | Spliterator.CONCURRENT |
3445 +                Spliterator.NONNULL;
3446 +        }
3447 +    }
3448 +
3449 +    // Parallel bulk operations
3450 +
3451 +    /**
3452 +     * Computes initial batch value for bulk tasks. The returned value
3453 +     * is approximately exp2 of the number of times (minus one) to
3454 +     * split task by two before executing leaf action. This value is
3455 +     * faster to compute and more convenient to use as a guide to
3456 +     * splitting than is the depth, since it is used while dividing by
3457 +     * two anyway.
3458 +     */
3459 +    final int batchFor(long b) {
3460 +        long n;
3461 +        if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
3462 +            return 0;
3463 +        int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
3464 +        return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
3465 +    }
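A worked example (ours) of the formula above, restated as a pure function of the live count and pool size: with n of about 1,000,000 mappings, threshold b = 10,000 and common-pool parallelism 8, n/b = 100 is capped at the slack bound sp = 32.

class BatchForDemo {
    // same arithmetic as batchFor, with sumCount() and the pool size as inputs
    static int batchFor(long b, long n, int parallelism) {
        if (b == Long.MAX_VALUE || n <= 1L || n < b)
            return 0;
        int sp = parallelism << 2;               // slack of 4
        return (b <= 0L || (n /= b) >= sp) ? sp : (int) n;
    }

    public static void main(String[] args) {
        System.out.println(batchFor(10_000L, 1_000_000L, 8));        // 32
        System.out.println(batchFor(Long.MAX_VALUE, 1_000_000L, 8)); // 0: sequential
    }
}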
3466 +
3467 +    /**
3468 +     * Performs the given action for each (key, value).
3469 +     *
3470 +     * @param parallelismThreshold the (estimated) number of elements
3471 +     * needed for this operation to be executed in parallel
3472 +     * @param action the action
3473 +     * @since 1.8
3474 +     */
3475 +    public void forEach(long parallelismThreshold,
3476 +                        BiConsumer<? super K,? super V> action) {
3477 +        if (action == null) throw new NullPointerException();
3478 +        new ForEachMappingTask<K,V>
3479 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3480 +             action).invoke();
3481 +    }
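A usage sketch (ours): the threshold is a size estimate below which the operation runs sequentially; Long.MAX_VALUE forces sequential execution and 1 splits maximally.

import java.util.concurrent.ConcurrentHashMap;

class ForEachDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> scores = new ConcurrentHashMap<>();
        scores.put("a", 1); scores.put("b", 2);
        // goes parallel only once the map is estimated to hold >= 500 entries
        scores.forEach(500L, (k, v) -> System.out.println(k + " -> " + v));
    }
}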
3482 +
3483 +    /**
3484 +     * Performs the given action for each non-null transformation
3485 +     * of each (key, value).
3486 +     *
3487 +     * @param parallelismThreshold the (estimated) number of elements
3488 +     * needed for this operation to be executed in parallel
3489 +     * @param transformer a function returning the transformation
3490 +     * for an element, or null if there is no transformation (in
3491 +     * which case the action is not applied)
3492 +     * @param action the action
3493 +     * @since 1.8
3494 +     */
3495 +    public <U> void forEach(long parallelismThreshold,
3496 +                            BiFunction<? super K, ? super V, ? extends U> transformer,
3497 +                            Consumer<? super U> action) {
3498 +        if (transformer == null || action == null)
3499 +            throw new NullPointerException();
3500 +        new ForEachTransformedMappingTask<K,V,U>
3501 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3502 +             transformer, action).invoke();
3503 +    }
3504 +
3505 +    /**
3506 +     * Returns a non-null result from applying the given search
3507 +     * function on each (key, value), or null if none.  Upon
3508 +     * success, further element processing is suppressed and the
3509 +     * results of any other parallel invocations of the search
3510 +     * function are ignored.
3511 +     *
3512 +     * @param parallelismThreshold the (estimated) number of elements
3513 +     * needed for this operation to be executed in parallel
3514 +     * @param searchFunction a function returning a non-null
3515 +     * result on success, else null
3516 +     * @return a non-null result from applying the given search
3517 +     * function on each (key, value), or null if none
3518 +     * @since 1.8
3519 +     */
3520 +    public <U> U search(long parallelismThreshold,
3521 +                        BiFunction<? super K, ? super V, ? extends U> searchFunction) {
3522 +        if (searchFunction == null) throw new NullPointerException();
3523 +        return new SearchMappingsTask<K,V,U>
3524 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3525 +             searchFunction, new AtomicReference<U>()).invoke();
3526 +    }
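A usage sketch (ours): the search function returns null to keep looking; the first non-null result suppresses further processing, and which match wins under contention is unspecified.

import java.util.concurrent.ConcurrentHashMap;

class SearchDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> scores = new ConcurrentHashMap<>();
        scores.put("a", 1); scores.put("b", 7);
        String hit = scores.search(500L, (k, v) -> v > 5 ? k : null);
        System.out.println(hit);                // "b" (null if nothing matched)
    }
}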
3527 +
3528 +    /**
3529 +     * Returns the result of accumulating the given transformation
3530 +     * of all (key, value) pairs using the given reducer to
3531 +     * combine values, or null if none.
3532 +     *
3533 +     * @param parallelismThreshold the (estimated) number of elements
3534 +     * needed for this operation to be executed in parallel
3535 +     * @param transformer a function returning the transformation
3536 +     * for an element, or null if there is no transformation (in
3537 +     * which case it is not combined)
3538 +     * @param reducer a commutative associative combining function
3539 +     * @return the result of accumulating the given transformation
3540 +     * of all (key, value) pairs
3541 +     * @since 1.8
3542 +     */
3543 +    public <U> U reduce(long parallelismThreshold,
3544 +                        BiFunction<? super K, ? super V, ? extends U> transformer,
3545 +                        BiFunction<? super U, ? super U, ? extends U> reducer) {
3546 +        if (transformer == null || reducer == null)
3547 +            throw new NullPointerException();
3548 +        return new MapReduceMappingsTask<K,V,U>
3549 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3550 +             null, transformer, reducer).invoke();
3551 +    }
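A usage sketch (ours): the transformer may return null to skip an element, and the reducer must be commutative and associative since inputs arrive in no fixed order.

import java.util.concurrent.ConcurrentHashMap;

class ReduceDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> scores = new ConcurrentHashMap<>();
        scores.put("a", 1); scores.put("bb", 2);
        Integer totalKeyLen = scores.reduce(500L,
            (k, v) -> v > 0 ? k.length() : null,    // null results are skipped
            Integer::sum);                           // commutative, associative
        System.out.println(totalKeyLen);             // 3 (null for an empty map)
    }
}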
3552 +
3553 +    /**
3554 +     * Returns the result of accumulating the given transformation
3555 +     * of all (key, value) pairs using the given reducer to
3556 +     * combine values, and the given basis as an identity value.
3557 +     *
3558 +     * @param parallelismThreshold the (estimated) number of elements
3559 +     * needed for this operation to be executed in parallel
3560 +     * @param transformer a function returning the transformation
3561 +     * for an element
3562 +     * @param basis the identity (initial default value) for the reduction
3563 +     * @param reducer a commutative associative combining function
3564 +     * @return the result of accumulating the given transformation
3565 +     * of all (key, value) pairs
3566 +     * @since 1.8
3567 +     */
3568 +    public double reduceToDouble(long parallelismThreshold,
3569 +                                   ToDoubleBiFunction<? super K, ? super V> transformer,
3570 +                                   double basis,
3571 +                                   DoubleBinaryOperator reducer) {
3572 +        if (transformer == null || reducer == null)
3573 +            throw new NullPointerException();
3574 +        return new MapReduceMappingsToDoubleTask<K,V>
3575 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3576 +             null, transformer, basis, reducer).invoke();
3577 +    }
3578 +
3579 +    /**
3580 +     * Returns the result of accumulating the given transformation
3581 +     * of all (key, value) pairs using the given reducer to
3582 +     * combine values, and the given basis as an identity value.
3583 +     *
3584 +     * @param parallelismThreshold the (estimated) number of elements
3585 +     * needed for this operation to be executed in parallel
3586 +     * @param transformer a function returning the transformation
3587 +     * for an element
3588 +     * @param basis the identity (initial default value) for the reduction
3589 +     * @param reducer a commutative associative combining function
3590 +     * @return the result of accumulating the given transformation
3591 +     * of all (key, value) pairs
3592 +     * @since 1.8
3593 +     */
3594 +    public long reduceToLong(long parallelismThreshold,
3595 +                             ToLongBiFunction<? super K, ? super V> transformer,
3596 +                             long basis,
3597 +                             LongBinaryOperator reducer) {
3598 +        if (transformer == null || reducer == null)
3599 +            throw new NullPointerException();
3600 +        return new MapReduceMappingsToLongTask<K,V>
3601 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3602 +             null, transformer, basis, reducer).invoke();
3603 +    }
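A usage sketch (ours) for the primitive specializations: the basis is the identity of the reduction (0 for a sum, Long.MIN_VALUE for a max, and so on).

import java.util.concurrent.ConcurrentHashMap;

class ReduceToLongDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> scores = new ConcurrentHashMap<>();
        scores.put("a", 1); scores.put("b", 2);
        long sum = scores.reduceToLong(500L, (k, v) -> v.longValue(), 0L, Long::sum);
        System.out.println(sum);                // 3
    }
}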
3604 +
3605 +    /**
3606 +     * Returns the result of accumulating the given transformation
3607 +     * of all (key, value) pairs using the given reducer to
3608 +     * combine values, and the given basis as an identity value.
3609 +     *
3610 +     * @param parallelismThreshold the (estimated) number of elements
3611 +     * needed for this operation to be executed in parallel
3612 +     * @param transformer a function returning the transformation
3613 +     * for an element
3614 +     * @param basis the identity (initial default value) for the reduction
3615 +     * @param reducer a commutative associative combining function
3616 +     * @return the result of accumulating the given transformation
3617 +     * of all (key, value) pairs
3618 +     * @since 1.8
3619 +     */
3620 +    public int reduceToInt(long parallelismThreshold,
3621 +                           ToIntBiFunction<? super K, ? super V> transformer,
3622 +                           int basis,
3623 +                           IntBinaryOperator reducer) {
3624 +        if (transformer == null || reducer == null)
3625 +            throw new NullPointerException();
3626 +        return new MapReduceMappingsToIntTask<K,V>
3627 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3628 +             null, transformer, basis, reducer).invoke();
3629 +    }
3630 +
3631 +    /**
3632 +     * Performs the given action for each key.
3633 +     *
3634 +     * @param parallelismThreshold the (estimated) number of elements
3635 +     * needed for this operation to be executed in parallel
3636 +     * @param action the action
3637 +     * @since 1.8
3638 +     */
3639 +    public void forEachKey(long parallelismThreshold,
3640 +                           Consumer<? super K> action) {
3641 +        if (action == null) throw new NullPointerException();
3642 +        new ForEachKeyTask<K,V>
3643 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3644 +             action).invoke();
3645 +    }
3646 +
3647 +    /**
3648 +     * Performs the given action for each non-null transformation
3649 +     * of each key.
3650 +     *
3651 +     * @param parallelismThreshold the (estimated) number of elements
3652 +     * needed for this operation to be executed in parallel
3653 +     * @param transformer a function returning the transformation
3654 +     * for an element, or null if there is no transformation (in
3655 +     * which case the action is not applied)
3656 +     * @param action the action
3657 +     * @since 1.8
3658 +     */
3659 +    public <U> void forEachKey(long parallelismThreshold,
3660 +                               Function<? super K, ? extends U> transformer,
3661 +                               Consumer<? super U> action) {
3662 +        if (transformer == null || action == null)
3663 +            throw new NullPointerException();
3664 +        new ForEachTransformedKeyTask<K,V,U>
3665 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3666 +             transformer, action).invoke();
3667 +    }
3668 +
3669 +    /**
3670 +     * Returns a non-null result from applying the given search
3671 +     * function on each key, or null if none. Upon success,
3672 +     * further element processing is suppressed and the results of
3673 +     * any other parallel invocations of the search function are
3674 +     * ignored.
3675 +     *
3676 +     * @param parallelismThreshold the (estimated) number of elements
3677 +     * needed for this operation to be executed in parallel
3678 +     * @param searchFunction a function returning a non-null
3679 +     * result on success, else null
3680 +     * @return a non-null result from applying the given search
3681 +     * function on each key, or null if none
3682 +     * @since 1.8
3683 +     */
3684 +    public <U> U searchKeys(long parallelismThreshold,
3685 +                            Function<? super K, ? extends U> searchFunction) {
3686 +        if (searchFunction == null) throw new NullPointerException();
3687 +        return new SearchKeysTask<K,V,U>
3688 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3689 +             searchFunction, new AtomicReference<U>()).invoke();
3690 +    }
3691 +
3692 +    /**
3693 +     * Returns the result of accumulating all keys using the given
3694 +     * reducer to combine values, or null if none.
3695 +     *
3696 +     * @param parallelismThreshold the (estimated) number of elements
3697 +     * needed for this operation to be executed in parallel
3698 +     * @param reducer a commutative associative combining function
3699 +     * @return the result of accumulating all keys using the given
3700 +     * reducer to combine values, or null if none
3701 +     * @since 1.8
3702 +     */
3703 +    public K reduceKeys(long parallelismThreshold,
3704 +                        BiFunction<? super K, ? super K, ? extends K> reducer) {
3705 +        if (reducer == null) throw new NullPointerException();
3706 +        return new ReduceKeysTask<K,V>
3707 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3708 +             null, reducer).invoke();
3709 +    }
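
Since subranges are combined in no fixed order, the reducer really must
be commutative as well as associative. A sketch (names invented) that
finds the largest key in natural order:

    ConcurrentHashMap<String,Integer> scores = new ConcurrentHashMap<>();
    String maxKey = scores.reduceKeys(500L,
                                      (a, b) -> a.compareTo(b) >= 0 ? a : b);
    // maxKey is null if and only if the map is empty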
3710 +
3711 +    /**
3712 +     * Returns the result of accumulating the given transformation
3713 +     * of all keys using the given reducer to combine values, or
3714 +     * null if none.
3715 +     *
3716 +     * @param parallelismThreshold the (estimated) number of elements
3717 +     * needed for this operation to be executed in parallel
3718 +     * @param transformer a function returning the transformation
3719 +     * for an element, or null if there is no transformation (in
3720 +     * which case it is not combined)
3721 +     * @param reducer a commutative associative combining function
3722 +     * @return the result of accumulating the given transformation
3723 +     * of all keys
3724 +     * @since 1.8
3725 +     */
3726 +    public <U> U reduceKeys(long parallelismThreshold,
3727 +                            Function<? super K, ? extends U> transformer,
3728 +                            BiFunction<? super U, ? super U, ? extends U> reducer) {
3729 +        if (transformer == null || reducer == null)
3730 +            throw new NullPointerException();
3731 +        return new MapReduceKeysTask<K,V,U>
3732 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3733 +             null, transformer, reducer).invoke();
3734 +    }
3735 +
3736 +    /**
3737 +     * Returns the result of accumulating the given transformation
3738 +     * of all keys using the given reducer to combine values, and
3739 +     * the given basis as an identity value.
3740 +     *
3741 +     * @param parallelismThreshold the (estimated) number of elements
3742 +     * needed for this operation to be executed in parallel
3743 +     * @param transformer a function returning the transformation
3744 +     * for an element
3745 +     * @param basis the identity (initial default value) for the reduction
3746 +     * @param reducer a commutative associative combining function
3747 +     * @return the result of accumulating the given transformation
3748 +     * of all keys
3749 +     * @since 1.8
3750 +     */
3751 +    public double reduceKeysToDouble(long parallelismThreshold,
3752 +                                     ToDoubleFunction<? super K> transformer,
3753 +                                     double basis,
3754 +                                     DoubleBinaryOperator reducer) {
3755 +        if (transformer == null || reducer == null)
3756 +            throw new NullPointerException();
3757 +        return new MapReduceKeysToDoubleTask<K,V>
3758 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3759 +             null, transformer, basis, reducer).invoke();
3760 +    }
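
The basis acts as the identity: it seeds every subtask and is the
result for an empty map. A sketch (names invented) summing key lengths
as doubles:

    ConcurrentHashMap<String,Double> weights = new ConcurrentHashMap<>();
    double total = weights.reduceKeysToDouble(500L,
                                              k -> (double) k.length(),
                                              0.0,          // identity
                                              Double::sum);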
3761 +
3762 +    /**
3763 +     * Returns the result of accumulating the given transformation
3764 +     * of all keys using the given reducer to combine values, and
3765 +     * the given basis as an identity value.
3766 +     *
3767 +     * @param parallelismThreshold the (estimated) number of elements
3768 +     * needed for this operation to be executed in parallel
3769 +     * @param transformer a function returning the transformation
3770 +     * for an element
3771 +     * @param basis the identity (initial default value) for the reduction
3772 +     * @param reducer a commutative associative combining function
3773 +     * @return the result of accumulating the given transformation
3774 +     * of all keys
3775 +     * @since 1.8
3776 +     */
3777 +    public long reduceKeysToLong(long parallelismThreshold,
3778 +                                 ToLongFunction<? super K> transformer,
3779 +                                 long basis,
3780 +                                 LongBinaryOperator reducer) {
3781 +        if (transformer == null || reducer == null)
3782 +            throw new NullPointerException();
3783 +        return new MapReduceKeysToLongTask<K,V>
3784 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3785 +             null, transformer, basis, reducer).invoke();
3786 +    }
3787 +
3788 +    /**
3789 +     * Returns the result of accumulating the given transformation
3790 +     * of all keys using the given reducer to combine values, and
3791 +     * the given basis as an identity value.
3792 +     *
3793 +     * @param parallelismThreshold the (estimated) number of elements
3794 +     * needed for this operation to be executed in parallel
3795 +     * @param transformer a function returning the transformation
3796 +     * for an element
3797 +     * @param basis the identity (initial default value) for the reduction
3798 +     * @param reducer a commutative associative combining function
3799 +     * @return the result of accumulating the given transformation
3800 +     * of all keys
3801 +     * @since 1.8
3802 +     */
3803 +    public int reduceKeysToInt(long parallelismThreshold,
3804 +                               ToIntFunction<? super K> transformer,
3805 +                               int basis,
3806 +                               IntBinaryOperator reducer) {
3807 +        if (transformer == null || reducer == null)
3808 +            throw new NullPointerException();
3809 +        return new MapReduceKeysToIntTask<K,V>
3810 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3811 +             null, transformer, basis, reducer).invoke();
3812 +    }
3813 +
3814 +    /**
3815 +     * Performs the given action for each value.
3816 +     *
3817 +     * @param parallelismThreshold the (estimated) number of elements
3818 +     * needed for this operation to be executed in parallel
3819 +     * @param action the action
3820 +     * @since 1.8
3821 +     */
3822 +    public void forEachValue(long parallelismThreshold,
3823 +                             Consumer<? super V> action) {
3824 +        if (action == null)
3825 +            throw new NullPointerException();
3826 +        new ForEachValueTask<K,V>
3827 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3828 +             action).invoke();
3829 +    }
3830 +
3831 +    /**
3832 +     * Performs the given action for each non-null transformation
3833 +     * of each value.
3834 +     *
3835 +     * @param parallelismThreshold the (estimated) number of elements
3836 +     * needed for this operation to be executed in parallel
3837 +     * @param transformer a function returning the transformation
3838 +     * for an element, or null if there is no transformation (in
3839 +     * which case the action is not applied)
3840 +     * @param action the action
3841 +     * @since 1.8
3842 +     */
3843 +    public <U> void forEachValue(long parallelismThreshold,
3844 +                                 Function<? super V, ? extends U> transformer,
3845 +                                 Consumer<? super U> action) {
3846 +        if (transformer == null || action == null)
3847 +            throw new NullPointerException();
3848 +        new ForEachTransformedValueTask<K,V,U>
3849 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3850 +             transformer, action).invoke();
3851 +    }
3852 +
3853 +    /**
3854 +     * Returns a non-null result from applying the given search
3855 +     * function on each value, or null if none.  Upon success,
3856 +     * further element processing is suppressed and the results of
3857 +     * any other parallel invocations of the search function are
3858 +     * ignored.
3859 +     *
3860 +     * @param parallelismThreshold the (estimated) number of elements
3861 +     * needed for this operation to be executed in parallel
3862 +     * @param searchFunction a function returning a non-null
3863 +     * result on success, else null
3864 +     * @return a non-null result from applying the given search
3865 +     * function on each value, or null if none
3866 +     * @since 1.8
3867 +     */
3868 +    public <U> U searchValues(long parallelismThreshold,
3869 +                              Function<? super V, ? extends U> searchFunction) {
3870 +        if (searchFunction == null) throw new NullPointerException();
3871 +        return new SearchValuesTask<K,V,U>
3872 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3873 +             searchFunction, new AtomicReference<U>()).invoke();
3874 +    }
3875 +
3876 +    /**
3877 +     * Returns the result of accumulating all values using the
3878 +     * given reducer to combine values, or null if none.
3879 +     *
3880 +     * @param parallelismThreshold the (estimated) number of elements
3881 +     * needed for this operation to be executed in parallel
3882 +     * @param reducer a commutative associative combining function
3883 +     * @return the result of accumulating all values
3884 +     * @since 1.8
3885 +     */
3886 +    public V reduceValues(long parallelismThreshold,
3887 +                          BiFunction<? super V, ? super V, ? extends V> reducer) {
3888 +        if (reducer == null) throw new NullPointerException();
3889 +        return new ReduceValuesTask<K,V>
3890 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3891 +             null, reducer).invoke();
3892 +    }
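
Unlike the primitive variants below, no basis is needed here because
the element type itself carries the result. A sketch (names invented)
selecting the largest value:

    ConcurrentHashMap<String,Integer> scores = new ConcurrentHashMap<>();
    Integer max = scores.reduceValues(500L,
                                      (a, b) -> a >= b ? a : b);
    // max is null if and only if the map is empty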
3893 +
3894 +    /**
3895 +     * Returns the result of accumulating the given transformation
3896 +     * of all values using the given reducer to combine values, or
3897 +     * null if none.
3898 +     *
3899 +     * @param parallelismThreshold the (estimated) number of elements
3900 +     * needed for this operation to be executed in parallel
3901 +     * @param transformer a function returning the transformation
3902 +     * for an element, or null if there is no transformation (in
3903 +     * which case it is not combined)
3904 +     * @param reducer a commutative associative combining function
3905 +     * @return the result of accumulating the given transformation
3906 +     * of all values
3907 +     * @since 1.8
3908 +     */
3909 +    public <U> U reduceValues(long parallelismThreshold,
3910 +                              Function<? super V, ? extends U> transformer,
3911 +                              BiFunction<? super U, ? super U, ? extends U> reducer) {
3912 +        if (transformer == null || reducer == null)
3913 +            throw new NullPointerException();
3914 +        return new MapReduceValuesTask<K,V,U>
3915 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3916 +             null, transformer, reducer).invoke();
3917 +    }
3918 +
3919 +    /**
3920 +     * Returns the result of accumulating the given transformation
3921 +     * of all values using the given reducer to combine values,
3922 +     * and the given basis as an identity value.
3923 +     *
3924 +     * @param parallelismThreshold the (estimated) number of elements
3925 +     * needed for this operation to be executed in parallel
3926 +     * @param transformer a function returning the transformation
3927 +     * for an element
3928 +     * @param basis the identity (initial default value) for the reduction
3929 +     * @param reducer a commutative associative combining function
3930 +     * @return the result of accumulating the given transformation
3931 +     * of all values
3932 +     * @since 1.8
3933 +     */
3934 +    public double reduceValuesToDouble(long parallelismThreshold,
3935 +                                       ToDoubleFunction<? super V> transformer,
3936 +                                       double basis,
3937 +                                       DoubleBinaryOperator reducer) {
3938 +        if (transformer == null || reducer == null)
3939 +            throw new NullPointerException();
3940 +        return new MapReduceValuesToDoubleTask<K,V>
3941 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3942 +             null, transformer, basis, reducer).invoke();
3943 +    }
3944 +
3945 +    /**
3946 +     * Returns the result of accumulating the given transformation
3947 +     * of all values using the given reducer to combine values,
3948 +     * and the given basis as an identity value.
3949 +     *
3950 +     * @param parallelismThreshold the (estimated) number of elements
3951 +     * needed for this operation to be executed in parallel
3952 +     * @param transformer a function returning the transformation
3953 +     * for an element
3954 +     * @param basis the identity (initial default value) for the reduction
3955 +     * @param reducer a commutative associative combining function
3956 +     * @return the result of accumulating the given transformation
3957 +     * of all values
3958 +     * @since 1.8
3959 +     */
3960 +    public long reduceValuesToLong(long parallelismThreshold,
3961 +                                   ToLongFunction<? super V> transformer,
3962 +                                   long basis,
3963 +                                   LongBinaryOperator reducer) {
3964 +        if (transformer == null || reducer == null)
3965 +            throw new NullPointerException();
3966 +        return new MapReduceValuesToLongTask<K,V>
3967 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3968 +             null, transformer, basis, reducer).invoke();
3969 +    }
3970 +
3971 +    /**
3972 +     * Returns the result of accumulating the given transformation
3973 +     * of all values using the given reducer to combine values,
3974 +     * and the given basis as an identity value.
3975 +     *
3976 +     * @param parallelismThreshold the (estimated) number of elements
3977 +     * needed for this operation to be executed in parallel
3978 +     * @param transformer a function returning the transformation
3979 +     * for an element
3980 +     * @param basis the identity (initial default value) for the reduction
3981 +     * @param reducer a commutative associative combining function
3982 +     * @return the result of accumulating the given transformation
3983 +     * of all values
3984 +     * @since 1.8
3985 +     */
3986 +    public int reduceValuesToInt(long parallelismThreshold,
3987 +                                 ToIntFunction<? super V> transformer,
3988 +                                 int basis,
3989 +                                 IntBinaryOperator reducer) {
3990 +        if (transformer == null || reducer == null)
3991 +            throw new NullPointerException();
3992 +        return new MapReduceValuesToIntTask<K,V>
3993 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3994 +             null, transformer, basis, reducer).invoke();
3995 +    }
3996 +
3997 +    /**
3998 +     * Performs the given action for each entry.
3999 +     *
4000 +     * @param parallelismThreshold the (estimated) number of elements
4001 +     * needed for this operation to be executed in parallel
4002 +     * @param action the action
4003 +     * @since 1.8
4004 +     */
4005 +    public void forEachEntry(long parallelismThreshold,
4006 +                             Consumer<? super Map.Entry<K,V>> action) {
4007 +        if (action == null) throw new NullPointerException();
4008 +        new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table,
4009 +                                  action).invoke();
4010 +    }
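
Entry-level traversal delivers key and value together, avoiding a
second lookup. A sketch (names invented):

    ConcurrentHashMap<String,Integer> scores = new ConcurrentHashMap<>();
    scores.forEachEntry(500L,
        e -> System.out.println(e.getKey() + "=" + e.getValue()));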
4011 +
4012 +    /**
4013 +     * Performs the given action for each non-null transformation
4014 +     * of each entry.
4015 +     *
4016 +     * @param parallelismThreshold the (estimated) number of elements
4017 +     * needed for this operation to be executed in parallel
4018 +     * @param transformer a function returning the transformation
4019 +     * for an element, or null if there is no transformation (in
4020 +     * which case the action is not applied)
4021 +     * @param action the action
4022 +     * @since 1.8
4023 +     */
4024 +    public <U> void forEachEntry(long parallelismThreshold,
4025 +                                 Function<Map.Entry<K,V>, ? extends U> transformer,
4026 +                                 Consumer<? super U> action) {
4027 +        if (transformer == null || action == null)
4028 +            throw new NullPointerException();
4029 +        new ForEachTransformedEntryTask<K,V,U>
4030 +            (null, batchFor(parallelismThreshold), 0, 0, table,
4031 +             transformer, action).invoke();
4032 +    }
4033 +
4034 +    /**
4035 +     * Returns a non-null result from applying the given search
4036 +     * function on each entry, or null if none.  Upon success,
4037 +     * further element processing is suppressed and the results of
4038 +     * any other parallel invocations of the search function are
4039 +     * ignored.
4040 +     *
4041 +     * @param parallelismThreshold the (estimated) number of elements
4042 +     * needed for this operation to be executed in parallel
4043 +     * @param searchFunction a function returning a non-null
4044 +     * result on success, else null
4045 +     * @return a non-null result from applying the given search
4046 +     * function on each entry, or null if none
4047 +     * @since 1.8
4048 +     */
4049 +    public <U> U searchEntries(long parallelismThreshold,
4050 +                               Function<Map.Entry<K,V>, ? extends U> searchFunction) {
4051 +        if (searchFunction == null) throw new NullPointerException();
4052 +        return new SearchEntriesTask<K,V,U>
4053 +            (null, batchFor(parallelismThreshold), 0, 0, table,
4054 +             searchFunction, new AtomicReference<U>()).invoke();
4055 +    }
4056 +
4057 +    /**
4058 +     * Returns the result of accumulating all entries using the
4059 +     * given reducer to combine values, or null if none.
4060 +     *
4061 +     * @param parallelismThreshold the (estimated) number of elements
4062 +     * needed for this operation to be executed in parallel
4063 +     * @param reducer a commutative associative combining function
4064 +     * @return the result of accumulating all entries
4065 +     * @since 1.8
4066 +     */
4067 +    public Map.Entry<K,V> reduceEntries(long parallelismThreshold,
4068 +                                        BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4069 +        if (reducer == null) throw new NullPointerException();
4070 +        return new ReduceEntriesTask<K,V>
4071 +            (null, batchFor(parallelismThreshold), 0, 0, table,
4072 +             null, reducer).invoke();
4073      }
4074  
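The reducer may return either argument or a freshly built entry; a
sketch (names invented, java.util.Map assumed imported) that picks the
entry with the largest value:

    ConcurrentHashMap<String,Integer> scores = new ConcurrentHashMap<>();
    Map.Entry<String,Integer> top = scores.reduceEntries(500L,
        (a, b) -> a.getValue() >= b.getValue() ? a : b);
    // top is null if and only if the map is empty
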
4075 <    final class Values extends AbstractCollection<V> {
4076 <        public Iterator<V> iterator() {
4077 <            return new ValueIterator();
4075 >    /**
4076 >     * Returns the result of accumulating the given transformation
4077 >     * of all entries using the given reducer to combine values,
4078 >     * or null if none.
4079 >     *
4080 >     * @param parallelismThreshold the (estimated) number of elements
4081 >     * needed for this operation to be executed in parallel
4082 >     * @param transformer a function returning the transformation
4083 >     * for an element, or null if there is no transformation (in
4084 >     * which case it is not combined)
4085 >     * @param reducer a commutative associative combining function
4086 >     * @return the result of accumulating the given transformation
4087 >     * of all entries
4088 >     * @since 1.8
4089 >     */
4090 >    public <U> U reduceEntries(long parallelismThreshold,
4091 >                               Function<Map.Entry<K,V>, ? extends U> transformer,
4092 >                               BiFunction<? super U, ? super U, ? extends U> reducer) {
4093 >        if (transformer == null || reducer == null)
4094 >            throw new NullPointerException();
4095 >        return new MapReduceEntriesTask<K,V,U>
4096 >            (null, batchFor(parallelismThreshold), 0, 0, table,
4097 >             null, transformer, reducer).invoke();
4098 >    }
4099 >
4100 >    /**
4101 >     * Returns the result of accumulating the given transformation
4102 >     * of all entries using the given reducer to combine values,
4103 >     * and the given basis as an identity value.
4104 >     *
4105 >     * @param parallelismThreshold the (estimated) number of elements
4106 >     * needed for this operation to be executed in parallel
4107 >     * @param transformer a function returning the transformation
4108 >     * for an element
4109 >     * @param basis the identity (initial default value) for the reduction
4110 >     * @param reducer a commutative associative combining function
4111 >     * @return the result of accumulating the given transformation
4112 >     * of all entries
4113 >     * @since 1.8
4114 >     */
4115 >    public double reduceEntriesToDouble(long parallelismThreshold,
4116 >                                        ToDoubleFunction<Map.Entry<K,V>> transformer,
4117 >                                        double basis,
4118 >                                        DoubleBinaryOperator reducer) {
4119 >        if (transformer == null || reducer == null)
4120 >            throw new NullPointerException();
4121 >        return new MapReduceEntriesToDoubleTask<K,V>
4122 >            (null, batchFor(parallelismThreshold), 0, 0, table,
4123 >             null, transformer, basis, reducer).invoke();
4124 >    }
4125 >
4126 >    /**
4127 >     * Returns the result of accumulating the given transformation
4128 >     * of all entries using the given reducer to combine values,
4129 >     * and the given basis as an identity value.
4130 >     *
4131 >     * @param parallelismThreshold the (estimated) number of elements
4132 >     * needed for this operation to be executed in parallel
4133 >     * @param transformer a function returning the transformation
4134 >     * for an element
4135 >     * @param basis the identity (initial default value) for the reduction
4136 >     * @param reducer a commutative associative combining function
4137 >     * @return the result of accumulating the given transformation
4138 >     * of all entries
4139 >     * @since 1.8
4140 >     */
4141 >    public long reduceEntriesToLong(long parallelismThreshold,
4142 >                                    ToLongFunction<Map.Entry<K,V>> transformer,
4143 >                                    long basis,
4144 >                                    LongBinaryOperator reducer) {
4145 >        if (transformer == null || reducer == null)
4146 >            throw new NullPointerException();
4147 >        return new MapReduceEntriesToLongTask<K,V>
4148 >            (null, batchFor(parallelismThreshold), 0, 0, table,
4149 >             null, transformer, basis, reducer).invoke();
4150 >    }
4151 >
4152 >    /**
4153 >     * Returns the result of accumulating the given transformation
4154 >     * of all entries using the given reducer to combine values,
4155 >     * and the given basis as an identity value.
4156 >     *
4157 >     * @param parallelismThreshold the (estimated) number of elements
4158 >     * needed for this operation to be executed in parallel
4159 >     * @param transformer a function returning the transformation
4160 >     * for an element
4161 >     * @param basis the identity (initial default value) for the reduction
4162 >     * @param reducer a commutative associative combining function
4163 >     * @return the result of accumulating the given transformation
4164 >     * of all entries
4165 >     * @since 1.8
4166 >     */
4167 >    public int reduceEntriesToInt(long parallelismThreshold,
4168 >                                  ToIntFunction<Map.Entry<K,V>> transformer,
4169 >                                  int basis,
4170 >                                  IntBinaryOperator reducer) {
4171 >        if (transformer == null || reducer == null)
4172 >            throw new NullPointerException();
4173 >        return new MapReduceEntriesToIntTask<K,V>
4174 >            (null, batchFor(parallelismThreshold), 0, 0, table,
4175 >             null, transformer, basis, reducer).invoke();
4176 >    }
4177 >
4178 >
4179 >    /* ---------------- Views -------------- */
4180 >
4181 >    /**
4182 >     * Base class for views.
4183 >     */
4184 >    abstract static class CollectionView<K,V,E>
4185 >        implements Collection<E>, java.io.Serializable {
4186 >        private static final long serialVersionUID = 7249069246763182397L;
4187 >        final ConcurrentHashMap<K,V> map;
4188 >        CollectionView(ConcurrentHashMap<K,V> map)  { this.map = map; }
4189 >
4190 >        /**
4191 >         * Returns the map backing this view.
4192 >         *
4193 >         * @return the map backing this view
4194 >         */
4195 >        public ConcurrentHashMap<K,V> getMap() { return map; }
4196 >
4197 >        /**
4198 >         * Removes all of the elements from this view, by removing all
4199 >         * the mappings from the map backing this view.
4200 >         */
4201 >        public final void clear()      { map.clear(); }
4202 >        public final int size()        { return map.size(); }
4203 >        public final boolean isEmpty() { return map.isEmpty(); }
4204 >
4205 >        // implementations below rely on concrete classes supplying these
4206 >        // abstract methods
4207 >        /**
4208 >         * Returns a "weakly consistent" iterator that will never
4209 >         * throw {@link ConcurrentModificationException}, and
4210 >         * guarantees to traverse elements as they existed upon
4211 >         * construction of the iterator, and may (but is not
4212 >         * guaranteed to) reflect any modifications subsequent to
4213 >         * construction.
4214 >         */
4215 >        public abstract Iterator<E> iterator();
4216 >        public abstract boolean contains(Object o);
4217 >        public abstract boolean remove(Object o);
4218 >
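A hedged illustration of that weak-consistency guarantee (names
invented): mutating the map mid-iteration is permitted and never
throws ConcurrentModificationException, though whether the iterator
observes entries added after its construction is unspecified.

    ConcurrentHashMap<String,Integer> m = new ConcurrentHashMap<>();
    m.put("a", 1);
    for (String k : m.keySet()) {
        m.put(k + "'", 0);   // safe: no ConcurrentModificationException
    }
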
4219 >        private static final String oomeMsg = "Required array size too large";
4220 >
4221 >        public final Object[] toArray() {
4222 >            long sz = map.mappingCount();
4223 >            if (sz > MAX_ARRAY_SIZE)
4224 >                throw new OutOfMemoryError(oomeMsg);
4225 >            int n = (int)sz;
4226 >            Object[] r = new Object[n];
4227 >            int i = 0;
4228 >            for (E e : this) {
4229 >                if (i == n) {
4230 >                    if (n >= MAX_ARRAY_SIZE)
4231 >                        throw new OutOfMemoryError(oomeMsg);
4232 >                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
4233 >                        n = MAX_ARRAY_SIZE;
4234 >                    else
4235 >                        n += (n >>> 1) + 1;
4236 >                    r = Arrays.copyOf(r, n);
4237 >                }
4238 >                r[i++] = e;
4239 >            }
4240 >            return (i == n) ? r : Arrays.copyOf(r, i);
4241 >        }
4242 >
4243 >        @SuppressWarnings("unchecked")
4244 >        public final <T> T[] toArray(T[] a) {
4245 >            long sz = map.mappingCount();
4246 >            if (sz > MAX_ARRAY_SIZE)
4247 >                throw new OutOfMemoryError(oomeMsg);
4248 >            int m = (int)sz;
4249 >            T[] r = (a.length >= m) ? a :
4250 >                (T[])java.lang.reflect.Array
4251 >                .newInstance(a.getClass().getComponentType(), m);
4252 >            int n = r.length;
4253 >            int i = 0;
4254 >            for (E e : this) {
4255 >                if (i == n) {
4256 >                    if (n >= MAX_ARRAY_SIZE)
4257 >                        throw new OutOfMemoryError(oomeMsg);
4258 >                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
4259 >                        n = MAX_ARRAY_SIZE;
4260 >                    else
4261 >                        n += (n >>> 1) + 1;
4262 >                    r = Arrays.copyOf(r, n);
4263 >                }
4264 >                r[i++] = (T)e;
4265 >            }
4266 >            if (a == r && i < n) {
4267 >                r[i] = null; // null-terminate
4268 >                return r;
4269 >            }
4270 >            return (i == n) ? r : Arrays.copyOf(r, i);
4271          }
4272 <        public int size() {
4273 <            return ConcurrentHashMap.this.size();
4272 >
4273 >        /**
4274 >         * Returns a string representation of this collection.
4275 >         * The string representation consists of the string representations
4276 >         * of the collection's elements in the order they are returned by
4277 >         * its iterator, enclosed in square brackets ({@code "[]"}).
4278 >         * Adjacent elements are separated by the characters {@code ", "}
4279 >         * (comma and space).  Elements are converted to strings as by
4280 >         * {@link String#valueOf(Object)}.
4281 >         *
4282 >         * @return a string representation of this collection
4283 >         */
4284 >        public final String toString() {
4285 >            StringBuilder sb = new StringBuilder();
4286 >            sb.append('[');
4287 >            Iterator<E> it = iterator();
4288 >            if (it.hasNext()) {
4289 >                for (;;) {
4290 >                    Object e = it.next();
4291 >                    sb.append(e == this ? "(this Collection)" : e);
4292 >                    if (!it.hasNext())
4293 >                        break;
4294 >                    sb.append(',').append(' ');
4295 >                }
4296 >            }
4297 >            return sb.append(']').toString();
4298          }
4299 <        public boolean isEmpty() {
4300 <            return ConcurrentHashMap.this.isEmpty();
4299 >
4300 >        public final boolean containsAll(Collection<?> c) {
4301 >            if (c != this) {
4302 >                for (Object e : c) {
4303 >                    if (e == null || !contains(e))
4304 >                        return false;
4305 >                }
4306 >            }
4307 >            return true;
4308          }
4309 <        public boolean contains(Object o) {
4310 <            return ConcurrentHashMap.this.containsValue(o);
4309 >
4310 >        public final boolean removeAll(Collection<?> c) {
4311 >            boolean modified = false;
4312 >            for (Iterator<E> it = iterator(); it.hasNext();) {
4313 >                if (c.contains(it.next())) {
4314 >                    it.remove();
4315 >                    modified = true;
4316 >                }
4317 >            }
4318 >            return modified;
4319 >        }
4320 >
4321 >        public final boolean retainAll(Collection<?> c) {
4322 >            boolean modified = false;
4323 >            for (Iterator<E> it = iterator(); it.hasNext();) {
4324 >                if (!c.contains(it.next())) {
4325 >                    it.remove();
4326 >                    modified = true;
4327 >                }
4328 >            }
4329 >            return modified;
4330          }
4331 <        public void clear() {
4332 <            ConcurrentHashMap.this.clear();
4331 >
4332 >    }
4333 >
4334 >    /**
4335 >     * A view of a ConcurrentHashMap as a {@link Set} of keys, in
4336 >     * which additions may optionally be enabled by mapping to a
4337 >     * common value.  This class cannot be directly instantiated.
4338 >     * See {@link #keySet() keySet()},
4339 >     * {@link #keySet(Object) keySet(V)},
4340 >     * {@link #newKeySet() newKeySet()},
4341 >     * {@link #newKeySet(int) newKeySet(int)}.
4342 >     *
4343 >     * @since 1.8
4344 >     */
4345 >    public static class KeySetView<K,V> extends CollectionView<K,V,K>
4346 >        implements Set<K>, java.io.Serializable {
4347 >        private static final long serialVersionUID = 7249069246763182397L;
4348 >        private final V value;
4349 >        KeySetView(ConcurrentHashMap<K,V> map, V value) {  // non-public
4350 >            super(map);
4351 >            this.value = value;
4352 >        }
4353 >
4354 >        /**
4355 >         * Returns the default mapped value for additions,
4356 >         * or {@code null} if additions are not supported.
4357 >         *
4358 >         * @return the default mapped value for additions, or {@code null}
4359 >         * if not supported
4360 >         */
4361 >        public V getMappedValue() { return value; }
4362 >
4363 >        /**
4364 >         * {@inheritDoc}
4365 >         * @throws NullPointerException if the specified key is null
4366 >         */
4367 >        public boolean contains(Object o) { return map.containsKey(o); }
4368 >
4369 >        /**
4370 >         * Removes the key from this map view, by removing the key (and its
4371 >         * corresponding value) from the backing map.  This method does
4372 >         * nothing if the key is not in the map.
4373 >         *
4374 >         * @param  o the key to be removed from the backing map
4375 >         * @return {@code true} if the backing map contained the specified key
4376 >         * @throws NullPointerException if the specified key is null
4377 >         */
4378 >        public boolean remove(Object o) { return map.remove(o) != null; }
4379 >
4380 >        /**
4381 >         * @return an iterator over the keys of the backing map
4382 >         */
4383 >        public Iterator<K> iterator() {
4384 >            Node<K,V>[] t;
4385 >            ConcurrentHashMap<K,V> m = map;
4386 >            int f = (t = m.table) == null ? 0 : t.length;
4387 >            return new KeyIterator<K,V>(t, f, 0, f, m);
4388 >        }
4389 >
4390 >        /**
4391 >         * Adds the specified key to this set view by mapping the key to
4392 >         * the default mapped value in the backing map, if defined.
4393 >         *
4394 >         * @param e key to be added
4395 >         * @return {@code true} if this set changed as a result of the call
4396 >         * @throws NullPointerException if the specified key is null
4397 >         * @throws UnsupportedOperationException if no default mapped value
4398 >         * for additions was provided
4399 >         */
4400 >        public boolean add(K e) {
4401 >            V v;
4402 >            if ((v = value) == null)
4403 >                throw new UnsupportedOperationException();
4404 >            return map.putVal(e, v, true) == null;
4405 >        }
4406 >
4407 >        /**
4408 >         * Adds all of the elements in the specified collection to this set,
4409 >         * as if by calling {@link #add} on each one.
4410 >         *
4411 >         * @param c the elements to be inserted into this set
4412 >         * @return {@code true} if this set changed as a result of the call
4413 >         * @throws NullPointerException if the collection or any of its
4414 >         * elements are {@code null}
4415 >         * @throws UnsupportedOperationException if no default mapped value
4416 >         * for additions was provided
4417 >         */
4418 >        public boolean addAll(Collection<? extends K> c) {
4419 >            boolean added = false;
4420 >            V v;
4421 >            if ((v = value) == null)
4422 >                throw new UnsupportedOperationException();
4423 >            for (K e : c) {
4424 >                if (map.putVal(e, v, true) == null)
4425 >                    added = true;
4426 >            }
4427 >            return added;
4428 >        }
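
A sketch of the two flavors of key view described above (variable
names invented, java.util.Set assumed imported): a set obtained from
newKeySet supports add because a default mapped value exists, while
plain keySet() does not.

    Set<String> seen = ConcurrentHashMap.newKeySet();
    seen.add("first");            // supported: a default value was provided
    ConcurrentHashMap<String,Integer> m = new ConcurrentHashMap<>();
    m.keySet(0).add("x");         // supported: "x" maps to the default 0
    // m.keySet().add("x");       // would throw UnsupportedOperationException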
4429 >
4430 >        public int hashCode() {
4431 >            int h = 0;
4432 >            for (K e : this)
4433 >                h += e.hashCode();
4434 >            return h;
4435 >        }
4436 >
4437 >        public boolean equals(Object o) {
4438 >            Set<?> c;
4439 >            return ((o instanceof Set) &&
4440 >                    ((c = (Set<?>)o) == this ||
4441 >                     (containsAll(c) && c.containsAll(this))));
4442 >        }
4443 >
4444 >        public Spliterator<K> spliterator() {
4445 >            Node<K,V>[] t;
4446 >            ConcurrentHashMap<K,V> m = map;
4447 >            long n = m.sumCount();
4448 >            int f = (t = m.table) == null ? 0 : t.length;
4449 >            return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
4450 >        }
4451 >
4452 >        public void forEach(Consumer<? super K> action) {
4453 >            if (action == null) throw new NullPointerException();
4454 >            Node<K,V>[] t;
4455 >            if ((t = map.table) != null) {
4456 >                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4457 >                for (Node<K,V> p; (p = it.advance()) != null; )
4458 >                    action.accept(p.key);
4459 >            }
4460          }
4461      }
4462  
4463 <    final class EntrySet extends AbstractSet<Map.Entry<K,V>> {
4464 <        public Iterator<Map.Entry<K,V>> iterator() {
4465 <            return new EntryIterator();
4463 >    /**
4464 >     * A view of a ConcurrentHashMap as a {@link Collection} of
4465 >     * values, in which additions are disabled. This class cannot be
4466 >     * directly instantiated. See {@link #values()}.
4467 >     */
4468 >    static final class ValuesView<K,V> extends CollectionView<K,V,V>
4469 >        implements Collection<V>, java.io.Serializable {
4470 >        private static final long serialVersionUID = 2249069246763182397L;
4471 >        ValuesView(ConcurrentHashMap<K,V> map) { super(map); }
4472 >        public final boolean contains(Object o) {
4473 >            return map.containsValue(o);
4474 >        }
4475 >
4476 >        public final boolean remove(Object o) {
4477 >            if (o != null) {
4478 >                for (Iterator<V> it = iterator(); it.hasNext();) {
4479 >                    if (o.equals(it.next())) {
4480 >                        it.remove();
4481 >                        return true;
4482 >                    }
4483 >                }
4484 >            }
4485 >            return false;
4486          }
4487 +
4488 +        public final Iterator<V> iterator() {
4489 +            ConcurrentHashMap<K,V> m = map;
4490 +            Node<K,V>[] t;
4491 +            int f = (t = m.table) == null ? 0 : t.length;
4492 +            return new ValueIterator<K,V>(t, f, 0, f, m);
4493 +        }
4494 +
4495 +        public final boolean add(V e) {
4496 +            throw new UnsupportedOperationException();
4497 +        }
4498 +        public final boolean addAll(Collection<? extends V> c) {
4499 +            throw new UnsupportedOperationException();
4500 +        }
4501 +
4502 +        public Spliterator<V> spliterator() {
4503 +            Node<K,V>[] t;
4504 +            ConcurrentHashMap<K,V> m = map;
4505 +            long n = m.sumCount();
4506 +            int f = (t = m.table) == null ? 0 : t.length;
4507 +            return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
4508 +        }
4509 +
4510 +        public void forEach(Consumer<? super V> action) {
4511 +            if (action == null) throw new NullPointerException();
4512 +            Node<K,V>[] t;
4513 +            if ((t = map.table) != null) {
4514 +                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4515 +                for (Node<K,V> p; (p = it.advance()) != null; )
4516 +                    action.accept(p.val);
4517 +            }
4518 +        }
4519 +    }
4520 +
4521 +    /**
4522 +     * A view of a ConcurrentHashMap as a {@link Set} of (key, value)
4523 +     * entries.  This class cannot be directly instantiated. See
4524 +     * {@link #entrySet()}.
4525 +     */
4526 +    static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>>
4527 +        implements Set<Map.Entry<K,V>>, java.io.Serializable {
4528 +        private static final long serialVersionUID = 2249069246763182397L;
4529 +        EntrySetView(ConcurrentHashMap<K,V> map) { super(map); }
4530 +
4531          public boolean contains(Object o) {
4532 <            if (!(o instanceof Map.Entry))
4533 <                return false;
4534 <            Map.Entry<?,?> e = (Map.Entry<?,?>)o;
4535 <            V v = ConcurrentHashMap.this.get(e.getKey());
4536 <            return v != null && v.equals(e.getValue());
4532 >            Object k, v, r; Map.Entry<?,?> e;
4533 >            return ((o instanceof Map.Entry) &&
4534 >                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4535 >                    (r = map.get(k)) != null &&
4536 >                    (v = e.getValue()) != null &&
4537 >                    (v == r || v.equals(r)));
4538          }
4539 +
4540          public boolean remove(Object o) {
4541 <            if (!(o instanceof Map.Entry))
4542 <                return false;
4543 <            Map.Entry<?,?> e = (Map.Entry<?,?>)o;
4544 <            return ConcurrentHashMap.this.remove(e.getKey(), e.getValue());
4541 >            Object k, v; Map.Entry<?,?> e;
4542 >            return ((o instanceof Map.Entry) &&
4543 >                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4544 >                    (v = e.getValue()) != null &&
4545 >                    map.remove(k, v));
4546 >        }
4547 >
4548 >        /**
4549 >         * @return an iterator over the entries of the backing map
4550 >         */
4551 >        public Iterator<Map.Entry<K,V>> iterator() {
4552 >            ConcurrentHashMap<K,V> m = map;
4553 >            Node<K,V>[] t;
4554 >            int f = (t = m.table) == null ? 0 : t.length;
4555 >            return new EntryIterator<K,V>(t, f, 0, f, m);
4556 >        }
4557 >
4558 >        public boolean add(Entry<K,V> e) {
4559 >            return map.putVal(e.getKey(), e.getValue(), false) == null;
4560          }
4561 <        public int size() {
4562 <            return ConcurrentHashMap.this.size();
4561 >
4562 >        public boolean addAll(Collection<? extends Entry<K,V>> c) {
4563 >            boolean added = false;
4564 >            for (Entry<K,V> e : c) {
4565 >                if (add(e))
4566 >                    added = true;
4567 >            }
4568 >            return added;
4569          }
4570 <        public boolean isEmpty() {
4571 <            return ConcurrentHashMap.this.isEmpty();
4570 >
4571 >        public final int hashCode() {
4572 >            int h = 0;
4573 >            Node<K,V>[] t;
4574 >            if ((t = map.table) != null) {
4575 >                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4576 >                for (Node<K,V> p; (p = it.advance()) != null; ) {
4577 >                    h += p.hashCode();
4578 >                }
4579 >            }
4580 >            return h;
4581          }
4582 <        public void clear() {
4583 <            ConcurrentHashMap.this.clear();
4582 >
4583 >        public final boolean equals(Object o) {
4584 >            Set<?> c;
4585 >            return ((o instanceof Set) &&
4586 >                    ((c = (Set<?>)o) == this ||
4587 >                     (containsAll(c) && c.containsAll(this))));
4588 >        }
4589 >
4590 >        public Spliterator<Map.Entry<K,V>> spliterator() {
4591 >            Node<K,V>[] t;
4592 >            ConcurrentHashMap<K,V> m = map;
4593 >            long n = m.sumCount();
4594 >            int f = (t = m.table) == null ? 0 : t.length;
4595 >            return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m);
4596 >        }
4597 >
4598 >        public void forEach(Consumer<? super Map.Entry<K,V>> action) {
4599 >            if (action == null) throw new NullPointerException();
4600 >            Node<K,V>[] t;
4601 >            if ((t = map.table) != null) {
4602 >                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4603 >                for (Node<K,V> p; (p = it.advance()) != null; )
4604 >                    action.accept(new MapEntry<K,V>(p.key, p.val, map));
4605 >            }
4606          }
4607 +
4608      }
4609  
4610 <    /* ---------------- Serialization Support -------------- */
4610 >    // -------------------------------------------------------
4611  
4612      /**
4613 <     * Saves the state of the <tt>ConcurrentHashMap</tt> instance to a
4614 <     * stream (i.e., serializes it).
1402 <     * @param s the stream
1403 <     * @serialData
1404 <     * the key (Object) and value (Object)
1405 <     * for each key-value mapping, followed by a null pair.
1406 <     * The key-value mappings are emitted in no particular order.
4613 >     * Base class for bulk tasks. Repeats some fields and code from
4614 >     * class Traverser, because we need to subclass CountedCompleter.
4615       */
4616 <    private void writeObject(java.io.ObjectOutputStream s) throws IOException {
4617 <        // force all segments for serialization compatibility
4618 <        for (int k = 0; k < segments.length; ++k)
4619 <            ensureSegment(k);
4620 <        s.defaultWriteObject();
4621 <
4622 <        final Segment<K,V>[] segments = this.segments;
4623 <        for (int k = 0; k < segments.length; ++k) {
4624 <            Segment<K,V> seg = segmentAt(segments, k);
4625 <            seg.lock();
4626 <            try {
4627 <                HashEntry<K,V>[] tab = seg.table;
4628 <                for (int i = 0; i < tab.length; ++i) {
4629 <                    HashEntry<K,V> e;
4630 <                    for (e = entryAt(tab, i); e != null; e = e.next) {
4631 <                        s.writeObject(e.key);
4632 <                        s.writeObject(e.value);
4616 >    abstract static class BulkTask<K,V,R> extends CountedCompleter<R> {
4617 >        Node<K,V>[] tab;        // same as Traverser
4618 >        Node<K,V> next;
4619 >        int index;
4620 >        int baseIndex;
4621 >        int baseLimit;
4622 >        final int baseSize;
4623 >        int batch;              // split control
4624 >
4625 >        BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) {
4626 >            super(par);
4627 >            this.batch = b;
4628 >            this.index = this.baseIndex = i;
4629 >            if ((this.tab = t) == null)
4630 >                this.baseSize = this.baseLimit = 0;
4631 >            else if (par == null)
4632 >                this.baseSize = this.baseLimit = t.length;
4633 >            else {
4634 >                this.baseLimit = f;
4635 >                this.baseSize = par.baseSize;
4636 >            }
4637 >        }
4638 >
4639 >        /**
4640 >         * Same as Traverser version.
4641 >         */
4642 >        final Node<K,V> advance() {
4643 >            Node<K,V> e;
4644 >            if ((e = next) != null)
4645 >                e = e.next;
4646 >            for (;;) {
4647 >                Node<K,V>[] t; int i, n; K ek;  // must use locals in checks
4648 >                if (e != null)
4649 >                    return next = e;
4650 >                if (baseIndex >= baseLimit || (t = tab) == null ||
4651 >                    (n = t.length) <= (i = index) || i < 0)
4652 >                    return next = null;
4653 >                if ((e = tabAt(t, index)) != null && e.key == null) {
4654 >                    if (e instanceof ForwardingNode) {
4655 >                        tab = ((ForwardingNode<K,V>)e).nextTable;
4656 >                        e = null;
4657 >                        continue;
4658                      }
4659 +                    else if (e instanceof TreeBin)
4660 +                        e = ((TreeBin<K,V>)e).first;
4661 +                    else
4662 +                        e = null;
4663                  }
4664 <            } finally {
4665 <                seg.unlock();
4664 >                if ((index += baseSize) >= n)
4665 >                    index = ++baseIndex;    // visit upper slots if present
4666              }
4667          }
1431        s.writeObject(null);
1432        s.writeObject(null);
4668      }
4669  
4670 <    /**
4671 <     * Reconstitutes the <tt>ConcurrentHashMap</tt> instance from a
4672 <     * stream (i.e., deserializes it).
4673 <     * @param s the stream
4674 <     */
4675 <    @SuppressWarnings("unchecked")
4676 <    private void readObject(java.io.ObjectInputStream s)
4677 <        throws IOException, ClassNotFoundException {
4678 <        s.defaultReadObject();
4670 >    /*
4671 >     * Task classes. Coded in a regular but ugly format/style to
4672 >     * simplify checks that each variant differs in the right way from
4673 >     * others. The null screenings exist because compilers cannot tell
4674 >     * that we've already null-checked task arguments, so we force
4675 >     * the simplest hoisted bypass to help avoid convoluted traps.
4676 >     */
4677 >    @SuppressWarnings("serial")
4678 >    static final class ForEachKeyTask<K,V>
4679 >        extends BulkTask<K,V,Void> {
4680 >        final Consumer<? super K> action;
4681 >        ForEachKeyTask
4682 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4683 >             Consumer<? super K> action) {
4684 >            super(p, b, i, f, t);
4685 >            this.action = action;
4686 >        }
4687 >        public final void compute() {
4688 >            final Consumer<? super K> action;
4689 >            if ((action = this.action) != null) {
4690 >                for (int i = baseIndex, f, h; batch > 0 &&
4691 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4692 >                    addToPendingCount(1);
4693 >                    new ForEachKeyTask<K,V>
4694 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
4695 >                         action).fork();
4696 >                }
4697 >                for (Node<K,V> p; (p = advance()) != null;)
4698 >                    action.accept(p.key);
4699 >                propagateCompletion();
4700 >            }
4701 >        }
4702 >    }
4703  
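The fork-half/keep-half shape used by every task above, isolated from
the map internals, looks roughly like this (a hypothetical sketch over
a plain array, not code from this file):

    import java.util.concurrent.CountedCompleter;

    // Visits each slot of a range by repeatedly forking the upper half
    // and continuing on the lower half, mirroring the bulk tasks above.
    class RangeTask extends CountedCompleter<Void> {
        final long[] a; final int lo, hi;
        RangeTask(CountedCompleter<?> p, long[] a, int lo, int hi) {
            super(p); this.a = a; this.lo = lo; this.hi = hi;
        }
        public void compute() {
            int l = lo, h = hi;
            while (h - l > 1) {
                int mid = (l + h) >>> 1;
                addToPendingCount(1);                  // one more child to await
                new RangeTask(this, a, mid, h).fork(); // fork the upper half
                h = mid;                               // keep the lower half
            }
            for (int i = l; i < h; i++)
                System.out.println(a[i]);
            propagateCompletion();                     // completes when children do
        }
    }
    // Usage: new RangeTask(null, data, 0, data.length).invoke();
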
4704 <        // Re-initialize segments to be minimally sized, and let grow.
4705 <        int cap = MIN_SEGMENT_TABLE_CAPACITY;
4706 <        final Segment<K,V>[] segments = this.segments;
4707 <        for (int k = 0; k < segments.length; ++k) {
4708 <            Segment<K,V> seg = segments[k];
4709 <            if (seg != null) {
4710 <                seg.threshold = (int)(cap * seg.loadFactor);
4711 <                seg.table = (HashEntry<K,V>[]) new HashEntry[cap];
4704 >    @SuppressWarnings("serial")
4705 >    static final class ForEachValueTask<K,V>
4706 >        extends BulkTask<K,V,Void> {
4707 >        final Consumer<? super V> action;
4708 >        ForEachValueTask
4709 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4710 >             Consumer<? super V> action) {
4711 >            super(p, b, i, f, t);
4712 >            this.action = action;
4713 >        }
4714 >        public final void compute() {
4715 >            final Consumer<? super V> action;
4716 >            if ((action = this.action) != null) {
4717 >                for (int i = baseIndex, f, h; batch > 0 &&
4718 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4719 >                    addToPendingCount(1);
4720 >                    new ForEachValueTask<K,V>
4721 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
4722 >                         action).fork();
4723 >                }
4724 >                for (Node<K,V> p; (p = advance()) != null;)
4725 >                    action.accept(p.val);
4726 >                propagateCompletion();
4727              }
4728          }
4729 +    }
4730  
4731 <        // Read the keys and values, and put the mappings in the table
4732 <        for (;;) {
4733 <            K key = (K) s.readObject();
4734 <            V value = (V) s.readObject();
4735 <            if (key == null)
4736 <                break;
4737 <            put(key, value);
4731 >    @SuppressWarnings("serial")
4732 >    static final class ForEachEntryTask<K,V>
4733 >        extends BulkTask<K,V,Void> {
4734 >        final Consumer<? super Entry<K,V>> action;
4735 >        ForEachEntryTask
4736 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4737 >             Consumer<? super Entry<K,V>> action) {
4738 >            super(p, b, i, f, t);
4739 >            this.action = action;
4740 >        }
4741 >        public final void compute() {
4742 >            final Consumer<? super Entry<K,V>> action;
4743 >            if ((action = this.action) != null) {
4744 >                for (int i = baseIndex, f, h; batch > 0 &&
4745 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4746 >                    addToPendingCount(1);
4747 >                    new ForEachEntryTask<K,V>
4748 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
4749 >                         action).fork();
4750 >                }
4751 >                for (Node<K,V> p; (p = advance()) != null; )
4752 >                    action.accept(p);
4753 >                propagateCompletion();
4754 >            }
4755 >        }
4756 >    }
4757 >
4758 >    @SuppressWarnings("serial")
4759 >    static final class ForEachMappingTask<K,V>
4760 >        extends BulkTask<K,V,Void> {
4761 >        final BiConsumer<? super K, ? super V> action;
4762 >        ForEachMappingTask
4763 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4764 >             BiConsumer<? super K,? super V> action) {
4765 >            super(p, b, i, f, t);
4766 >            this.action = action;
4767 >        }
4768 >        public final void compute() {
4769 >            final BiConsumer<? super K, ? super V> action;
4770 >            if ((action = this.action) != null) {
4771 >                for (int i = baseIndex, f, h; batch > 0 &&
4772 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4773 >                    addToPendingCount(1);
4774 >                    new ForEachMappingTask<K,V>
4775 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
4776 >                         action).fork();
4777 >                }
4778 >                for (Node<K,V> p; (p = advance()) != null; )
4779 >                    action.accept(p.key, p.val);
4780 >                propagateCompletion();
4781 >            }
4782 >        }
4783 >    }
4784 >
4785 >    @SuppressWarnings("serial")
4786 >    static final class ForEachTransformedKeyTask<K,V,U>
4787 >        extends BulkTask<K,V,Void> {
4788 >        final Function<? super K, ? extends U> transformer;
4789 >        final Consumer<? super U> action;
4790 >        ForEachTransformedKeyTask
4791 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4792 >             Function<? super K, ? extends U> transformer, Consumer<? super U> action) {
4793 >            super(p, b, i, f, t);
4794 >            this.transformer = transformer; this.action = action;
4795 >        }
4796 >        public final void compute() {
4797 >            final Function<? super K, ? extends U> transformer;
4798 >            final Consumer<? super U> action;
4799 >            if ((transformer = this.transformer) != null &&
4800 >                (action = this.action) != null) {
4801 >                for (int i = baseIndex, f, h; batch > 0 &&
4802 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4803 >                    addToPendingCount(1);
4804 >                    new ForEachTransformedKeyTask<K,V,U>
4805 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
4806 >                         transformer, action).fork();
4807 >                }
4808 >                for (Node<K,V> p; (p = advance()) != null; ) {
4809 >                    U u;
4810 >                    if ((u = transformer.apply(p.key)) != null)
4811 >                        action.accept(u);
4812 >                }
4813 >                propagateCompletion();
4814 >            }
4815 >        }
4816 >    }
4817 >
4818 >    @SuppressWarnings("serial")
4819 >    static final class ForEachTransformedValueTask<K,V,U>
4820 >        extends BulkTask<K,V,Void> {
4821 >        final Function<? super V, ? extends U> transformer;
4822 >        final Consumer<? super U> action;
4823 >        ForEachTransformedValueTask
4824 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4825 >             Function<? super V, ? extends U> transformer, Consumer<? super U> action) {
4826 >            super(p, b, i, f, t);
4827 >            this.transformer = transformer; this.action = action;
4828 >        }
4829 >        public final void compute() {
4830 >            final Function<? super V, ? extends U> transformer;
4831 >            final Consumer<? super U> action;
4832 >            if ((transformer = this.transformer) != null &&
4833 >                (action = this.action) != null) {
4834 >                for (int i = baseIndex, f, h; batch > 0 &&
4835 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4836 >                    addToPendingCount(1);
4837 >                    new ForEachTransformedValueTask<K,V,U>
4838 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
4839 >                         transformer, action).fork();
4840 >                }
4841 >                for (Node<K,V> p; (p = advance()) != null; ) {
4842 >                    U u;
4843 >                    if ((u = transformer.apply(p.val)) != null)
4844 >                        action.accept(u);
4845 >                }
4846 >                propagateCompletion();
4847 >            }
4848 >        }
4849 >    }
4850 >
4851 >    @SuppressWarnings("serial")
4852 >    static final class ForEachTransformedEntryTask<K,V,U>
4853 >        extends BulkTask<K,V,Void> {
4854 >        final Function<Map.Entry<K,V>, ? extends U> transformer;
4855 >        final Consumer<? super U> action;
4856 >        ForEachTransformedEntryTask
4857 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4858 >             Function<Map.Entry<K,V>, ? extends U> transformer, Consumer<? super U> action) {
4859 >            super(p, b, i, f, t);
4860 >            this.transformer = transformer; this.action = action;
4861 >        }
4862 >        public final void compute() {
4863 >            final Function<Map.Entry<K,V>, ? extends U> transformer;
4864 >            final Consumer<? super U> action;
4865 >            if ((transformer = this.transformer) != null &&
4866 >                (action = this.action) != null) {
4867 >                for (int i = baseIndex, f, h; batch > 0 &&
4868 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4869 >                    addToPendingCount(1);
4870 >                    new ForEachTransformedEntryTask<K,V,U>
4871 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
4872 >                         transformer, action).fork();
4873 >                }
4874 >                for (Node<K,V> p; (p = advance()) != null; ) {
4875 >                    U u;
4876 >                    if ((u = transformer.apply(p)) != null)
4877 >                        action.accept(u);
4878 >                }
4879 >                propagateCompletion();
4880 >            }
4881 >        }
4882 >    }
4883 >
4884 >    @SuppressWarnings("serial")
4885 >    static final class ForEachTransformedMappingTask<K,V,U>
4886 >        extends BulkTask<K,V,Void> {
4887 >        final BiFunction<? super K, ? super V, ? extends U> transformer;
4888 >        final Consumer<? super U> action;
4889 >        ForEachTransformedMappingTask
4890 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4891 >             BiFunction<? super K, ? super V, ? extends U> transformer,
4892 >             Consumer<? super U> action) {
4893 >            super(p, b, i, f, t);
4894 >            this.transformer = transformer; this.action = action;
4895 >        }
4896 >        public final void compute() {
4897 >            final BiFunction<? super K, ? super V, ? extends U> transformer;
4898 >            final Consumer<? super U> action;
4899 >            if ((transformer = this.transformer) != null &&
4900 >                (action = this.action) != null) {
4901 >                for (int i = baseIndex, f, h; batch > 0 &&
4902 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4903 >                    addToPendingCount(1);
4904 >                    new ForEachTransformedMappingTask<K,V,U>
4905 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
4906 >                         transformer, action).fork();
4907 >                }
4908 >                for (Node<K,V> p; (p = advance()) != null; ) {
4909 >                    U u;
4910 >                    if ((u = transformer.apply(p.key, p.val)) != null)
4911 >                        action.accept(u);
4912 >                }
4913 >                propagateCompletion();
4914 >            }
4915 >        }
4916 >    }
4917 >
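ForEachTransformedMappingTask closes out the forEach family, driving the two-argument transform-then-consume overload forEach(long, BiFunction, Consumer); the value and entry variants above work identically on p.val and on the node itself. Sketch (names and data illustrative):

    ConcurrentHashMap<String,Integer> map = new ConcurrentHashMap<>();
    map.put("a", 1);
    // Render each mapping as "key:value", skipping non-positive values.
    map.forEach(1L,
                (k, v) -> v > 0 ? k + ":" + v : null,
                System.out::println);
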
4918 >    @SuppressWarnings("serial")
4919 >    static final class SearchKeysTask<K,V,U>
4920 >        extends BulkTask<K,V,U> {
4921 >        final Function<? super K, ? extends U> searchFunction;
4922 >        final AtomicReference<U> result;
4923 >        SearchKeysTask
4924 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4925 >             Function<? super K, ? extends U> searchFunction,
4926 >             AtomicReference<U> result) {
4927 >            super(p, b, i, f, t);
4928 >            this.searchFunction = searchFunction; this.result = result;
4929 >        }
4930 >        public final U getRawResult() { return result.get(); }
4931 >        public final void compute() {
4932 >            final Function<? super K, ? extends U> searchFunction;
4933 >            final AtomicReference<U> result;
4934 >            if ((searchFunction = this.searchFunction) != null &&
4935 >                (result = this.result) != null) {
4936 >                for (int i = baseIndex, f, h; batch > 0 &&
4937 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4938 >                    if (result.get() != null)
4939 >                        return;
4940 >                    addToPendingCount(1);
4941 >                    new SearchKeysTask<K,V,U>
4942 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
4943 >                         searchFunction, result).fork();
4944 >                }
4945 >                while (result.get() == null) {
4946 >                    U u;
4947 >                    Node<K,V> p;
4948 >                    if ((p = advance()) == null) {
4949 >                        propagateCompletion();
4950 >                        break;
4951 >                    }
4952 >                    if ((u = searchFunction.apply(p.key)) != null) {
4953 >                        if (result.compareAndSet(null, u))
4954 >                            quietlyCompleteRoot();
4955 >                        break;
4956 >                    }
4957 >                }
4958 >            }
4959 >        }
4960 >    }
4961 >
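SearchKeysTask implements searchKeys(long, Function). Subtasks cooperate through the shared AtomicReference: splitting stops as soon as any result is posted, the first non-null function value wins the compareAndSet, and quietlyCompleteRoot() short-circuits the rest of the task tree. Sketch (data illustrative; if several keys match, whichever subtask publishes first supplies the answer):

    ConcurrentHashMap<String,Integer> map = new ConcurrentHashMap<>();
    map.put("apple", 1);
    map.put("pear", 2);
    // Some key starting with "p", or null if none exists.
    String hit = map.searchKeys(1L, k -> k.startsWith("p") ? k : null);
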
4962 >    @SuppressWarnings("serial")
4963 >    static final class SearchValuesTask<K,V,U>
4964 >        extends BulkTask<K,V,U> {
4965 >        final Function<? super V, ? extends U> searchFunction;
4966 >        final AtomicReference<U> result;
4967 >        SearchValuesTask
4968 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4969 >             Function<? super V, ? extends U> searchFunction,
4970 >             AtomicReference<U> result) {
4971 >            super(p, b, i, f, t);
4972 >            this.searchFunction = searchFunction; this.result = result;
4973 >        }
4974 >        public final U getRawResult() { return result.get(); }
4975 >        public final void compute() {
4976 >            final Function<? super V, ? extends U> searchFunction;
4977 >            final AtomicReference<U> result;
4978 >            if ((searchFunction = this.searchFunction) != null &&
4979 >                (result = this.result) != null) {
4980 >                for (int i = baseIndex, f, h; batch > 0 &&
4981 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4982 >                    if (result.get() != null)
4983 >                        return;
4984 >                    addToPendingCount(1);
4985 >                    new SearchValuesTask<K,V,U>
4986 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
4987 >                         searchFunction, result).fork();
4988 >                }
4989 >                while (result.get() == null) {
4990 >                    U u;
4991 >                    Node<K,V> p;
4992 >                    if ((p = advance()) == null) {
4993 >                        propagateCompletion();
4994 >                        break;
4995 >                    }
4996 >                    if ((u = searchFunction.apply(p.val)) != null) {
4997 >                        if (result.compareAndSet(null, u))
4998 >                            quietlyCompleteRoot();
4999 >                        break;
5000 >                    }
5001 >                }
5002 >            }
5003 >        }
5004 >    }
5005 >
5006 >    @SuppressWarnings("serial")
5007 >    static final class SearchEntriesTask<K,V,U>
5008 >        extends BulkTask<K,V,U> {
5009 >        final Function<Map.Entry<K,V>, ? extends U> searchFunction;
5010 >        final AtomicReference<U> result;
5011 >        SearchEntriesTask
5012 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5013 >             Function<Map.Entry<K,V>, ? extends U> searchFunction,
5014 >             AtomicReference<U> result) {
5015 >            super(p, b, i, f, t);
5016 >            this.searchFunction = searchFunction; this.result = result;
5017 >        }
5018 >        public final U getRawResult() { return result.get(); }
5019 >        public final void compute() {
5020 >            final Function<Map.Entry<K,V>, ? extends U> searchFunction;
5021 >            final AtomicReference<U> result;
5022 >            if ((searchFunction = this.searchFunction) != null &&
5023 >                (result = this.result) != null) {
5024 >                for (int i = baseIndex, f, h; batch > 0 &&
5025 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5026 >                    if (result.get() != null)
5027 >                        return;
5028 >                    addToPendingCount(1);
5029 >                    new SearchEntriesTask<K,V,U>
5030 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
5031 >                         searchFunction, result).fork();
5032 >                }
5033 >                while (result.get() == null) {
5034 >                    U u;
5035 >                    Node<K,V> p;
5036 >                    if ((p = advance()) == null) {
5037 >                        propagateCompletion();
5038 >                        break;
5039 >                    }
5040 >                    if ((u = searchFunction.apply(p)) != null) {
5041 >                        if (result.compareAndSet(null, u))
5042 >                            quietlyCompleteRoot();
5043 >                        break;
5044 >                    }
5045 >                }
5046 >            }
5047 >        }
5048 >    }
5049 >
5050 >    @SuppressWarnings("serial")
5051 >    static final class SearchMappingsTask<K,V,U>
5052 >        extends BulkTask<K,V,U> {
5053 >        final BiFunction<? super K, ? super V, ? extends U> searchFunction;
5054 >        final AtomicReference<U> result;
5055 >        SearchMappingsTask
5056 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5057 >             BiFunction<? super K, ? super V, ? extends U> searchFunction,
5058 >             AtomicReference<U> result) {
5059 >            super(p, b, i, f, t);
5060 >            this.searchFunction = searchFunction; this.result = result;
5061 >        }
5062 >        public final U getRawResult() { return result.get(); }
5063 >        public final void compute() {
5064 >            final BiFunction<? super K, ? super V, ? extends U> searchFunction;
5065 >            final AtomicReference<U> result;
5066 >            if ((searchFunction = this.searchFunction) != null &&
5067 >                (result = this.result) != null) {
5068 >                for (int i = baseIndex, f, h; batch > 0 &&
5069 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5070 >                    if (result.get() != null)
5071 >                        return;
5072 >                    addToPendingCount(1);
5073 >                    new SearchMappingsTask<K,V,U>
5074 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
5075 >                         searchFunction, result).fork();
5076 >                }
5077 >                while (result.get() == null) {
5078 >                    U u;
5079 >                    Node<K,V> p;
5080 >                    if ((p = advance()) == null) {
5081 >                        propagateCompletion();
5082 >                        break;
5083 >                    }
5084 >                    if ((u = searchFunction.apply(p.key, p.val)) != null) {
5085 >                        if (result.compareAndSet(null, u))
5086 >                            quietlyCompleteRoot();
5087 >                        break;
5088 >                    }
5089 >                }
5090 >            }
5091 >        }
5092 >    }
5093 >
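SearchMappingsTask, the last of the four search engines, backs search(long, BiFunction) over whole key-value pairs. Sketch (data illustrative):

    ConcurrentHashMap<String,Integer> map = new ConcurrentHashMap<>();
    map.put("a", 1);
    map.put("b", 42);
    // First mapping found with a value above 10, or null if there is none.
    String found = map.search(1L, (k, v) -> v > 10 ? k + "=" + v : null);
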
5094 >    @SuppressWarnings("serial")
5095 >    static final class ReduceKeysTask<K,V>
5096 >        extends BulkTask<K,V,K> {
5097 >        final BiFunction<? super K, ? super K, ? extends K> reducer;
5098 >        K result;
5099 >        ReduceKeysTask<K,V> rights, nextRight;
5100 >        ReduceKeysTask
5101 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5102 >             ReduceKeysTask<K,V> nextRight,
5103 >             BiFunction<? super K, ? super K, ? extends K> reducer) {
5104 >            super(p, b, i, f, t); this.nextRight = nextRight;
5105 >            this.reducer = reducer;
5106 >        }
5107 >        public final K getRawResult() { return result; }
5108 >        public final void compute() {
5109 >            final BiFunction<? super K, ? super K, ? extends K> reducer;
5110 >            if ((reducer = this.reducer) != null) {
5111 >                for (int i = baseIndex, f, h; batch > 0 &&
5112 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5113 >                    addToPendingCount(1);
5114 >                    (rights = new ReduceKeysTask<K,V>
5115 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5116 >                      rights, reducer)).fork();
5117 >                }
5118 >                K r = null;
5119 >                for (Node<K,V> p; (p = advance()) != null; ) {
5120 >                    K u = p.key;
5121 >                    r = (r == null) ? u : u == null ? r : reducer.apply(r, u);
5122 >                }
5123 >                result = r;
5124 >                CountedCompleter<?> c;
5125 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5126 >                    @SuppressWarnings("unchecked") ReduceKeysTask<K,V>
5127 >                        t = (ReduceKeysTask<K,V>)c,
5128 >                        s = t.rights;
5129 >                    while (s != null) {
5130 >                        K tr, sr;
5131 >                        if ((sr = s.result) != null)
5132 >                            t.result = (((tr = t.result) == null) ? sr :
5133 >                                        reducer.apply(tr, sr));
5134 >                        s = t.rights = s.nextRight;
5135 >                    }
5136 >                }
5137 >            }
5138 >        }
5139 >    }
5140 >
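ReduceKeysTask backs the transformer-less reduceKeys(long, BiFunction). Each leaf folds its slice into result; the firstComplete()/nextComplete() loop then walks completed parents, merging each right-hand sibling (chained through rights/nextRight) without ever blocking a worker. Sketch (data illustrative):

    ConcurrentHashMap<String,Integer> map = new ConcurrentHashMap<>();
    map.put("pear", 1);
    map.put("fig", 2);
    // Lexicographically smallest key, or null if the map is empty.
    String min = map.reduceKeys(1L, (a, b) -> a.compareTo(b) <= 0 ? a : b);
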
5141 >    @SuppressWarnings("serial")
5142 >    static final class ReduceValuesTask<K,V>
5143 >        extends BulkTask<K,V,V> {
5144 >        final BiFunction<? super V, ? super V, ? extends V> reducer;
5145 >        V result;
5146 >        ReduceValuesTask<K,V> rights, nextRight;
5147 >        ReduceValuesTask
5148 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5149 >             ReduceValuesTask<K,V> nextRight,
5150 >             BiFunction<? super V, ? super V, ? extends V> reducer) {
5151 >            super(p, b, i, f, t); this.nextRight = nextRight;
5152 >            this.reducer = reducer;
5153 >        }
5154 >        public final V getRawResult() { return result; }
5155 >        public final void compute() {
5156 >            final BiFunction<? super V, ? super V, ? extends V> reducer;
5157 >            if ((reducer = this.reducer) != null) {
5158 >                for (int i = baseIndex, f, h; batch > 0 &&
5159 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5160 >                    addToPendingCount(1);
5161 >                    (rights = new ReduceValuesTask<K,V>
5162 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5163 >                      rights, reducer)).fork();
5164 >                }
5165 >                V r = null;
5166 >                for (Node<K,V> p; (p = advance()) != null; ) {
5167 >                    V v = p.val;
5168 >                    r = (r == null) ? v : reducer.apply(r, v);
5169 >                }
5170 >                result = r;
5171 >                CountedCompleter<?> c;
5172 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5173 >                    @SuppressWarnings("unchecked") ReduceValuesTask<K,V>
5174 >                        t = (ReduceValuesTask<K,V>)c,
5175 >                        s = t.rights;
5176 >                    while (s != null) {
5177 >                        V tr, sr;
5178 >                        if ((sr = s.result) != null)
5179 >                            t.result = (((tr = t.result) == null) ? sr :
5180 >                                        reducer.apply(tr, sr));
5181 >                        s = t.rights = s.nextRight;
5182 >                    }
5183 >                }
5184 >            }
5185 >        }
5186 >    }
5187 >
5188 >    @SuppressWarnings("serial")
5189 >    static final class ReduceEntriesTask<K,V>
5190 >        extends BulkTask<K,V,Map.Entry<K,V>> {
5191 >        final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
5192 >        Map.Entry<K,V> result;
5193 >        ReduceEntriesTask<K,V> rights, nextRight;
5194 >        ReduceEntriesTask
5195 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5196 >             ReduceEntriesTask<K,V> nextRight,
5197 >             BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
5198 >            super(p, b, i, f, t); this.nextRight = nextRight;
5199 >            this.reducer = reducer;
5200 >        }
5201 >        public final Map.Entry<K,V> getRawResult() { return result; }
5202 >        public final void compute() {
5203 >            final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
5204 >            if ((reducer = this.reducer) != null) {
5205 >                for (int i = baseIndex, f, h; batch > 0 &&
5206 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5207 >                    addToPendingCount(1);
5208 >                    (rights = new ReduceEntriesTask<K,V>
5209 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5210 >                      rights, reducer)).fork();
5211 >                }
5212 >                Map.Entry<K,V> r = null;
5213 >                for (Node<K,V> p; (p = advance()) != null; )
5214 >                    r = (r == null) ? p : reducer.apply(r, p);
5215 >                result = r;
5216 >                CountedCompleter<?> c;
5217 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5218 >                    @SuppressWarnings("unchecked") ReduceEntriesTask<K,V>
5219 >                        t = (ReduceEntriesTask<K,V>)c,
5220 >                        s = t.rights;
5221 >                    while (s != null) {
5222 >                        Map.Entry<K,V> tr, sr;
5223 >                        if ((sr = s.result) != null)
5224 >                            t.result = (((tr = t.result) == null) ? sr :
5225 >                                        reducer.apply(tr, sr));
5226 >                        s = t.rights = s.nextRight;
5227 >                    }
5228 >                }
5229 >            }
5230 >        }
5231 >    }
5232 >
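ReduceEntriesTask backs reduceEntries(long, BiFunction); the table nodes themselves are passed to the reducer as Map.Entry objects, so no per-entry wrapper allocation is needed. Sketch (assumes java.util.Map is imported; data illustrative):

    ConcurrentHashMap<String,Integer> map = new ConcurrentHashMap<>();
    map.put("a", 3);
    map.put("b", 7);
    // Entry carrying the largest value, or null if the map is empty.
    Map.Entry<String,Integer> max = map.reduceEntries(
        1L, (e1, e2) -> e1.getValue() >= e2.getValue() ? e1 : e2);
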
5233 >    @SuppressWarnings("serial")
5234 >    static final class MapReduceKeysTask<K,V,U>
5235 >        extends BulkTask<K,V,U> {
5236 >        final Function<? super K, ? extends U> transformer;
5237 >        final BiFunction<? super U, ? super U, ? extends U> reducer;
5238 >        U result;
5239 >        MapReduceKeysTask<K,V,U> rights, nextRight;
5240 >        MapReduceKeysTask
5241 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5242 >             MapReduceKeysTask<K,V,U> nextRight,
5243 >             Function<? super K, ? extends U> transformer,
5244 >             BiFunction<? super U, ? super U, ? extends U> reducer) {
5245 >            super(p, b, i, f, t); this.nextRight = nextRight;
5246 >            this.transformer = transformer;
5247 >            this.reducer = reducer;
5248 >        }
5249 >        public final U getRawResult() { return result; }
5250 >        public final void compute() {
5251 >            final Function<? super K, ? extends U> transformer;
5252 >            final BiFunction<? super U, ? super U, ? extends U> reducer;
5253 >            if ((transformer = this.transformer) != null &&
5254 >                (reducer = this.reducer) != null) {
5255 >                for (int i = baseIndex, f, h; batch > 0 &&
5256 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5257 >                    addToPendingCount(1);
5258 >                    (rights = new MapReduceKeysTask<K,V,U>
5259 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5260 >                      rights, transformer, reducer)).fork();
5261 >                }
5262 >                U r = null;
5263 >                for (Node<K,V> p; (p = advance()) != null; ) {
5264 >                    U u;
5265 >                    if ((u = transformer.apply(p.key)) != null)
5266 >                        r = (r == null) ? u : reducer.apply(r, u);
5267 >                }
5268 >                result = r;
5269 >                CountedCompleter<?> c;
5270 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5271 >                    @SuppressWarnings("unchecked") MapReduceKeysTask<K,V,U>
5272 >                        t = (MapReduceKeysTask<K,V,U>)c,
5273 >                        s = t.rights;
5274 >                    while (s != null) {
5275 >                        U tr, sr;
5276 >                        if ((sr = s.result) != null)
5277 >                            t.result = (((tr = t.result) == null) ? sr :
5278 >                                        reducer.apply(tr, sr));
5279 >                        s = t.rights = s.nextRight;
5280 >                    }
5281 >                }
5282 >            }
5283 >        }
5284 >    }
5285 >
5286 >    @SuppressWarnings("serial")
5287 >    static final class MapReduceValuesTask<K,V,U>
5288 >        extends BulkTask<K,V,U> {
5289 >        final Function<? super V, ? extends U> transformer;
5290 >        final BiFunction<? super U, ? super U, ? extends U> reducer;
5291 >        U result;
5292 >        MapReduceValuesTask<K,V,U> rights, nextRight;
5293 >        MapReduceValuesTask
5294 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5295 >             MapReduceValuesTask<K,V,U> nextRight,
5296 >             Function<? super V, ? extends U> transformer,
5297 >             BiFunction<? super U, ? super U, ? extends U> reducer) {
5298 >            super(p, b, i, f, t); this.nextRight = nextRight;
5299 >            this.transformer = transformer;
5300 >            this.reducer = reducer;
5301 >        }
5302 >        public final U getRawResult() { return result; }
5303 >        public final void compute() {
5304 >            final Function<? super V, ? extends U> transformer;
5305 >            final BiFunction<? super U, ? super U, ? extends U> reducer;
5306 >            if ((transformer = this.transformer) != null &&
5307 >                (reducer = this.reducer) != null) {
5308 >                for (int i = baseIndex, f, h; batch > 0 &&
5309 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5310 >                    addToPendingCount(1);
5311 >                    (rights = new MapReduceValuesTask<K,V,U>
5312 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5313 >                      rights, transformer, reducer)).fork();
5314 >                }
5315 >                U r = null;
5316 >                for (Node<K,V> p; (p = advance()) != null; ) {
5317 >                    U u;
5318 >                    if ((u = transformer.apply(p.val)) != null)
5319 >                        r = (r == null) ? u : reducer.apply(r, u);
5320 >                }
5321 >                result = r;
5322 >                CountedCompleter<?> c;
5323 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5324 >                    @SuppressWarnings("unchecked") MapReduceValuesTask<K,V,U>
5325 >                        t = (MapReduceValuesTask<K,V,U>)c,
5326 >                        s = t.rights;
5327 >                    while (s != null) {
5328 >                        U tr, sr;
5329 >                        if ((sr = s.result) != null)
5330 >                            t.result = (((tr = t.result) == null) ? sr :
5331 >                                        reducer.apply(tr, sr));
5332 >                        s = t.rights = s.nextRight;
5333 >                    }
5334 >                }
5335 >            }
5336 >        }
5337 >    }
5338 >
5339 >    @SuppressWarnings("serial")
5340 >    static final class MapReduceEntriesTask<K,V,U>
5341 >        extends BulkTask<K,V,U> {
5342 >        final Function<Map.Entry<K,V>, ? extends U> transformer;
5343 >        final BiFunction<? super U, ? super U, ? extends U> reducer;
5344 >        U result;
5345 >        MapReduceEntriesTask<K,V,U> rights, nextRight;
5346 >        MapReduceEntriesTask
5347 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5348 >             MapReduceEntriesTask<K,V,U> nextRight,
5349 >             Function<Map.Entry<K,V>, ? extends U> transformer,
5350 >             BiFunction<? super U, ? super U, ? extends U> reducer) {
5351 >            super(p, b, i, f, t); this.nextRight = nextRight;
5352 >            this.transformer = transformer;
5353 >            this.reducer = reducer;
5354 >        }
5355 >        public final U getRawResult() { return result; }
5356 >        public final void compute() {
5357 >            final Function<Map.Entry<K,V>, ? extends U> transformer;
5358 >            final BiFunction<? super U, ? super U, ? extends U> reducer;
5359 >            if ((transformer = this.transformer) != null &&
5360 >                (reducer = this.reducer) != null) {
5361 >                for (int i = baseIndex, f, h; batch > 0 &&
5362 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5363 >                    addToPendingCount(1);
5364 >                    (rights = new MapReduceEntriesTask<K,V,U>
5365 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5366 >                      rights, transformer, reducer)).fork();
5367 >                }
5368 >                U r = null;
5369 >                for (Node<K,V> p; (p = advance()) != null; ) {
5370 >                    U u;
5371 >                    if ((u = transformer.apply(p)) != null)
5372 >                        r = (r == null) ? u : reducer.apply(r, u);
5373 >                }
5374 >                result = r;
5375 >                CountedCompleter<?> c;
5376 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5377 >                    @SuppressWarnings("unchecked") MapReduceEntriesTask<K,V,U>
5378 >                        t = (MapReduceEntriesTask<K,V,U>)c,
5379 >                        s = t.rights;
5380 >                    while (s != null) {
5381 >                        U tr, sr;
5382 >                        if ((sr = s.result) != null)
5383 >                            t.result = (((tr = t.result) == null) ? sr :
5384 >                                        reducer.apply(tr, sr));
5385 >                        s = t.rights = s.nextRight;
5386 >                    }
5387 >                }
5388 >            }
5389 >        }
5390 >    }
5391 >
5392 >    @SuppressWarnings("serial")
5393 >    static final class MapReduceMappingsTask<K,V,U>
5394 >        extends BulkTask<K,V,U> {
5395 >        final BiFunction<? super K, ? super V, ? extends U> transformer;
5396 >        final BiFunction<? super U, ? super U, ? extends U> reducer;
5397 >        U result;
5398 >        MapReduceMappingsTask<K,V,U> rights, nextRight;
5399 >        MapReduceMappingsTask
5400 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5401 >             MapReduceMappingsTask<K,V,U> nextRight,
5402 >             BiFunction<? super K, ? super V, ? extends U> transformer,
5403 >             BiFunction<? super U, ? super U, ? extends U> reducer) {
5404 >            super(p, b, i, f, t); this.nextRight = nextRight;
5405 >            this.transformer = transformer;
5406 >            this.reducer = reducer;
5407 >        }
5408 >        public final U getRawResult() { return result; }
5409 >        public final void compute() {
5410 >            final BiFunction<? super K, ? super V, ? extends U> transformer;
5411 >            final BiFunction<? super U, ? super U, ? extends U> reducer;
5412 >            if ((transformer = this.transformer) != null &&
5413 >                (reducer = this.reducer) != null) {
5414 >                for (int i = baseIndex, f, h; batch > 0 &&
5415 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5416 >                    addToPendingCount(1);
5417 >                    (rights = new MapReduceMappingsTask<K,V,U>
5418 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5419 >                      rights, transformer, reducer)).fork();
5420 >                }
5421 >                U r = null;
5422 >                for (Node<K,V> p; (p = advance()) != null; ) {
5423 >                    U u;
5424 >                    if ((u = transformer.apply(p.key, p.val)) != null)
5425 >                        r = (r == null) ? u : reducer.apply(r, u);
5426 >                }
5427 >                result = r;
5428 >                CountedCompleter<?> c;
5429 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5430 >                    @SuppressWarnings("unchecked") MapReduceMappingsTask<K,V,U>
5431 >                        t = (MapReduceMappingsTask<K,V,U>)c,
5432 >                        s = t.rights;
5433 >                    while (s != null) {
5434 >                        U tr, sr;
5435 >                        if ((sr = s.result) != null)
5436 >                            t.result = (((tr = t.result) == null) ? sr :
5437 >                                        reducer.apply(tr, sr));
5438 >                        s = t.rights = s.nextRight;
5439 >                    }
5440 >                }
5441 >            }
5442 >        }
5443 >    }
5444 >
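MapReduceMappingsTask completes the object-valued map-reduce group, implementing reduce(long, BiFunction, BiFunction): each mapping is transformed (null results are dropped) and the survivors are folded pairwise. Sketch (data illustrative):

    ConcurrentHashMap<String,Integer> map = new ConcurrentHashMap<>();
    map.put("a", 1);
    map.put("bb", 2);
    // Longest key among mappings with a positive value; null if none qualify.
    String longest = map.reduce(1L,
        (k, v) -> v > 0 ? k : null,
        (k1, k2) -> k1.length() >= k2.length() ? k1 : k2);
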
5445 >    @SuppressWarnings("serial")
5446 >    static final class MapReduceKeysToDoubleTask<K,V>
5447 >        extends BulkTask<K,V,Double> {
5448 >        final ToDoubleFunction<? super K> transformer;
5449 >        final DoubleBinaryOperator reducer;
5450 >        final double basis;
5451 >        double result;
5452 >        MapReduceKeysToDoubleTask<K,V> rights, nextRight;
5453 >        MapReduceKeysToDoubleTask
5454 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5455 >             MapReduceKeysToDoubleTask<K,V> nextRight,
5456 >             ToDoubleFunction<? super K> transformer,
5457 >             double basis,
5458 >             DoubleBinaryOperator reducer) {
5459 >            super(p, b, i, f, t); this.nextRight = nextRight;
5460 >            this.transformer = transformer;
5461 >            this.basis = basis; this.reducer = reducer;
5462 >        }
5463 >        public final Double getRawResult() { return result; }
5464 >        public final void compute() {
5465 >            final ToDoubleFunction<? super K> transformer;
5466 >            final DoubleBinaryOperator reducer;
5467 >            if ((transformer = this.transformer) != null &&
5468 >                (reducer = this.reducer) != null) {
5469 >                double r = this.basis;
5470 >                for (int i = baseIndex, f, h; batch > 0 &&
5471 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5472 >                    addToPendingCount(1);
5473 >                    (rights = new MapReduceKeysToDoubleTask<K,V>
5474 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5475 >                      rights, transformer, r, reducer)).fork();
5476 >                }
5477 >                for (Node<K,V> p; (p = advance()) != null; )
5478 >                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key));
5479 >                result = r;
5480 >                CountedCompleter<?> c;
5481 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5482 >                    @SuppressWarnings("unchecked") MapReduceKeysToDoubleTask<K,V>
5483 >                        t = (MapReduceKeysToDoubleTask<K,V>)c,
5484 >                        s = t.rights;
5485 >                    while (s != null) {
5486 >                        t.result = reducer.applyAsDouble(t.result, s.result);
5487 >                        s = t.rights = s.nextRight;
5488 >                    }
5489 >                }
5490 >            }
5491 >        }
5492 >    }
5493 >
5494 >    @SuppressWarnings("serial")
5495 >    static final class MapReduceValuesToDoubleTask<K,V>
5496 >        extends BulkTask<K,V,Double> {
5497 >        final ToDoubleFunction<? super V> transformer;
5498 >        final DoubleBinaryOperator reducer;
5499 >        final double basis;
5500 >        double result;
5501 >        MapReduceValuesToDoubleTask<K,V> rights, nextRight;
5502 >        MapReduceValuesToDoubleTask
5503 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5504 >             MapReduceValuesToDoubleTask<K,V> nextRight,
5505 >             ToDoubleFunction<? super V> transformer,
5506 >             double basis,
5507 >             DoubleBinaryOperator reducer) {
5508 >            super(p, b, i, f, t); this.nextRight = nextRight;
5509 >            this.transformer = transformer;
5510 >            this.basis = basis; this.reducer = reducer;
5511 >        }
5512 >        public final Double getRawResult() { return result; }
5513 >        public final void compute() {
5514 >            final ToDoubleFunction<? super V> transformer;
5515 >            final DoubleBinaryOperator reducer;
5516 >            if ((transformer = this.transformer) != null &&
5517 >                (reducer = this.reducer) != null) {
5518 >                double r = this.basis;
5519 >                for (int i = baseIndex, f, h; batch > 0 &&
5520 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5521 >                    addToPendingCount(1);
5522 >                    (rights = new MapReduceValuesToDoubleTask<K,V>
5523 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5524 >                      rights, transformer, r, reducer)).fork();
5525 >                }
5526 >                for (Node<K,V> p; (p = advance()) != null; )
5527 >                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.val));
5528 >                result = r;
5529 >                CountedCompleter<?> c;
5530 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5531 >                    @SuppressWarnings("unchecked") MapReduceValuesToDoubleTask<K,V>
5532 >                        t = (MapReduceValuesToDoubleTask<K,V>)c,
5533 >                        s = t.rights;
5534 >                    while (s != null) {
5535 >                        t.result = reducer.applyAsDouble(t.result, s.result);
5536 >                        s = t.rights = s.nextRight;
5537 >                    }
5538 >                }
5539 >            }
5540 >        }
5541 >    }
5542 >
5543 >    @SuppressWarnings("serial")
5544 >    static final class MapReduceEntriesToDoubleTask<K,V>
5545 >        extends BulkTask<K,V,Double> {
5546 >        final ToDoubleFunction<Map.Entry<K,V>> transformer;
5547 >        final DoubleBinaryOperator reducer;
5548 >        final double basis;
5549 >        double result;
5550 >        MapReduceEntriesToDoubleTask<K,V> rights, nextRight;
5551 >        MapReduceEntriesToDoubleTask
5552 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5553 >             MapReduceEntriesToDoubleTask<K,V> nextRight,
5554 >             ToDoubleFunction<Map.Entry<K,V>> transformer,
5555 >             double basis,
5556 >             DoubleBinaryOperator reducer) {
5557 >            super(p, b, i, f, t); this.nextRight = nextRight;
5558 >            this.transformer = transformer;
5559 >            this.basis = basis; this.reducer = reducer;
5560 >        }
5561 >        public final Double getRawResult() { return result; }
5562 >        public final void compute() {
5563 >            final ToDoubleFunction<Map.Entry<K,V>> transformer;
5564 >            final DoubleBinaryOperator reducer;
5565 >            if ((transformer = this.transformer) != null &&
5566 >                (reducer = this.reducer) != null) {
5567 >                double r = this.basis;
5568 >                for (int i = baseIndex, f, h; batch > 0 &&
5569 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5570 >                    addToPendingCount(1);
5571 >                    (rights = new MapReduceEntriesToDoubleTask<K,V>
5572 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5573 >                      rights, transformer, r, reducer)).fork();
5574 >                }
5575 >                for (Node<K,V> p; (p = advance()) != null; )
5576 >                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p));
5577 >                result = r;
5578 >                CountedCompleter<?> c;
5579 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5580 >                    @SuppressWarnings("unchecked") MapReduceEntriesToDoubleTask<K,V>
5581 >                        t = (MapReduceEntriesToDoubleTask<K,V>)c,
5582 >                        s = t.rights;
5583 >                    while (s != null) {
5584 >                        t.result = reducer.applyAsDouble(t.result, s.result);
5585 >                        s = t.rights = s.nextRight;
5586 >                    }
5587 >                }
5588 >            }
5589 >        }
5590 >    }
5591 >
5592 >    @SuppressWarnings("serial")
5593 >    static final class MapReduceMappingsToDoubleTask<K,V>
5594 >        extends BulkTask<K,V,Double> {
5595 >        final ToDoubleBiFunction<? super K, ? super V> transformer;
5596 >        final DoubleBinaryOperator reducer;
5597 >        final double basis;
5598 >        double result;
5599 >        MapReduceMappingsToDoubleTask<K,V> rights, nextRight;
5600 >        MapReduceMappingsToDoubleTask
5601 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5602 >             MapReduceMappingsToDoubleTask<K,V> nextRight,
5603 >             ToDoubleBiFunction<? super K, ? super V> transformer,
5604 >             double basis,
5605 >             DoubleBinaryOperator reducer) {
5606 >            super(p, b, i, f, t); this.nextRight = nextRight;
5607 >            this.transformer = transformer;
5608 >            this.basis = basis; this.reducer = reducer;
5609 >        }
5610 >        public final Double getRawResult() { return result; }
5611 >        public final void compute() {
5612 >            final ToDoubleBiFunction<? super K, ? super V> transformer;
5613 >            final DoubleBinaryOperator reducer;
5614 >            if ((transformer = this.transformer) != null &&
5615 >                (reducer = this.reducer) != null) {
5616 >                double r = this.basis;
5617 >                for (int i = baseIndex, f, h; batch > 0 &&
5618 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5619 >                    addToPendingCount(1);
5620 >                    (rights = new MapReduceMappingsToDoubleTask<K,V>
5621 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5622 >                      rights, transformer, r, reducer)).fork();
5623 >                }
5624 >                for (Node<K,V> p; (p = advance()) != null; )
5625 >                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key, p.val));
5626 >                result = r;
5627 >                CountedCompleter<?> c;
5628 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5629 >                    @SuppressWarnings("unchecked") MapReduceMappingsToDoubleTask<K,V>
5630 >                        t = (MapReduceMappingsToDoubleTask<K,V>)c,
5631 >                        s = t.rights;
5632 >                    while (s != null) {
5633 >                        t.result = reducer.applyAsDouble(t.result, s.result);
5634 >                        s = t.rights = s.nextRight;
5635 >                    }
5636 >                }
5637 >            }
5638 >        }
5639 >    }
5640 >
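The four *ToDouble tasks above are primitive specializations (reduceKeysToDouble, reduceValuesToDouble, reduceEntriesToDouble, reduceToDouble). Unlike the object forms there is no null filtering; instead every task starts from the caller-supplied basis, the identity element for the reduction, so even an empty range produces a defined result. Sketch (data illustrative):

    ConcurrentHashMap<String,Integer> map = new ConcurrentHashMap<>();
    map.put("a", 2);
    map.put("b", 3);
    // Sum of the values as doubles; the basis 0.0 is the additive identity.
    double sum = map.reduceValuesToDouble(1L, Integer::doubleValue,
                                          0.0, Double::sum);
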
5641 >    @SuppressWarnings("serial")
5642 >    static final class MapReduceKeysToLongTask<K,V>
5643 >        extends BulkTask<K,V,Long> {
5644 >        final ToLongFunction<? super K> transformer;
5645 >        final LongBinaryOperator reducer;
5646 >        final long basis;
5647 >        long result;
5648 >        MapReduceKeysToLongTask<K,V> rights, nextRight;
5649 >        MapReduceKeysToLongTask
5650 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5651 >             MapReduceKeysToLongTask<K,V> nextRight,
5652 >             ToLongFunction<? super K> transformer,
5653 >             long basis,
5654 >             LongBinaryOperator reducer) {
5655 >            super(p, b, i, f, t); this.nextRight = nextRight;
5656 >            this.transformer = transformer;
5657 >            this.basis = basis; this.reducer = reducer;
5658 >        }
5659 >        public final Long getRawResult() { return result; }
5660 >        public final void compute() {
5661 >            final ToLongFunction<? super K> transformer;
5662 >            final LongBinaryOperator reducer;
5663 >            if ((transformer = this.transformer) != null &&
5664 >                (reducer = this.reducer) != null) {
5665 >                long r = this.basis;
5666 >                for (int i = baseIndex, f, h; batch > 0 &&
5667 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5668 >                    addToPendingCount(1);
5669 >                    (rights = new MapReduceKeysToLongTask<K,V>
5670 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5671 >                      rights, transformer, r, reducer)).fork();
5672 >                }
5673 >                for (Node<K,V> p; (p = advance()) != null; )
5674 >                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.key));
5675 >                result = r;
5676 >                CountedCompleter<?> c;
5677 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5678 >                    @SuppressWarnings("unchecked") MapReduceKeysToLongTask<K,V>
5679 >                        t = (MapReduceKeysToLongTask<K,V>)c,
5680 >                        s = t.rights;
5681 >                    while (s != null) {
5682 >                        t.result = reducer.applyAsLong(t.result, s.result);
5683 >                        s = t.rights = s.nextRight;
5684 >                    }
5685 >                }
5686 >            }
5687 >        }
5688 >    }
5689 >
5690 >    @SuppressWarnings("serial")
5691 >    static final class MapReduceValuesToLongTask<K,V>
5692 >        extends BulkTask<K,V,Long> {
5693 >        final ToLongFunction<? super V> transformer;
5694 >        final LongBinaryOperator reducer;
5695 >        final long basis;
5696 >        long result;
5697 >        MapReduceValuesToLongTask<K,V> rights, nextRight;
5698 >        MapReduceValuesToLongTask
5699 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5700 >             MapReduceValuesToLongTask<K,V> nextRight,
5701 >             ToLongFunction<? super V> transformer,
5702 >             long basis,
5703 >             LongBinaryOperator reducer) {
5704 >            super(p, b, i, f, t); this.nextRight = nextRight;
5705 >            this.transformer = transformer;
5706 >            this.basis = basis; this.reducer = reducer;
5707 >        }
5708 >        public final Long getRawResult() { return result; }
5709 >        public final void compute() {
5710 >            final ToLongFunction<? super V> transformer;
5711 >            final LongBinaryOperator reducer;
5712 >            if ((transformer = this.transformer) != null &&
5713 >                (reducer = this.reducer) != null) {
5714 >                long r = this.basis;
5715 >                for (int i = baseIndex, f, h; batch > 0 &&
5716 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5717 >                    addToPendingCount(1);
5718 >                    (rights = new MapReduceValuesToLongTask<K,V>
5719 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5720 >                      rights, transformer, r, reducer)).fork();
5721 >                }
5722 >                for (Node<K,V> p; (p = advance()) != null; )
5723 >                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.val));
5724 >                result = r;
5725 >                CountedCompleter<?> c;
5726 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5727 >                    @SuppressWarnings("unchecked") MapReduceValuesToLongTask<K,V>
5728 >                        t = (MapReduceValuesToLongTask<K,V>)c,
5729 >                        s = t.rights;
5730 >                    while (s != null) {
5731 >                        t.result = reducer.applyAsLong(t.result, s.result);
5732 >                        s = t.rights = s.nextRight;
5733 >                    }
5734 >                }
5735 >            }
5736 >        }
5737 >    }
5738 >
5739 >    @SuppressWarnings("serial")
5740 >    static final class MapReduceEntriesToLongTask<K,V>
5741 >        extends BulkTask<K,V,Long> {
5742 >        final ToLongFunction<Map.Entry<K,V>> transformer;
5743 >        final LongBinaryOperator reducer;
5744 >        final long basis;
5745 >        long result;
5746 >        MapReduceEntriesToLongTask<K,V> rights, nextRight;
5747 >        MapReduceEntriesToLongTask
5748 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5749 >             MapReduceEntriesToLongTask<K,V> nextRight,
5750 >             ToLongFunction<Map.Entry<K,V>> transformer,
5751 >             long basis,
5752 >             LongBinaryOperator reducer) {
5753 >            super(p, b, i, f, t); this.nextRight = nextRight;
5754 >            this.transformer = transformer;
5755 >            this.basis = basis; this.reducer = reducer;
5756 >        }
5757 >        public final Long getRawResult() { return result; }
5758 >        public final void compute() {
5759 >            final ToLongFunction<Map.Entry<K,V>> transformer;
5760 >            final LongBinaryOperator reducer;
5761 >            if ((transformer = this.transformer) != null &&
5762 >                (reducer = this.reducer) != null) {
5763 >                long r = this.basis;
5764 >                for (int i = baseIndex, f, h; batch > 0 &&
5765 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5766 >                    addToPendingCount(1);
5767 >                    (rights = new MapReduceEntriesToLongTask<K,V>
5768 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5769 >                      rights, transformer, r, reducer)).fork();
5770 >                }
5771 >                for (Node<K,V> p; (p = advance()) != null; )
5772 >                    r = reducer.applyAsLong(r, transformer.applyAsLong(p));
5773 >                result = r;
5774 >                CountedCompleter<?> c;
5775 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5776 >                    @SuppressWarnings("unchecked") MapReduceEntriesToLongTask<K,V>
5777 >                        t = (MapReduceEntriesToLongTask<K,V>)c,
5778 >                        s = t.rights;
5779 >                    while (s != null) {
5780 >                        t.result = reducer.applyAsLong(t.result, s.result);
5781 >                        s = t.rights = s.nextRight;
5782 >                    }
5783 >                }
5784 >            }
5785 >        }
5786 >    }
5787 >
5788 >    @SuppressWarnings("serial")
5789 >    static final class MapReduceMappingsToLongTask<K,V>
5790 >        extends BulkTask<K,V,Long> {
5791 >        final ToLongBiFunction<? super K, ? super V> transformer;
5792 >        final LongBinaryOperator reducer;
5793 >        final long basis;
5794 >        long result;
5795 >        MapReduceMappingsToLongTask<K,V> rights, nextRight;
5796 >        MapReduceMappingsToLongTask
5797 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5798 >             MapReduceMappingsToLongTask<K,V> nextRight,
5799 >             ToLongBiFunction<? super K, ? super V> transformer,
5800 >             long basis,
5801 >             LongBinaryOperator reducer) {
5802 >            super(p, b, i, f, t); this.nextRight = nextRight;
5803 >            this.transformer = transformer;
5804 >            this.basis = basis; this.reducer = reducer;
5805 >        }
5806 >        public final Long getRawResult() { return result; }
5807 >        public final void compute() {
5808 >            final ToLongBiFunction<? super K, ? super V> transformer;
5809 >            final LongBinaryOperator reducer;
5810 >            if ((transformer = this.transformer) != null &&
5811 >                (reducer = this.reducer) != null) {
5812 >                long r = this.basis;
5813 >                for (int i = baseIndex, f, h; batch > 0 &&
5814 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5815 >                    addToPendingCount(1);
5816 >                    (rights = new MapReduceMappingsToLongTask<K,V>
5817 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5818 >                      rights, transformer, r, reducer)).fork();
5819 >                }
5820 >                for (Node<K,V> p; (p = advance()) != null; )
5821 >                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.key, p.val));
5822 >                result = r;
5823 >                CountedCompleter<?> c;
5824 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5825 >                    @SuppressWarnings("unchecked") MapReduceMappingsToLongTask<K,V>
5826 >                        t = (MapReduceMappingsToLongTask<K,V>)c,
5827 >                        s = t.rights;
5828 >                    while (s != null) {
5829 >                        t.result = reducer.applyAsLong(t.result, s.result);
5830 >                        s = t.rights = s.nextRight;
5831 >                    }
5832 >                }
5833 >            }
5834 >        }
5835 >    }
5836 >
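The *ToLong tasks repeat the same structure for long reductions. Sketch (data illustrative):

    ConcurrentHashMap<String,Integer> map = new ConcurrentHashMap<>();
    map.put("a", 2);
    map.put("b", 5);
    // Largest value as a long; Long.MIN_VALUE is the identity for max.
    long max = map.reduceValuesToLong(1L, Integer::longValue,
                                      Long.MIN_VALUE, Math::max);
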
5837 >    @SuppressWarnings("serial")
5838 >    static final class MapReduceKeysToIntTask<K,V>
5839 >        extends BulkTask<K,V,Integer> {
5840 >        final ToIntFunction<? super K> transformer;
5841 >        final IntBinaryOperator reducer;
5842 >        final int basis;
5843 >        int result;
5844 >        MapReduceKeysToIntTask<K,V> rights, nextRight;
5845 >        MapReduceKeysToIntTask
5846 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5847 >             MapReduceKeysToIntTask<K,V> nextRight,
5848 >             ToIntFunction<? super K> transformer,
5849 >             int basis,
5850 >             IntBinaryOperator reducer) {
5851 >            super(p, b, i, f, t); this.nextRight = nextRight;
5852 >            this.transformer = transformer;
5853 >            this.basis = basis; this.reducer = reducer;
5854 >        }
5855 >        public final Integer getRawResult() { return result; }
5856 >        public final void compute() {
5857 >            final ToIntFunction<? super K> transformer;
5858 >            final IntBinaryOperator reducer;
5859 >            if ((transformer = this.transformer) != null &&
5860 >                (reducer = this.reducer) != null) {
5861 >                int r = this.basis;
5862 >                for (int i = baseIndex, f, h; batch > 0 &&
5863 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5864 >                    addToPendingCount(1);
5865 >                    (rights = new MapReduceKeysToIntTask<K,V>
5866 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5867 >                      rights, transformer, r, reducer)).fork();
5868 >                }
5869 >                for (Node<K,V> p; (p = advance()) != null; )
5870 >                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.key));
5871 >                result = r;
5872 >                CountedCompleter<?> c;
5873 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5874 >                    @SuppressWarnings("unchecked") MapReduceKeysToIntTask<K,V>
5875 >                        t = (MapReduceKeysToIntTask<K,V>)c,
5876 >                        s = t.rights;
5877 >                    while (s != null) {
5878 >                        t.result = reducer.applyAsInt(t.result, s.result);
5879 >                        s = t.rights = s.nextRight;
5880 >                    }
5881 >                }
5882 >            }
5883 >        }
5884 >    }
5885 >
5886 >    @SuppressWarnings("serial")
5887 >    static final class MapReduceValuesToIntTask<K,V>
5888 >        extends BulkTask<K,V,Integer> {
5889 >        final ToIntFunction<? super V> transformer;
5890 >        final IntBinaryOperator reducer;
5891 >        final int basis;
5892 >        int result;
5893 >        MapReduceValuesToIntTask<K,V> rights, nextRight;
5894 >        MapReduceValuesToIntTask
5895 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5896 >             MapReduceValuesToIntTask<K,V> nextRight,
5897 >             ToIntFunction<? super V> transformer,
5898 >             int basis,
5899 >             IntBinaryOperator reducer) {
5900 >            super(p, b, i, f, t); this.nextRight = nextRight;
5901 >            this.transformer = transformer;
5902 >            this.basis = basis; this.reducer = reducer;
5903 >        }
5904 >        public final Integer getRawResult() { return result; }
5905 >        public final void compute() {
5906 >            final ToIntFunction<? super V> transformer;
5907 >            final IntBinaryOperator reducer;
5908 >            if ((transformer = this.transformer) != null &&
5909 >                (reducer = this.reducer) != null) {
5910 >                int r = this.basis;
5911 >                for (int i = baseIndex, f, h; batch > 0 &&
5912 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5913 >                    addToPendingCount(1);
5914 >                    (rights = new MapReduceValuesToIntTask<K,V>
5915 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5916 >                      rights, transformer, r, reducer)).fork();
5917 >                }
5918 >                for (Node<K,V> p; (p = advance()) != null; )
5919 >                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.val));
5920 >                result = r;
5921 >                CountedCompleter<?> c;
5922 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5923 >                    @SuppressWarnings("unchecked") MapReduceValuesToIntTask<K,V>
5924 >                        t = (MapReduceValuesToIntTask<K,V>)c,
5925 >                        s = t.rights;
5926 >                    while (s != null) {
5927 >                        t.result = reducer.applyAsInt(t.result, s.result);
5928 >                        s = t.rights = s.nextRight;
5929 >                    }
5930 >                }
5931 >            }
5932 >        }
5933 >    }
5934 >
5935 >    @SuppressWarnings("serial")
5936 >    static final class MapReduceEntriesToIntTask<K,V>
5937 >        extends BulkTask<K,V,Integer> {
5938 >        final ToIntFunction<Map.Entry<K,V>> transformer;
5939 >        final IntBinaryOperator reducer;
5940 >        final int basis;
5941 >        int result;
5942 >        MapReduceEntriesToIntTask<K,V> rights, nextRight;
5943 >        MapReduceEntriesToIntTask
5944 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5945 >             MapReduceEntriesToIntTask<K,V> nextRight,
5946 >             ToIntFunction<Map.Entry<K,V>> transformer,
5947 >             int basis,
5948 >             IntBinaryOperator reducer) {
5949 >            super(p, b, i, f, t); this.nextRight = nextRight;
5950 >            this.transformer = transformer;
5951 >            this.basis = basis; this.reducer = reducer;
5952 >        }
5953 >        public final Integer getRawResult() { return result; }
5954 >        public final void compute() {
5955 >            final ToIntFunction<Map.Entry<K,V>> transformer;
5956 >            final IntBinaryOperator reducer;
5957 >            if ((transformer = this.transformer) != null &&
5958 >                (reducer = this.reducer) != null) {
5959 >                int r = this.basis;
5960 >                for (int i = baseIndex, f, h; batch > 0 &&
5961 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5962 >                    addToPendingCount(1);
5963 >                    (rights = new MapReduceEntriesToIntTask<K,V>
5964 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5965 >                      rights, transformer, r, reducer)).fork();
5966 >                }
5967 >                for (Node<K,V> p; (p = advance()) != null; )
5968 >                    r = reducer.applyAsInt(r, transformer.applyAsInt(p));
5969 >                result = r;
5970 >                CountedCompleter<?> c;
5971 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5972 >                    @SuppressWarnings("unchecked") MapReduceEntriesToIntTask<K,V>
5973 >                        t = (MapReduceEntriesToIntTask<K,V>)c,
5974 >                        s = t.rights;
5975 >                    while (s != null) {
5976 >                        t.result = reducer.applyAsInt(t.result, s.result);
5977 >                        s = t.rights = s.nextRight;
5978 >                    }
5979 >                }
5980 >            }
5981 >        }
5982 >    }
5983 >
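Likewise (sketch, not part of the patch), MapReduceEntriesToIntTask backs the public reduceEntriesToInt, whose transformer sees the whole Map.Entry rather than just the value:

    import java.util.concurrent.ConcurrentHashMap;

    public class ReduceEntriesDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
            map.put("x", 2);
            map.put("yy", 3);
            // Weight each value by its key's length, then sum.
            int weighted = map.reduceEntriesToInt(
                1L,                                       // parallelismThreshold
                e -> e.getKey().length() * e.getValue(),  // transformer
                0,                                        // basis
                Integer::sum);                            // reducer
            System.out.println(weighted); // 1*2 + 2*3 = 8
        }
    }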
5984 >    @SuppressWarnings("serial")
5985 >    static final class MapReduceMappingsToIntTask<K,V>
5986 >        extends BulkTask<K,V,Integer> {
5987 >        final ToIntBiFunction<? super K, ? super V> transformer;
5988 >        final IntBinaryOperator reducer;
5989 >        final int basis;
5990 >        int result;
5991 >        MapReduceMappingsToIntTask<K,V> rights, nextRight;
5992 >        MapReduceMappingsToIntTask
5993 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5994 >             MapReduceMappingsToIntTask<K,V> nextRight,
5995 >             ToIntBiFunction<? super K, ? super V> transformer,
5996 >             int basis,
5997 >             IntBinaryOperator reducer) {
5998 >            super(p, b, i, f, t); this.nextRight = nextRight;
5999 >            this.transformer = transformer;
6000 >            this.basis = basis; this.reducer = reducer;
6001 >        }
6002 >        public final Integer getRawResult() { return result; }
6003 >        public final void compute() {
6004 >            final ToIntBiFunction<? super K, ? super V> transformer;
6005 >            final IntBinaryOperator reducer;
6006 >            if ((transformer = this.transformer) != null &&
6007 >                (reducer = this.reducer) != null) {
6008 >                int r = this.basis;
6009 >                for (int i = baseIndex, f, h; batch > 0 &&
6010 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
6011 >                    addToPendingCount(1);
6012 >                    (rights = new MapReduceMappingsToIntTask<K,V>
6013 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
6014 >                      rights, transformer, r, reducer)).fork();
6015 >                }
6016 >                for (Node<K,V> p; (p = advance()) != null; )
6017 >                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.key, p.val));
6018 >                result = r;
6019 >                CountedCompleter<?> c;
6020 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
6021 >                    @SuppressWarnings("unchecked") MapReduceMappingsToIntTask<K,V>
6022 >                        t = (MapReduceMappingsToIntTask<K,V>)c,
6023 >                        s = t.rights;
6024 >                    while (s != null) {
6025 >                        t.result = reducer.applyAsInt(t.result, s.result);
6026 >                        s = t.rights = s.nextRight;
6027 >                    }
6028 >                }
6029 >            }
6030          }
6031      }
6032  
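MapReduceMappingsToIntTask is the (key, value) variant, reached through the public reduceToInt; its transformer is a ToIntBiFunction over both halves of each mapping. A sketch with illustrative names:

    import java.util.concurrent.ConcurrentHashMap;

    public class ReduceMappingsDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Integer> scores = new ConcurrentHashMap<>();
            scores.put("ann", 4);
            scores.put("bob", 6);
            // Fold key and value together in a single parallel pass.
            int r = scores.reduceToInt(
                1L,                        // parallelismThreshold
                (k, v) -> v - k.length(),  // transformer over (key, value)
                0,                         // basis
                Integer::sum);             // reducer
            System.out.println(r); // (4 - 3) + (6 - 3) = 4
        }
    }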
6033      // Unsafe mechanics
6034 <    private static final sun.misc.Unsafe UNSAFE;
6035 <    private static final long SBASE;
6036 <    private static final int SSHIFT;
6037 <    private static final long TBASE;
6038 <    private static final int TSHIFT;
6034 >    private static final sun.misc.Unsafe U;
6035 >    private static final long SIZECTL;
6036 >    private static final long TRANSFERINDEX;
6037 >    private static final long TRANSFERORIGIN;
6038 >    private static final long BASECOUNT;
6039 >    private static final long CELLSBUSY;
6040 >    private static final long CELLVALUE;
6041 >    private static final long ABASE;
6042 >    private static final int ASHIFT;
6043  
6044      static {
1474        int ss, ts;
6045          try {
6046 <            UNSAFE = sun.misc.Unsafe.getUnsafe();
6047 <            Class tc = HashEntry[].class;
6048 <            Class sc = Segment[].class;
6049 <            TBASE = UNSAFE.arrayBaseOffset(tc);
6050 <            SBASE = UNSAFE.arrayBaseOffset(sc);
6051 <            ts = UNSAFE.arrayIndexScale(tc);
6052 <            ss = UNSAFE.arrayIndexScale(sc);
6046 >            U = sun.misc.Unsafe.getUnsafe();
6047 >            Class<?> k = ConcurrentHashMap.class;
6048 >            SIZECTL = U.objectFieldOffset
6049 >                (k.getDeclaredField("sizeCtl"));
6050 >            TRANSFERINDEX = U.objectFieldOffset
6051 >                (k.getDeclaredField("transferIndex"));
6052 >            TRANSFERORIGIN = U.objectFieldOffset
6053 >                (k.getDeclaredField("transferOrigin"));
6054 >            BASECOUNT = U.objectFieldOffset
6055 >                (k.getDeclaredField("baseCount"));
6056 >            CELLSBUSY = U.objectFieldOffset
6057 >                (k.getDeclaredField("cellsBusy"));
6058 >            Class<?> ck = CounterCell.class;
6059 >            CELLVALUE = U.objectFieldOffset
6060 >                (ck.getDeclaredField("value"));
6061 >            Class<?> sc = Node[].class;
6062 >            ABASE = U.arrayBaseOffset(sc);
6063 >            int scale = U.arrayIndexScale(sc);
6064 >            if ((scale & (scale - 1)) != 0)
6065 >                throw new Error("data type scale not a power of two");
6066 >            ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
6067          } catch (Exception e) {
6068              throw new Error(e);
6069          }
1486        if ((ss & (ss-1)) != 0 || (ts & (ts-1)) != 0)
1487            throw new Error("data type scale not a power of two");
1488        SSHIFT = 31 - Integer.numberOfLeadingZeros(ss);
1489        TSHIFT = 31 - Integer.numberOfLeadingZeros(ts);
6070      }
1491
6071   }
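The replacement Unsafe setup above trades the old per-Segment/per-HashEntry offsets (SBASE/SSHIFT, TBASE/TSHIFT) for field offsets on the map itself plus a single (ABASE, ASHIFT) pair for Node[] indexing: slot i lives at byte offset ((long)i << ASHIFT) + ABASE, which is what the tabAt/casTabAt/setTabAt helpers elsewhere in this revision pass to the volatile array accessors. A standalone sketch of that arithmetic follows; reflecting on the theUnsafe field is a test-only workaround (Unsafe.getUnsafe() rejects callers outside the JDK), and all names below are illustrative:

    import java.lang.reflect.Field;
    import sun.misc.Unsafe;

    public class ArrayOffsetDemo {
        public static void main(String[] args) throws Exception {
            Field f = Unsafe.class.getDeclaredField("theUnsafe");
            f.setAccessible(true);
            Unsafe u = (Unsafe) f.get(null);
            Class<?> ac = Object[].class;
            long base = u.arrayBaseOffset(ac);  // ABASE analogue
            int scale = u.arrayIndexScale(ac);  // bytes per array slot
            // The static initializer insists scale is a power of two so
            // that per-index multiplication can become a shift:
            int shift = 31 - Integer.numberOfLeadingZeros(scale); // ASHIFT analogue
            int i = 3;
            long offset = ((long) i << shift) + base; // byte offset of slot i
            System.out.println("base=" + base + " scale=" + scale
                               + " offset=" + offset);
        }
    }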

Diff Legend

- Removed lines
+ Added lines
< Changed lines
> Changed lines