
Comparing jsr166/src/main/java/util/concurrent/ConcurrentHashMap.java (file contents):
Revision 1.15 by tim, Wed Aug 6 18:22:09 2003 UTC vs.
Revision 1.285 by jsr166, Sun Sep 20 17:03:22 2015 UTC

# Line 1 | Line 1
1   /*
2   * Written by Doug Lea with assistance from members of JCP JSR-166
3 < * Expert Group and released to the public domain. Use, modify, and
4 < * redistribute this code in any way without acknowledgement.
3 > * Expert Group and released to the public domain, as explained at
4 > * http://creativecommons.org/publicdomain/zero/1.0/
5   */
6  
7   package java.util.concurrent;
8 < import java.util.concurrent.locks.*;
9 < import java.util.*;
8 >
9 > import java.io.ObjectStreamField;
10   import java.io.Serializable;
11 < import java.io.IOException;
12 < import java.io.ObjectInputStream;
13 < import java.io.ObjectOutputStream;
11 > import java.lang.reflect.ParameterizedType;
12 > import java.lang.reflect.Type;
13 > import java.util.AbstractMap;
14 > import java.util.Arrays;
15 > import java.util.Collection;
16 > import java.util.Enumeration;
17 > import java.util.HashMap;
18 > import java.util.Hashtable;
19 > import java.util.Iterator;
20 > import java.util.Map;
21 > import java.util.NoSuchElementException;
22 > import java.util.Set;
23 > import java.util.Spliterator;
24 > import java.util.concurrent.atomic.AtomicReference;
25 > import java.util.concurrent.locks.LockSupport;
26 > import java.util.concurrent.locks.ReentrantLock;
27 > import java.util.function.BiConsumer;
28 > import java.util.function.BiFunction;
29 > import java.util.function.Consumer;
30 > import java.util.function.DoubleBinaryOperator;
31 > import java.util.function.Function;
32 > import java.util.function.IntBinaryOperator;
33 > import java.util.function.LongBinaryOperator;
34 > import java.util.function.Predicate;
35 > import java.util.function.ToDoubleBiFunction;
36 > import java.util.function.ToDoubleFunction;
37 > import java.util.function.ToIntBiFunction;
38 > import java.util.function.ToIntFunction;
39 > import java.util.function.ToLongBiFunction;
40 > import java.util.function.ToLongFunction;
41 > import java.util.stream.Stream;
42  
43   /**
44   * A hash table supporting full concurrency of retrievals and
45 < * adjustable expected concurrency for updates. This class obeys the
46 < * same functional specification as
47 < * <tt>java.util.Hashtable</tt>. However, even though all operations
48 < * are thread-safe, retrieval operations do <em>not</em> entail
49 < * locking, and there is <em>not</em> any support for locking the
50 < * entire table in a way that prevents all access.  This class is
51 < * fully interoperable with Hashtable in programs that rely on its
45 > * high expected concurrency for updates. This class obeys the
46 > * same functional specification as {@link java.util.Hashtable}, and
47 > * includes versions of methods corresponding to each method of
48 > * {@code Hashtable}. However, even though all operations are
49 > * thread-safe, retrieval operations do <em>not</em> entail locking,
50 > * and there is <em>not</em> any support for locking the entire table
51 > * in a way that prevents all access.  This class is fully
52 > * interoperable with {@code Hashtable} in programs that rely on its
53   * thread safety but not on its synchronization details.
54   *
55 < * <p> Retrieval operations (including <tt>get</tt>) ordinarily
56 < * overlap with update operations (including <tt>put</tt> and
57 < * <tt>remove</tt>). Retrievals reflect the results of the most
55 > * <p>Retrieval operations (including {@code get}) generally do not
56 > * block, so may overlap with update operations (including {@code put}
57 > * and {@code remove}). Retrievals reflect the results of the most
58   * recently <em>completed</em> update operations holding upon their
59 < * onset.  For aggregate operations such as <tt>putAll</tt> and
60 < * <tt>clear</tt>, concurrent retrievals may reflect insertion or
61 < * removal of only some entries.  Similarly, Iterators and
62 < * Enumerations return elements reflecting the state of the hash table
63 < * at some point at or since the creation of the iterator/enumeration.
64 < * They do <em>not</em> throw ConcurrentModificationException.
65 < * However, Iterators are designed to be used by only one thread at a
66 < * time.
67 < *
68 < * <p> The allowed concurrency among update operations is controlled
69 < * by the optional <tt>segments</tt> constructor argument (default
70 < * 16). The table is divided into this many independent parts, each of
71 < * which can be updated concurrently. Because placement in hash tables
72 < * is essentially random, the actual concurrency will vary. As a rough
73 < * rule of thumb, you should choose at least as many segments as you
74 < * expect concurrent threads. However, using more segments than you
75 < * need can waste space and time. Using a value of 1 for
76 < * <tt>segments</tt> results in a table that is concurrently readable
77 < * but can only be updated by one thread at a time.
59 > * onset. (More formally, an update operation for a given key bears a
60 > * <em>happens-before</em> relation with any (non-null) retrieval for
61 > * that key reporting the updated value.)  For aggregate operations
62 > * such as {@code putAll} and {@code clear}, concurrent retrievals may
63 > * reflect insertion or removal of only some entries.  Similarly,
64 > * Iterators, Spliterators and Enumerations return elements reflecting the
65 > * state of the hash table at some point at or since the creation of the
66 > * iterator/enumeration.  They do <em>not</em> throw {@link
67 > * java.util.ConcurrentModificationException ConcurrentModificationException}.
68 > * However, iterators are designed to be used by only one thread at a time.
69 > * Bear in mind that the results of aggregate status methods including
70 > * {@code size}, {@code isEmpty}, and {@code containsValue} are typically
71 > * useful only when a map is not undergoing concurrent updates in other threads.
72 > * Otherwise the results of these methods reflect transient states
73 > * that may be adequate for monitoring or estimation purposes, but not
74 > * for program control.
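For instance (an illustrative sketch, not part of this revision's diff; mappingCount() is defined elsewhere in this class), such results suit monitoring rather than control flow:

    ConcurrentHashMap<String,Integer> map = new ConcurrentHashMap<>();
    // Only an estimate while other threads update the map concurrently;
    // mappingCount() is preferred over size() since it returns a long.
    long approx = map.mappingCount();
    System.out.println("~" + approx + " entries (estimation only)");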
75 > *
76 > * <p>The table is dynamically expanded when there are too many
77 > * collisions (i.e., keys that have distinct hash codes but fall into
78 > * the same slot modulo the table size), with the expected average
79 > * effect of maintaining roughly two bins per mapping (corresponding
80 > * to a 0.75 load factor threshold for resizing). There may be much
81 > * variance around this average as mappings are added and removed, but
82 > * overall, this maintains a commonly accepted time/space tradeoff for
83 > * hash tables.  However, resizing this or any other kind of hash
84 > * table may be a relatively slow operation. When possible, it is a
85 > * good idea to provide a size estimate as an optional {@code
86 > * initialCapacity} constructor argument. An additional optional
87 > * {@code loadFactor} constructor argument provides a further means of
88 > * customizing initial table capacity by specifying the table density
89 > * to be used in calculating the amount of space to allocate for the
90 > * given number of elements.  Also, for compatibility with previous
91 > * versions of this class, constructors may optionally specify an
92 > * expected {@code concurrencyLevel} as an additional hint for
93 > * internal sizing.  Note that using many keys with exactly the same
94 > * {@code hashCode()} is a sure way to slow down performance of any
95 > * hash table. To ameliorate impact, when keys are {@link Comparable},
96 > * this class may use comparison order among keys to help break ties.
97 > *
98 > * <p>A {@link Set} projection of a ConcurrentHashMap may be created
99 > * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
100 > * (using {@link #keySet(Object)}) when only keys are of interest, and the
101 > * mapped values are (perhaps transiently) not used or all take the
102 > * same mapping value.
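A brief sketch of the two projections just described (illustrative usage, not part of the diff):

    // Standalone concurrent set: a ConcurrentHashMap with Boolean.TRUE values.
    Set<String> visited = ConcurrentHashMap.newKeySet();
    visited.add("node1");

    // Key view of an existing map; add() maps new keys to the given default.
    ConcurrentHashMap<String,Integer> hits = new ConcurrentHashMap<>();
    Set<String> keys = hits.keySet(0);   // KeySetView with mapped value 0
    keys.add("page");                    // like hits.putIfAbsent("page", 0)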
103 > *
104 > * <p>A ConcurrentHashMap can be used as a scalable frequency map (a
105 > * form of histogram or multiset) by using {@link
106 > * java.util.concurrent.atomic.LongAdder} values and initializing via
107 > * {@link #computeIfAbsent computeIfAbsent}. For example, to add a count
108 > * to a {@code ConcurrentHashMap<String,LongAdder> freqs}, you can use
109 > * {@code freqs.computeIfAbsent(key, k -> new LongAdder()).increment();}
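Expanded into a slightly fuller sketch (illustrative; the variable names are made up):

    ConcurrentHashMap<String,LongAdder> freqs = new ConcurrentHashMap<>();
    String word = "example";
    // computeIfAbsent atomically installs one LongAdder per key;
    // increment() then scales well under write contention.
    freqs.computeIfAbsent(word, k -> new LongAdder()).increment();
    long total = freqs.get(word).sum();  // current tally for this key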
110 > *
111 > * <p>This class and its views and iterators implement all of the
112 > * <em>optional</em> methods of the {@link Map} and {@link Iterator}
113 > * interfaces.
114 > *
115 > * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
116 > * does <em>not</em> allow {@code null} to be used as a key or value.
117 > *
118 > * <p>ConcurrentHashMaps support a set of sequential and parallel bulk
119 > * operations that, unlike most {@link Stream} methods, are designed
120 > * to be safely, and often sensibly, applied even with maps that are
121 > * being concurrently updated by other threads; for example, when
122 > * computing a snapshot summary of the values in a shared registry.
123 > * There are three kinds of operation, each with four forms, accepting
124 > * functions with keys, values, entries, and (key, value) pairs as
125 > * arguments and/or return values. Because the elements of a
126 > * ConcurrentHashMap are not ordered in any particular way, and may be
127 > * processed in different orders in different parallel executions, the
128 > * correctness of supplied functions should not depend on any
129 > * ordering, or on any other objects or values that may transiently
130 > * change while computation is in progress; and except for forEach
131 > * actions, should ideally be side-effect-free. Bulk operations on
132 > * {@link java.util.Map.Entry} objects do not support method {@code
133 > * setValue}.
134 > *
135 > * <ul>
136 > * <li>forEach: Performs a given action on each element.
137 > * A variant form applies a given transformation on each element
138 > * before performing the action.
139 > *
140 > * <li>search: Returns the first available non-null result of
141 > * applying a given function on each element; skipping further
142 > * search when a result is found.
143 > *
144 > * <li>reduce: Accumulates each element.  The supplied reduction
145 > * function cannot rely on ordering (more formally, it should be
146 > * both associative and commutative).  There are five variants:
147 > *
148 > * <ul>
149 > *
150 > * <li>Plain reductions. (There is not a form of this method for
151 > * (key, value) function arguments since there is no corresponding
152 > * return type.)
153 > *
154 > * <li>Mapped reductions that accumulate the results of a given
155 > * function applied to each element.
156 > *
157 > * <li>Reductions to scalar doubles, longs, and ints, using a
158 > * given basis value.
159 > *
160 > * </ul>
161 > * </ul>
162 > *
163 > * <p>These bulk operations accept a {@code parallelismThreshold}
164 > * argument. Methods proceed sequentially if the current map size is
165 > * estimated to be less than the given threshold. Using a value of
166 > * {@code Long.MAX_VALUE} suppresses all parallelism.  Using a value
167 > * of {@code 1} results in maximal parallelism by partitioning into
168 > * enough subtasks to fully utilize the {@link
169 > * ForkJoinPool#commonPool()} that is used for all parallel
170 > * computations. Normally, you would initially choose one of these
171 > * extreme values, and then measure performance of using in-between
172 > * values that trade off overhead versus throughput.
173 > *
174 > * <p>The concurrency properties of bulk operations follow
175 > * from those of ConcurrentHashMap: Any non-null result returned
176 > * from {@code get(key)} and related access methods bears a
177 > * happens-before relation with the associated insertion or
178 > * update.  The result of any bulk operation reflects the
179 > * composition of these per-element relations (but is not
180 > * necessarily atomic with respect to the map as a whole unless it
181 > * is somehow known to be quiescent).  Conversely, because keys
182 > * and values in the map are never null, null serves as a reliable
183 > * atomic indicator of the current lack of any result.  To
184 > * maintain this property, null serves as an implicit basis for
185 > * all non-scalar reduction operations. For the double, long, and
186 > * int versions, the basis should be one that, when combined with
187 > * any other value, returns that other value (more formally, it
188 > * should be the identity element for the reduction). Most common
189 > * reductions have these properties; for example, computing a sum
190 > * with basis 0 or a minimum with basis MAX_VALUE.
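Putting the preceding points together, a hedged sketch of the bulk-operation API (real method names; the map contents are made up):

    ConcurrentHashMap<String,Integer> scores = new ConcurrentHashMap<>();
    scores.put("a", 1);
    scores.put("b", 200);

    // forEach: threshold Long.MAX_VALUE forces sequential execution.
    scores.forEach(Long.MAX_VALUE, (k, v) -> System.out.println(k + "=" + v));

    // search: first non-null result ends the search; threshold 1 is
    // maximally parallel (subtasks run in the common pool).
    String big = scores.search(1L, (k, v) -> v > 100 ? k : null);

    // reduce to a scalar long: basis 0 is the identity for sum.
    long sum = scores.reduceValuesToLong(1L, Integer::longValue, 0L, Long::sum);

    // minimum with identity basis MAX_VALUE, as described above.
    int min = scores.reduceValuesToInt(1L, Integer::intValue,
                                       Integer.MAX_VALUE, Integer::min);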
191 > *
192 > * <p>Search and transformation functions provided as arguments
193 > * should similarly return null to indicate the lack of any result
194 > * (in which case it is not used). In the case of mapped
195 > * reductions, this also enables transformations to serve as
196 > * filters, returning null (or, in the case of primitive
197 > * specializations, the identity basis) if the element should not
198 > * be combined. You can create compound transformations and
199 > * filterings by composing them yourself under this "null means
200 > * there is nothing there now" rule before using them in search or
201 > * reduce operations.
202 > *
203 > * <p>Methods accepting and/or returning Entry arguments maintain
204 > * key-value associations. They may be useful for example when
205 > * finding the key for the greatest value. Note that "plain" Entry
206 > * arguments can be supplied using {@code new
207 > * AbstractMap.SimpleEntry(k,v)}.
208 > *
209 > * <p>Bulk operations may complete abruptly, throwing an
210 > * exception encountered in the application of a supplied
211 > * function. Bear in mind when handling such exceptions that other
212 > * concurrently executing functions could also have thrown
213 > * exceptions, or would have done so if the first exception had
214 > * not occurred.
215 > *
216 > * <p>Speedups for parallel compared to sequential forms are common
217 > * but not guaranteed.  Parallel operations involving brief functions
218 > * on small maps may execute more slowly than sequential forms if the
219 > * underlying work to parallelize the computation is more expensive
220 > * than the computation itself.  Similarly, parallelization may not
221 > * lead to much actual parallelism if all processors are busy
222 > * performing unrelated tasks.
223 > *
224 > * <p>All arguments to all task methods must be non-null.
225   *
226 < * <p> Like Hashtable but unlike java.util.HashMap, this class does
227 < * NOT allow <tt>null</tt> to be used as a key or value.
226 > * <p>This class is a member of the
227 > * <a href="{@docRoot}/../technotes/guides/collections/index.html">
228 > * Java Collections Framework</a>.
229   *
230   * @since 1.5
231   * @author Doug Lea
232 + * @param <K> the type of keys maintained by this map
233 + * @param <V> the type of mapped values
234   */
235 < public class ConcurrentHashMap<K, V> extends AbstractMap<K, V>
236 <        implements ConcurrentMap<K, V>, Cloneable, Serializable {
235 > public class ConcurrentHashMap<K,V> extends AbstractMap<K,V>
236 >    implements ConcurrentMap<K,V>, Serializable {
237 >    private static final long serialVersionUID = 7249069246763182397L;
238  
239      /*
240 <     * The basic strategy is to subdivide the table among Segments,
241 <     * each of which itself is a concurrently readable hash table.
240 >     * Overview:
241 >     *
242 >     * The primary design goal of this hash table is to maintain
243 >     * concurrent readability (typically method get(), but also
244 >     * iterators and related methods) while minimizing update
245 >     * contention. Secondary goals are to keep space consumption about
246 >     * the same or better than java.util.HashMap, and to support high
247 >     * initial insertion rates on an empty table by many threads.
248 >     *
249 >     * This map usually acts as a binned (bucketed) hash table.  Each
250 >     * key-value mapping is held in a Node.  Most nodes are instances
251 >     * of the basic Node class with hash, key, value, and next
252 >     * fields. However, various subclasses exist: TreeNodes are
253 >     * arranged in balanced trees, not lists.  TreeBins hold the roots
254 >     * of sets of TreeNodes. ForwardingNodes are placed at the heads
255 >     * of bins during resizing. ReservationNodes are used as
256 >     * placeholders while establishing values in computeIfAbsent and
257 >     * related methods.  The types TreeBin, ForwardingNode, and
258 >     * ReservationNode do not hold normal user keys, values, or
259 >     * hashes, and are readily distinguishable during search etc
260 >     * because they have negative hash fields and null key and value
261 >     * fields. (These special nodes are either uncommon or transient,
262 >     * so the impact of carrying around some unused fields is
263 >     * insignificant.)
264 >     *
265 >     * The table is lazily initialized to a power-of-two size upon the
266 >     * first insertion.  Each bin in the table normally contains a
267 >     * list of Nodes (most often, the list has only zero or one Node).
268 >     * Table accesses require volatile/atomic reads, writes, and
269 >     * CASes.  Because there is no other way to arrange this without
270 >     * adding further indirections, we use intrinsics
271 >     * (sun.misc.Unsafe) operations.
272 >     *
273 >     * We use the top (sign) bit of Node hash fields for control
274 >     * purposes -- it is available anyway because of addressing
275 >     * constraints.  Nodes with negative hash fields are specially
276 >     * handled or ignored in map methods.
277 >     *
278 >     * Insertion (via put or its variants) of the first node in an
279 >     * empty bin is performed by just CASing it to the bin.  This is
280 >     * by far the most common case for put operations under most
281 >     * key/hash distributions.  Other update operations (insert,
282 >     * delete, and replace) require locks.  We do not want to waste
283 >     * the space required to associate a distinct lock object with
284 >     * each bin, so instead use the first node of a bin list itself as
285 >     * a lock. Locking support for these locks relies on builtin
286 >     * "synchronized" monitors.
287 >     *
288 >     * Using the first node of a list as a lock does not by itself
289 >     * suffice though: When a node is locked, any update must first
290 >     * validate that it is still the first node after locking it, and
291 >     * retry if not. Because new nodes are always appended to lists,
292 >     * once a node is first in a bin, it remains first until deleted
293 >     * or the bin becomes invalidated (upon resizing).
294 >     *
295 >     * The main disadvantage of per-bin locks is that other update
296 >     * operations on other nodes in a bin list protected by the same
297 >     * lock can stall, for example when user equals() or mapping
298 >     * functions take a long time.  However, statistically, under
299 >     * random hash codes, this is not a common problem.  Ideally, the
300 >     * frequency of nodes in bins follows a Poisson distribution
301 >     * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
302 >     * parameter of about 0.5 on average, given the resizing threshold
303 >     * of 0.75, although with a large variance because of resizing
304 >     * granularity. Ignoring variance, the expected occurrences of
305 >     * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
306 >     * first values are:
307 >     *
308 >     * 0:    0.60653066
309 >     * 1:    0.30326533
310 >     * 2:    0.07581633
311 >     * 3:    0.01263606
312 >     * 4:    0.00157952
313 >     * 5:    0.00015795
314 >     * 6:    0.00001316
315 >     * 7:    0.00000094
316 >     * 8:    0.00000006
317 >     * more: less than 1 in ten million
318 >     *
319 >     * Lock contention probability for two threads accessing distinct
320 >     * elements is roughly 1 / (8 * #elements) under random hashes.
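The probabilities above can be regenerated in a few lines (an illustrative check of the arithmetic, not part of the source):

    // Poisson(0.5): P(k) = exp(-0.5) * 0.5^k / k!, computed incrementally.
    double p = Math.exp(-0.5);
    for (int k = 0; k <= 8; k++) {
        System.out.printf("%d: %10.8f%n", k, p);
        p = p * 0.5 / (k + 1);   // next term: multiply by lambda / (k + 1)
    }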
321 >     *
322 >     * Actual hash code distributions encountered in practice
323 >     * sometimes deviate significantly from uniform randomness.  This
324 >     * includes the case when N > (1<<30), so some keys MUST collide.
325 >     * Similarly for dumb or hostile usages in which multiple keys are
326 >     * designed to have identical hash codes or ones that differ only
327 >     * in masked-out high bits. So we use a secondary strategy that
328 >     * applies when the number of nodes in a bin exceeds a
329 >     * threshold. These TreeBins use a balanced tree to hold nodes (a
330 >     * specialized form of red-black trees), bounding search time to
331 >     * O(log N).  Each search step in a TreeBin is at least twice as
332 >     * slow as in a regular list, but given that N cannot exceed
333 >     * (1<<64) (before running out of addresses) this bounds search
334 >     * steps, lock hold times, etc, to reasonable constants (roughly
335 >     * 100 nodes inspected per operation worst case) so long as keys
336 >     * are Comparable (which is very common -- String, Long, etc).
337 >     * TreeBin nodes (TreeNodes) also maintain the same "next"
338 >     * traversal pointers as regular nodes, so can be traversed in
339 >     * iterators in the same way.
340 >     *
341 >     * The table is resized when occupancy exceeds a percentage
342 >     * threshold (nominally, 0.75, but see below).  Any thread
343 >     * noticing an overfull bin may assist in resizing after the
344 >     * initiating thread allocates and sets up the replacement array.
345 >     * However, rather than stalling, these other threads may proceed
346 >     * with insertions etc.  The use of TreeBins shields us from the
347 >     * worst case effects of overfilling while resizes are in
348 >     * progress.  Resizing proceeds by transferring bins, one by one,
349 >     * from the table to the next table. However, threads claim small
350 >     * blocks of indices to transfer (via field transferIndex) before
351 >     * doing so, reducing contention.  A generation stamp in field
352 >     * sizeCtl ensures that resizings do not overlap. Because we are
353 >     * using power-of-two expansion, the elements from each bin must
354 >     * either stay at same index, or move with a power of two
355 >     * offset. We eliminate unnecessary node creation by catching
356 >     * cases where old nodes can be reused because their next fields
357 >     * won't change.  On average, only about one-sixth of them need
358 >     * cloning when a table doubles. The nodes they replace will be
359 >     * garbage collectable as soon as they are no longer referenced by
360 >     * any reader thread that may be in the midst of concurrently
361 >     * traversing table.  Upon transfer, the old table bin contains
362 >     * only a special forwarding node (with hash field "MOVED") that
363 >     * contains the next table as its key. On encountering a
364 >     * forwarding node, access and update operations restart, using
365 >     * the new table.
366 >     *
367 >     * Each bin transfer requires its bin lock, which can stall
368 >     * waiting for locks while resizing. However, because other
369 >     * threads can join in and help resize rather than contend for
370 >     * locks, average aggregate waits become shorter as resizing
371 >     * progresses.  The transfer operation must also ensure that all
372 >     * accessible bins in both the old and new table are usable by any
373 >     * traversal.  This is arranged in part by proceeding from the
374 >     * last bin (table.length - 1) up towards the first.  Upon seeing
375 >     * a forwarding node, traversals (see class Traverser) arrange to
376 >     * move to the new table without revisiting nodes.  To ensure that
377 >     * no intervening nodes are skipped even when moved out of order,
378 >     * a stack (see class TableStack) is created on first encounter of
379 >     * a forwarding node during a traversal, to maintain its place if
380 >     * later processing the current table. The need for these
381 >     * save/restore mechanics is relatively rare, but when one
382 >     * forwarding node is encountered, typically many more will be.
383 >     * So Traversers use a simple caching scheme to avoid creating so
384 >     * many new TableStack nodes. (Thanks to Peter Levart for
385 >     * suggesting use of a stack here.)
386 >     *
387 >     * The traversal scheme also applies to partial traversals of
388 >     * ranges of bins (via an alternate Traverser constructor)
389 >     * to support partitioned aggregate operations.  Also, read-only
390 >     * operations give up if ever forwarded to a null table, which
391 >     * provides support for shutdown-style clearing, which is also not
392 >     * currently implemented.
393 >     *
394 >     * Lazy table initialization minimizes footprint until first use,
395 >     * and also avoids resizings when the first operation is from a
396 >     * putAll, constructor with map argument, or deserialization.
397 >     * These cases attempt to override the initial capacity settings,
398 >     * but harmlessly fail to take effect in cases of races.
399 >     *
400 >     * The element count is maintained using a specialization of
401 >     * LongAdder. We need to incorporate a specialization rather than
402 >     * just use a LongAdder in order to access implicit
403 >     * contention-sensing that leads to creation of multiple
404 >     * CounterCells.  The counter mechanics avoid contention on
405 >     * updates but can encounter cache thrashing if read too
406 >     * frequently during concurrent access. To avoid reading so often,
407 >     * resizing under contention is attempted only upon adding to a
408 >     * bin already holding two or more nodes. Under uniform hash
409 >     * distributions, the probability of this occurring at threshold
410 >     * is around 13%, meaning that only about 1 in 8 puts check
411 >     * threshold (and after resizing, many fewer do so).
412 >     *
413 >     * TreeBins use a special form of comparison for search and
414 >     * related operations (which is the main reason we cannot use
415 >     * existing collections such as TreeMaps). TreeBins contain
416 >     * Comparable elements, but may contain others, as well as
417 >     * elements that are Comparable but not necessarily Comparable for
418 >     * the same T, so we cannot invoke compareTo among them. To handle
419 >     * this, the tree is ordered primarily by hash value, then by
420 >     * Comparable.compareTo order if applicable.  On lookup at a node,
421 >     * if elements are not comparable or compare as 0 then both left
422 >     * and right children may need to be searched in the case of tied
423 >     * hash values. (This corresponds to the full list search that
424 >     * would be necessary if all elements were non-Comparable and had
425 >     * tied hashes.) On insertion, to keep a total ordering (or as
426 >     * close as is required here) across rebalancings, we compare
427 >     * classes and identityHashCodes as tie-breakers. The red-black
428 >     * balancing code is updated from pre-jdk-collections
429 >     * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
430 >     * based in turn on Cormen, Leiserson, and Rivest "Introduction to
431 >     * Algorithms" (CLR).
432 >     *
433 >     * TreeBins also require an additional locking mechanism.  While
434 >     * list traversal is always possible by readers even during
435 >     * updates, tree traversal is not, mainly because of tree-rotations
436 >     * that may change the root node and/or its linkages.  TreeBins
437 >     * include a simple read-write lock mechanism parasitic on the
438 >     * main bin-synchronization strategy: Structural adjustments
439 >     * associated with an insertion or removal are already bin-locked
440 >     * (and so cannot conflict with other writers) but must wait for
441 >     * ongoing readers to finish. Since there can be only one such
442 >     * waiter, we use a simple scheme using a single "waiter" field to
443 >     * block writers.  However, readers need never block.  If the root
444 >     * lock is held, they proceed along the slow traversal path (via
445 >     * next-pointers) until the lock becomes available or the list is
446 >     * exhausted, whichever comes first. These cases are not fast, but
447 >     * maximize aggregate expected throughput.
448 >     *
449 >     * Maintaining API and serialization compatibility with previous
450 >     * versions of this class introduces several oddities. Mainly: We
451 >     * leave untouched but unused constructor arguments referring to
452 >     * concurrencyLevel. We accept a loadFactor constructor argument,
453 >     * but apply it only to initial table capacity (which is the only
454 >     * time that we can guarantee to honor it.) We also declare an
455 >     * unused "Segment" class that is instantiated in minimal form
456 >     * only when serializing.
457 >     *
458 >     * Also, solely for compatibility with previous versions of this
459 >     * class, it extends AbstractMap, even though all of its methods
460 >     * are overridden, so it is just useless baggage.
461 >     *
462 >     * This file is organized to make things a little easier to follow
463 >     * while reading than they might otherwise: First the main static
464 >     * declarations and utilities, then fields, then main public
465 >     * methods (with a few factorings of multiple public methods into
466 >     * internal ones), then sizing methods, trees, traversers, and
467 >     * bulk operations.
468       */
469  
470      /* ---------------- Constants -------------- */
471  
472      /**
473 <     * The default initial number of table slots for this table (32).
474 <     * Used when not otherwise specified in constructor.
473 >     * The largest possible table capacity.  This value must be
474 >     * exactly 1<<30 to stay within Java array allocation and indexing
475 >     * bounds for power of two table sizes, and is further required
476 >     * because the top two bits of 32bit hash fields are used for
477 >     * control purposes.
478 >     */
479 >    private static final int MAXIMUM_CAPACITY = 1 << 30;
480 >
481 >    /**
482 >     * The default initial table capacity.  Must be a power of 2
483 >     * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
484 >     */
485 >    private static final int DEFAULT_CAPACITY = 16;
486 >
487 >    /**
488 >     * The largest possible (non-power of two) array size.
489 >     * Needed by toArray and related methods.
490 >     */
491 >    static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
492 >
493 >    /**
494 >     * The default concurrency level for this table. Unused but
495 >     * defined for compatibility with previous versions of this class.
496 >     */
497 >    private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
498 >
499 >    /**
500 >     * The load factor for this table. Overrides of this value in
501 >     * constructors affect only the initial table capacity.  The
502 >     * actual floating point value isn't normally used -- it is
503 >     * simpler to use expressions such as {@code n - (n >>> 2)} for
504 >     * the associated resizing threshold.
505 >     */
506 >    private static final float LOAD_FACTOR = 0.75f;
507 >
508 >    /**
509 >     * The bin count threshold for using a tree rather than list for a
510 >     * bin.  Bins are converted to trees when adding an element to a
511 >     * bin with at least this many nodes. The value must be greater
512 >     * than 2, and should be at least 8 to mesh with assumptions in
513 >     * tree removal about conversion back to plain bins upon
514 >     * shrinkage.
515 >     */
516 >    static final int TREEIFY_THRESHOLD = 8;
517 >
518 >    /**
519 >     * The bin count threshold for untreeifying a (split) bin during a
520 >     * resize operation. Should be less than TREEIFY_THRESHOLD, and at
521 >     * most 6 to mesh with shrinkage detection under removal.
522 >     */
523 >    static final int UNTREEIFY_THRESHOLD = 6;
524 >
525 >    /**
526 >     * The smallest table capacity for which bins may be treeified.
527 >     * (Otherwise the table is resized if too many nodes in a bin.)
528 >     * The value should be at least 4 * TREEIFY_THRESHOLD to avoid
529 >     * conflicts between resizing and treeification thresholds.
530 >     */
531 >    static final int MIN_TREEIFY_CAPACITY = 64;
532 >
533 >    /**
534 >     * Minimum number of rebinnings per transfer step. Ranges are
535 >     * subdivided to allow multiple resizer threads.  This value
536 >     * serves as a lower bound to avoid resizers encountering
537 >     * excessive memory contention.  The value should be at least
538 >     * DEFAULT_CAPACITY.
539 >     */
540 >    private static final int MIN_TRANSFER_STRIDE = 16;
541 >
542 >    /**
543 >     * The number of bits used for generation stamp in sizeCtl.
544 >     * Must be at least 6 for 32bit arrays.
545 >     */
546 >    private static final int RESIZE_STAMP_BITS = 16;
547 >
548 >    /**
549 >     * The maximum number of threads that can help resize.
550 >     * Must fit in 32 - RESIZE_STAMP_BITS bits.
551 >     */
552 >    private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;
553 >
554 >    /**
555 >     * The bit shift for recording size stamp in sizeCtl.
556 >     */
557 >    private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;
558 >
559 >    /*
560 >     * Encodings for Node hash fields. See above for explanation.
561 >     */
562 >    static final int MOVED     = -1; // hash for forwarding nodes
563 >    static final int TREEBIN   = -2; // hash for roots of trees
564 >    static final int RESERVED  = -3; // hash for transient reservations
565 >    static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
566 >
567 >    /** Number of CPUs, to place bounds on some sizings */
568 >    static final int NCPU = Runtime.getRuntime().availableProcessors();
569 >
570 >    /** For serialization compatibility. */
571 >    private static final ObjectStreamField[] serialPersistentFields = {
572 >        new ObjectStreamField("segments", Segment[].class),
573 >        new ObjectStreamField("segmentMask", Integer.TYPE),
574 >        new ObjectStreamField("segmentShift", Integer.TYPE),
575 >    };
576 >
577 >    /* ---------------- Nodes -------------- */
578 >
579 >    /**
580 >     * Key-value entry.  This class is never exported out as a
581 >     * user-mutable Map.Entry (i.e., one supporting setValue; see
582 >     * MapEntry below), but can be used for read-only traversals used
583 >     * in bulk tasks.  Subclasses of Node with a negative hash field
584 >     * are special, and contain null keys and values (but are never
585 >     * exported).  Otherwise, keys and vals are never null.
586 >     */
587 >    static class Node<K,V> implements Map.Entry<K,V> {
588 >        final int hash;
589 >        final K key;
590 >        volatile V val;
591 >        volatile Node<K,V> next;
592 >
593 >        Node(int hash, K key, V val, Node<K,V> next) {
594 >            this.hash = hash;
595 >            this.key = key;
596 >            this.val = val;
597 >            this.next = next;
598 >        }
599 >
600 >        public final K getKey()     { return key; }
601 >        public final V getValue()   { return val; }
602 >        public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
603 >        public final String toString() {
604 >            return Helpers.mapEntryToString(key, val);
605 >        }
606 >        public final V setValue(V value) {
607 >            throw new UnsupportedOperationException();
608 >        }
609 >
610 >        public final boolean equals(Object o) {
611 >            Object k, v, u; Map.Entry<?,?> e;
612 >            return ((o instanceof Map.Entry) &&
613 >                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
614 >                    (v = e.getValue()) != null &&
615 >                    (k == key || k.equals(key)) &&
616 >                    (v == (u = val) || v.equals(u)));
617 >        }
618 >
619 >        /**
620 >         * Virtualized support for map.get(); overridden in subclasses.
621 >         */
622 >        Node<K,V> find(int h, Object k) {
623 >            Node<K,V> e = this;
624 >            if (k != null) {
625 >                do {
626 >                    K ek;
627 >                    if (e.hash == h &&
628 >                        ((ek = e.key) == k || (ek != null && k.equals(ek))))
629 >                        return e;
630 >                } while ((e = e.next) != null);
631 >            }
632 >            return null;
633 >        }
634 >    }
635 >
636 >    /* ---------------- Static utilities -------------- */
637 >
638 >    /**
639 >     * Spreads (XORs) higher bits of hash to lower and also forces top
640 >     * bit to 0. Because the table uses power-of-two masking, sets of
641 >     * hashes that vary only in bits above the current mask will
642 >     * always collide. (Among known examples are sets of Float keys
643 >     * holding consecutive whole numbers in small tables.)  So we
644 >     * apply a transform that spreads the impact of higher bits
645 >     * downward. There is a tradeoff between speed, utility, and
646 >     * quality of bit-spreading. Because many common sets of hashes
647 >     * are already reasonably distributed (so don't benefit from
648 >     * spreading), and because we use trees to handle large sets of
649 >     * collisions in bins, we just XOR some shifted bits in the
650 >     * cheapest possible way to reduce systematic lossage, as well as
651 >     * to incorporate impact of the highest bits that would otherwise
652 >     * never be used in index calculations because of table bounds.
653       */
654 <    private static int DEFAULT_INITIAL_CAPACITY = 16;
654 >    static final int spread(int h) {
655 >        return (h ^ (h >>> 16)) & HASH_BITS;
656 >    }
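To make the Float example above concrete (an illustrative check; a 256-bin table is assumed):

    // Consecutive whole-number Float keys differ only in high hash bits,
    // so the unmixed index (h & (n-1)) puts them all in bin 0.
    int mask = 255;                           // 256-bin table
    for (float f = 1f; f <= 4f; f++) {
        int h = Float.floatToIntBits(f);      // == Float.hashCode(f)
        System.out.printf("%.1f: raw bin %d, spread bin %d%n",
                          f, h & mask, spread(h) & mask);
    }
    // raw bins: 0, 0, 0, 0; spread bins: 128, 0, 64, 128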
657  
658      /**
659 <     * The maximum capacity, used if a higher value is implicitly
660 <     * specified by either of the constructors with arguments.  MUST
75 <     * be a power of two <= 1<<30.
659 >     * Returns a power of two table size for the given desired capacity.
660 >     * See Hacker's Delight, sec 3.2
661       */
662 <    static final int MAXIMUM_CAPACITY = 1 << 30;
662 >    private static final int tableSizeFor(int c) {
663 >        int n = c - 1;
664 >        n |= n >>> 1;
665 >        n |= n >>> 2;
666 >        n |= n >>> 4;
667 >        n |= n >>> 8;
668 >        n |= n >>> 16;
669 >        return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
670 >    }
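A quick check of the rounding behavior (illustrative; assumes access to the private method):

    assert tableSizeFor(1)  == 1;
    assert tableSizeFor(16) == 16;   // already a power of two
    assert tableSizeFor(17) == 32;   // 16 smears to 0b11111 = 31, then +1
    assert tableSizeFor(25) == 32;
    assert tableSizeFor(MAXIMUM_CAPACITY + 1) == MAXIMUM_CAPACITY; // clamped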
671  
672      /**
673 <     * The default load factor for this table.  Used when not
674 <     * otherwise specified in constructor.
673 >     * Returns x's Class if it is of the form "class C implements
674 >     * Comparable<C>", else null.
675       */
676 <    static final float DEFAULT_LOAD_FACTOR = 0.75f;
676 >    static Class<?> comparableClassFor(Object x) {
677 >        if (x instanceof Comparable) {
678 >            Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
679 >            if ((c = x.getClass()) == String.class) // bypass checks
680 >                return c;
681 >            if ((ts = c.getGenericInterfaces()) != null) {
682 >                for (int i = 0; i < ts.length; ++i) {
683 >                    if (((t = ts[i]) instanceof ParameterizedType) &&
684 >                        ((p = (ParameterizedType)t).getRawType() ==
685 >                         Comparable.class) &&
686 >                        (as = p.getActualTypeArguments()) != null &&
687 >                        as.length == 1 && as[0] == c) // type arg is c
688 >                        return c;
689 >                }
690 >            }
691 >        }
692 >        return null;
693 >    }
694  
695      /**
696 <     * The default number of concurrency control segments.
697 <     **/
698 <    private static final int DEFAULT_SEGMENTS = 16;
696 >     * Returns k.compareTo(x) if x matches kc (k's screened comparable
697 >     * class), else 0.
698 >     */
699 >    @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
700 >    static int compareComparables(Class<?> kc, Object k, Object x) {
701 >        return (x == null || x.getClass() != kc ? 0 :
702 >                ((Comparable)k).compareTo(x));
703 >    }
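How the two screening helpers combine (illustrative):

    // String implements Comparable<String>, so it passes the screen:
    Class<?> kc = comparableClassFor("a");      // -> String.class (fast path)
    int c1 = compareComparables(kc, "a", "b");  // -> "a".compareTo("b") < 0
    int c2 = compareComparables(kc, "a", 42);   // -> 0: class mismatch, so
                                                // tied nodes fall back to the
                                                // identityHashCode tie-breaker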
704 >
705 >    /* ---------------- Table element access -------------- */
706 >
707 >    /*
708 >     * Volatile access methods are used for table elements as well as
709 >     * elements of in-progress next table while resizing.  All uses of
710 >     * the tab arguments must be null checked by callers.  All callers
711 >     * also paranoically precheck that tab's length is not zero (or an
712 >     * equivalent check), thus ensuring that any index argument taking
713 >     * the form of a hash value anded with (length - 1) is a valid
714 >     * index.  Note that, to be correct wrt arbitrary concurrency
715 >     * errors by users, these checks must operate on local variables,
716 >     * which accounts for some odd-looking inline assignments below.
717 >     * Note that calls to setTabAt always occur within locked regions,
718 >     * and so in principle require only release ordering, not
719 >     * full volatile semantics, but are currently coded as volatile
720 >     * writes to be conservative.
721 >     */
722 >
723 >    @SuppressWarnings("unchecked")
724 >    static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
725 >        return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
726 >    }
727 >
728 >    static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
729 >                                        Node<K,V> c, Node<K,V> v) {
730 >        return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
731 >    }
732 >
733 >    static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
734 >        U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v);
735 >    }
736  
737      /* ---------------- Fields -------------- */
738  
739      /**
740 <     * Mask value for indexing into segments. The upper bits of a
741 <     * key's hash code are used to choose the segment.
742 <     **/
743 <    private final int segmentMask;
740 >     * The array of bins. Lazily initialized upon first insertion.
741 >     * Size is always a power of two. Accessed directly by iterators.
742 >     */
743 >    transient volatile Node<K,V>[] table;
744 >
745 >    /**
746 >     * The next table to use; non-null only while resizing.
747 >     */
748 >    private transient volatile Node<K,V>[] nextTable;
749 >
750 >    /**
751 >     * Base counter value, used mainly when there is no contention,
752 >     * but also as a fallback during table initialization
753 >     * races. Updated via CAS.
754 >     */
755 >    private transient volatile long baseCount;
756 >
757 >    /**
758 >     * Table initialization and resizing control.  When negative, the
759 >     * table is being initialized or resized: -1 for initialization,
760 >     * else -(1 + the number of active resizing threads).  Otherwise,
761 >     * when table is null, holds the initial table size to use upon
762 >     * creation, or 0 for default. After initialization, holds the
763 >     * next element count value upon which to resize the table.
764 >     */
765 >    private transient volatile int sizeCtl;
766 >
767 >    /**
768 >     * The next table index (plus one) to split while resizing.
769 >     */
770 >    private transient volatile int transferIndex;
771 >
772 >    /**
773 >     * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
774 >     */
775 >    private transient volatile int cellsBusy;
776 >
777 >    /**
778 >     * Table of counter cells. When non-null, size is a power of 2.
779 >     */
780 >    private transient volatile CounterCell[] counterCells;
781 >
782 >    // views
783 >    private transient KeySetView<K,V> keySet;
784 >    private transient ValuesView<K,V> values;
785 >    private transient EntrySetView<K,V> entrySet;
786 >
787 >
788 >    /* ---------------- Public operations -------------- */
789 >
790 >    /**
791 >     * Creates a new, empty map with the default initial table size (16).
792 >     */
793 >    public ConcurrentHashMap() {
794 >    }
795 >
796 >    /**
797 >     * Creates a new, empty map with an initial table size
798 >     * accommodating the specified number of elements without the need
799 >     * to dynamically resize.
800 >     *
801 >     * @param initialCapacity The implementation performs internal
802 >     * sizing to accommodate this many elements.
803 >     * @throws IllegalArgumentException if the initial capacity of
804 >     * elements is negative
805 >     */
806 >    public ConcurrentHashMap(int initialCapacity) {
807 >        if (initialCapacity < 0)
808 >            throw new IllegalArgumentException();
809 >        int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
810 >                   MAXIMUM_CAPACITY :
811 >                   tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
812 >        this.sizeCtl = cap;
813 >    }
814 >
815 >    /**
816 >     * Creates a new map with the same mappings as the given map.
817 >     *
818 >     * @param m the map
819 >     */
820 >    public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
821 >        this.sizeCtl = DEFAULT_CAPACITY;
822 >        putAll(m);
823 >    }
824  
825      /**
826 <     * Shift value for indexing within segments.
827 <     **/
828 <    private final int segmentShift;
826 >     * Creates a new, empty map with an initial table size based on
827 >     * the given number of elements ({@code initialCapacity}) and
828 >     * initial table density ({@code loadFactor}).
829 >     *
830 >     * @param initialCapacity the initial capacity. The implementation
831 >     * performs internal sizing to accommodate this many elements,
832 >     * given the specified load factor.
833 >     * @param loadFactor the load factor (table density) for
834 >     * establishing the initial table size
835 >     * @throws IllegalArgumentException if the initial capacity of
836 >     * elements is negative or the load factor is nonpositive
837 >     *
838 >     * @since 1.6
839 >     */
840 >    public ConcurrentHashMap(int initialCapacity, float loadFactor) {
841 >        this(initialCapacity, loadFactor, 1);
842 >    }
843  
844      /**
845 <     * The segments, each of which is a specialized hash table
845 >     * Creates a new, empty map with an initial table size based on
846 >     * the given number of elements ({@code initialCapacity}), table
847 >     * density ({@code loadFactor}), and number of concurrently
848 >     * updating threads ({@code concurrencyLevel}).
849 >     *
850 >     * @param initialCapacity the initial capacity. The implementation
851 >     * performs internal sizing to accommodate this many elements,
852 >     * given the specified load factor.
853 >     * @param loadFactor the load factor (table density) for
854 >     * establishing the initial table size
855 >     * @param concurrencyLevel the estimated number of concurrently
856 >     * updating threads. The implementation may use this value as
857 >     * a sizing hint.
858 >     * @throws IllegalArgumentException if the initial capacity is
859 >     * negative or the load factor or concurrencyLevel are
860 >     * nonpositive
861       */
862 <    private final Segment[] segments;
862 >    public ConcurrentHashMap(int initialCapacity,
863 >                             float loadFactor, int concurrencyLevel) {
864 >        if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
865 >            throw new IllegalArgumentException();
866 >        if (initialCapacity < concurrencyLevel)   // Use at least as many bins
867 >            initialCapacity = concurrencyLevel;   // as estimated threads
868 >        long size = (long)(1.0 + (long)initialCapacity / loadFactor);
869 >        int cap = (size >= (long)MAXIMUM_CAPACITY) ?
870 >            MAXIMUM_CAPACITY : tableSizeFor((int)size);
871 >        this.sizeCtl = cap;
872 >    }
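Worked examples of the two sizing computations above (illustrative arithmetic):

    ConcurrentHashMap<String,Integer> m1 = new ConcurrentHashMap<>(16);
    // internally: sizeCtl = tableSizeFor(16 + (16 >>> 1) + 1)
    //                     = tableSizeFor(25) = 32
    ConcurrentHashMap<String,Integer> m2 = new ConcurrentHashMap<>(22, 0.75f, 1);
    // internally: size = (long)(1.0 + 22 / 0.75) = 30;
    //             sizeCtl = tableSizeFor(30) = 32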
873  
874 <    private transient Set<K> keySet;
109 <    private transient Set<Map.Entry<K,V>> entrySet;
110 <    private transient Collection<V> values;
874 >    // Original (since JDK1.2) Map methods
875  
876 <    /* ---------------- Small Utilities -------------- */
876 >    /**
877 >     * {@inheritDoc}
878 >     */
879 >    public int size() {
880 >        long n = sumCount();
881 >        return ((n < 0L) ? 0 :
882 >                (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
883 >                (int)n);
884 >    }
885  
886      /**
887 <     * Return a hash code for non-null Object x.
116 <     * Uses the same hash code spreader as most other j.u hash tables.
117 <     * @param x the object serving as a key
118 <     * @return the hash code
887 >     * {@inheritDoc}
888       */
889 <    private static int hash(Object x) {
890 <        int h = x.hashCode();
891 <        h += ~(h << 9);
892 <        h ^=  (h >>> 14);
893 <        h +=  (h << 4);
894 <        h ^=  (h >>> 10);
889 >    public boolean isEmpty() {
890 >        return sumCount() <= 0L; // ignore transient negative values
891 >    }
892 >
893 >    /**
894 >     * Returns the value to which the specified key is mapped,
895 >     * or {@code null} if this map contains no mapping for the key.
896 >     *
897 >     * <p>More formally, if this map contains a mapping from a key
898 >     * {@code k} to a value {@code v} such that {@code key.equals(k)},
899 >     * then this method returns {@code v}; otherwise it returns
900 >     * {@code null}.  (There can be at most one such mapping.)
901 >     *
902 >     * @throws NullPointerException if the specified key is null
903 >     */
904 >    public V get(Object key) {
905 >        Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
906 >        int h = spread(key.hashCode());
907 >        if ((tab = table) != null && (n = tab.length) > 0 &&
908 >            (e = tabAt(tab, (n - 1) & h)) != null) {
909 >            if ((eh = e.hash) == h) {
910 >                if ((ek = e.key) == key || (ek != null && key.equals(ek)))
911 >                    return e.val;
912 >            }
913 >            else if (eh < 0)
914 >                return (p = e.find(h, key)) != null ? p.val : null;
915 >            while ((e = e.next) != null) {
916 >                if (e.hash == h &&
917 >                    ((ek = e.key) == key || (ek != null && key.equals(ek))))
918 >                    return e.val;
919 >            }
920 >        }
921 >        return null;
922 >    }
923 >
924 >    /**
925 >     * Tests if the specified object is a key in this table.
926 >     *
927 >     * @param  key possible key
928 >     * @return {@code true} if and only if the specified object
929 >     *         is a key in this table, as determined by the
930 >     *         {@code equals} method; {@code false} otherwise
931 >     * @throws NullPointerException if the specified key is null
932 >     */
933 >    public boolean containsKey(Object key) {
934 >        return get(key) != null;
935 >    }
936 >
937 >    /**
938 >     * Returns {@code true} if this map maps one or more keys to the
939 >     * specified value. Note: This method may require a full traversal
940 >     * of the map, and is much slower than method {@code containsKey}.
941 >     *
942 >     * @param value value whose presence in this map is to be tested
943 >     * @return {@code true} if this map maps one or more keys to the
944 >     *         specified value
945 >     * @throws NullPointerException if the specified value is null
946 >     */
947 >    public boolean containsValue(Object value) {
948 >        if (value == null)
949 >            throw new NullPointerException();
950 >        Node<K,V>[] t;
951 >        if ((t = table) != null) {
952 >            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
953 >            for (Node<K,V> p; (p = it.advance()) != null; ) {
954 >                V v;
955 >                if ((v = p.val) == value || (v != null && value.equals(v)))
956 >                    return true;
957 >            }
958 >        }
959 >        return false;
960 >    }
961 >
962 >    /**
963 >     * Maps the specified key to the specified value in this table.
964 >     * Neither the key nor the value can be null.
965 >     *
966 >     * <p>The value can be retrieved by calling the {@code get} method
967 >     * with a key that is equal to the original key.
968 >     *
969 >     * @param key key with which the specified value is to be associated
970 >     * @param value value to be associated with the specified key
971 >     * @return the previous value associated with {@code key}, or
972 >     *         {@code null} if there was no mapping for {@code key}
973 >     * @throws NullPointerException if the specified key or value is null
974 >     */
975 >    public V put(K key, V value) {
976 >        return putVal(key, value, false);
977 >    }
978 >
979 >    /** Implementation for put and putIfAbsent */
980 >    final V putVal(K key, V value, boolean onlyIfAbsent) {
981 >        if (key == null || value == null) throw new NullPointerException();
982 >        int hash = spread(key.hashCode());
983 >        int binCount = 0;
984 >        for (Node<K,V>[] tab = table;;) {
985 >            Node<K,V> f; int n, i, fh;
986 >            if (tab == null || (n = tab.length) == 0)
987 >                tab = initTable();
988 >            else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
989 >                if (casTabAt(tab, i, null,
990 >                             new Node<K,V>(hash, key, value, null)))
991 >                    break;                   // no lock when adding to empty bin
992 >            }
993 >            else if ((fh = f.hash) == MOVED)
994 >                tab = helpTransfer(tab, f);
995 >            else {
996 >                V oldVal = null;
997 >                synchronized (f) {
998 >                    if (tabAt(tab, i) == f) {
999 >                        if (fh >= 0) {
1000 >                            binCount = 1;
1001 >                            for (Node<K,V> e = f;; ++binCount) {
1002 >                                K ek;
1003 >                                if (e.hash == hash &&
1004 >                                    ((ek = e.key) == key ||
1005 >                                     (ek != null && key.equals(ek)))) {
1006 >                                    oldVal = e.val;
1007 >                                    if (!onlyIfAbsent)
1008 >                                        e.val = value;
1009 >                                    break;
1010 >                                }
1011 >                                Node<K,V> pred = e;
1012 >                                if ((e = e.next) == null) {
1013 >                                    pred.next = new Node<K,V>(hash, key,
1014 >                                                              value, null);
1015 >                                    break;
1016 >                                }
1017 >                            }
1018 >                        }
1019 >                        else if (f instanceof TreeBin) {
1020 >                            Node<K,V> p;
1021 >                            binCount = 2;
1022 >                            if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
1023 >                                                           value)) != null) {
1024 >                                oldVal = p.val;
1025 >                                if (!onlyIfAbsent)
1026 >                                    p.val = value;
1027 >                            }
1028 >                        }
1029 >                        else if (f instanceof ReservationNode)
1030 >                            throw new IllegalStateException("Recursive update");
1031 >                    }
1032 >                }
1033 >                if (binCount != 0) {
1034 >                    if (binCount >= TREEIFY_THRESHOLD)
1035 >                        treeifyBin(tab, i);
1036 >                    if (oldVal != null)
1037 >                        return oldVal;
1038 >                    break;
1039 >                }
1040 >            }
1041 >        }
1042 >        addCount(1L, binCount);
1043 >        return null;
1044 >    }
1045 >
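Both put and putIfAbsent funnel into putVal above, differing only in the
onlyIfAbsent flag. A minimal caller-side sketch (hypothetical demo code, not
part of this file):

import java.util.concurrent.ConcurrentHashMap;

class PutDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String,Integer> m = new ConcurrentHashMap<>();
        System.out.println(m.put("a", 1));         // null: no prior mapping
        System.out.println(m.put("a", 2));         // 1: old value, replaced
        System.out.println(m.putIfAbsent("a", 3)); // 2: present, left unchanged
        System.out.println(m.get("a"));            // 2
    }
}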
1046 >    /**
1047 >     * Copies all of the mappings from the specified map to this one.
1048 >     * These mappings replace any mappings that this map had for any of the
1049 >     * keys currently in the specified map.
1050 >     *
1051 >     * @param m mappings to be stored in this map
1052 >     */
1053 >    public void putAll(Map<? extends K, ? extends V> m) {
1054 >        tryPresize(m.size());
1055 >        for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
1056 >            putVal(e.getKey(), e.getValue(), false);
1057 >    }
1058 >
1059 >    /**
1060 >     * Removes the key (and its corresponding value) from this map.
1061 >     * This method does nothing if the key is not in the map.
1062 >     *
1063 >     * @param  key the key that needs to be removed
1064 >     * @return the previous value associated with {@code key}, or
1065 >     *         {@code null} if there was no mapping for {@code key}
1066 >     * @throws NullPointerException if the specified key is null
1067 >     */
1068 >    public V remove(Object key) {
1069 >        return replaceNode(key, null, null);
1070 >    }
1071 >
1072 >    /**
1073 >     * Implementation for the four public remove/replace methods:
1074 >     * Replaces node value with v, conditional upon match of cv if
1075 >     * non-null.  If resulting value is null, delete.
1076 >     */
1077 >    final V replaceNode(Object key, V value, Object cv) {
1078 >        int hash = spread(key.hashCode());
1079 >        for (Node<K,V>[] tab = table;;) {
1080 >            Node<K,V> f; int n, i, fh;
1081 >            if (tab == null || (n = tab.length) == 0 ||
1082 >                (f = tabAt(tab, i = (n - 1) & hash)) == null)
1083 >                break;
1084 >            else if ((fh = f.hash) == MOVED)
1085 >                tab = helpTransfer(tab, f);
1086 >            else {
1087 >                V oldVal = null;
1088 >                boolean validated = false;
1089 >                synchronized (f) {
1090 >                    if (tabAt(tab, i) == f) {
1091 >                        if (fh >= 0) {
1092 >                            validated = true;
1093 >                            for (Node<K,V> e = f, pred = null;;) {
1094 >                                K ek;
1095 >                                if (e.hash == hash &&
1096 >                                    ((ek = e.key) == key ||
1097 >                                     (ek != null && key.equals(ek)))) {
1098 >                                    V ev = e.val;
1099 >                                    if (cv == null || cv == ev ||
1100 >                                        (ev != null && cv.equals(ev))) {
1101 >                                        oldVal = ev;
1102 >                                        if (value != null)
1103 >                                            e.val = value;
1104 >                                        else if (pred != null)
1105 >                                            pred.next = e.next;
1106 >                                        else
1107 >                                            setTabAt(tab, i, e.next);
1108 >                                    }
1109 >                                    break;
1110 >                                }
1111 >                                pred = e;
1112 >                                if ((e = e.next) == null)
1113 >                                    break;
1114 >                            }
1115 >                        }
1116 >                        else if (f instanceof TreeBin) {
1117 >                            validated = true;
1118 >                            TreeBin<K,V> t = (TreeBin<K,V>)f;
1119 >                            TreeNode<K,V> r, p;
1120 >                            if ((r = t.root) != null &&
1121 >                                (p = r.findTreeNode(hash, key, null)) != null) {
1122 >                                V pv = p.val;
1123 >                                if (cv == null || cv == pv ||
1124 >                                    (pv != null && cv.equals(pv))) {
1125 >                                    oldVal = pv;
1126 >                                    if (value != null)
1127 >                                        p.val = value;
1128 >                                    else if (t.removeTreeNode(p))
1129 >                                        setTabAt(tab, i, untreeify(t.first));
1130 >                                }
1131 >                            }
1132 >                        }
1133 >                        else if (f instanceof ReservationNode)
1134 >                            throw new IllegalStateException("Recursive update");
1135 >                    }
1136 >                }
1137 >                if (validated) {
1138 >                    if (oldVal != null) {
1139 >                        if (value == null)
1140 >                            addCount(-1L, -1);
1141 >                        return oldVal;
1142 >                    }
1143 >                    break;
1144 >                }
1145 >            }
1146 >        }
1147 >        return null;
1148 >    }
1149 >
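replaceNode is the single engine behind remove(key), remove(key, value),
replace(key, value), and replace(key, oldValue, newValue): cv == null matches
any current value, and value == null deletes the node. A caller-side sketch
(hypothetical demo code, not part of this file):

import java.util.concurrent.ConcurrentHashMap;

class ReplaceDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String,Integer> m = new ConcurrentHashMap<>();
        m.put("k", 1);
        System.out.println(m.replace("k", 1, 2)); // true: CAS-style, 1 -> 2
        System.out.println(m.replace("k", 9));    // 2: unconditional, now 9
        System.out.println(m.remove("k", 1));     // false: current value is 9
        System.out.println(m.remove("k"));        // 9: mapping removed
    }
}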
1150 >    /**
1151 >     * Removes all of the mappings from this map.
1152 >     */
1153 >    public void clear() {
1154 >        long delta = 0L; // negative number of deletions
1155 >        int i = 0;
1156 >        Node<K,V>[] tab = table;
1157 >        while (tab != null && i < tab.length) {
1158 >            int fh;
1159 >            Node<K,V> f = tabAt(tab, i);
1160 >            if (f == null)
1161 >                ++i;
1162 >            else if ((fh = f.hash) == MOVED) {
1163 >                tab = helpTransfer(tab, f);
1164 >                i = 0; // restart
1165 >            }
1166 >            else {
1167 >                synchronized (f) {
1168 >                    if (tabAt(tab, i) == f) {
1169 >                        Node<K,V> p = (fh >= 0 ? f :
1170 >                                       (f instanceof TreeBin) ?
1171 >                                       ((TreeBin<K,V>)f).first : null);
1172 >                        while (p != null) {
1173 >                            --delta;
1174 >                            p = p.next;
1175 >                        }
1176 >                        setTabAt(tab, i++, null);
1177 >                    }
1178 >                }
1179 >            }
1180 >        }
1181 >        if (delta != 0L)
1182 >            addCount(delta, -1);
1183 >    }
1184 >
1185 >    /**
1186 >     * Returns a {@link Set} view of the keys contained in this map.
1187 >     * The set is backed by the map, so changes to the map are
1188 >     * reflected in the set, and vice-versa. The set supports element
1189 >     * removal, which removes the corresponding mapping from this map,
1190 >     * via the {@code Iterator.remove}, {@code Set.remove},
1191 >     * {@code removeAll}, {@code retainAll}, and {@code clear}
1192 >     * operations.  It does not support the {@code add} or
1193 >     * {@code addAll} operations.
1194 >     *
1195 >     * <p>The view's iterators and spliterators are
1196 >     * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1197 >     *
1198 >     * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
1199 >     * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
1200 >     *
1201 >     * @return the set view
1202 >     */
1203 >    public KeySetView<K,V> keySet() {
1204 >        KeySetView<K,V> ks;
1205 >        return (ks = keySet) != null ? ks : (keySet = new KeySetView<K,V>(this, null));
1206 >    }
1207 >
1208 >    /**
1209 >     * Returns a {@link Collection} view of the values contained in this map.
1210 >     * The collection is backed by the map, so changes to the map are
1211 >     * reflected in the collection, and vice-versa.  The collection
1212 >     * supports element removal, which removes the corresponding
1213 >     * mapping from this map, via the {@code Iterator.remove},
1214 >     * {@code Collection.remove}, {@code removeAll},
1215 >     * {@code retainAll}, and {@code clear} operations.  It does not
1216 >     * support the {@code add} or {@code addAll} operations.
1217 >     *
1218 >     * <p>The view's iterators and spliterators are
1219 >     * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1220 >     *
1221 >     * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT}
1222 >     * and {@link Spliterator#NONNULL}.
1223 >     *
1224 >     * @return the collection view
1225 >     */
1226 >    public Collection<V> values() {
1227 >        ValuesView<K,V> vs;
1228 >        return (vs = values) != null ? vs : (values = new ValuesView<K,V>(this));
1229 >    }
1230 >
1231 >    /**
1232 >     * Returns a {@link Set} view of the mappings contained in this map.
1233 >     * The set is backed by the map, so changes to the map are
1234 >     * reflected in the set, and vice-versa.  The set supports element
1235 >     * removal, which removes the corresponding mapping from the map,
1236 >     * via the {@code Iterator.remove}, {@code Set.remove},
1237 >     * {@code removeAll}, {@code retainAll}, and {@code clear}
1238 >     * operations.
1239 >     *
1240 >     * <p>The view's iterators and spliterators are
1241 >     * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1242 >     *
1243 >     * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
1244 >     * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
1245 >     *
1246 >     * @return the set view
1247 >     */
1248 >    public Set<Map.Entry<K,V>> entrySet() {
1249 >        EntrySetView<K,V> es;
1250 >        return (es = entrySet) != null ? es : (entrySet = new EntrySetView<K,V>(this));
1251 >    }
1252 >
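All three collection views above write through to the map, so element removal
on a view removes the underlying mapping. A small illustration (hypothetical
demo code, not part of this file):

import java.util.concurrent.ConcurrentHashMap;

class ViewDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String,Integer> m = new ConcurrentHashMap<>();
        m.put("a", 1);
        m.put("b", 2);
        m.keySet().remove("a");          // drops the mapping a=1
        m.values().remove(2);            // drops the mapping b=2
        System.out.println(m.isEmpty()); // true
    }
}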
1253 >    /**
1254 >     * Returns the hash code value for this {@link Map}, i.e.,
1255 >     * the sum of, for each key-value pair in the map,
1256 >     * {@code key.hashCode() ^ value.hashCode()}.
1257 >     *
1258 >     * @return the hash code value for this map
1259 >     */
1260 >    public int hashCode() {
1261 >        int h = 0;
1262 >        Node<K,V>[] t;
1263 >        if ((t = table) != null) {
1264 >            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1265 >            for (Node<K,V> p; (p = it.advance()) != null; )
1266 >                h += p.key.hashCode() ^ p.val.hashCode();
1267 >        }
1268          return h;
1269      }
1270  
1271      /**
1272 <     * Return the segment that should be used for key with given hash
1272 >     * Returns a string representation of this map.  The string
1273 >     * representation consists of a list of key-value mappings (in no
1274 >     * particular order) enclosed in braces ("{@code {}}").  Adjacent
1275 >     * mappings are separated by the characters {@code ", "} (comma
1276 >     * and space).  Each key-value mapping is rendered as the key
1277 >     * followed by an equals sign ("{@code =}") followed by the
1278 >     * associated value.
1279 >     *
1280 >     * @return a string representation of this map
1281       */
1282 <    private Segment<K,V> segmentFor(int hash) {
1283 <        return (Segment<K,V>) segments[(hash >>> segmentShift) & segmentMask];
1282 >    public String toString() {
1283 >        Node<K,V>[] t;
1284 >        int f = (t = table) == null ? 0 : t.length;
1285 >        Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1286 >        StringBuilder sb = new StringBuilder();
1287 >        sb.append('{');
1288 >        Node<K,V> p;
1289 >        if ((p = it.advance()) != null) {
1290 >            for (;;) {
1291 >                K k = p.key;
1292 >                V v = p.val;
1293 >                sb.append(k == this ? "(this Map)" : k);
1294 >                sb.append('=');
1295 >                sb.append(v == this ? "(this Map)" : v);
1296 >                if ((p = it.advance()) == null)
1297 >                    break;
1298 >                sb.append(',').append(' ');
1299 >            }
1300 >        }
1301 >        return sb.append('}').toString();
1302      }
1303  
1304 <    /* ---------------- Inner Classes -------------- */
1304 >    /**
1305 >     * Compares the specified object with this map for equality.
1306 >     * Returns {@code true} if the given object is a map with the same
1307 >     * mappings as this map.  This operation may return misleading
1308 >     * results if either map is concurrently modified during execution
1309 >     * of this method.
1310 >     *
1311 >     * @param o object to be compared for equality with this map
1312 >     * @return {@code true} if the specified object is equal to this map
1313 >     */
1314 >    public boolean equals(Object o) {
1315 >        if (o != this) {
1316 >            if (!(o instanceof Map))
1317 >                return false;
1318 >            Map<?,?> m = (Map<?,?>) o;
1319 >            Node<K,V>[] t;
1320 >            int f = (t = table) == null ? 0 : t.length;
1321 >            Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1322 >            for (Node<K,V> p; (p = it.advance()) != null; ) {
1323 >                V val = p.val;
1324 >                Object v = m.get(p.key);
1325 >                if (v == null || (v != val && !v.equals(val)))
1326 >                    return false;
1327 >            }
1328 >            for (Map.Entry<?,?> e : m.entrySet()) {
1329 >                Object mk, mv, v;
1330 >                if ((mk = e.getKey()) == null ||
1331 >                    (mv = e.getValue()) == null ||
1332 >                    (v = get(mk)) == null ||
1333 >                    (mv != v && !mv.equals(v)))
1334 >                    return false;
1335 >            }
1336 >        }
1337 >        return true;
1338 >    }
1339  
1340      /**
1341 <     * Segments are specialized versions of hash tables.  This
1342 <     * subclasses ReentrantLock opportunistically, just to
1343 <     * simplify some locking and avoid separate construction.
1344 <     **/
1345 <    private static final class Segment<K,V> extends ReentrantLock implements Serializable {
1341 >     * Stripped-down version of helper class used in previous version,
1342 >     * declared for the sake of serialization compatibility.
1343 >     */
1344 >    static class Segment<K,V> extends ReentrantLock implements Serializable {
1345 >        private static final long serialVersionUID = 2249069246763182397L;
1346 >        final float loadFactor;
1347 >        Segment(float lf) { this.loadFactor = lf; }
1348 >    }
1349 >
1350 >    /**
1351 >     * Saves the state of the {@code ConcurrentHashMap} instance to a
1352 >     * stream (i.e., serializes it).
1353 >     * @param s the stream
1354 >     * @throws java.io.IOException if an I/O error occurs
1355 >     * @serialData
1356 >     * the key (Object) and value (Object)
1357 >     * for each key-value mapping, followed by a null pair.
1358 >     * The key-value mappings are emitted in no particular order.
1359 >     */
1360 >    private void writeObject(java.io.ObjectOutputStream s)
1361 >        throws java.io.IOException {
1362 >        // For serialization compatibility
1363 >        // Emulate segment calculation from previous version of this class
1364 >        int sshift = 0;
1365 >        int ssize = 1;
1366 >        while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
1367 >            ++sshift;
1368 >            ssize <<= 1;
1369 >        }
1370 >        int segmentShift = 32 - sshift;
1371 >        int segmentMask = ssize - 1;
1372 >        @SuppressWarnings("unchecked")
1373 >        Segment<K,V>[] segments = (Segment<K,V>[])
1374 >            new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
1375 >        for (int i = 0; i < segments.length; ++i)
1376 >            segments[i] = new Segment<K,V>(LOAD_FACTOR);
1377 >        java.io.ObjectOutputStream.PutField streamFields = s.putFields();
1378 >        streamFields.put("segments", segments);
1379 >        streamFields.put("segmentShift", segmentShift);
1380 >        streamFields.put("segmentMask", segmentMask);
1381 >        s.writeFields();
1382 >
1383 >        Node<K,V>[] t;
1384 >        if ((t = table) != null) {
1385 >            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1386 >            for (Node<K,V> p; (p = it.advance()) != null; ) {
1387 >                s.writeObject(p.key);
1388 >                s.writeObject(p.val);
1389 >            }
1390 >        }
1391 >        s.writeObject(null);
1392 >        s.writeObject(null);
1393 >        segments = null; // throw away
1394 >    }
1395 >
1396 >    /**
1397 >     * Reconstitutes the instance from a stream (that is, deserializes it).
1398 >     * @param s the stream
1399 >     * @throws ClassNotFoundException if the class of a serialized object
1400 >     *         could not be found
1401 >     * @throws java.io.IOException if an I/O error occurs
1402 >     */
1403 >    private void readObject(java.io.ObjectInputStream s)
1404 >        throws java.io.IOException, ClassNotFoundException {
1405          /*
1406 <         * Segments maintain a table of entry lists that are ALWAYS
1407 <         * kept in a consistent state, so can be read without locking.
1408 <         * Next fields of nodes are immutable (final).  All list
1409 <         * additions are performed at the front of each bin. This
1410 <         * makes it easy to check changes, and also fast to traverse.
150 <         * When nodes would otherwise be changed, new nodes are
151 <         * created to replace them. This works well for hash tables
152 <         * since the bin lists tend to be short. (The average length
153 <         * is less than two for the default load factor threshold.)
154 <         *
155 <         * Read operations can thus proceed without locking, but rely
156 <         * on a memory barrier to ensure that completed write
157 <         * operations performed by other threads are
158 <         * noticed. Conveniently, the "count" field, tracking the
159 <         * number of elements, can also serve as the volatile variable
160 <         * providing proper read/write barriers. This is convenient
161 <         * because this field needs to be read in many read operations
162 <         * anyway. The use of volatiles for this purpose is only
163 <         * guaranteed to work in accord with requirements in
164 <         * multithreaded environments when run on JVMs conforming to
165 <         * the clarified JSR133 memory model specification.  This is
166 <         * true for hotspot as of release 1.4.
167 <         *
168 <         * Implementor's note. The basic rules for all this are:
169 <         *
170 <         *   - All unsynchronized read operations must first read the
171 <         *     "count" field, and should not look at table entries if
172 <         *     it is 0.
173 <         *
174 <         *   - All synchronized write operations should write to
175 <         *     the "count" field after updating. The operations must not
176 <         *     take any action that could even momentarily cause
177 <         *     a concurrent read operation to see inconsistent
178 <         *     data. This is made easier by the nature of the read
179 <         *     operations in Map. For example, no operation
180 <         *     can reveal that the table has grown but the threshold
181 <         *     has not yet been updated, so there are no atomicity
182 <         *     requirements for this with respect to reads.
183 <         *
184 <         * As a guide, all critical volatile reads and writes are marked
185 <         * in code comments.
1406 >         * To improve performance in typical cases, we create nodes
1407 >         * while reading, then place in table once size is known.
1408 >         * However, we must also validate uniqueness and deal with
1409 >         * overpopulated bins while doing so, which requires
1410 >         * specialized versions of putVal mechanics.
1411           */
1412 +        sizeCtl = -1; // force exclusion for table construction
1413 +        s.defaultReadObject();
1414 +        long size = 0L;
1415 +        Node<K,V> p = null;
1416 +        for (;;) {
1417 +            @SuppressWarnings("unchecked")
1418 +            K k = (K) s.readObject();
1419 +            @SuppressWarnings("unchecked")
1420 +            V v = (V) s.readObject();
1421 +            if (k != null && v != null) {
1422 +                p = new Node<K,V>(spread(k.hashCode()), k, v, p);
1423 +                ++size;
1424 +            }
1425 +            else
1426 +                break;
1427 +        }
1428 +        if (size == 0L)
1429 +            sizeCtl = 0;
1430 +        else {
1431 +            int n;
1432 +            if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
1433 +                n = MAXIMUM_CAPACITY;
1434 +            else {
1435 +                int sz = (int)size;
1436 +                n = tableSizeFor(sz + (sz >>> 1) + 1);
1437 +            }
1438 +            @SuppressWarnings("unchecked")
1439 +            Node<K,V>[] tab = (Node<K,V>[])new Node<?,?>[n];
1440 +            int mask = n - 1;
1441 +            long added = 0L;
1442 +            while (p != null) {
1443 +                boolean insertAtFront;
1444 +                Node<K,V> next = p.next, first;
1445 +                int h = p.hash, j = h & mask;
1446 +                if ((first = tabAt(tab, j)) == null)
1447 +                    insertAtFront = true;
1448 +                else {
1449 +                    K k = p.key;
1450 +                    if (first.hash < 0) {
1451 +                        TreeBin<K,V> t = (TreeBin<K,V>)first;
1452 +                        if (t.putTreeVal(h, k, p.val) == null)
1453 +                            ++added;
1454 +                        insertAtFront = false;
1455 +                    }
1456 +                    else {
1457 +                        int binCount = 0;
1458 +                        insertAtFront = true;
1459 +                        Node<K,V> q; K qk;
1460 +                        for (q = first; q != null; q = q.next) {
1461 +                            if (q.hash == h &&
1462 +                                ((qk = q.key) == k ||
1463 +                                 (qk != null && k.equals(qk)))) {
1464 +                                insertAtFront = false;
1465 +                                break;
1466 +                            }
1467 +                            ++binCount;
1468 +                        }
1469 +                        if (insertAtFront && binCount >= TREEIFY_THRESHOLD) {
1470 +                            insertAtFront = false;
1471 +                            ++added;
1472 +                            p.next = first;
1473 +                            TreeNode<K,V> hd = null, tl = null;
1474 +                            for (q = p; q != null; q = q.next) {
1475 +                                TreeNode<K,V> t = new TreeNode<K,V>
1476 +                                    (q.hash, q.key, q.val, null, null);
1477 +                                if ((t.prev = tl) == null)
1478 +                                    hd = t;
1479 +                                else
1480 +                                    tl.next = t;
1481 +                                tl = t;
1482 +                            }
1483 +                            setTabAt(tab, j, new TreeBin<K,V>(hd));
1484 +                        }
1485 +                    }
1486 +                }
1487 +                if (insertAtFront) {
1488 +                    ++added;
1489 +                    p.next = first;
1490 +                    setTabAt(tab, j, p);
1491 +                }
1492 +                p = next;
1493 +            }
1494 +            table = tab;
1495 +            sizeCtl = n - (n >>> 2);
1496 +            baseCount = added;
1497 +        }
1498 +    }
1499  
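The custom stream layout above (dummy segments for compatibility with the old
Segment-based format, then key/value pairs terminated by a null pair) is
invisible to callers, who serialize the map in the ordinary way. A round-trip
sketch (hypothetical demo code, not part of this file):

import java.io.*;
import java.util.concurrent.ConcurrentHashMap;

class SerDemo {
    public static void main(String[] args) throws Exception {
        ConcurrentHashMap<String,Integer> m = new ConcurrentHashMap<>();
        m.put("x", 42);
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (ObjectOutputStream out = new ObjectOutputStream(bos)) {
            out.writeObject(m);
        }
        try (ObjectInputStream in = new ObjectInputStream(
                 new ByteArrayInputStream(bos.toByteArray()))) {
            @SuppressWarnings("unchecked")
            ConcurrentHashMap<String,Integer> copy =
                (ConcurrentHashMap<String,Integer>) in.readObject();
            System.out.println(copy.get("x")); // 42
        }
    }
}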
1500 <        /**
1501 <         * The number of elements in this segment's region.
1502 <         **/
1503 <        transient volatile int count;
1500 >    // ConcurrentMap methods
1501 >
1502 >    /**
1503 >     * {@inheritDoc}
1504 >     *
1505 >     * @return the previous value associated with the specified key,
1506 >     *         or {@code null} if there was no mapping for the key
1507 >     * @throws NullPointerException if the specified key or value is null
1508 >     */
1509 >    public V putIfAbsent(K key, V value) {
1510 >        return putVal(key, value, true);
1511 >    }
1512 >
1513 >    /**
1514 >     * {@inheritDoc}
1515 >     *
1516 >     * @throws NullPointerException if the specified key is null
1517 >     */
1518 >    public boolean remove(Object key, Object value) {
1519 >        if (key == null)
1520 >            throw new NullPointerException();
1521 >        return value != null && replaceNode(key, null, value) != null;
1522 >    }
1523 >
1524 >    /**
1525 >     * {@inheritDoc}
1526 >     *
1527 >     * @throws NullPointerException if any of the arguments are null
1528 >     */
1529 >    public boolean replace(K key, V oldValue, V newValue) {
1530 >        if (key == null || oldValue == null || newValue == null)
1531 >            throw new NullPointerException();
1532 >        return replaceNode(key, newValue, oldValue) != null;
1533 >    }
1534 >
1535 >    /**
1536 >     * {@inheritDoc}
1537 >     *
1538 >     * @return the previous value associated with the specified key,
1539 >     *         or {@code null} if there was no mapping for the key
1540 >     * @throws NullPointerException if the specified key or value is null
1541 >     */
1542 >    public V replace(K key, V value) {
1543 >        if (key == null || value == null)
1544 >            throw new NullPointerException();
1545 >        return replaceNode(key, value, null);
1546 >    }
1547 >
1548 >    // Overrides of JDK8+ Map extension method defaults
1549 >
1550 >    /**
1551 >     * Returns the value to which the specified key is mapped, or the
1552 >     * given default value if this map contains no mapping for the
1553 >     * key.
1554 >     *
1555 >     * @param key the key whose associated value is to be returned
1556 >     * @param defaultValue the value to return if this map contains
1557 >     * no mapping for the given key
1558 >     * @return the mapping for the key, if present; else the default value
1559 >     * @throws NullPointerException if the specified key is null
1560 >     */
1561 >    public V getOrDefault(Object key, V defaultValue) {
1562 >        V v;
1563 >        return (v = get(key)) == null ? defaultValue : v;
1564 >    }
1565 >
1566 >    public void forEach(BiConsumer<? super K, ? super V> action) {
1567 >        if (action == null) throw new NullPointerException();
1568 >        Node<K,V>[] t;
1569 >        if ((t = table) != null) {
1570 >            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1571 >            for (Node<K,V> p; (p = it.advance()) != null; ) {
1572 >                action.accept(p.key, p.val);
1573 >            }
1574 >        }
1575 >    }
1576 >
1577 >    public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
1578 >        if (function == null) throw new NullPointerException();
1579 >        Node<K,V>[] t;
1580 >        if ((t = table) != null) {
1581 >            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1582 >            for (Node<K,V> p; (p = it.advance()) != null; ) {
1583 >                V oldValue = p.val;
1584 >                for (K key = p.key;;) {
1585 >                    V newValue = function.apply(key, oldValue);
1586 >                    if (newValue == null)
1587 >                        throw new NullPointerException();
1588 >                    if (replaceNode(key, newValue, oldValue) != null ||
1589 >                        (oldValue = get(key)) == null)
1590 >                        break;
1591 >                }
1592 >            }
1593 >        }
1594 >    }
1595 >
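Note the inner retry loop in replaceAll: replaceNode acts as a compare-and-set
against oldValue, so if another thread updates the entry mid-flight, the
function is re-applied to the fresh value (or the loop exits if the mapping
vanished). Single-threaded usage (hypothetical demo code, not part of this
file):

import java.util.concurrent.ConcurrentHashMap;

class ReplaceAllDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String,Integer> m = new ConcurrentHashMap<>();
        m.put("a", 1);
        m.put("b", 2);
        m.replaceAll((k, v) -> v * 10);
        System.out.println(m); // {a=10, b=20} (iteration order may vary)
    }
}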
1596 >    /**
1597 >     * Helper method for EntrySetView.removeIf.
1598 >     */
1599 >    boolean removeEntryIf(Predicate<? super Entry<K,V>> function) {
1600 >        if (function == null) throw new NullPointerException();
1601 >        Node<K,V>[] t;
1602 >        boolean removed = false;
1603 >        if ((t = table) != null) {
1604 >            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1605 >            for (Node<K,V> p; (p = it.advance()) != null; ) {
1606 >                K k = p.key;
1607 >                V v = p.val;
1608 >                Map.Entry<K,V> e = new AbstractMap.SimpleImmutableEntry<>(k, v);
1609 >                if (function.test(e) && replaceNode(k, null, v) != null)
1610 >                    removed = true;
1611 >            }
1612 >        }
1613 >        return removed;
1614 >    }
1615 >
1616 >    /**
1617 >     * Helper method for ValuesView.removeIf.
1618 >     */
1619 >    boolean removeValueIf(Predicate<? super V> function) {
1620 >        if (function == null) throw new NullPointerException();
1621 >        Node<K,V>[] t;
1622 >        boolean removed = false;
1623 >        if ((t = table) != null) {
1624 >            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1625 >            for (Node<K,V> p; (p = it.advance()) != null; ) {
1626 >                K k = p.key;
1627 >                V v = p.val;
1628 >                if (function.test(v) && replaceNode(k, null, v) != null)
1629 >                    removed = true;
1630 >            }
1631 >        }
1632 >        return removed;
1633 >    }
1634 >
1635 >    /**
1636 >     * If the specified key is not already associated with a value,
1637 >     * attempts to compute its value using the given mapping function
1638 >     * and enters it into this map unless {@code null}.  The entire
1639 >     * method invocation is performed atomically, so the function is
1640 >     * applied at most once per key.  Some attempted update operations
1641 >     * on this map by other threads may be blocked while computation
1642 >     * is in progress, so the computation should be short and simple,
1643 >     * and must not attempt to update any other mappings of this map.
1644 >     *
1645 >     * @param key key with which the specified value is to be associated
1646 >     * @param mappingFunction the function to compute a value
1647 >     * @return the current (existing or computed) value associated with
1648 >     *         the specified key, or null if the computed value is null
1649 >     * @throws NullPointerException if the specified key or mappingFunction
1650 >     *         is null
1651 >     * @throws IllegalStateException if the computation detectably
1652 >     *         attempts a recursive update to this map that would
1653 >     *         otherwise never complete
1654 >     * @throws RuntimeException or Error if the mappingFunction does so,
1655 >     *         in which case the mapping is left unestablished
1656 >     */
1657 >    public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
1658 >        if (key == null || mappingFunction == null)
1659 >            throw new NullPointerException();
1660 >        int h = spread(key.hashCode());
1661 >        V val = null;
1662 >        int binCount = 0;
1663 >        for (Node<K,V>[] tab = table;;) {
1664 >            Node<K,V> f; int n, i, fh;
1665 >            if (tab == null || (n = tab.length) == 0)
1666 >                tab = initTable();
1667 >            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1668 >                Node<K,V> r = new ReservationNode<K,V>();
1669 >                synchronized (r) {
1670 >                    if (casTabAt(tab, i, null, r)) {
1671 >                        binCount = 1;
1672 >                        Node<K,V> node = null;
1673 >                        try {
1674 >                            if ((val = mappingFunction.apply(key)) != null)
1675 >                                node = new Node<K,V>(h, key, val, null);
1676 >                        } finally {
1677 >                            setTabAt(tab, i, node);
1678 >                        }
1679 >                    }
1680 >                }
1681 >                if (binCount != 0)
1682 >                    break;
1683 >            }
1684 >            else if ((fh = f.hash) == MOVED)
1685 >                tab = helpTransfer(tab, f);
1686 >            else {
1687 >                boolean added = false;
1688 >                synchronized (f) {
1689 >                    if (tabAt(tab, i) == f) {
1690 >                        if (fh >= 0) {
1691 >                            binCount = 1;
1692 >                            for (Node<K,V> e = f;; ++binCount) {
1693 >                                K ek;
1694 >                                if (e.hash == h &&
1695 >                                    ((ek = e.key) == key ||
1696 >                                     (ek != null && key.equals(ek)))) {
1697 >                                    val = e.val;
1698 >                                    break;
1699 >                                }
1700 >                                Node<K,V> pred = e;
1701 >                                if ((e = e.next) == null) {
1702 >                                    if ((val = mappingFunction.apply(key)) != null) {
1703 >                                        if (pred.next != null)
1704 >                                            throw new IllegalStateException("Recursive update");
1705 >                                        added = true;
1706 >                                        pred.next = new Node<K,V>(h, key, val, null);
1707 >                                    }
1708 >                                    break;
1709 >                                }
1710 >                            }
1711 >                        }
1712 >                        else if (f instanceof TreeBin) {
1713 >                            binCount = 2;
1714 >                            TreeBin<K,V> t = (TreeBin<K,V>)f;
1715 >                            TreeNode<K,V> r, p;
1716 >                            if ((r = t.root) != null &&
1717 >                                (p = r.findTreeNode(h, key, null)) != null)
1718 >                                val = p.val;
1719 >                            else if ((val = mappingFunction.apply(key)) != null) {
1720 >                                added = true;
1721 >                                t.putTreeVal(h, key, val);
1722 >                            }
1723 >                        }
1724 >                        else if (f instanceof ReservationNode)
1725 >                            throw new IllegalStateException("Recursive update");
1726 >                    }
1727 >                }
1728 >                if (binCount != 0) {
1729 >                    if (binCount >= TREEIFY_THRESHOLD)
1730 >                        treeifyBin(tab, i);
1731 >                    if (!added)
1732 >                        return val;
1733 >                    break;
1734 >                }
1735 >            }
1736 >        }
1737 >        if (val != null)
1738 >            addCount(1L, binCount);
1739 >        return val;
1740 >    }
1741 >
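computeIfAbsent is the natural primitive for concurrent memoization: the
ReservationNode placeholder claims an empty bin, so the mapping function runs
at most once per key even under contention. A sketch (hypothetical demo code,
not part of this file):

import java.util.concurrent.ConcurrentHashMap;

class MemoDemo {
    static final ConcurrentHashMap<Integer,Long> CACHE = new ConcurrentHashMap<>();

    static long square(int n) {
        // computed at most once per key; concurrent callers block briefly
        return CACHE.computeIfAbsent(n, k -> (long) k * k);
    }

    public static void main(String[] args) {
        System.out.println(square(7)); // computes 49
        System.out.println(square(7)); // cached; function not re-applied
    }
}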
1742 >    /**
1743 >     * If the value for the specified key is present, attempts to
1744 >     * compute a new mapping given the key and its current mapped
1745 >     * value.  The entire method invocation is performed atomically.
1746 >     * Some attempted update operations on this map by other threads
1747 >     * may be blocked while computation is in progress, so the
1748 >     * computation should be short and simple, and must not attempt to
1749 >     * update any other mappings of this map.
1750 >     *
1751 >     * @param key key with which a value may be associated
1752 >     * @param remappingFunction the function to compute a value
1753 >     * @return the new value associated with the specified key, or null if none
1754 >     * @throws NullPointerException if the specified key or remappingFunction
1755 >     *         is null
1756 >     * @throws IllegalStateException if the computation detectably
1757 >     *         attempts a recursive update to this map that would
1758 >     *         otherwise never complete
1759 >     * @throws RuntimeException or Error if the remappingFunction does so,
1760 >     *         in which case the mapping is unchanged
1761 >     */
1762 >    public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
1763 >        if (key == null || remappingFunction == null)
1764 >            throw new NullPointerException();
1765 >        int h = spread(key.hashCode());
1766 >        V val = null;
1767 >        int delta = 0;
1768 >        int binCount = 0;
1769 >        for (Node<K,V>[] tab = table;;) {
1770 >            Node<K,V> f; int n, i, fh;
1771 >            if (tab == null || (n = tab.length) == 0)
1772 >                tab = initTable();
1773 >            else if ((f = tabAt(tab, i = (n - 1) & h)) == null)
1774 >                break;
1775 >            else if ((fh = f.hash) == MOVED)
1776 >                tab = helpTransfer(tab, f);
1777 >            else {
1778 >                synchronized (f) {
1779 >                    if (tabAt(tab, i) == f) {
1780 >                        if (fh >= 0) {
1781 >                            binCount = 1;
1782 >                            for (Node<K,V> e = f, pred = null;; ++binCount) {
1783 >                                K ek;
1784 >                                if (e.hash == h &&
1785 >                                    ((ek = e.key) == key ||
1786 >                                     (ek != null && key.equals(ek)))) {
1787 >                                    val = remappingFunction.apply(key, e.val);
1788 >                                    if (val != null)
1789 >                                        e.val = val;
1790 >                                    else {
1791 >                                        delta = -1;
1792 >                                        Node<K,V> en = e.next;
1793 >                                        if (pred != null)
1794 >                                            pred.next = en;
1795 >                                        else
1796 >                                            setTabAt(tab, i, en);
1797 >                                    }
1798 >                                    break;
1799 >                                }
1800 >                                pred = e;
1801 >                                if ((e = e.next) == null)
1802 >                                    break;
1803 >                            }
1804 >                        }
1805 >                        else if (f instanceof TreeBin) {
1806 >                            binCount = 2;
1807 >                            TreeBin<K,V> t = (TreeBin<K,V>)f;
1808 >                            TreeNode<K,V> r, p;
1809 >                            if ((r = t.root) != null &&
1810 >                                (p = r.findTreeNode(h, key, null)) != null) {
1811 >                                val = remappingFunction.apply(key, p.val);
1812 >                                if (val != null)
1813 >                                    p.val = val;
1814 >                                else {
1815 >                                    delta = -1;
1816 >                                    if (t.removeTreeNode(p))
1817 >                                        setTabAt(tab, i, untreeify(t.first));
1818 >                                }
1819 >                            }
1820 >                        }
1821 >                        else if (f instanceof ReservationNode)
1822 >                            throw new IllegalStateException("Recursive update");
1823 >                    }
1824 >                }
1825 >                if (binCount != 0)
1826 >                    break;
1827 >            }
1828 >        }
1829 >        if (delta != 0)
1830 >            addCount((long)delta, binCount);
1831 >        return val;
1832 >    }
1833 >
1834 >    /**
1835 >     * Attempts to compute a mapping for the specified key and its
1836 >     * current mapped value (or {@code null} if there is no current
1837 >     * mapping). The entire method invocation is performed atomically.
1838 >     * Some attempted update operations on this map by other threads
1839 >     * may be blocked while computation is in progress, so the
1840 >     * computation should be short and simple, and must not attempt to
1841 >     * update any other mappings of this Map.
1842 >     *
1843 >     * @param key key with which the specified value is to be associated
1844 >     * @param remappingFunction the function to compute a value
1845 >     * @return the new value associated with the specified key, or null if none
1846 >     * @throws NullPointerException if the specified key or remappingFunction
1847 >     *         is null
1848 >     * @throws IllegalStateException if the computation detectably
1849 >     *         attempts a recursive update to this map that would
1850 >     *         otherwise never complete
1851 >     * @throws RuntimeException or Error if the remappingFunction does so,
1852 >     *         in which case the mapping is unchanged
1853 >     */
1854 >    public V compute(K key,
1855 >                     BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
1856 >        if (key == null || remappingFunction == null)
1857 >            throw new NullPointerException();
1858 >        int h = spread(key.hashCode());
1859 >        V val = null;
1860 >        int delta = 0;
1861 >        int binCount = 0;
1862 >        for (Node<K,V>[] tab = table;;) {
1863 >            Node<K,V> f; int n, i, fh;
1864 >            if (tab == null || (n = tab.length) == 0)
1865 >                tab = initTable();
1866 >            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1867 >                Node<K,V> r = new ReservationNode<K,V>();
1868 >                synchronized (r) {
1869 >                    if (casTabAt(tab, i, null, r)) {
1870 >                        binCount = 1;
1871 >                        Node<K,V> node = null;
1872 >                        try {
1873 >                            if ((val = remappingFunction.apply(key, null)) != null) {
1874 >                                delta = 1;
1875 >                                node = new Node<K,V>(h, key, val, null);
1876 >                            }
1877 >                        } finally {
1878 >                            setTabAt(tab, i, node);
1879 >                        }
1880 >                    }
1881 >                }
1882 >                if (binCount != 0)
1883 >                    break;
1884 >            }
1885 >            else if ((fh = f.hash) == MOVED)
1886 >                tab = helpTransfer(tab, f);
1887 >            else {
1888 >                synchronized (f) {
1889 >                    if (tabAt(tab, i) == f) {
1890 >                        if (fh >= 0) {
1891 >                            binCount = 1;
1892 >                            for (Node<K,V> e = f, pred = null;; ++binCount) {
1893 >                                K ek;
1894 >                                if (e.hash == h &&
1895 >                                    ((ek = e.key) == key ||
1896 >                                     (ek != null && key.equals(ek)))) {
1897 >                                    val = remappingFunction.apply(key, e.val);
1898 >                                    if (val != null)
1899 >                                        e.val = val;
1900 >                                    else {
1901 >                                        delta = -1;
1902 >                                        Node<K,V> en = e.next;
1903 >                                        if (pred != null)
1904 >                                            pred.next = en;
1905 >                                        else
1906 >                                            setTabAt(tab, i, en);
1907 >                                    }
1908 >                                    break;
1909 >                                }
1910 >                                pred = e;
1911 >                                if ((e = e.next) == null) {
1912 >                                    val = remappingFunction.apply(key, null);
1913 >                                    if (val != null) {
1914 >                                        if (pred.next != null)
1915 >                                            throw new IllegalStateException("Recursive update");
1916 >                                        delta = 1;
1917 >                                        pred.next =
1918 >                                            new Node<K,V>(h, key, val, null);
1919 >                                    }
1920 >                                    break;
1921 >                                }
1922 >                            }
1923 >                        }
1924 >                        else if (f instanceof TreeBin) {
1925 >                            binCount = 1;
1926 >                            TreeBin<K,V> t = (TreeBin<K,V>)f;
1927 >                            TreeNode<K,V> r, p;
1928 >                            if ((r = t.root) != null)
1929 >                                p = r.findTreeNode(h, key, null);
1930 >                            else
1931 >                                p = null;
1932 >                            V pv = (p == null) ? null : p.val;
1933 >                            val = remappingFunction.apply(key, pv);
1934 >                            if (val != null) {
1935 >                                if (p != null)
1936 >                                    p.val = val;
1937 >                                else {
1938 >                                    delta = 1;
1939 >                                    t.putTreeVal(h, key, val);
1940 >                                }
1941 >                            }
1942 >                            else if (p != null) {
1943 >                                delta = -1;
1944 >                                if (t.removeTreeNode(p))
1945 >                                    setTabAt(tab, i, untreeify(t.first));
1946 >                            }
1947 >                        }
1948 >                        else if (f instanceof ReservationNode)
1949 >                            throw new IllegalStateException("Recursive update");
1950 >                    }
1951 >                }
1952 >                if (binCount != 0) {
1953 >                    if (binCount >= TREEIFY_THRESHOLD)
1954 >                        treeifyBin(tab, i);
1955 >                    break;
1956 >                }
1957 >            }
1958 >        }
1959 >        if (delta != 0)
1960 >            addCount((long)delta, binCount);
1961 >        return val;
1962 >    }
1963 >
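In both computeIfPresent and compute, a null result from the remapping
function removes the mapping (the delta = -1 paths above), while a non-null
result installs it. For example (hypothetical demo code, not part of this
file):

import java.util.concurrent.ConcurrentHashMap;

class ComputeDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String,Integer> m = new ConcurrentHashMap<>();
        m.compute("hits", (k, v) -> (v == null) ? 1 : v + 1); // absent -> 1
        m.compute("hits", (k, v) -> (v == null) ? 1 : v + 1); // 1 -> 2
        m.computeIfPresent("hits", (k, v) -> null);           // removes mapping
        System.out.println(m.containsKey("hits"));            // false
    }
}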
1964 >    /**
1965 >     * If the specified key is not already associated with a
1966 >     * (non-null) value, associates it with the given value.
1967 >     * Otherwise, replaces the value with the results of the given
1968 >     * remapping function, or removes if {@code null}. The entire
1969 >     * method invocation is performed atomically.  Some attempted
1970 >     * update operations on this map by other threads may be blocked
1971 >     * while computation is in progress, so the computation should be
1972 >     * short and simple, and must not attempt to update any other
1973 >     * mappings of this Map.
1974 >     *
1975 >     * @param key key with which the specified value is to be associated
1976 >     * @param value the value to use if absent
1977 >     * @param remappingFunction the function to recompute a value if present
1978 >     * @return the new value associated with the specified key, or null if none
1979 >     * @throws NullPointerException if the specified key or the
1980 >     *         remappingFunction is null
1981 >     * @throws RuntimeException or Error if the remappingFunction does so,
1982 >     *         in which case the mapping is unchanged
1983 >     */
1984 >    public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
1985 >        if (key == null || value == null || remappingFunction == null)
1986 >            throw new NullPointerException();
1987 >        int h = spread(key.hashCode());
1988 >        V val = null;
1989 >        int delta = 0;
1990 >        int binCount = 0;
1991 >        for (Node<K,V>[] tab = table;;) {
1992 >            Node<K,V> f; int n, i, fh;
1993 >            if (tab == null || (n = tab.length) == 0)
1994 >                tab = initTable();
1995 >            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1996 >                if (casTabAt(tab, i, null, new Node<K,V>(h, key, value, null))) {
1997 >                    delta = 1;
1998 >                    val = value;
1999 >                    break;
2000 >                }
2001 >            }
2002 >            else if ((fh = f.hash) == MOVED)
2003 >                tab = helpTransfer(tab, f);
2004 >            else {
2005 >                synchronized (f) {
2006 >                    if (tabAt(tab, i) == f) {
2007 >                        if (fh >= 0) {
2008 >                            binCount = 1;
2009 >                            for (Node<K,V> e = f, pred = null;; ++binCount) {
2010 >                                K ek;
2011 >                                if (e.hash == h &&
2012 >                                    ((ek = e.key) == key ||
2013 >                                     (ek != null && key.equals(ek)))) {
2014 >                                    val = remappingFunction.apply(e.val, value);
2015 >                                    if (val != null)
2016 >                                        e.val = val;
2017 >                                    else {
2018 >                                        delta = -1;
2019 >                                        Node<K,V> en = e.next;
2020 >                                        if (pred != null)
2021 >                                            pred.next = en;
2022 >                                        else
2023 >                                            setTabAt(tab, i, en);
2024 >                                    }
2025 >                                    break;
2026 >                                }
2027 >                                pred = e;
2028 >                                if ((e = e.next) == null) {
2029 >                                    delta = 1;
2030 >                                    val = value;
2031 >                                    pred.next =
2032 >                                        new Node<K,V>(h, key, val, null);
2033 >                                    break;
2034 >                                }
2035 >                            }
2036 >                        }
2037 >                        else if (f instanceof TreeBin) {
2038 >                            binCount = 2;
2039 >                            TreeBin<K,V> t = (TreeBin<K,V>)f;
2040 >                            TreeNode<K,V> r = t.root;
2041 >                            TreeNode<K,V> p = (r == null) ? null :
2042 >                                r.findTreeNode(h, key, null);
2043 >                            val = (p == null) ? value :
2044 >                                remappingFunction.apply(p.val, value);
2045 >                            if (val != null) {
2046 >                                if (p != null)
2047 >                                    p.val = val;
2048 >                                else {
2049 >                                    delta = 1;
2050 >                                    t.putTreeVal(h, key, val);
2051 >                                }
2052 >                            }
2053 >                            else if (p != null) {
2054 >                                delta = -1;
2055 >                                if (t.removeTreeNode(p))
2056 >                                    setTabAt(tab, i, untreeify(t.first));
2057 >                            }
2058 >                        }
2059 >                        else if (f instanceof ReservationNode)
2060 >                            throw new IllegalStateException("Recursive update");
2061 >                    }
2062 >                }
2063 >                if (binCount != 0) {
2064 >                    if (binCount >= TREEIFY_THRESHOLD)
2065 >                        treeifyBin(tab, i);
2066 >                    break;
2067 >                }
2068 >            }
2069 >        }
2070 >        if (delta != 0)
2071 >            addCount((long)delta, binCount);
2072 >        return val;
2073 >    }
2074 >
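merge covers the common read-modify-write patterns without an explicit retry
loop; frequency counting is the classic use. A sketch (hypothetical demo code,
not part of this file):

import java.util.concurrent.ConcurrentHashMap;

class MergeDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String,Integer> counts = new ConcurrentHashMap<>();
        counts.merge("word", 1, Integer::sum);     // absent: installs 1
        counts.merge("word", 1, Integer::sum);     // present: 1 + 1 = 2
        counts.merge("word", 0, (old, d) -> null); // null result removes
        System.out.println(counts.containsKey("word")); // false
    }
}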
2075 >    // Hashtable legacy methods
2076 >
2077 >    /**
2078 >     * Tests if some key maps into the specified value in this table.
2079 >     *
2080 >     * <p>Note that this method is identical in functionality to
2081 >     * {@link #containsValue(Object)}, and exists solely to ensure
2082 >     * full compatibility with class {@link java.util.Hashtable},
2083 >     * which supported this method prior to introduction of the
2084 >     * Java Collections Framework.
2085 >     *
2086 >     * @param  value a value to search for
2087 >     * @return {@code true} if and only if some key maps to the
2088 >     *         {@code value} argument in this table as
2089 >     *         determined by the {@code equals} method;
2090 >     *         {@code false} otherwise
2091 >     * @throws NullPointerException if the specified value is null
2092 >     */
2093 >    public boolean contains(Object value) {
2094 >        return containsValue(value);
2095 >    }
2096 >
2097 >    /**
2098 >     * Returns an enumeration of the keys in this table.
2099 >     *
2100 >     * @return an enumeration of the keys in this table
2101 >     * @see #keySet()
2102 >     */
2103 >    public Enumeration<K> keys() {
2104 >        Node<K,V>[] t;
2105 >        int f = (t = table) == null ? 0 : t.length;
2106 >        return new KeyIterator<K,V>(t, f, 0, f, this);
2107 >    }
2108 >
2109 >    /**
2110 >     * Returns an enumeration of the values in this table.
2111 >     *
2112 >     * @return an enumeration of the values in this table
2113 >     * @see #values()
2114 >     */
2115 >    public Enumeration<V> elements() {
2116 >        Node<K,V>[] t;
2117 >        int f = (t = table) == null ? 0 : t.length;
2118 >        return new ValueIterator<K,V>(t, f, 0, f, this);
2119 >    }
2120 >
2121 >    // ConcurrentHashMap-only methods
2122 >
2123 >    /**
2124 >     * Returns the number of mappings. This method should be used
2125 >     * instead of {@link #size} because a ConcurrentHashMap may
2126 >     * contain more mappings than can be represented as an int. The
2127 >     * value returned is an estimate; the actual count may differ if
2128 >     * there are concurrent insertions or removals.
2129 >     *
2130 >     * @return the number of mappings
2131 >     * @since 1.8
2132 >     */
2133 >    public long mappingCount() {
2134 >        long n = sumCount();
2135 >        return (n < 0L) ? 0L : n; // ignore transient negative values
2136 >    }
2137 >
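Because the count is kept as a long, mappingCount avoids the saturation that
size() suffers for very large maps. A sketch (hypothetical demo code, not part
of this file):

import java.util.concurrent.ConcurrentHashMap;

class CountDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<Long,Long> m = new ConcurrentHashMap<>();
        m.put(1L, 1L);
        // long-valued estimate; size() would clamp at Integer.MAX_VALUE
        System.out.println(m.mappingCount()); // 1
    }
}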
2138 >    /**
2139 >     * Creates a new {@link Set} backed by a ConcurrentHashMap
2140 >     * from the given type to {@code Boolean.TRUE}.
2141 >     *
2142 >     * @param <K> the element type of the returned set
2143 >     * @return the new set
2144 >     * @since 1.8
2145 >     */
2146 >    public static <K> KeySetView<K,Boolean> newKeySet() {
2147 >        return new KeySetView<K,Boolean>
2148 >            (new ConcurrentHashMap<K,Boolean>(), Boolean.TRUE);
2149 >    }
2150 >
2151 >    /**
2152 >     * Creates a new {@link Set} backed by a ConcurrentHashMap
2153 >     * from the given type to {@code Boolean.TRUE}.
2154 >     *
2155 >     * @param initialCapacity The implementation performs internal
2156 >     * sizing to accommodate this many elements.
2157 >     * @param <K> the element type of the returned set
2158 >     * @return the new set
2159 >     * @throws IllegalArgumentException if the initial capacity of
2160 >     * elements is negative
2161 >     * @since 1.8
2162 >     */
2163 >    public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
2164 >        return new KeySetView<K,Boolean>
2165 >            (new ConcurrentHashMap<K,Boolean>(initialCapacity), Boolean.TRUE);
2166 >    }
2167 >
2168 >    /**
2169 >     * Returns a {@link Set} view of the keys in this map, using the
2170 >     * given common mapped value for any additions (i.e., {@link
2171 >     * Collection#add} and {@link Collection#addAll(Collection)}).
2172 >     * This is of course only appropriate if it is acceptable to use
2173 >     * the same value for all additions from this view.
2174 >     *
2175 >     * @param mappedValue the mapped value to use for any additions
2176 >     * @return the set view
2177 >     * @throws NullPointerException if the mappedValue is null
2178 >     */
2179 >    public KeySetView<K,V> keySet(V mappedValue) {
2180 >        if (mappedValue == null)
2181 >            throw new NullPointerException();
2182 >        return new KeySetView<K,V>(this, mappedValue);
2183 >    }
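A usage sketch of the common-mapped-value view (the names are hypothetical):

    ConcurrentHashMap<String,String> owners = new ConcurrentHashMap<>();
    Set<String> unassigned = owners.keySet("unassigned");
    unassigned.add("task-1");       // maps "task-1" to "unassigned" if absent
    owners.put("task-2", "alice");
    boolean b = unassigned.contains("task-2");  // true: the view reflects all keys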
2184 >
2185 >    /* ---------------- Special Nodes -------------- */
2186 >
2187 >    /**
2188 >     * A node inserted at head of bins during transfer operations.
2189 >     */
2190 >    static final class ForwardingNode<K,V> extends Node<K,V> {
2191 >        final Node<K,V>[] nextTable;
2192 >        ForwardingNode(Node<K,V>[] tab) {
2193 >            super(MOVED, null, null, null);
2194 >            this.nextTable = tab;
2195 >        }
2196 >
2197 >        Node<K,V> find(int h, Object k) {
2198 >            // loop to avoid arbitrarily deep recursion on forwarding nodes
2199 >            outer: for (Node<K,V>[] tab = nextTable;;) {
2200 >                Node<K,V> e; int n;
2201 >                if (k == null || tab == null || (n = tab.length) == 0 ||
2202 >                    (e = tabAt(tab, (n - 1) & h)) == null)
2203 >                    return null;
2204 >                for (;;) {
2205 >                    int eh; K ek;
2206 >                    if ((eh = e.hash) == h &&
2207 >                        ((ek = e.key) == k || (ek != null && k.equals(ek))))
2208 >                        return e;
2209 >                    if (eh < 0) {
2210 >                        if (e instanceof ForwardingNode) {
2211 >                            tab = ((ForwardingNode<K,V>)e).nextTable;
2212 >                            continue outer;
2213 >                        }
2214 >                        else
2215 >                            return e.find(h, k);
2216 >                    }
2217 >                    if ((e = e.next) == null)
2218 >                        return null;
2219 >                }
2220 >            }
2221 >        }
2222 >    }
2223 >
2224 >    /**
2225 >     * A place-holder node used in computeIfAbsent and compute.
2226 >     */
2227 >    static final class ReservationNode<K,V> extends Node<K,V> {
2228 >        ReservationNode() {
2229 >            super(RESERVED, null, null, null);
2230 >        }
2231 >
2232 >        Node<K,V> find(int h, Object k) {
2233 >            return null;
2234 >        }
2235 >    }
2236 >
2237 >    /* ---------------- Table Initialization and Resizing -------------- */
2238 >
2239 >    /**
2240 >     * Returns the stamp bits for resizing a table of size n.
2241 >     * Must be negative when shifted left by RESIZE_STAMP_SHIFT.
2242 >     */
2243 >    static final int resizeStamp(int n) {
2244 >        return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
2245 >    }
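A worked example of the stamp encoding, assuming RESIZE_STAMP_BITS is 16 and RESIZE_STAMP_SHIFT is 32 - RESIZE_STAMP_BITS, as defined elsewhere in this class:

    // For n = 16: Integer.numberOfLeadingZeros(16) == 27, so
    // resizeStamp(16) == 27 | (1 << 15) == 0x801B.
    // While a resize is in progress, sizeCtl holds
    // (stamp << RESIZE_STAMP_SHIFT) + 1 + (number of resizing threads):
    int sc = (0x801B << 16) + 2;  // == 0x801B0002, negative because the
                                  // shifted stamp sets the sign bit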
2246 >
2247 >    /**
2248 >     * Initializes table, using the size recorded in sizeCtl.
2249 >     */
2250 >    private final Node<K,V>[] initTable() {
2251 >        Node<K,V>[] tab; int sc;
2252 >        while ((tab = table) == null || tab.length == 0) {
2253 >            if ((sc = sizeCtl) < 0)
2254 >                Thread.yield(); // lost initialization race; just spin
2255 >            else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
2256 >                try {
2257 >                    if ((tab = table) == null || tab.length == 0) {
2258 >                        int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
2259 >                        @SuppressWarnings("unchecked")
2260 >                        Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
2261 >                        table = tab = nt;
2262 >                        sc = n - (n >>> 2);
2263 >                    }
2264 >                } finally {
2265 >                    sizeCtl = sc;
2266 >                }
2267 >                break;
2268 >            }
2269 >        }
2270 >        return tab;
2271 >    }
2272 >
2273 >    /**
2274 >     * Adds to count, and if table is too small and not already
2275 >     * resizing, initiates transfer. If already resizing, helps
2276 >     * perform transfer if work is available.  Rechecks occupancy
2277 >     * after a transfer to see if another resize is already needed
2278 >     * because resizings are lagging additions.
2279 >     *
2280 >     * @param x the count to add
2281 >     * @param check if <0, don't check resize; if <= 1, only check if uncontended
2282 >     */
2283 >    private final void addCount(long x, int check) {
2284 >        CounterCell[] as; long b, s;
2285 >        if ((as = counterCells) != null ||
2286 >            !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
2287 >            CounterCell a; long v; int m;
2288 >            boolean uncontended = true;
2289 >            if (as == null || (m = as.length - 1) < 0 ||
2290 >                (a = as[ThreadLocalRandom.getProbe() & m]) == null ||
2291 >                !(uncontended =
2292 >                  U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
2293 >                fullAddCount(x, uncontended);
2294 >                return;
2295 >            }
2296 >            if (check <= 1)
2297 >                return;
2298 >            s = sumCount();
2299 >        }
2300 >        if (check >= 0) {
2301 >            Node<K,V>[] tab, nt; int n, sc;
2302 >            while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
2303 >                   (n = tab.length) < MAXIMUM_CAPACITY) {
2304 >                int rs = resizeStamp(n);
2305 >                if (sc < 0) {
2306 >                    if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
2307 >                        sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
2308 >                        transferIndex <= 0)
2309 >                        break;
2310 >                    if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
2311 >                        transfer(tab, nt);
2312 >                }
2313 >                else if (U.compareAndSwapInt(this, SIZECTL, sc,
2314 >                                             (rs << RESIZE_STAMP_SHIFT) + 2))
2315 >                    transfer(tab, null);
2316 >                s = sumCount();
2317 >            }
2318 >        }
2319 >    }
2320 >
2321 >    /**
2322 >     * Helps transfer if a resize is in progress.
2323 >     */
2324 >    final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
2325 >        Node<K,V>[] nextTab; int sc;
2326 >        if (tab != null && (f instanceof ForwardingNode) &&
2327 >            (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
2328 >            int rs = resizeStamp(tab.length);
2329 >            while (nextTab == nextTable && table == tab &&
2330 >                   (sc = sizeCtl) < 0) {
2331 >                if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
2332 >                    sc == rs + MAX_RESIZERS || transferIndex <= 0)
2333 >                    break;
2334 >                if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) {
2335 >                    transfer(tab, nextTab);
2336 >                    break;
2337 >                }
2338 >            }
2339 >            return nextTab;
2340 >        }
2341 >        return table;
2342 >    }
2343 >
2344 >    /**
2345 >     * Tries to presize table to accommodate the given number of elements.
2346 >     *
2347 >     * @param size number of elements (doesn't need to be perfectly accurate)
2348 >     */
2349 >    private final void tryPresize(int size) {
2350 >        int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
2351 >            tableSizeFor(size + (size >>> 1) + 1);
2352 >        int sc;
2353 >        while ((sc = sizeCtl) >= 0) {
2354 >            Node<K,V>[] tab = table; int n;
2355 >            if (tab == null || (n = tab.length) == 0) {
2356 >                n = (sc > c) ? sc : c;
2357 >                if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
2358 >                    try {
2359 >                        if (table == tab) {
2360 >                            @SuppressWarnings("unchecked")
2361 >                            Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
2362 >                            table = nt;
2363 >                            sc = n - (n >>> 2);
2364 >                        }
2365 >                    } finally {
2366 >                        sizeCtl = sc;
2367 >                    }
2368 >                }
2369 >            }
2370 >            else if (c <= sc || n >= MAXIMUM_CAPACITY)
2371 >                break;
2372 >            else if (tab == table) {
2373 >                int rs = resizeStamp(n);
2374 >                if (U.compareAndSwapInt(this, SIZECTL, sc,
2375 >                                        (rs << RESIZE_STAMP_SHIFT) + 2))
2376 >                    transfer(tab, null);
2377 >            }
2378 >        }
2379 >    }
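A sketch of the target-capacity rule above, with a stand-in for tableSizeFor (defined elsewhere in this class, where it also clamps to MAXIMUM_CAPACITY; size >= 1 is assumed here):

    static int demoTargetCapacity(int size) {
        int padded = size + (size >>> 1) + 1;  // 1.5x slack keeps occupancy
                                               // below the 0.75 resize threshold
        return (-1 >>> Integer.numberOfLeadingZeros(padded - 1)) + 1;
    }
    // demoTargetCapacity(100): 100 + 50 + 1 = 151, rounded up to 256.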
2380 >
2381 >    /**
2382 >     * Moves and/or copies the nodes in each bin to new table. See
2383 >     * above for explanation.
2384 >     */
2385 >    private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
2386 >        int n = tab.length, stride;
2387 >        if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
2388 >            stride = MIN_TRANSFER_STRIDE; // subdivide range
2389 >        if (nextTab == null) {            // initiating
2390 >            try {
2391 >                @SuppressWarnings("unchecked")
2392 >                Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];
2393 >                nextTab = nt;
2394 >            } catch (Throwable ex) {      // try to cope with OOME
2395 >                sizeCtl = Integer.MAX_VALUE;
2396 >                return;
2397 >            }
2398 >            nextTable = nextTab;
2399 >            transferIndex = n;
2400 >        }
2401 >        int nextn = nextTab.length;
2402 >        ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
2403 >        boolean advance = true;
2404 >        boolean finishing = false; // to ensure sweep before committing nextTab
2405 >        for (int i = 0, bound = 0;;) {
2406 >            Node<K,V> f; int fh;
2407 >            while (advance) {
2408 >                int nextIndex, nextBound;
2409 >                if (--i >= bound || finishing)
2410 >                    advance = false;
2411 >                else if ((nextIndex = transferIndex) <= 0) {
2412 >                    i = -1;
2413 >                    advance = false;
2414 >                }
2415 >                else if (U.compareAndSwapInt
2416 >                         (this, TRANSFERINDEX, nextIndex,
2417 >                          nextBound = (nextIndex > stride ?
2418 >                                       nextIndex - stride : 0))) {
2419 >                    bound = nextBound;
2420 >                    i = nextIndex - 1;
2421 >                    advance = false;
2422 >                }
2423 >            }
2424 >            if (i < 0 || i >= n || i + n >= nextn) {
2425 >                int sc;
2426 >                if (finishing) {
2427 >                    nextTable = null;
2428 >                    table = nextTab;
2429 >                    sizeCtl = (n << 1) - (n >>> 1);
2430 >                    return;
2431 >                }
2432 >                if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
2433 >                    if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
2434 >                        return;
2435 >                    finishing = advance = true;
2436 >                    i = n; // recheck before commit
2437 >                }
2438 >            }
2439 >            else if ((f = tabAt(tab, i)) == null)
2440 >                advance = casTabAt(tab, i, null, fwd);
2441 >            else if ((fh = f.hash) == MOVED)
2442 >                advance = true; // already processed
2443 >            else {
2444 >                synchronized (f) {
2445 >                    if (tabAt(tab, i) == f) {
2446 >                        Node<K,V> ln, hn;
2447 >                        if (fh >= 0) {
2448 >                            int runBit = fh & n;
2449 >                            Node<K,V> lastRun = f;
2450 >                            for (Node<K,V> p = f.next; p != null; p = p.next) {
2451 >                                int b = p.hash & n;
2452 >                                if (b != runBit) {
2453 >                                    runBit = b;
2454 >                                    lastRun = p;
2455 >                                }
2456 >                            }
2457 >                            if (runBit == 0) {
2458 >                                ln = lastRun;
2459 >                                hn = null;
2460 >                            }
2461 >                            else {
2462 >                                hn = lastRun;
2463 >                                ln = null;
2464 >                            }
2465 >                            for (Node<K,V> p = f; p != lastRun; p = p.next) {
2466 >                                int ph = p.hash; K pk = p.key; V pv = p.val;
2467 >                                if ((ph & n) == 0)
2468 >                                    ln = new Node<K,V>(ph, pk, pv, ln);
2469 >                                else
2470 >                                    hn = new Node<K,V>(ph, pk, pv, hn);
2471 >                            }
2472 >                            setTabAt(nextTab, i, ln);
2473 >                            setTabAt(nextTab, i + n, hn);
2474 >                            setTabAt(tab, i, fwd);
2475 >                            advance = true;
2476 >                        }
2477 >                        else if (f instanceof TreeBin) {
2478 >                            TreeBin<K,V> t = (TreeBin<K,V>)f;
2479 >                            TreeNode<K,V> lo = null, loTail = null;
2480 >                            TreeNode<K,V> hi = null, hiTail = null;
2481 >                            int lc = 0, hc = 0;
2482 >                            for (Node<K,V> e = t.first; e != null; e = e.next) {
2483 >                                int h = e.hash;
2484 >                                TreeNode<K,V> p = new TreeNode<K,V>
2485 >                                    (h, e.key, e.val, null, null);
2486 >                                if ((h & n) == 0) {
2487 >                                    if ((p.prev = loTail) == null)
2488 >                                        lo = p;
2489 >                                    else
2490 >                                        loTail.next = p;
2491 >                                    loTail = p;
2492 >                                    ++lc;
2493 >                                }
2494 >                                else {
2495 >                                    if ((p.prev = hiTail) == null)
2496 >                                        hi = p;
2497 >                                    else
2498 >                                        hiTail.next = p;
2499 >                                    hiTail = p;
2500 >                                    ++hc;
2501 >                                }
2502 >                            }
2503 >                            ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
2504 >                                (hc != 0) ? new TreeBin<K,V>(lo) : t;
2505 >                            hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
2506 >                                (lc != 0) ? new TreeBin<K,V>(hi) : t;
2507 >                            setTabAt(nextTab, i, ln);
2508 >                            setTabAt(nextTab, i + n, hn);
2509 >                            setTabAt(tab, i, fwd);
2510 >                            advance = true;
2511 >                        }
2512 >                    }
2513 >                }
2514 >            }
2515 >        }
2516 >    }
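A worked example of the split invariant transfer relies on: because the old capacity n is a power of two, the single bit (h & n) decides whether an entry keeps its index in the doubled table or moves up by n (the hash here is hypothetical):

    int n = 16;                  // old table length
    int h = 0x3D;                // 0b111101
    int oldIndex = h & (n - 1);  // 13
    int newIndex = ((h & n) == 0) ? oldIndex : oldIndex + n;  // 13 + 16 = 29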
2517 >
2518 >    /* ---------------- Counter support -------------- */
2519 >
2520 >    /**
2521 >     * A padded cell for distributing counts.  Adapted from LongAdder
2522 >     * and Striped64.  See their internal docs for explanation.
2523 >     */
2524 >    @sun.misc.Contended static final class CounterCell {
2525 >        volatile long value;
2526 >        CounterCell(long x) { value = x; }
2527 >    }
2528 >
2529 >    final long sumCount() {
2530 >        CounterCell[] as = counterCells; CounterCell a;
2531 >        long sum = baseCount;
2532 >        if (as != null) {
2533 >            for (int i = 0; i < as.length; ++i) {
2534 >                if ((a = as[i]) != null)
2535 >                    sum += a.value;
2536 >            }
2537 >        }
2538 >        return sum;
2539 >    }
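A standalone sketch of the LongAdder-style striping idea behind CounterCell and sumCount. This is an illustration, not the actual code: the real version pins each thread to a cell via its probe, grows the cell array under contention, and pads cells against false sharing.

    import java.util.concurrent.ThreadLocalRandom;
    import java.util.concurrent.atomic.AtomicLong;
    import java.util.concurrent.atomic.AtomicLongArray;

    final class StripedCounter {
        private final AtomicLong base = new AtomicLong();
        private final AtomicLongArray cells =
            new AtomicLongArray(Runtime.getRuntime().availableProcessors());

        void add(long x) {
            long b = base.get();
            if (!base.compareAndSet(b, b + x)) {   // contended: use a stripe
                int i = ThreadLocalRandom.current().nextInt(cells.length());
                cells.addAndGet(i, x);
            }
        }

        long sum() {                               // best-effort snapshot,
            long s = base.get();                   // like sumCount() above
            for (int i = 0; i < cells.length(); ++i)
                s += cells.get(i);
            return s;
        }
    }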
2540 >
2541 >    // See LongAdder version for explanation
2542 >    private final void fullAddCount(long x, boolean wasUncontended) {
2543 >        int h;
2544 >        if ((h = ThreadLocalRandom.getProbe()) == 0) {
2545 >            ThreadLocalRandom.localInit();      // force initialization
2546 >            h = ThreadLocalRandom.getProbe();
2547 >            wasUncontended = true;
2548 >        }
2549 >        boolean collide = false;                // True if last slot nonempty
2550 >        for (;;) {
2551 >            CounterCell[] as; CounterCell a; int n; long v;
2552 >            if ((as = counterCells) != null && (n = as.length) > 0) {
2553 >                if ((a = as[(n - 1) & h]) == null) {
2554 >                    if (cellsBusy == 0) {            // Try to attach new Cell
2555 >                        CounterCell r = new CounterCell(x); // Optimistic create
2556 >                        if (cellsBusy == 0 &&
2557 >                            U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
2558 >                            boolean created = false;
2559 >                            try {               // Recheck under lock
2560 >                                CounterCell[] rs; int m, j;
2561 >                                if ((rs = counterCells) != null &&
2562 >                                    (m = rs.length) > 0 &&
2563 >                                    rs[j = (m - 1) & h] == null) {
2564 >                                    rs[j] = r;
2565 >                                    created = true;
2566 >                                }
2567 >                            } finally {
2568 >                                cellsBusy = 0;
2569 >                            }
2570 >                            if (created)
2571 >                                break;
2572 >                            continue;           // Slot is now non-empty
2573 >                        }
2574 >                    }
2575 >                    collide = false;
2576 >                }
2577 >                else if (!wasUncontended)       // CAS already known to fail
2578 >                    wasUncontended = true;      // Continue after rehash
2579 >                else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))
2580 >                    break;
2581 >                else if (counterCells != as || n >= NCPU)
2582 >                    collide = false;            // At max size or stale
2583 >                else if (!collide)
2584 >                    collide = true;
2585 >                else if (cellsBusy == 0 &&
2586 >                         U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
2587 >                    try {
2588 >                        if (counterCells == as) {// Expand table unless stale
2589 >                            CounterCell[] rs = new CounterCell[n << 1];
2590 >                            for (int i = 0; i < n; ++i)
2591 >                                rs[i] = as[i];
2592 >                            counterCells = rs;
2593 >                        }
2594 >                    } finally {
2595 >                        cellsBusy = 0;
2596 >                    }
2597 >                    collide = false;
2598 >                    continue;                   // Retry with expanded table
2599 >                }
2600 >                h = ThreadLocalRandom.advanceProbe(h);
2601 >            }
2602 >            else if (cellsBusy == 0 && counterCells == as &&
2603 >                     U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
2604 >                boolean init = false;
2605 >                try {                           // Initialize table
2606 >                    if (counterCells == as) {
2607 >                        CounterCell[] rs = new CounterCell[2];
2608 >                        rs[h & 1] = new CounterCell(x);
2609 >                        counterCells = rs;
2610 >                        init = true;
2611 >                    }
2612 >                } finally {
2613 >                    cellsBusy = 0;
2614 >                }
2615 >                if (init)
2616 >                    break;
2617 >            }
2618 >            else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x))
2619 >                break;                          // Fall back on using base
2620 >        }
2621 >    }
2622 >
2623 >    /* ---------------- Conversion from/to TreeBins -------------- */
2624 >
2625 >    /**
2626 >     * Replaces all linked nodes in bin at given index unless table is
2627 >     * too small, in which case resizes instead.
2628 >     */
2629 >    private final void treeifyBin(Node<K,V>[] tab, int index) {
2630 >        Node<K,V> b; int n;
2631 >        if (tab != null) {
2632 >            if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
2633 >                tryPresize(n << 1);
2634 >            else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
2635 >                synchronized (b) {
2636 >                    if (tabAt(tab, index) == b) {
2637 >                        TreeNode<K,V> hd = null, tl = null;
2638 >                        for (Node<K,V> e = b; e != null; e = e.next) {
2639 >                            TreeNode<K,V> p =
2640 >                                new TreeNode<K,V>(e.hash, e.key, e.val,
2641 >                                                  null, null);
2642 >                            if ((p.prev = tl) == null)
2643 >                                hd = p;
2644 >                            else
2645 >                                tl.next = p;
2646 >                            tl = p;
2647 >                        }
2648 >                        setTabAt(tab, index, new TreeBin<K,V>(hd));
2649 >                    }
2650 >                }
2651 >            }
2652 >        }
2653 >    }
2654 >
2655 >    /**
2656 >     * Returns a list of non-TreeNodes replacing those in given list.
2657 >     */
2658 >    static <K,V> Node<K,V> untreeify(Node<K,V> b) {
2659 >        Node<K,V> hd = null, tl = null;
2660 >        for (Node<K,V> q = b; q != null; q = q.next) {
2661 >            Node<K,V> p = new Node<K,V>(q.hash, q.key, q.val, null);
2662 >            if (tl == null)
2663 >                hd = p;
2664 >            else
2665 >                tl.next = p;
2666 >            tl = p;
2667 >        }
2668 >        return hd;
2669 >    }
2670 >
2671 >    /* ---------------- TreeNodes -------------- */
2672 >
2673 >    /**
2674 >     * Nodes for use in TreeBins.
2675 >     */
2676 >    static final class TreeNode<K,V> extends Node<K,V> {
2677 >        TreeNode<K,V> parent;  // red-black tree links
2678 >        TreeNode<K,V> left;
2679 >        TreeNode<K,V> right;
2680 >        TreeNode<K,V> prev;    // needed to unlink next upon deletion
2681 >        boolean red;
2682 >
2683 >        TreeNode(int hash, K key, V val, Node<K,V> next,
2684 >                 TreeNode<K,V> parent) {
2685 >            super(hash, key, val, next);
2686 >            this.parent = parent;
2687 >        }
2688 >
2689 >        Node<K,V> find(int h, Object k) {
2690 >            return findTreeNode(h, k, null);
2691 >        }
2692  
2693          /**
2694 <         * The table is rehashed when its size exceeds this threshold.
2695 <         * (The value of this field is always (int)(capacity *
196 <         * loadFactor).)
2694 >         * Returns the TreeNode (or null if not found) for the given key
2695 >         * starting at given root.
2696           */
2697 <        private transient int threshold;
2697 >        final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) {
2698 >            if (k != null) {
2699 >                TreeNode<K,V> p = this;
2700 >                do {
2701 >                    int ph, dir; K pk; TreeNode<K,V> q;
2702 >                    TreeNode<K,V> pl = p.left, pr = p.right;
2703 >                    if ((ph = p.hash) > h)
2704 >                        p = pl;
2705 >                    else if (ph < h)
2706 >                        p = pr;
2707 >                    else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
2708 >                        return p;
2709 >                    else if (pl == null)
2710 >                        p = pr;
2711 >                    else if (pr == null)
2712 >                        p = pl;
2713 >                    else if ((kc != null ||
2714 >                              (kc = comparableClassFor(k)) != null) &&
2715 >                             (dir = compareComparables(kc, k, pk)) != 0)
2716 >                        p = (dir < 0) ? pl : pr;
2717 >                    else if ((q = pr.findTreeNode(h, k, kc)) != null)
2718 >                        return q;
2719 >                    else
2720 >                        p = pl;
2721 >                } while (p != null);
2722 >            }
2723 >            return null;
2724 >        }
2725 >    }
2726 >
2727 >    /* ---------------- TreeBins -------------- */
2728 >
2729 >    /**
2730 >     * TreeNodes used at the heads of bins. TreeBins do not hold user
2731 >     * keys or values, but instead point to a list of TreeNodes and
2732 >     * their root. They also maintain a parasitic read-write lock
2733 >     * forcing writers (who hold bin lock) to wait for readers (who do
2734 >     * not) to complete before tree restructuring operations.
2735 >     */
2736 >    static final class TreeBin<K,V> extends Node<K,V> {
2737 >        TreeNode<K,V> root;
2738 >        volatile TreeNode<K,V> first;
2739 >        volatile Thread waiter;
2740 >        volatile int lockState;
2741 >        // values for lockState
2742 >        static final int WRITER = 1; // set while holding write lock
2743 >        static final int WAITER = 2; // set when waiting for write lock
2744 >        static final int READER = 4; // increment value for setting read lock
2745  
2746          /**
2747 <         * The per-segment table
2747 >         * Tie-breaking utility for ordering insertions when hashCodes
2748 >         * are equal and keys are non-comparable. We don't require a total
2749 >         * order, just a consistent insertion rule to maintain
2750 >         * equivalence across rebalancings. Tie-breaking further than
2751 >         * necessary simplifies testing a bit.
2752           */
2753 <        transient HashEntry[] table;
2753 >        static int tieBreakOrder(Object a, Object b) {
2754 >            int d;
2755 >            if (a == null || b == null ||
2756 >                (d = a.getClass().getName().
2757 >                 compareTo(b.getClass().getName())) == 0)
2758 >                d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
2759 >                     -1 : 1);
2760 >            return d;
2761 >        }
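An illustration with hypothetical keys: two non-comparable keys of the same class always receive a nonzero order via identityHashCode, so equal-hash keys still get a deterministic left/right placement in the tree:

    Object a = new Object(), b = new Object();
    int d = tieBreakOrder(a, b);  // -1 or 1, never 0; the class names tie here,
                                  // so System.identityHashCode decides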
2762  
2763          /**
2764 <         * The load factor for the hash table.  Even though this value
207 <         * is same for all segments, it is replicated to avoid needing
208 <         * links to outer object.
209 <         * @serial
2764 >         * Creates bin with initial set of nodes headed by b.
2765           */
2766 <        private final float loadFactor;
2767 <
2768 <        Segment(int initialCapacity, float lf) {
2769 <            loadFactor = lf;
2770 <            setTable(new HashEntry[initialCapacity]);
2766 >        TreeBin(TreeNode<K,V> b) {
2767 >            super(TREEBIN, null, null, null);
2768 >            this.first = b;
2769 >            TreeNode<K,V> r = null;
2770 >            for (TreeNode<K,V> x = b, next; x != null; x = next) {
2771 >                next = (TreeNode<K,V>)x.next;
2772 >                x.left = x.right = null;
2773 >                if (r == null) {
2774 >                    x.parent = null;
2775 >                    x.red = false;
2776 >                    r = x;
2777 >                }
2778 >                else {
2779 >                    K k = x.key;
2780 >                    int h = x.hash;
2781 >                    Class<?> kc = null;
2782 >                    for (TreeNode<K,V> p = r;;) {
2783 >                        int dir, ph;
2784 >                        K pk = p.key;
2785 >                        if ((ph = p.hash) > h)
2786 >                            dir = -1;
2787 >                        else if (ph < h)
2788 >                            dir = 1;
2789 >                        else if ((kc == null &&
2790 >                                  (kc = comparableClassFor(k)) == null) ||
2791 >                                 (dir = compareComparables(kc, k, pk)) == 0)
2792 >                            dir = tieBreakOrder(k, pk);
2793 >                        TreeNode<K,V> xp = p;
2794 >                        if ((p = (dir <= 0) ? p.left : p.right) == null) {
2795 >                            x.parent = xp;
2796 >                            if (dir <= 0)
2797 >                                xp.left = x;
2798 >                            else
2799 >                                xp.right = x;
2800 >                            r = balanceInsertion(r, x);
2801 >                            break;
2802 >                        }
2803 >                    }
2804 >                }
2805 >            }
2806 >            this.root = r;
2807 >            assert checkInvariants(root);
2808          }
2809  
2810          /**
2811 <         * Set table to new HashEntry array.
2812 <         * Call only while holding lock or in constructor.
2813 <         **/
2814 <        private void setTable(HashEntry[] newTable) {
2815 <            table = newTable;
224 <            threshold = (int)(newTable.length * loadFactor);
225 <            count = count; // write-volatile
2811 >         * Acquires write lock for tree restructuring.
2812 >         */
2813 >        private final void lockRoot() {
2814 >            if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER))
2815 >                contendedLock(); // offload to separate method
2816          }
2817  
2818 <        /* Specialized implementations of map methods */
2818 >        /**
2819 >         * Releases write lock for tree restructuring.
2820 >         */
2821 >        private final void unlockRoot() {
2822 >            lockState = 0;
2823 >        }
2824  
2825 <        V get(K key, int hash) {
2826 <            if (count != 0) { // read-volatile
2827 <                HashEntry[] tab = table;
2828 <                int index = hash & (tab.length - 1);
2829 <                HashEntry<K,V> e = (HashEntry<K,V>) tab[index];
2830 <                while (e != null) {
2831 <                    if (e.hash == hash && key.equals(e.key))
2832 <                        return e.value;
2833 <                    e = e.next;
2825 >        /**
2826 >         * Possibly blocks awaiting root lock.
2827 >         */
2828 >        private final void contendedLock() {
2829 >            boolean waiting = false;
2830 >            for (int s;;) {
2831 >                if (((s = lockState) & ~WAITER) == 0) {
2832 >                    if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) {
2833 >                        if (waiting)
2834 >                            waiter = null;
2835 >                        return;
2836 >                    }
2837 >                }
2838 >                else if ((s & WAITER) == 0) {
2839 >                    if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) {
2840 >                        waiting = true;
2841 >                        waiter = Thread.currentThread();
2842 >                    }
2843                  }
2844 +                else if (waiting)
2845 +                    LockSupport.park(this);
2846              }
241            return null;
2847          }
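A standalone sketch of the same lockState protocol as a tiny non-reentrant read-write lock (assumed names; an illustration, not the TreeBin code). As in TreeBin, at most one writer contends at a time, because writers already hold the bin's synchronized lock:

    import java.util.concurrent.atomic.AtomicInteger;
    import java.util.concurrent.locks.LockSupport;

    final class ParasiticRWLock {
        static final int WRITER = 1, WAITER = 2, READER = 4;
        private final AtomicInteger state = new AtomicInteger();
        private volatile Thread waiter;

        boolean tryReadLock() {            // readers never block; may fail
            int s = state.get();
            return (s & (WAITER | WRITER)) == 0
                && state.compareAndSet(s, s + READER);
        }

        void readUnlock() {                // last reader wakes a parked writer
            Thread w;
            if (state.addAndGet(-READER) == WAITER && (w = waiter) != null)
                LockSupport.unpark(w);
        }

        void writeLock() {
            boolean waiting = false;
            for (;;) {
                int s = state.get();
                if ((s & ~WAITER) == 0) {                 // no readers or writer
                    if (state.compareAndSet(s, WRITER)) {
                        if (waiting) waiter = null;
                        return;
                    }
                } else if ((s & WAITER) == 0) {           // announce the wait
                    if (state.compareAndSet(s, s | WAITER)) {
                        waiting = true;
                        waiter = Thread.currentThread();
                    }
                } else if (waiting)
                    LockSupport.park(this);
            }
        }

        void writeUnlock() { state.set(0); }
    }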
2848  
2849 <        boolean containsKey(Object key, int hash) {
2850 <            if (count != 0) { // read-volatile
2851 <                HashEntry[] tab = table;
2852 <                int index = hash & (tab.length - 1);
2853 <                HashEntry<K,V> e = (HashEntry<K,V>) tab[index];
2854 <                while (e != null) {
2855 <                    if (e.hash == hash && key.equals(e.key))
2856 <                        return true;
2857 <                    e = e.next;
2849 >        /**
2850 >         * Returns matching node or null if none. Tries to search
2851 >         * using tree comparisons from root, but continues linear
2852 >         * search when lock not available.
2853 >         */
2854 >        final Node<K,V> find(int h, Object k) {
2855 >            if (k != null) {
2856 >                for (Node<K,V> e = first; e != null; ) {
2857 >                    int s; K ek;
2858 >                    if (((s = lockState) & (WAITER|WRITER)) != 0) {
2859 >                        if (e.hash == h &&
2860 >                            ((ek = e.key) == k || (ek != null && k.equals(ek))))
2861 >                            return e;
2862 >                        e = e.next;
2863 >                    }
2864 >                    else if (U.compareAndSwapInt(this, LOCKSTATE, s,
2865 >                                                 s + READER)) {
2866 >                        TreeNode<K,V> r, p;
2867 >                        try {
2868 >                            p = ((r = root) == null ? null :
2869 >                                 r.findTreeNode(h, k, null));
2870 >                        } finally {
2871 >                            Thread w;
2872 >                            if (U.getAndAddInt(this, LOCKSTATE, -READER) ==
2873 >                                (READER|WAITER) && (w = waiter) != null)
2874 >                                LockSupport.unpark(w);
2875 >                        }
2876 >                        return p;
2877 >                    }
2878                  }
2879              }
2880 <            return false;
2880 >            return null;
2881          }
2882  
2883 <        boolean containsValue(Object value) {
2884 <            if (count != 0) { // read-volatile
2885 <                HashEntry[] tab = table;
2886 <                int len = tab.length;
2887 <                for (int i = 0 ; i < len; i++)
2888 <                    for (HashEntry<K,V> e = (HashEntry<K,V>)tab[i] ; e != null ; e = e.next)
2889 <                        if (value.equals(e.value))
2890 <                            return true;
2883 >        /**
2884 >         * Finds or adds a node.
2885 >         * @return null if added
2886 >         */
2887 >        final TreeNode<K,V> putTreeVal(int h, K k, V v) {
2888 >            Class<?> kc = null;
2889 >            boolean searched = false;
2890 >            for (TreeNode<K,V> p = root;;) {
2891 >                int dir, ph; K pk;
2892 >                if (p == null) {
2893 >                    first = root = new TreeNode<K,V>(h, k, v, null, null);
2894 >                    break;
2895 >                }
2896 >                else if ((ph = p.hash) > h)
2897 >                    dir = -1;
2898 >                else if (ph < h)
2899 >                    dir = 1;
2900 >                else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
2901 >                    return p;
2902 >                else if ((kc == null &&
2903 >                          (kc = comparableClassFor(k)) == null) ||
2904 >                         (dir = compareComparables(kc, k, pk)) == 0) {
2905 >                    if (!searched) {
2906 >                        TreeNode<K,V> q, ch;
2907 >                        searched = true;
2908 >                        if (((ch = p.left) != null &&
2909 >                             (q = ch.findTreeNode(h, k, kc)) != null) ||
2910 >                            ((ch = p.right) != null &&
2911 >                             (q = ch.findTreeNode(h, k, kc)) != null))
2912 >                            return q;
2913 >                    }
2914 >                    dir = tieBreakOrder(k, pk);
2915 >                }
2916 >
2917 >                TreeNode<K,V> xp = p;
2918 >                if ((p = (dir <= 0) ? p.left : p.right) == null) {
2919 >                    TreeNode<K,V> x, f = first;
2920 >                    first = x = new TreeNode<K,V>(h, k, v, f, xp);
2921 >                    if (f != null)
2922 >                        f.prev = x;
2923 >                    if (dir <= 0)
2924 >                        xp.left = x;
2925 >                    else
2926 >                        xp.right = x;
2927 >                    if (!xp.red)
2928 >                        x.red = true;
2929 >                    else {
2930 >                        lockRoot();
2931 >                        try {
2932 >                            root = balanceInsertion(root, x);
2933 >                        } finally {
2934 >                            unlockRoot();
2935 >                        }
2936 >                    }
2937 >                    break;
2938 >                }
2939              }
2940 <            return false;
2940 >            assert checkInvariants(root);
2941 >            return null;
2942          }
2943  
2944 <        V put(K key, int hash, V value, boolean onlyIfAbsent) {
2945 <            lock();
2944 >        /**
2945 >         * Removes the given node, that must be present before this
2946 >         * call.  This is messier than typical red-black deletion code
2947 >         * because we cannot swap the contents of an interior node
2948 >         * with a leaf successor that is pinned by "next" pointers
2949 >         * that are accessible independently of lock. So instead we
2950 >         * swap the tree linkages.
2951 >         *
2952 >         * @return true if now too small, so should be untreeified
2953 >         */
2954 >        final boolean removeTreeNode(TreeNode<K,V> p) {
2955 >            TreeNode<K,V> next = (TreeNode<K,V>)p.next;
2956 >            TreeNode<K,V> pred = p.prev;  // unlink traversal pointers
2957 >            TreeNode<K,V> r, rl;
2958 >            if (pred == null)
2959 >                first = next;
2960 >            else
2961 >                pred.next = next;
2962 >            if (next != null)
2963 >                next.prev = pred;
2964 >            if (first == null) {
2965 >                root = null;
2966 >                return true;
2967 >            }
2968 >            if ((r = root) == null || r.right == null || // too small
2969 >                (rl = r.left) == null || rl.left == null)
2970 >                return true;
2971 >            lockRoot();
2972              try {
2973 <                int c = count;
2974 <                HashEntry[] tab = table;
2975 <                int index = hash & (tab.length - 1);
2976 <                HashEntry<K,V> first = (HashEntry<K,V>) tab[index];
2977 <
2978 <                for (HashEntry<K,V> e = first; e != null; e = (HashEntry<K,V>) e.next) {
2979 <                    if (e.hash == hash && key.equals(e.key)) {
2980 <                        V oldValue = e.value;
2981 <                        if (!onlyIfAbsent)
2982 <                            e.value = value;
2983 <                        count = c; // write-volatile
2984 <                        return oldValue;
2973 >                TreeNode<K,V> replacement;
2974 >                TreeNode<K,V> pl = p.left;
2975 >                TreeNode<K,V> pr = p.right;
2976 >                if (pl != null && pr != null) {
2977 >                    TreeNode<K,V> s = pr, sl;
2978 >                    while ((sl = s.left) != null) // find successor
2979 >                        s = sl;
2980 >                    boolean c = s.red; s.red = p.red; p.red = c; // swap colors
2981 >                    TreeNode<K,V> sr = s.right;
2982 >                    TreeNode<K,V> pp = p.parent;
2983 >                    if (s == pr) { // p was s's direct parent
2984 >                        p.parent = s;
2985 >                        s.right = p;
2986 >                    }
2987 >                    else {
2988 >                        TreeNode<K,V> sp = s.parent;
2989 >                        if ((p.parent = sp) != null) {
2990 >                            if (s == sp.left)
2991 >                                sp.left = p;
2992 >                            else
2993 >                                sp.right = p;
2994 >                        }
2995 >                        if ((s.right = pr) != null)
2996 >                            pr.parent = s;
2997                      }
2998 +                    p.left = null;
2999 +                    if ((p.right = sr) != null)
3000 +                        sr.parent = p;
3001 +                    if ((s.left = pl) != null)
3002 +                        pl.parent = s;
3003 +                    if ((s.parent = pp) == null)
3004 +                        r = s;
3005 +                    else if (p == pp.left)
3006 +                        pp.left = s;
3007 +                    else
3008 +                        pp.right = s;
3009 +                    if (sr != null)
3010 +                        replacement = sr;
3011 +                    else
3012 +                        replacement = p;
3013 +                }
3014 +                else if (pl != null)
3015 +                    replacement = pl;
3016 +                else if (pr != null)
3017 +                    replacement = pr;
3018 +                else
3019 +                    replacement = p;
3020 +                if (replacement != p) {
3021 +                    TreeNode<K,V> pp = replacement.parent = p.parent;
3022 +                    if (pp == null)
3023 +                        r = replacement;
3024 +                    else if (p == pp.left)
3025 +                        pp.left = replacement;
3026 +                    else
3027 +                        pp.right = replacement;
3028 +                    p.left = p.right = p.parent = null;
3029                  }
3030  
3031 <                tab[index] = new HashEntry<K,V>(hash, key, value, first);
3032 <                ++c;
3033 <                count = c; // write-volatile
3034 <                if (c > threshold)
3035 <                    setTable(rehash(tab));
3036 <                return null;
3037 <            }
3038 <            finally {
3039 <                unlock();
3031 >                root = (p.red) ? r : balanceDeletion(r, replacement);
3032 >
3033 >                if (p == replacement) {  // detach pointers
3034 >                    TreeNode<K,V> pp;
3035 >                    if ((pp = p.parent) != null) {
3036 >                        if (p == pp.left)
3037 >                            pp.left = null;
3038 >                        else if (p == pp.right)
3039 >                            pp.right = null;
3040 >                        p.parent = null;
3041 >                    }
3042 >                }
3043 >            } finally {
3044 >                unlockRoot();
3045              }
3046 +            assert checkInvariants(root);
3047 +            return false;
3048          }
3049  
3050 <        private HashEntry[] rehash(HashEntry[] oldTable) {
3051 <            int oldCapacity = oldTable.length;
3052 <            if (oldCapacity >= MAXIMUM_CAPACITY)
3053 <                return oldTable;
3054 <
3055 <            /*
3056 <             * Reclassify nodes in each list to new Map.  Because we are
3057 <             * using power-of-two expansion, the elements from each bin
3058 <             * must either stay at same index, or move with a power of two
3059 <             * offset. We eliminate unnecessary node creation by catching
3060 <             * cases where old nodes can be reused because their next
3061 <             * fields won't change. Statistically, at the default
3062 <             * threshold, only about one-sixth of them need cloning when
3063 <             * a table doubles. The nodes they replace will be garbage
3064 <             * collectable as soon as they are no longer referenced by any
3065 <             * reader thread that may be in the midst of traversing table
3066 <             * right now.
3067 <             */
3068 <
3069 <            HashEntry[] newTable = new HashEntry[oldCapacity << 1];
3070 <            int sizeMask = newTable.length - 1;
3071 <            for (int i = 0; i < oldCapacity ; i++) {
3072 <                // We need to guarantee that any existing reads of old Map can
3073 <                //  proceed. So we cannot yet null out each bin.
3074 <                HashEntry<K,V> e = (HashEntry<K,V>)oldTable[i];
3075 <
3076 <                if (e != null) {
3077 <                    HashEntry<K,V> next = e.next;
3078 <                    int idx = e.hash & sizeMask;
3079 <
3080 <                    //  Single node on list
3081 <                    if (next == null)
3082 <                        newTable[idx] = e;
3050 >        /* ------------------------------------------------------------ */
3051 >        // Red-black tree methods, all adapted from CLR
3052 >
3053 >        static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
3054 >                                              TreeNode<K,V> p) {
3055 >            TreeNode<K,V> r, pp, rl;
3056 >            if (p != null && (r = p.right) != null) {
3057 >                if ((rl = p.right = r.left) != null)
3058 >                    rl.parent = p;
3059 >                if ((pp = r.parent = p.parent) == null)
3060 >                    (root = r).red = false;
3061 >                else if (pp.left == p)
3062 >                    pp.left = r;
3063 >                else
3064 >                    pp.right = r;
3065 >                r.left = p;
3066 >                p.parent = r;
3067 >            }
3068 >            return root;
3069 >        }
3070 >
3071 >        static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
3072 >                                               TreeNode<K,V> p) {
3073 >            TreeNode<K,V> l, pp, lr;
3074 >            if (p != null && (l = p.left) != null) {
3075 >                if ((lr = p.left = l.right) != null)
3076 >                    lr.parent = p;
3077 >                if ((pp = l.parent = p.parent) == null)
3078 >                    (root = l).red = false;
3079 >                else if (pp.right == p)
3080 >                    pp.right = l;
3081 >                else
3082 >                    pp.left = l;
3083 >                l.right = p;
3084 >                p.parent = l;
3085 >            }
3086 >            return root;
3087 >        }
3088  
3089 +        static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
3090 +                                                    TreeNode<K,V> x) {
3091 +            x.red = true;
3092 +            for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
3093 +                if ((xp = x.parent) == null) {
3094 +                    x.red = false;
3095 +                    return x;
3096 +                }
3097 +                else if (!xp.red || (xpp = xp.parent) == null)
3098 +                    return root;
3099 +                if (xp == (xppl = xpp.left)) {
3100 +                    if ((xppr = xpp.right) != null && xppr.red) {
3101 +                        xppr.red = false;
3102 +                        xp.red = false;
3103 +                        xpp.red = true;
3104 +                        x = xpp;
3105 +                    }
3106                      else {
3107 <                        // Reuse trailing consecutive sequence at same slot
3108 <                        HashEntry<K,V> lastRun = e;
3109 <                        int lastIdx = idx;
3110 <                        for (HashEntry<K,V> last = next;
3111 <                             last != null;
3112 <                             last = last.next) {
3113 <                            int k = last.hash & sizeMask;
3114 <                            if (k != lastIdx) {
3115 <                                lastIdx = k;
3116 <                                lastRun = last;
3107 >                        if (x == xp.right) {
3108 >                            root = rotateLeft(root, x = xp);
3109 >                            xpp = (xp = x.parent) == null ? null : xp.parent;
3110 >                        }
3111 >                        if (xp != null) {
3112 >                            xp.red = false;
3113 >                            if (xpp != null) {
3114 >                                xpp.red = true;
3115 >                                root = rotateRight(root, xpp);
3116 >                            }
3117 >                        }
3118 >                    }
3119 >                }
3120 >                else {
3121 >                    if (xppl != null && xppl.red) {
3122 >                        xppl.red = false;
3123 >                        xp.red = false;
3124 >                        xpp.red = true;
3125 >                        x = xpp;
3126 >                    }
3127 >                    else {
3128 >                        if (x == xp.left) {
3129 >                            root = rotateRight(root, x = xp);
3130 >                            xpp = (xp = x.parent) == null ? null : xp.parent;
3131 >                        }
3132 >                        if (xp != null) {
3133 >                            xp.red = false;
3134 >                            if (xpp != null) {
3135 >                                xpp.red = true;
3136 >                                root = rotateLeft(root, xpp);
3137                              }
3138                          }
3139 <                        newTable[lastIdx] = lastRun;
3139 >                    }
3140 >                }
3141 >            }
3142 >        }
3143  
3144 <                        // Clone all remaining nodes
3145 <                        for (HashEntry<K,V> p = e; p != lastRun; p = p.next) {
3146 <                            int k = p.hash & sizeMask;
3147 <                            newTable[k] = new HashEntry<K,V>(p.hash,
3148 <                                                             p.key,
3149 <                                                             p.value,
3150 <                                                             (HashEntry<K,V>) newTable[k]);
3144 >        static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
3145 >                                                   TreeNode<K,V> x) {
3146 >            for (TreeNode<K,V> xp, xpl, xpr;;) {
3147 >                if (x == null || x == root)
3148 >                    return root;
3149 >                else if ((xp = x.parent) == null) {
3150 >                    x.red = false;
3151 >                    return x;
3152 >                }
3153 >                else if (x.red) {
3154 >                    x.red = false;
3155 >                    return root;
3156 >                }
3157 >                else if ((xpl = xp.left) == x) {
3158 >                    if ((xpr = xp.right) != null && xpr.red) {
3159 >                        xpr.red = false;
3160 >                        xp.red = true;
3161 >                        root = rotateLeft(root, xp);
3162 >                        xpr = (xp = x.parent) == null ? null : xp.right;
3163 >                    }
3164 >                    if (xpr == null)
3165 >                        x = xp;
3166 >                    else {
3167 >                        TreeNode<K,V> sl = xpr.left, sr = xpr.right;
3168 >                        if ((sr == null || !sr.red) &&
3169 >                            (sl == null || !sl.red)) {
3170 >                            xpr.red = true;
3171 >                            x = xp;
3172 >                        }
3173 >                        else {
3174 >                            if (sr == null || !sr.red) {
3175 >                                if (sl != null)
3176 >                                    sl.red = false;
3177 >                                xpr.red = true;
3178 >                                root = rotateRight(root, xpr);
3179 >                                xpr = (xp = x.parent) == null ?
3180 >                                    null : xp.right;
3181 >                            }
3182 >                            if (xpr != null) {
3183 >                                xpr.red = (xp == null) ? false : xp.red;
3184 >                                if ((sr = xpr.right) != null)
3185 >                                    sr.red = false;
3186 >                            }
3187 >                            if (xp != null) {
3188 >                                xp.red = false;
3189 >                                root = rotateLeft(root, xp);
3190 >                            }
3191 >                            x = root;
3192 >                        }
3193 >                    }
3194 >                }
3195 >                else { // symmetric
3196 >                    if (xpl != null && xpl.red) {
3197 >                        xpl.red = false;
3198 >                        xp.red = true;
3199 >                        root = rotateRight(root, xp);
3200 >                        xpl = (xp = x.parent) == null ? null : xp.left;
3201 >                    }
3202 >                    if (xpl == null)
3203 >                        x = xp;
3204 >                    else {
3205 >                        TreeNode<K,V> sl = xpl.left, sr = xpl.right;
3206 >                        if ((sl == null || !sl.red) &&
3207 >                            (sr == null || !sr.red)) {
3208 >                            xpl.red = true;
3209 >                            x = xp;
3210 >                        }
3211 >                        else {
3212 >                            if (sl == null || !sl.red) {
3213 >                                if (sr != null)
3214 >                                    sr.red = false;
3215 >                                xpl.red = true;
3216 >                                root = rotateLeft(root, xpl);
3217 >                                xpl = (xp = x.parent) == null ?
3218 >                                    null : xp.left;
3219 >                            }
3220 >                            if (xpl != null) {
3221 >                                xpl.red = (xp == null) ? false : xp.red;
3222 >                                if ((sl = xpl.left) != null)
3223 >                                    sl.red = false;
3224 >                            }
3225 >                            if (xp != null) {
3226 >                                xp.red = false;
3227 >                                root = rotateRight(root, xp);
3228 >                            }
3229 >                            x = root;
3230                          }
3231                      }
3232                  }
3233              }
360            return newTable;
3234          }
3235  
3236          /**
3237 <         * Remove; match on key only if value null, else match both.
3237 >         * Checks invariants recursively for the tree of Nodes rooted at t.
3238           */
3239 <        V remove(Object key, int hash, Object value) {
3240 <            lock();
3239 >        static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
3240 >            TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
3241 >                tb = t.prev, tn = (TreeNode<K,V>)t.next;
3242 >            if (tb != null && tb.next != t)
3243 >                return false;
3244 >            if (tn != null && tn.prev != t)
3245 >                return false;
3246 >            if (tp != null && t != tp.left && t != tp.right)
3247 >                return false;
3248 >            if (tl != null && (tl.parent != t || tl.hash > t.hash))
3249 >                return false;
3250 >            if (tr != null && (tr.parent != t || tr.hash < t.hash))
3251 >                return false;
3252 >            if (t.red && tl != null && tl.red && tr != null && tr.red)
3253 >                return false;
3254 >            if (tl != null && !checkInvariants(tl))
3255 >                return false;
3256 >            if (tr != null && !checkInvariants(tr))
3257 >                return false;
3258 >            return true;
3259 >        }
3260 >
3261 >        private static final sun.misc.Unsafe U = sun.misc.Unsafe.getUnsafe();
3262 >        private static final long LOCKSTATE;
3263 >        static {
3264              try {
3265 <                int c = count;
3266 <                HashEntry[] tab = table;
3267 <                int index = hash & (tab.length - 1);
3268 <                HashEntry<K,V> first = (HashEntry<K,V>)tab[index];
3265 >                LOCKSTATE = U.objectFieldOffset
3266 >                    (TreeBin.class.getDeclaredField("lockState"));
3267 >            } catch (ReflectiveOperationException e) {
3268 >                throw new Error(e);
3269 >            }
3270 >        }
3271 >    }
3272  
3273 <                HashEntry<K,V> e = first;
375 <                for (;;) {
376 <                    if (e == null)
377 <                        return null;
378 <                    if (e.hash == hash && key.equals(e.key))
379 <                        break;
380 <                    e = e.next;
381 <                }
3273 >    /* ----------------Table Traversal -------------- */
3274  
3275 <                V oldValue = e.value;
3276 <                if (value != null && !value.equals(oldValue))
3277 <                    return null;
3275 >    /**
3276 >     * Records the table, its length, and current traversal index for a
3277 >     * traverser that must process a region of a forwarded table before
3278 >     * proceeding with current table.
3279 >     */
3280 >    static final class TableStack<K,V> {
3281 >        int length;
3282 >        int index;
3283 >        Node<K,V>[] tab;
3284 >        TableStack<K,V> next;
3285 >    }
3286  
3287 <                // All entries following removed node can stay in list, but
3288 <                // all preceding ones need to be cloned.
3289 <                HashEntry<K,V> newFirst = e.next;
3290 <                for (HashEntry<K,V> p = first; p != e; p = p.next)
3291 <                    newFirst = new HashEntry<K,V>(p.hash, p.key,
3292 <                                                  p.value, newFirst);
3293 <                tab[index] = newFirst;
3294 <                count = c-1; // write-volatile
3295 <                return oldValue;
3296 <            }
3297 <            finally {
3298 <                unlock();
3299 <            }
3287 >    /**
3288 >     * Encapsulates traversal for methods such as containsValue; also
3289 >     * serves as a base class for other iterators and spliterators.
3290 >     *
3291 >     * Method advance visits once each still-valid node that was
3292 >     * reachable upon iterator construction. It might miss some that
3293 >     * were added to a bin after the bin was visited, which is OK wrt
3294 >     * consistency guarantees. Maintaining this property in the face
3295 >     * of possible ongoing resizes requires a fair amount of
3296 >     * bookkeeping state that is difficult to optimize away amidst
3297 >     * volatile accesses.  Even so, traversal maintains reasonable
3298 >     * throughput.
3299 >     *
3300 >     * Normally, iteration proceeds bin-by-bin traversing lists.
3301 >     * However, if the table has been resized, then all future steps
3302 >     * must traverse both the bin at the current index and the bin at
3303 >     * (index + baseSize); and so on for further resizings. To
3304 >     * paranoically cope with potential sharing by users of iterators
3305 >     * across threads, iteration terminates if a bounds check fails
3306 >     * for a table read.
3307 >     */
3308 >    static class Traverser<K,V> {
3309 >        Node<K,V>[] tab;        // current table; updated if resized
3310 >        Node<K,V> next;         // the next entry to use
3311 >        TableStack<K,V> stack, spare; // to save/restore on ForwardingNodes
3312 >        int index;              // index of bin to use next
3313 >        int baseIndex;          // current index of initial table
3314 >        int baseLimit;          // index bound for initial table
3315 >        final int baseSize;     // initial table size
3316 >
3317 >        Traverser(Node<K,V>[] tab, int size, int index, int limit) {
3318 >            this.tab = tab;
3319 >            this.baseSize = size;
3320 >            this.baseIndex = this.index = index;
3321 >            this.baseLimit = limit;
3322 >            this.next = null;
3323          }
3324  
3325 <        void clear() {
3326 <            lock();
3327 <            try {
3328 <                HashEntry[] tab = table;
3329 <                for (int i = 0; i < tab.length ; i++)
3330 <                    tab[i] = null;
3331 <                count = 0; // write-volatile
3325 >        /**
3326 >         * Advances if possible, returning next valid node, or null if none.
3327 >         */
3328 >        final Node<K,V> advance() {
3329 >            Node<K,V> e;
3330 >            if ((e = next) != null)
3331 >                e = e.next;
3332 >            for (;;) {
3333 >                Node<K,V>[] t; int i, n;  // must use locals in checks
3334 >                if (e != null)
3335 >                    return next = e;
3336 >                if (baseIndex >= baseLimit || (t = tab) == null ||
3337 >                    (n = t.length) <= (i = index) || i < 0)
3338 >                    return next = null;
3339 >                if ((e = tabAt(t, i)) != null && e.hash < 0) {
3340 >                    if (e instanceof ForwardingNode) {
3341 >                        tab = ((ForwardingNode<K,V>)e).nextTable;
3342 >                        e = null;
3343 >                        pushState(t, i, n);
3344 >                        continue;
3345 >                    }
3346 >                    else if (e instanceof TreeBin)
3347 >                        e = ((TreeBin<K,V>)e).first;
3348 >                    else
3349 >                        e = null;
3350 >                }
3351 >                if (stack != null)
3352 >                    recoverState(n);
3353 >                else if ((index = i + baseSize) >= n)
3354 >                    index = ++baseIndex; // visit upper slots if present
3355              }
3356 <            finally {
3357 <                unlock();
3356 >        }
3357 >
3358 >        /**
3359 >         * Saves traversal state upon encountering a forwarding node.
3360 >         */
3361 >        private void pushState(Node<K,V>[] t, int i, int n) {
3362 >            TableStack<K,V> s = spare;  // reuse if possible
3363 >            if (s != null)
3364 >                spare = s.next;
3365 >            else
3366 >                s = new TableStack<K,V>();
3367 >            s.tab = t;
3368 >            s.length = n;
3369 >            s.index = i;
3370 >            s.next = stack;
3371 >            stack = s;
3372 >        }
3373 >
3374 >        /**
3375 >         * Possibly pops traversal state.
3376 >         *
3377 >         * @param n length of current table
3378 >         */
3379 >        private void recoverState(int n) {
3380 >            TableStack<K,V> s; int len;
3381 >            while ((s = stack) != null && (index += (len = s.length)) >= n) {
3382 >                n = len;
3383 >                index = s.index;
3384 >                tab = s.tab;
3385 >                s.tab = null;
3386 >                TableStack<K,V> next = s.next;
3387 >                s.next = spare; // save for reuse
3388 >                stack = next;
3389 >                spare = s;
3390              }
3391 +            if (s == null && (index += baseSize) >= n)
3392 +                index = ++baseIndex;
3393          }
3394      }
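
The practical upshot of this scheme is that iterators and spliterators built on Traverser are weakly consistent: they visit elements that existed when traversal began, may or may not reflect later updates, and never throw ConcurrentModificationException. A minimal, self-contained sketch (the class name and data are illustrative):

    import java.util.concurrent.ConcurrentHashMap;

    public class WeaklyConsistentDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String,Integer> map = new ConcurrentHashMap<>();
            map.put("a", 1);
            map.put("b", 2);
            // Mutating during traversal is safe: a java.util.HashMap
            // would throw ConcurrentModificationException here.
            for (String k : map.keySet())
                map.remove(k);
            System.out.println(map.isEmpty()); // prints true
        }
    }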
3395  
3396      /**
3397 <     * ConcurrentReaderHashMap list entry.
3397 >     * Base of key, value, and entry Iterators. Adds fields to
3398 >     * Traverser to support iterator.remove.
3399       */
3400 <    private static class HashEntry<K,V> implements Entry<K,V> {
3401 <        private final K key;
3402 <        private V value;
3403 <        private final int hash;
3404 <        private final HashEntry<K,V> next;
3400 >    static class BaseIterator<K,V> extends Traverser<K,V> {
3401 >        final ConcurrentHashMap<K,V> map;
3402 >        Node<K,V> lastReturned;
3403 >        BaseIterator(Node<K,V>[] tab, int size, int index, int limit,
3404 >                    ConcurrentHashMap<K,V> map) {
3405 >            super(tab, size, index, limit);
3406 >            this.map = map;
3407 >            advance();
3408 >        }
3409  
3410 <        HashEntry(int hash, K key, V value, HashEntry<K,V> next) {
3411 <            this.value = value;
3412 <            this.hash = hash;
3413 <            this.key = key;
3414 <            this.next = next;
3410 >        public final boolean hasNext() { return next != null; }
3411 >        public final boolean hasMoreElements() { return next != null; }
3412 >
3413 >        public final void remove() {
3414 >            Node<K,V> p;
3415 >            if ((p = lastReturned) == null)
3416 >                throw new IllegalStateException();
3417 >            lastReturned = null;
3418 >            map.replaceNode(p.key, null, null);
3419 >        }
3420 >    }
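
Because remove delegates to map.replaceNode rather than unlinking the node directly, it stays valid even if the table has been resized since the element was returned. A usage sketch, assuming the ConcurrentHashMap<String,Integer> map from the demo above:

    // drop every entry with an even value via the iterator
    java.util.Iterator<java.util.Map.Entry<String,Integer>> it =
        map.entrySet().iterator();
    while (it.hasNext()) {
        if (it.next().getValue() % 2 == 0)
            it.remove();   // backed by map.replaceNode(key, null, null)
    }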
3421 >
3422 >    static final class KeyIterator<K,V> extends BaseIterator<K,V>
3423 >        implements Iterator<K>, Enumeration<K> {
3424 >        KeyIterator(Node<K,V>[] tab, int index, int size, int limit,
3425 >                    ConcurrentHashMap<K,V> map) {
3426 >            super(tab, index, size, limit, map);
3427 >        }
3428 >
3429 >        public final K next() {
3430 >            Node<K,V> p;
3431 >            if ((p = next) == null)
3432 >                throw new NoSuchElementException();
3433 >            K k = p.key;
3434 >            lastReturned = p;
3435 >            advance();
3436 >            return k;
3437          }
3438  
3439 <        public K getKey() {
3440 <            return key;
3439 >        public final K nextElement() { return next(); }
3440 >    }
3441 >
3442 >    static final class ValueIterator<K,V> extends BaseIterator<K,V>
3443 >        implements Iterator<V>, Enumeration<V> {
3444 >        ValueIterator(Node<K,V>[] tab, int index, int size, int limit,
3445 >                      ConcurrentHashMap<K,V> map) {
3446 >            super(tab, index, size, limit, map);
3447          }
3448  
3449 <        public V getValue() {
3450 <            return value;
3449 >        public final V next() {
3450 >            Node<K,V> p;
3451 >            if ((p = next) == null)
3452 >                throw new NoSuchElementException();
3453 >            V v = p.val;
3454 >            lastReturned = p;
3455 >            advance();
3456 >            return v;
3457          }
3458  
3459 <        public V setValue(V newValue) {
3460 <            // We aren't required to, and don't provide any
3461 <            // visibility barriers for setting value.
3462 <            if (newValue == null)
3463 <                throw new NullPointerException();
3464 <            V oldValue = this.value;
3465 <            this.value = newValue;
3466 <            return oldValue;
3459 >        public final V nextElement() { return next(); }
3460 >    }
3461 >
3462 >    static final class EntryIterator<K,V> extends BaseIterator<K,V>
3463 >        implements Iterator<Map.Entry<K,V>> {
3464 >        EntryIterator(Node<K,V>[] tab, int index, int size, int limit,
3465 >                      ConcurrentHashMap<K,V> map) {
3466 >            super(tab, index, size, limit, map);
3467 >        }
3468 >
3469 >        public final Map.Entry<K,V> next() {
3470 >            Node<K,V> p;
3471 >            if ((p = next) == null)
3472 >                throw new NoSuchElementException();
3473 >            K k = p.key;
3474 >            V v = p.val;
3475 >            lastReturned = p;
3476 >            advance();
3477 >            return new MapEntry<K,V>(k, v, map);
3478 >        }
3479 >    }
3480 >
3481 >    /**
3482 >     * Exported Entry for EntryIterator.
3483 >     */
3484 >    static final class MapEntry<K,V> implements Map.Entry<K,V> {
3485 >        final K key; // non-null
3486 >        V val;       // non-null
3487 >        final ConcurrentHashMap<K,V> map;
3488 >        MapEntry(K key, V val, ConcurrentHashMap<K,V> map) {
3489 >            this.key = key;
3490 >            this.val = val;
3491 >            this.map = map;
3492 >        }
3493 >        public K getKey()        { return key; }
3494 >        public V getValue()      { return val; }
3495 >        public int hashCode()    { return key.hashCode() ^ val.hashCode(); }
3496 >        public String toString() {
3497 >            return Helpers.mapEntryToString(key, val);
3498          }
3499  
3500          public boolean equals(Object o) {
3501 <            if (!(o instanceof Entry))
3501 >            Object k, v; Map.Entry<?,?> e;
3502 >            return ((o instanceof Map.Entry) &&
3503 >                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3504 >                    (v = e.getValue()) != null &&
3505 >                    (k == key || k.equals(key)) &&
3506 >                    (v == val || v.equals(val)));
3507 >        }
3508 >
3509 >        /**
3510 >         * Sets our entry's value and writes through to the map. The
3511 >         * value to return is somewhat arbitrary here. Since we do not
3512 >         * necessarily track asynchronous changes, the most recent
3513 >         * "previous" value could be different from what we return (or
3514 >         * could even have been removed, in which case the put will
3515 >         * re-establish it). We do not and cannot guarantee more.
3516 >         */
3517 >        public V setValue(V value) {
3518 >            if (value == null) throw new NullPointerException();
3519 >            V v = val;
3520 >            val = value;
3521 >            map.put(key, value);
3522 >            return v;
3523 >        }
3524 >    }
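
Since setValue writes through with an ordinary put, an entry obtained during iteration can update the live map, subject to the lost-update caveat described in the comment above. Illustrative, assuming a ConcurrentHashMap<String,Integer> map:

    // increment every value through the entry view; a concurrent update
    // to the same key between getValue and setValue can be overwritten
    for (java.util.Map.Entry<String,Integer> e : map.entrySet())
        e.setValue(e.getValue() + 1);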
3525 >
3526 >    static final class KeySpliterator<K,V> extends Traverser<K,V>
3527 >        implements Spliterator<K> {
3528 >        long est;               // size estimate
3529 >        KeySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3530 >                       long est) {
3531 >            super(tab, size, index, limit);
3532 >            this.est = est;
3533 >        }
3534 >
3535 >        public Spliterator<K> trySplit() {
3536 >            int i, f, h;
3537 >            return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3538 >                new KeySpliterator<K,V>(tab, baseSize, baseLimit = h,
3539 >                                        f, est >>>= 1);
3540 >        }
3541 >
3542 >        public void forEachRemaining(Consumer<? super K> action) {
3543 >            if (action == null) throw new NullPointerException();
3544 >            for (Node<K,V> p; (p = advance()) != null;)
3545 >                action.accept(p.key);
3546 >        }
3547 >
3548 >        public boolean tryAdvance(Consumer<? super K> action) {
3549 >            if (action == null) throw new NullPointerException();
3550 >            Node<K,V> p;
3551 >            if ((p = advance()) == null)
3552                  return false;
3553 <            Entry<K,V> e = (Entry<K,V>)o;
3554 <            return (key.equals(e.getKey()) && value.equals(e.getValue()));
3553 >            action.accept(p.key);
3554 >            return true;
3555          }
3556  
3557 <        public int hashCode() {
3558 <            return  key.hashCode() ^ value.hashCode();
3557 >        public long estimateSize() { return est; }
3558 >
3559 >        public int characteristics() {
3560 >            return Spliterator.DISTINCT | Spliterator.CONCURRENT |
3561 >                Spliterator.NONNULL;
3562          }
3563 +    }
3564  
3565 <        public String toString() {
3566 <            return key + "=" + value;
3565 >    static final class ValueSpliterator<K,V> extends Traverser<K,V>
3566 >        implements Spliterator<V> {
3567 >        long est;               // size estimate
3568 >        ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit,
3569 >                         long est) {
3570 >            super(tab, size, index, limit);
3571 >            this.est = est;
3572 >        }
3573 >
3574 >        public Spliterator<V> trySplit() {
3575 >            int i, f, h;
3576 >            return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3577 >                new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h,
3578 >                                          f, est >>>= 1);
3579 >        }
3580 >
3581 >        public void forEachRemaining(Consumer<? super V> action) {
3582 >            if (action == null) throw new NullPointerException();
3583 >            for (Node<K,V> p; (p = advance()) != null;)
3584 >                action.accept(p.val);
3585 >        }
3586 >
3587 >        public boolean tryAdvance(Consumer<? super V> action) {
3588 >            if (action == null) throw new NullPointerException();
3589 >            Node<K,V> p;
3590 >            if ((p = advance()) == null)
3591 >                return false;
3592 >            action.accept(p.val);
3593 >            return true;
3594 >        }
3595 >
3596 >        public long estimateSize() { return est; }
3597 >
3598 >        public int characteristics() {
3599 >            return Spliterator.CONCURRENT | Spliterator.NONNULL;
3600          }
3601      }
3602  
3603 +    static final class EntrySpliterator<K,V> extends Traverser<K,V>
3604 +        implements Spliterator<Map.Entry<K,V>> {
3605 +        final ConcurrentHashMap<K,V> map; // To export MapEntry
3606 +        long est;               // size estimate
3607 +        EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3608 +                         long est, ConcurrentHashMap<K,V> map) {
3609 +            super(tab, size, index, limit);
3610 +            this.map = map;
3611 +            this.est = est;
3612 +        }
3613  
3614 <    /* ---------------- Public operations -------------- */
3614 >        public Spliterator<Map.Entry<K,V>> trySplit() {
3615 >            int i, f, h;
3616 >            return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3617 >                new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h,
3618 >                                          f, est >>>= 1, map);
3619 >        }
3620  
3621 <    /**
3622 <     * Constructs a new, empty map with the specified initial
3623 <     * capacity and the specified load factor.
3624 <     *
3625 <     * @param initialCapacity the initial capacity.  The actual
474 <     * initial capacity is rounded up to the nearest power of two.
475 <     * @param loadFactor  the load factor threshold, used to control resizing.
476 <     * @param segments the number of concurrently accessible segments. the
477 <     * actual number of segments is rounded to the next power of two.
478 <     * @throws IllegalArgumentException if the initial capacity is
479 <     * negative or the load factor or number of segments are
480 <     * nonpositive.
481 <     */
482 <    public ConcurrentHashMap(int initialCapacity,
483 <                             float loadFactor, int segments) {
484 <        if (!(loadFactor > 0) || initialCapacity < 0 || segments <= 0)
485 <            throw new IllegalArgumentException();
3621 >        public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
3622 >            if (action == null) throw new NullPointerException();
3623 >            for (Node<K,V> p; (p = advance()) != null; )
3624 >                action.accept(new MapEntry<K,V>(p.key, p.val, map));
3625 >        }
3626  
3627 <        // Find power-of-two sizes best matching arguments
3628 <        int sshift = 0;
3629 <        int ssize = 1;
3630 <        while (ssize < segments) {
3631 <            ++sshift;
3632 <            ssize <<= 1;
3627 >        public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
3628 >            if (action == null) throw new NullPointerException();
3629 >            Node<K,V> p;
3630 >            if ((p = advance()) == null)
3631 >                return false;
3632 >            action.accept(new MapEntry<K,V>(p.key, p.val, map));
3633 >            return true;
3634          }
494        segmentShift = 32 - sshift;
495        segmentMask = ssize - 1;
496        this.segments = new Segment[ssize];
3635  
3636 <        if (initialCapacity > MAXIMUM_CAPACITY)
3637 <            initialCapacity = MAXIMUM_CAPACITY;
3638 <        int c = initialCapacity / ssize;
3639 <        if (c * ssize < initialCapacity)
3640 <            ++c;
3641 <        int cap = 1;
3642 <        while (cap < c)
3643 <            cap <<= 1;
3636 >        public long estimateSize() { return est; }
3637 >
3638 >        public int characteristics() {
3639 >            return Spliterator.DISTINCT | Spliterator.CONCURRENT |
3640 >                Spliterator.NONNULL;
3641 >        }
3642 >    }
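
These spliterators report CONCURRENT and NONNULL rather than SIZED, and trySplit halves both the remaining bin range and the size estimate; this is what lets parallel streams over the map's views partition work. A sketch with the same assumed map:

    // the stream views are driven by the spliterators above
    long n = map.keySet().parallelStream()
                .filter(k -> k.startsWith("a"))
                .count();

    // each trySplit hands off roughly half of the remaining bins
    java.util.Spliterator<String> s1 = map.keySet().spliterator();
    java.util.Spliterator<String> s2 = s1.trySplit(); // null when too small to split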
3643 >
3644 >    // Parallel bulk operations
3645  
3646 <        for (int i = 0; i < this.segments.length; ++i)
3647 <            this.segments[i] = new Segment<K,V>(cap, loadFactor);
3646 >    /**
3647 >     * Computes initial batch value for bulk tasks. The returned value
3648 >     * is approximately exp2 of the number of times (minus one) to
3649 >     * split the task in two before executing the leaf action. This value is
3650 >     * faster to compute and more convenient to use as a guide to
3651 >     * splitting than is the depth, since it is used while dividing by
3652 >     * two anyway.
3653 >     */
3654 >    final int batchFor(long b) {
3655 >        long n;
3656 >        if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
3657 >            return 0;
3658 >        int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
3659 >        return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
3660      }
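
A worked example under assumed numbers: with sumCount() reporting one million mappings and parallelismThreshold b = 10_000, n/b is 100; on a machine whose common pool parallelism is 8, sp is 32, so batchFor returns the cap of 32. Passing Long.MAX_VALUE always yields 0 (fully sequential), while passing 1 requests maximal splitting.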
3661  
3662      /**
3663 <     * Constructs a new, empty map with the specified initial
513 <     * capacity, and with default load factor and segments.
3663 >     * Performs the given action for each (key, value).
3664       *
3665 <     * @param initialCapacity the initial capacity of the
3666 <     * ConcurrentHashMap.
3667 <     * @throws IllegalArgumentException if the initial capacity of
3668 <     * elements is negative.
3665 >     * @param parallelismThreshold the (estimated) number of elements
3666 >     * needed for this operation to be executed in parallel
3667 >     * @param action the action
3668 >     * @since 1.8
3669       */
3670 <    public ConcurrentHashMap(int initialCapacity) {
3671 <        this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_SEGMENTS);
3670 >    public void forEach(long parallelismThreshold,
3671 >                        BiConsumer<? super K,? super V> action) {
3672 >        if (action == null) throw new NullPointerException();
3673 >        new ForEachMappingTask<K,V>
3674 >            (null, batchFor(parallelismThreshold), 0, 0, table,
3675 >             action).invoke();
3676      }
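
A usage sketch, assuming the ConcurrentHashMap<String,Integer> map from earlier; the 10_000 threshold is an arbitrary illustrative choice:

    // splits across the common pool only when the map holds roughly
    // 10_000 or more mappings; otherwise runs sequentially
    map.forEach(10_000L, (k, v) -> System.out.println(k + " -> " + v));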
3677  
3678      /**
3679 <     * Constructs a new, empty map with a default initial capacity,
3680 <     * load factor, and number of segments.
3679 >     * Performs the given action for each non-null transformation
3680 >     * of each (key, value).
3681 >     *
3682 >     * @param parallelismThreshold the (estimated) number of elements
3683 >     * needed for this operation to be executed in parallel
3684 >     * @param transformer a function returning the transformation
3685 >     * for an element, or null if there is no transformation (in
3686 >     * which case the action is not applied)
3687 >     * @param action the action
3688 >     * @param <U> the return type of the transformer
3689 >     * @since 1.8
3690       */
3691 <    public ConcurrentHashMap() {
3692 <        this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_SEGMENTS);
3691 >    public <U> void forEach(long parallelismThreshold,
3692 >                            BiFunction<? super K, ? super V, ? extends U> transformer,
3693 >                            Consumer<? super U> action) {
3694 >        if (transformer == null || action == null)
3695 >            throw new NullPointerException();
3696 >        new ForEachTransformedMappingTask<K,V,U>
3697 >            (null, batchFor(parallelismThreshold), 0, 0, table,
3698 >             transformer, action).invoke();
3699      }
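
The transformer doubles as a filter, since a null transformation suppresses the action for that element. Illustrative, same assumed map:

    // print only the large mappings; small ones transform to null
    map.forEach(10_000L,
                (k, v) -> v > 100 ? k + "=" + v : null,
                System.out::println);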
3700  
3701      /**
3702 <     * Constructs a new map with the same mappings as the given map.  The
3703 <     * map is created with a capacity of twice the number of mappings in
3704 <     * the given map or 11 (whichever is greater), and a default load factor.
3702 >     * Returns a non-null result from applying the given search
3703 >     * function on each (key, value), or null if none.  Upon
3704 >     * success, further element processing is suppressed and the
3705 >     * results of any other parallel invocations of the search
3706 >     * function are ignored.
3707 >     *
3708 >     * @param parallelismThreshold the (estimated) number of elements
3709 >     * needed for this operation to be executed in parallel
3710 >     * @param searchFunction a function returning a non-null
3711 >     * result on success, else null
3712 >     * @param <U> the return type of the search function
3713 >     * @return a non-null result from applying the given search
3714 >     * function on each (key, value), or null if none
3715 >     * @since 1.8
3716       */
3717 <    public <A extends K, B extends V> ConcurrentHashMap(Map<A,B> t) {
3718 <        this(Math.max((int) (t.size() / DEFAULT_LOAD_FACTOR) + 1,
3719 <                      11),
3720 <             DEFAULT_LOAD_FACTOR, DEFAULT_SEGMENTS);
3721 <        putAll(t);
3717 >    public <U> U search(long parallelismThreshold,
3718 >                        BiFunction<? super K, ? super V, ? extends U> searchFunction) {
3719 >        if (searchFunction == null) throw new NullPointerException();
3720 >        return new SearchMappingsTask<K,V,U>
3721 >            (null, batchFor(parallelismThreshold), 0, 0, table,
3722 >             searchFunction, new AtomicReference<U>()).invoke();
3723      }
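
A sketch of the short-circuiting search, same assumed map; which matching element wins is deliberately unspecified:

    // some key whose value exceeds 100, or null if none does;
    // returning null from the function means "keep searching"
    String hit = map.search(10_000L, (k, v) -> v > 100 ? k : null);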
3724  
3725 <    // inherit Map javadoc
3726 <    public int size() {
3727 <        int c = 0;
3728 <        for (int i = 0; i < segments.length; ++i)
3729 <            c += segments[i].count;
3730 <        return c;
3725 >    /**
3726 >     * Returns the result of accumulating the given transformation
3727 >     * of all (key, value) pairs using the given reducer to
3728 >     * combine values, or null if none.
3729 >     *
3730 >     * @param parallelismThreshold the (estimated) number of elements
3731 >     * needed for this operation to be executed in parallel
3732 >     * @param transformer a function returning the transformation
3733 >     * for an element, or null if there is no transformation (in
3734 >     * which case it is not combined)
3735 >     * @param reducer a commutative associative combining function
3736 >     * @param <U> the return type of the transformer
3737 >     * @return the result of accumulating the given transformation
3738 >     * of all (key, value) pairs
3739 >     * @since 1.8
3740 >     */
3741 >    public <U> U reduce(long parallelismThreshold,
3742 >                        BiFunction<? super K, ? super V, ? extends U> transformer,
3743 >                        BiFunction<? super U, ? super U, ? extends U> reducer) {
3744 >        if (transformer == null || reducer == null)
3745 >            throw new NullPointerException();
3746 >        return new MapReduceMappingsTask<K,V,U>
3747 >            (null, batchFor(parallelismThreshold), 0, 0, table,
3748 >             null, transformer, reducer).invoke();
3749      }
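
Illustrative map-reduce over the assumed map; the reducer should be insensitive to combination order, which is unspecified:

    // a longest "key=value" rendering, or null for an empty map
    String longest = map.reduce(10_000L,
        (k, v) -> k + "=" + v,
        (a, b) -> a.length() >= b.length() ? a : b);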
3750  
3751 <    // inherit Map javadoc
3752 <    public boolean isEmpty() {
3753 <        for (int i = 0; i < segments.length; ++i)
3754 <            if (segments[i].count != 0)
3755 <                return false;
3756 <        return true;
3751 >    /**
3752 >     * Returns the result of accumulating the given transformation
3753 >     * of all (key, value) pairs using the given reducer to
3754 >     * combine values, and the given basis as an identity value.
3755 >     *
3756 >     * @param parallelismThreshold the (estimated) number of elements
3757 >     * needed for this operation to be executed in parallel
3758 >     * @param transformer a function returning the transformation
3759 >     * for an element
3760 >     * @param basis the identity (initial default value) for the reduction
3761 >     * @param reducer a commutative associative combining function
3762 >     * @return the result of accumulating the given transformation
3763 >     * of all (key, value) pairs
3764 >     * @since 1.8
3765 >     */
3766 >    public double reduceToDouble(long parallelismThreshold,
3767 >                                 ToDoubleBiFunction<? super K, ? super V> transformer,
3768 >                                 double basis,
3769 >                                 DoubleBinaryOperator reducer) {
3770 >        if (transformer == null || reducer == null)
3771 >            throw new NullPointerException();
3772 >        return new MapReduceMappingsToDoubleTask<K,V>
3773 >            (null, batchFor(parallelismThreshold), 0, 0, table,
3774 >             null, transformer, basis, reducer).invoke();
3775      }
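
The primitive specializations avoid boxing; reduceToLong and reduceToInt below have the identical shape. A sketch with the assumed map:

    // sum of all values without boxing; 0.0 is the identity basis
    double total = map.reduceToDouble(10_000L, (k, v) -> v, 0.0, Double::sum);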
3776  
3777      /**
3778 <     * Returns the value to which the specified key is mapped in this table.
3778 >     * Returns the result of accumulating the given transformation
3779 >     * of all (key, value) pairs using the given reducer to
3780 >     * combine values, and the given basis as an identity value.
3781       *
3782 <     * @param   key   a key in the table.
3783 <     * @return  the value to which the key is mapped in this table;
3784 <     *          <code>null</code> if the key is not mapped to any value in
3785 <     *          this table.
3786 <     * @throws  NullPointerException  if the key is
3787 <     *               <code>null</code>.
3788 <     * @see     #put(Object, Object)
3782 >     * @param parallelismThreshold the (estimated) number of elements
3783 >     * needed for this operation to be executed in parallel
3784 >     * @param transformer a function returning the transformation
3785 >     * for an element
3786 >     * @param basis the identity (initial default value) for the reduction
3787 >     * @param reducer a commutative associative combining function
3788 >     * @return the result of accumulating the given transformation
3789 >     * of all (key, value) pairs
3790 >     * @since 1.8
3791       */
3792 <    public V get(Object key) {
3793 <        int hash = hash(key); // throws NullPointerException if key null
3794 <        return segmentFor(hash).get((K) key, hash);
3792 >    public long reduceToLong(long parallelismThreshold,
3793 >                             ToLongBiFunction<? super K, ? super V> transformer,
3794 >                             long basis,
3795 >                             LongBinaryOperator reducer) {
3796 >        if (transformer == null || reducer == null)
3797 >            throw new NullPointerException();
3798 >        return new MapReduceMappingsToLongTask<K,V>
3799 >            (null, batchFor(parallelismThreshold), 0, 0, table,
3800 >             null, transformer, basis, reducer).invoke();
3801      }
3802  
3803      /**
3804 <     * Tests if the specified object is a key in this table.
3804 >     * Returns the result of accumulating the given transformation
3805 >     * of all (key, value) pairs using the given reducer to
3806 >     * combine values, and the given basis as an identity value.
3807       *
3808 <     * @param   key   possible key.
3809 <     * @return  <code>true</code> if and only if the specified object
3810 <     *          is a key in this table, as determined by the
3811 <     *          <tt>equals</tt> method; <code>false</code> otherwise.
3812 <     * @throws  NullPointerException  if the key is
3813 <     *               <code>null</code>.
3814 <     * @see     #contains(Object)
3808 >     * @param parallelismThreshold the (estimated) number of elements
3809 >     * needed for this operation to be executed in parallel
3810 >     * @param transformer a function returning the transformation
3811 >     * for an element
3812 >     * @param basis the identity (initial default value) for the reduction
3813 >     * @param reducer a commutative associative combining function
3814 >     * @return the result of accumulating the given transformation
3815 >     * of all (key, value) pairs
3816 >     * @since 1.8
3817       */
3818 <    public boolean containsKey(Object key) {
3819 <        int hash = hash(key); // throws NullPointerException if key null
3820 <        return segmentFor(hash).containsKey(key, hash);
3818 >    public int reduceToInt(long parallelismThreshold,
3819 >                           ToIntBiFunction<? super K, ? super V> transformer,
3820 >                           int basis,
3821 >                           IntBinaryOperator reducer) {
3822 >        if (transformer == null || reducer == null)
3823 >            throw new NullPointerException();
3824 >        return new MapReduceMappingsToIntTask<K,V>
3825 >            (null, batchFor(parallelismThreshold), 0, 0, table,
3826 >             null, transformer, basis, reducer).invoke();
3827      }
3828  
3829      /**
3830 <     * Returns <tt>true</tt> if this map maps one or more keys to the
594 <     * specified value. Note: This method requires a full internal
595 <     * traversal of the hash table, and so is much slower than
596 <     * method <tt>containsKey</tt>.
3830 >     * Performs the given action for each key.
3831       *
3832 <     * @param value value whose presence in this map is to be tested.
3833 <     * @return <tt>true</tt> if this map maps one or more keys to the
3834 <     * specified value.
3835 <     * @throws  NullPointerException  if the value is <code>null</code>.
3832 >     * @param parallelismThreshold the (estimated) number of elements
3833 >     * needed for this operation to be executed in parallel
3834 >     * @param action the action
3835 >     * @since 1.8
3836       */
3837 <    public boolean containsValue(Object value) {
3838 <        if (value == null)
3837 >    public void forEachKey(long parallelismThreshold,
3838 >                           Consumer<? super K> action) {
3839 >        if (action == null) throw new NullPointerException();
3840 >        new ForEachKeyTask<K,V>
3841 >            (null, batchFor(parallelismThreshold), 0, 0, table,
3842 >             action).invoke();
3843 >    }
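
Sketch, same assumed map:

    // print every key, in parallel above the threshold
    map.forEachKey(10_000L, System.out::println);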
3844 >
3845 >    /**
3846 >     * Performs the given action for each non-null transformation
3847 >     * of each key.
3848 >     *
3849 >     * @param parallelismThreshold the (estimated) number of elements
3850 >     * needed for this operation to be executed in parallel
3851 >     * @param transformer a function returning the transformation
3852 >     * for an element, or null if there is no transformation (in
3853 >     * which case the action is not applied)
3854 >     * @param action the action
3855 >     * @param <U> the return type of the transformer
3856 >     * @since 1.8
3857 >     */
3858 >    public <U> void forEachKey(long parallelismThreshold,
3859 >                               Function<? super K, ? extends U> transformer,
3860 >                               Consumer<? super U> action) {
3861 >        if (transformer == null || action == null)
3862              throw new NullPointerException();
3863 +        new ForEachTransformedKeyTask<K,V,U>
3864 +            (null, batchFor(parallelismThreshold), 0, 0, table,
3865 +             transformer, action).invoke();
3866 +    }
3867  
3868 <        for (int i = 0; i < segments.length; ++i) {
3869 <            if (segments[i].containsValue(value))
3870 <                return true;
3871 <        }
3872 <        return false;
3868 >    /**
3869 >     * Returns a non-null result from applying the given search
3870 >     * function on each key, or null if none. Upon success,
3871 >     * further element processing is suppressed and the results of
3872 >     * any other parallel invocations of the search function are
3873 >     * ignored.
3874 >     *
3875 >     * @param parallelismThreshold the (estimated) number of elements
3876 >     * needed for this operation to be executed in parallel
3877 >     * @param searchFunction a function returning a non-null
3878 >     * result on success, else null
3879 >     * @param <U> the return type of the search function
3880 >     * @return a non-null result from applying the given search
3881 >     * function on each key, or null if none
3882 >     * @since 1.8
3883 >     */
3884 >    public <U> U searchKeys(long parallelismThreshold,
3885 >                            Function<? super K, ? extends U> searchFunction) {
3886 >        if (searchFunction == null) throw new NullPointerException();
3887 >        return new SearchKeysTask<K,V,U>
3888 >            (null, batchFor(parallelismThreshold), 0, 0, table,
3889 >             searchFunction, new AtomicReference<U>()).invoke();
3890      }
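
Illustrative short-circuiting search over keys alone:

    // any key longer than eight characters, or null if none
    String found = map.searchKeys(10_000L, k -> k.length() > 8 ? k : null);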
3891 +
3892      /**
3893 <     * Tests if some key maps into the specified value in this table.
3894 <     * This operation is more expensive than the <code>containsKey</code>
616 <     * method.<p>
3893 >     * Returns the result of accumulating all keys using the given
3894 >     * reducer to combine values, or null if none.
3895       *
3896 <     * Note that this method is identical in functionality to containsValue
3897 <     * (which is part of the Map interface in the collections framework).
3896 >     * @param parallelismThreshold the (estimated) number of elements
3897 >     * needed for this operation to be executed in parallel
3898 >     * @param reducer a commutative associative combining function
3899 >     * @return the result of accumulating all keys using the given
3900 >     * reducer to combine values, or null if none
3901 >     * @since 1.8
3902 >     */
3903 >    public K reduceKeys(long parallelismThreshold,
3904 >                        BiFunction<? super K, ? super K, ? extends K> reducer) {
3905 >        if (reducer == null) throw new NullPointerException();
3906 >        return new ReduceKeysTask<K,V>
3907 >            (null, batchFor(parallelismThreshold), 0, 0, table,
3908 >             null, reducer).invoke();
3909 >    }
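
Sketch of the untransformed key reduction; as the javadoc notes, the reducer should not depend on combination order:

    // a shortest key, or null if the map is empty
    String shortest = map.reduceKeys(10_000L,
        (a, b) -> a.length() <= b.length() ? a : b);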
3910 >
3911 >    /**
3912 >     * Returns the result of accumulating the given transformation
3913 >     * of all keys using the given reducer to combine values, or
3914 >     * null if none.
3915       *
3916 <     * @param      value   a value to search for.
3917 <     * @return     <code>true</code> if and only if some key maps to the
3918 <     *             <code>value</code> argument in this table as
3919 <     *             determined by the <tt>equals</tt> method;
3920 <     *             <code>false</code> otherwise.
3921 <     * @throws  NullPointerException  if the value is <code>null</code>.
3922 <     * @see        #containsKey(Object)
3923 <     * @see        #containsValue(Object)
3924 <     * @see   Map
3916 >     * @param parallelismThreshold the (estimated) number of elements
3917 >     * needed for this operation to be executed in parallel
3918 >     * @param transformer a function returning the transformation
3919 >     * for an element, or null if there is no transformation (in
3920 >     * which case it is not combined)
3921 >     * @param reducer a commutative associative combining function
3922 >     * @param <U> the return type of the transformer
3923 >     * @return the result of accumulating the given transformation
3924 >     * of all keys
3925 >     * @since 1.8
3926       */
3927 <    public boolean contains(Object value) {
3928 <        return containsValue(value);
3927 >    public <U> U reduceKeys(long parallelismThreshold,
3928 >                            Function<? super K, ? extends U> transformer,
3929 >         BiFunction<? super U, ? super U, ? extends U> reducer) {
3930 >        if (transformer == null || reducer == null)
3931 >            throw new NullPointerException();
3932 >        return new MapReduceKeysTask<K,V,U>
3933 >            (null, batchFor(parallelismThreshold), 0, 0, table,
3934 >             null, transformer, reducer).invoke();
3935      }
3936  
3937      /**
3938 <     * Maps the specified <code>key</code> to the specified
3939 <     * <code>value</code> in this table. Neither the key nor the
3940 <     * value can be <code>null</code>. <p>
3938 >     * Returns the result of accumulating the given transformation
3939 >     * of all keys using the given reducer to combine values, and
3940 >     * the given basis as an identity value.
3941       *
3942 <     * The value can be retrieved by calling the <code>get</code> method
3943 <     * with a key that is equal to the original key.
3942 >     * @param parallelismThreshold the (estimated) number of elements
3943 >     * needed for this operation to be executed in parallel
3944 >     * @param transformer a function returning the transformation
3945 >     * for an element
3946 >     * @param basis the identity (initial default value) for the reduction
3947 >     * @param reducer a commutative associative combining function
3948 >     * @return the result of accumulating the given transformation
3949 >     * of all keys
3950 >     * @since 1.8
3951 >     */
3952 >    public double reduceKeysToDouble(long parallelismThreshold,
3953 >                                     ToDoubleFunction<? super K> transformer,
3954 >                                     double basis,
3955 >                                     DoubleBinaryOperator reducer) {
3956 >        if (transformer == null || reducer == null)
3957 >            throw new NullPointerException();
3958 >        return new MapReduceKeysToDoubleTask<K,V>
3959 >            (null, batchFor(parallelismThreshold), 0, 0, table,
3960 >             null, transformer, basis, reducer).invoke();
3961 >    }
3962 >
3963 >    /**
3964 >     * Returns the result of accumulating the given transformation
3965 >     * of all keys using the given reducer to combine values, and
3966 >     * the given basis as an identity value.
3967       *
3968 <     * @param      key     the table key.
3969 <     * @param      value   the value.
3970 <     * @return     the previous value of the specified key in this table,
3971 <     *             or <code>null</code> if it did not have one.
3972 <     * @throws  NullPointerException  if the key or value is
3973 <     *               <code>null</code>.
3974 <     * @see     Object#equals(Object)
3975 <     * @see     #get(Object)
3968 >     * @param parallelismThreshold the (estimated) number of elements
3969 >     * needed for this operation to be executed in parallel
3970 >     * @param transformer a function returning the transformation
3971 >     * for an element
3972 >     * @param basis the identity (initial default value) for the reduction
3973 >     * @param reducer a commutative associative combining function
3974 >     * @return the result of accumulating the given transformation
3975 >     * of all keys
3976 >     * @since 1.8
3977       */
3978 <    public V put(K key, V value) {
3979 <        if (value == null)
3978 >    public long reduceKeysToLong(long parallelismThreshold,
3979 >                                 ToLongFunction<? super K> transformer,
3980 >                                 long basis,
3981 >                                 LongBinaryOperator reducer) {
3982 >        if (transformer == null || reducer == null)
3983              throw new NullPointerException();
3984 <        int hash = hash(key);
3985 <        return segmentFor(hash).put(key, hash, value, false);
3984 >        return new MapReduceKeysToLongTask<K,V>
3985 >            (null, batchFor(parallelismThreshold), 0, 0, table,
3986 >             null, transformer, basis, reducer).invoke();
3987      }
3988  
3989      /**
3990 <     * If the specified key is not already associated
3991 <     * with a value, associate it with the given value.
3992 <     * This is equivalent to
663 <     * <pre>
664 <     *   if (!map.containsKey(key)) map.put(key, value);
665 <     *   return get(key);
666 <     * </pre>
667 <     * Except that the action is performed atomically.
668 <     * @param key key with which the specified value is to be associated.
669 <     * @param value value to be associated with the specified key.
670 <     * @return previous value associated with specified key, or <tt>null</tt>
671 <     *         if there was no mapping for key.  A <tt>null</tt> return can
672 <     *         also indicate that the map previously associated <tt>null</tt>
673 <     *         with the specified key, if the implementation supports
674 <     *         <tt>null</tt> values.
675 <     *
676 <     * @throws NullPointerException this map does not permit <tt>null</tt>
677 <     *            keys or values, and the specified key or value is
678 <     *            <tt>null</tt>.
3990 >     * Returns the result of accumulating the given transformation
3991 >     * of all keys using the given reducer to combine values, and
3992 >     * the given basis as an identity value.
3993       *
3994 <     **/
3995 <    public V putIfAbsent(K key, V value) {
3996 <        if (value == null)
3994 >     * @param parallelismThreshold the (estimated) number of elements
3995 >     * needed for this operation to be executed in parallel
3996 >     * @param transformer a function returning the transformation
3997 >     * for an element
3998 >     * @param basis the identity (initial default value) for the reduction
3999 >     * @param reducer a commutative associative combining function
4000 >     * @return the result of accumulating the given transformation
4001 >     * of all keys
4002 >     * @since 1.8
4003 >     */
4004 >    public int reduceKeysToInt(long parallelismThreshold,
4005 >                               ToIntFunction<? super K> transformer,
4006 >                               int basis,
4007 >                               IntBinaryOperator reducer) {
4008 >        if (transformer == null || reducer == null)
4009              throw new NullPointerException();
4010 <        int hash = hash(key);
4011 <        return segmentFor(hash).put(key, hash, value, true);
4010 >        return new MapReduceKeysToIntTask<K,V>
4011 >            (null, batchFor(parallelismThreshold), 0, 0, table,
4012 >             null, transformer, basis, reducer).invoke();
4013      }
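
Sketch, same assumed map:

    // total length of all keys; the basis 0 is the result for an empty map
    int totalLen = map.reduceKeysToInt(10_000L, String::length, 0, Integer::sum);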
4014  
4015 +    /**
4016 +     * Performs the given action for each value.
4017 +     *
4018 +     * @param parallelismThreshold the (estimated) number of elements
4019 +     * needed for this operation to be executed in parallel
4020 +     * @param action the action
4021 +     * @since 1.8
4022 +     */
4023 +    public void forEachValue(long parallelismThreshold,
4024 +                             Consumer<? super V> action) {
4025 +        if (action == null)
4026 +            throw new NullPointerException();
4027 +        new ForEachValueTask<K,V>
4028 +            (null, batchFor(parallelismThreshold), 0, 0, table,
4029 +             action).invoke();
4030 +    }
4031  
4032      /**
4033 <     * Copies all of the mappings from the specified map to this one.
4033 >     * Performs the given action for each non-null transformation
4034 >     * of each value.
4035       *
4036 <     * These mappings replace any mappings that this map had for any of the
4037 <     * keys currently in the specified Map.
4036 >     * @param parallelismThreshold the (estimated) number of elements
4037 >     * needed for this operation to be executed in parallel
4038 >     * @param transformer a function returning the transformation
4039 >     * for an element, or null if there is no transformation (in
4040 >     * which case the action is not applied)
4041 >     * @param action the action
4042 >     * @param <U> the return type of the transformer
4043 >     * @since 1.8
4044 >     */
4045 >    public <U> void forEachValue(long parallelismThreshold,
4046 >                                 Function<? super V, ? extends U> transformer,
4047 >                                 Consumer<? super U> action) {
4048 >        if (transformer == null || action == null)
4049 >            throw new NullPointerException();
4050 >        new ForEachTransformedValueTask<K,V,U>
4051 >            (null, batchFor(parallelismThreshold), 0, 0, table,
4052 >             transformer, action).invoke();
4053 >    }
4054 >
4055 >    /**
4056 >     * Returns a non-null result from applying the given search
4057 >     * function on each value, or null if none.  Upon success,
4058 >     * further element processing is suppressed and the results of
4059 >     * any other parallel invocations of the search function are
4060 >     * ignored.
4061       *
4062 <     * @param t Mappings to be stored in this map.
4062 >     * @param parallelismThreshold the (estimated) number of elements
4063 >     * needed for this operation to be executed in parallel
4064 >     * @param searchFunction a function returning a non-null
4065 >     * result on success, else null
4066 >     * @param <U> the return type of the search function
4067 >     * @return a non-null result from applying the given search
4068 >     * function on each value, or null if none
4069 >     * @since 1.8
4070       */
4071 <    public void putAll(Map<? extends K, ? extends V> t) {
4072 <        Iterator<Map.Entry<? extends K, ? extends V>> it = t.entrySet().iterator();
4073 <        while (it.hasNext()) {
4074 <            Entry<? extends K, ? extends V> e = it.next();
4075 <            put(e.getKey(), e.getValue());
4076 <        }
4071 >    public <U> U searchValues(long parallelismThreshold,
4072 >                              Function<? super V, ? extends U> searchFunction) {
4073 >        if (searchFunction == null) throw new NullPointerException();
4074 >        return new SearchValuesTask<K,V,U>
4075 >            (null, batchFor(parallelismThreshold), 0, 0, table,
4076 >             searchFunction, new AtomicReference<U>()).invoke();
4077      }
4078  
4079      /**
4080 <     * Removes the key (and its corresponding value) from this
4081 <     * table. This method does nothing if the key is not in the table.
4080 >     * Returns the result of accumulating all values using the
4081 >     * given reducer to combine values, or null if none.
4082       *
4083 <     * @param   key   the key that needs to be removed.
4084 <     * @return  the value to which the key had been mapped in this table,
4085 <     *          or <code>null</code> if the key did not have a mapping.
4086 <     * @throws  NullPointerException  if the key is
4087 <     *               <code>null</code>.
4083 >     * @param parallelismThreshold the (estimated) number of elements
4084 >     * needed for this operation to be executed in parallel
4085 >     * @param reducer a commutative associative combining function
4086 >     * @return the result of accumulating all values
4087 >     * @since 1.8
4088       */
4089 <    public V remove(Object key) {
4090 <        int hash = hash(key);
4091 <        return segmentFor(hash).remove(key, hash, null);
4089 >    public V reduceValues(long parallelismThreshold,
4090 >                          BiFunction<? super V, ? super V, ? extends V> reducer) {
4091 >        if (reducer == null) throw new NullPointerException();
4092 >        return new ReduceValuesTask<K,V>
4093 >            (null, batchFor(parallelismThreshold), 0, 0, table,
4094 >             null, reducer).invoke();
4095      }
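
Sketch over values, same assumed map:

    // a largest value, or null if the map is empty
    Integer max = map.reduceValues(10_000L, (a, b) -> a >= b ? a : b);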
4096  
4097      /**
4098 <     * Removes the (key, value) pair from this
4099 <     * table. This method does nothing if the key is not in the table,
4100 <     * or if the key is associated with a different value.
4101 <     *
4102 <     * @param   key   the key that needs to be removed.
4103 <     * @param   value   the associated value. If the value is null,
4104 <     *                   it means "any value".
4105 <     * @return  the value to which the key had been mapped in this table,
4106 <     *          or <code>null</code> if the key did not have a mapping.
4107 <     * @throws  NullPointerException  if the key is
4108 <     *               <code>null</code>.
4098 >     * Returns the result of accumulating the given transformation
4099 >     * of all values using the given reducer to combine values, or
4100 >     * null if none.
4101 >     *
4102 >     * @param parallelismThreshold the (estimated) number of elements
4103 >     * needed for this operation to be executed in parallel
4104 >     * @param transformer a function returning the transformation
4105 >     * for an element, or null if there is no transformation (in
4106 >     * which case it is not combined)
4107 >     * @param reducer a commutative associative combining function
4108 >     * @param <U> the return type of the transformer
4109 >     * @return the result of accumulating the given transformation
4110 >     * of all values
4111 >     * @since 1.8
4112       */
4113 <    public boolean remove(Object key, Object value) {
4114 <        int hash = hash(key);
4115 <        return segmentFor(hash).remove(key, hash, value) != null;
4113 >    public <U> U reduceValues(long parallelismThreshold,
4114 >                              Function<? super V, ? extends U> transformer,
4115 >                              BiFunction<? super U, ? super U, ? extends U> reducer) {
4116 >        if (transformer == null || reducer == null)
4117 >            throw new NullPointerException();
4118 >        return new MapReduceValuesTask<K,V,U>
4119 >            (null, batchFor(parallelismThreshold), 0, 0, table,
4120 >             null, transformer, reducer).invoke();
4121      }
4122  
4123      /**
4124 <     * Removes all mappings from this map.
4124 >     * Returns the result of accumulating the given transformation
4125 >     * of all values using the given reducer to combine values,
4126 >     * and the given basis as an identity value.
4127 >     *
4128 >     * @param parallelismThreshold the (estimated) number of elements
4129 >     * needed for this operation to be executed in parallel
4130 >     * @param transformer a function returning the transformation
4131 >     * for an element
4132 >     * @param basis the identity (initial default value) for the reduction
4133 >     * @param reducer a commutative associative combining function
4134 >     * @return the result of accumulating the given transformation
4135 >     * of all values
4136 >     * @since 1.8
4137       */
4138 <    public void clear() {
4139 <        for (int i = 0; i < segments.length; ++i)
4140 <            segments[i].clear();
4138 >    public double reduceValuesToDouble(long parallelismThreshold,
4139 >                                       ToDoubleFunction<? super V> transformer,
4140 >                                       double basis,
4141 >                                       DoubleBinaryOperator reducer) {
4142 >        if (transformer == null || reducer == null)
4143 >            throw new NullPointerException();
4144 >        return new MapReduceValuesToDoubleTask<K,V>
4145 >            (null, batchFor(parallelismThreshold), 0, 0, table,
4146 >             null, transformer, basis, reducer).invoke();
4147      }
4148  
4149 +    /**
4150 +     * Returns the result of accumulating the given transformation
4151 +     * of all values using the given reducer to combine values,
4152 +     * and the given basis as an identity value.
4153 +     *
4154 +     * @param parallelismThreshold the (estimated) number of elements
4155 +     * needed for this operation to be executed in parallel
4156 +     * @param transformer a function returning the transformation
4157 +     * for an element
4158 +     * @param basis the identity (initial default value) for the reduction
4159 +     * @param reducer a commutative associative combining function
4160 +     * @return the result of accumulating the given transformation
4161 +     * of all values
4162 +     * @since 1.8
4163 +     */
4164 +    public long reduceValuesToLong(long parallelismThreshold,
4165 +                                   ToLongFunction<? super V> transformer,
4166 +                                   long basis,
4167 +                                   LongBinaryOperator reducer) {
4168 +        if (transformer == null || reducer == null)
4169 +            throw new NullPointerException();
4170 +        return new MapReduceValuesToLongTask<K,V>
4171 +            (null, batchFor(parallelismThreshold), 0, 0, table,
4172 +             null, transformer, basis, reducer).invoke();
4173 +    }
4174  
4175      /**
4176 <     * Returns a shallow copy of this
4177 <     * <tt>ConcurrentHashMap</tt> instance: the keys and
4178 <     * values themselves are not cloned.
4176 >     * Returns the result of accumulating the given transformation
4177 >     * of all values using the given reducer to combine values,
4178 >     * and the given basis as an identity value.
4179       *
4180 <     * @return a shallow copy of this map.
4180 >     * @param parallelismThreshold the (estimated) number of elements
4181 >     * needed for this operation to be executed in parallel
4182 >     * @param transformer a function returning the transformation
4183 >     * for an element
4184 >     * @param basis the identity (initial default value) for the reduction
4185 >     * @param reducer a commutative associative combining function
4186 >     * @return the result of accumulating the given transformation
4187 >     * of all values
4188 >     * @since 1.8
4189       */
4190 <    public Object clone() {
4191 <        // We cannot call super.clone, since it would share final
4192 <        // segments array, and there's no way to reassign finals.
4190 >    public int reduceValuesToInt(long parallelismThreshold,
4191 >                                 ToIntFunction<? super V> transformer,
4192 >                                 int basis,
4193 >                                 IntBinaryOperator reducer) {
4194 >        if (transformer == null || reducer == null)
4195 >            throw new NullPointerException();
4196 >        return new MapReduceValuesToIntTask<K,V>
4197 >            (null, batchFor(parallelismThreshold), 0, 0, table,
4198 >             null, transformer, basis, reducer).invoke();
4199 >    }
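
The int-valued analogue, sketched as a maximum over the hypothetical prices map. The basis must be an identity for the reducer, so Integer.MIN_VALUE (the identity for max) is supplied; it is also the result for an empty map.

    int maxValue = prices.reduceValuesToInt(
        1, Integer::intValue,
        Integer.MIN_VALUE,     // identity for max; returned when map is empty
        Math::max);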
4200  
4201 <        float lf = segments[0].loadFactor;
4202 <        int segs = segments.length;
4203 <        int cap = (int)(size() / lf);
4204 <        if (cap < segs) cap = segs;
4205 <        ConcurrentHashMap<K,V> t = new ConcurrentHashMap<K,V>(cap, lf, segs);
4206 <        t.putAll(this);
4207 <        return t;
4201 >    /**
4202 >     * Performs the given action for each entry.
4203 >     *
4204 >     * @param parallelismThreshold the (estimated) number of elements
4205 >     * needed for this operation to be executed in parallel
4206 >     * @param action the action
4207 >     * @since 1.8
4208 >     */
4209 >    public void forEachEntry(long parallelismThreshold,
4210 >                             Consumer<? super Map.Entry<K,V>> action) {
4211 >        if (action == null) throw new NullPointerException();
4212 >        new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table,
4213 >                                  action).invoke();
4214      }
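
A sketch of the plain entry traversal over the hypothetical prices map:

    prices.forEachEntry(Long.MAX_VALUE,
        e -> System.out.println(e.getKey() + " -> " + e.getValue()));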
4215  
4216      /**
4217 <     * Returns a set view of the keys contained in this map.  The set is
4218 <     * backed by the map, so changes to the map are reflected in the set, and
770 <     * vice-versa.  The set supports element removal, which removes the
771 <     * corresponding mapping from this map, via the <tt>Iterator.remove</tt>,
772 <     * <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt>, and
773 <     * <tt>clear</tt> operations.  It does not support the <tt>add</tt> or
774 <     * <tt>addAll</tt> operations.
775 <     * The returned <tt>iterator</tt> is a "weakly consistent" iterator that
776 <     * will never throw {@link java.util.ConcurrentModificationException},
777 <     * and guarantees to traverse elements as they existed upon
778 <     * construction of the iterator, and may (but is not guaranteed to)
779 <     * reflect any modifications subsequent to construction.
4217 >     * Performs the given action for each non-null transformation
4218 >     * of each entry.
4219       *
4220 <     * @return a set view of the keys contained in this map.
4220 >     * @param parallelismThreshold the (estimated) number of elements
4221 >     * needed for this operation to be executed in parallel
4222 >     * @param transformer a function returning the transformation
4223 >     * for an element, or null if there is no transformation (in
4224 >     * which case the action is not applied)
4225 >     * @param action the action
4226 >     * @param <U> the return type of the transformer
4227 >     * @since 1.8
4228       */
4229 <    public Set<K> keySet() {
4230 <        Set<K> ks = keySet;
4231 <        return (ks != null) ? ks : (keySet = new KeySet());
4229 >    public <U> void forEachEntry(long parallelismThreshold,
4230 >                                 Function<Map.Entry<K,V>, ? extends U> transformer,
4231 >                                 Consumer<? super U> action) {
4232 >        if (transformer == null || action == null)
4233 >            throw new NullPointerException();
4234 >        new ForEachTransformedEntryTask<K,V,U>
4235 >            (null, batchFor(parallelismThreshold), 0, 0, table,
4236 >             transformer, action).invoke();
4237      }
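
The transformed variant doubles as a filter, since a null return from the transformer suppresses the action for that entry. A sketch on the hypothetical prices map, printing only the keys of even values:

    prices.forEachEntry(1,
        e -> (e.getValue() % 2 == 0) ? e.getKey() : null, // odd -> skipped
        System.out::println);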
4238  
4239 +    /**
4240 +     * Returns a non-null result from applying the given search
4241 +     * function on each entry, or null if none.  Upon success,
4242 +     * further element processing is suppressed and the results of
4243 +     * any other parallel invocations of the search function are
4244 +     * ignored.
4245 +     *
4246 +     * @param parallelismThreshold the (estimated) number of elements
4247 +     * needed for this operation to be executed in parallel
4248 +     * @param searchFunction a function returning a non-null
4249 +     * result on success, else null
4250 +     * @param <U> the return type of the search function
4251 +     * @return a non-null result from applying the given search
4252 +     * function on each entry, or null if none
4253 +     * @since 1.8
4254 +     */
4255 +    public <U> U searchEntries(long parallelismThreshold,
4256 +                               Function<Map.Entry<K,V>, ? extends U> searchFunction) {
4257 +        if (searchFunction == null) throw new NullPointerException();
4258 +        return new SearchEntriesTask<K,V,U>
4259 +            (null, batchFor(parallelismThreshold), 0, 0, table,
4260 +             searchFunction, new AtomicReference<U>()).invoke();
4261 +    }
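
A search sketch on the hypothetical prices map: return the key of any entry whose value exceeds 100, or null if there is none. If several entries qualify, any one of them may win the race.

    String expensive = prices.searchEntries(1,
        e -> e.getValue() > 100 ? e.getKey() : null);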
4262  
4263      /**
4264 <     * Returns a collection view of the values contained in this map.  The
4265 <     * collection is backed by the map, so changes to the map are reflected in
792 <     * the collection, and vice-versa.  The collection supports element
793 <     * removal, which removes the corresponding mapping from this map, via the
794 <     * <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>,
795 <     * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations.
796 <     * It does not support the <tt>add</tt> or <tt>addAll</tt> operations.
797 <     * The returned <tt>iterator</tt> is a "weakly consistent" iterator that
798 <     * will never throw {@link java.util.ConcurrentModificationException},
799 <     * and guarantees to traverse elements as they existed upon
800 <     * construction of the iterator, and may (but is not guaranteed to)
801 <     * reflect any modifications subsequent to construction.
4264 >     * Returns the result of accumulating all entries using the
4265 >     * given reducer to combine values, or null if none.
4266       *
4267 <     * @return a collection view of the values contained in this map.
4267 >     * @param parallelismThreshold the (estimated) number of elements
4268 >     * needed for this operation to be executed in parallel
4269 >     * @param reducer a commutative associative combining function
4270 >     * @return the result of accumulating all entries
4271 >     * @since 1.8
4272       */
4273 <    public Collection<V> values() {
4274 <        Collection<V> vs = values;
4275 <        return (vs != null) ? vs : (values = new Values());
4273 >    public Map.Entry<K,V> reduceEntries(long parallelismThreshold,
4274 >                                        BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4275 >        if (reducer == null) throw new NullPointerException();
4276 >        return new ReduceEntriesTask<K,V>
4277 >            (null, batchFor(parallelismThreshold), 0, 0, table,
4278 >             null, reducer).invoke();
4279      }
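
A sketch reducing whole entries over the hypothetical prices map, selecting an entry with the largest value (null when the map is empty). Comparing by value keeps the reducer associative and order-insensitive, up to ties.

    Map.Entry<String,Integer> largest = prices.reduceEntries(1,
        (a, b) -> a.getValue() >= b.getValue() ? a : b);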
4280  
4281 +    /**
4282 +     * Returns the result of accumulating the given transformation
4283 +     * of all entries using the given reducer to combine values,
4284 +     * or null if none.
4285 +     *
4286 +     * @param parallelismThreshold the (estimated) number of elements
4287 +     * needed for this operation to be executed in parallel
4288 +     * @param transformer a function returning the transformation
4289 +     * for an element, or null if there is no transformation (in
4290 +     * which case it is not combined)
4291 +     * @param reducer a commutative associative combining function
4292 +     * @param <U> the return type of the transformer
4293 +     * @return the result of accumulating the given transformation
4294 +     * of all entries
4295 +     * @since 1.8
4296 +     */
4297 +    public <U> U reduceEntries(long parallelismThreshold,
4298 +                               Function<Map.Entry<K,V>, ? extends U> transformer,
4299 +                               BiFunction<? super U, ? super U, ? extends U> reducer) {
4300 +        if (transformer == null || reducer == null)
4301 +            throw new NullPointerException();
4302 +        return new MapReduceEntriesTask<K,V,U>
4303 +            (null, batchFor(parallelismThreshold), 0, 0, table,
4304 +             null, transformer, reducer).invoke();
4305 +    }
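
The transformed variant, sketched as the lexicographically smallest "key=value" rendering of the hypothetical prices map; lexicographic minimum is commutative and associative, as the contract requires.

    String smallest = prices.reduceEntries(1,
        e -> e.getKey() + "=" + e.getValue(),
        (a, b) -> a.compareTo(b) <= 0 ? a : b);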
4306  
4307      /**
4308 <     * Returns a collection view of the mappings contained in this map.  Each
4309 <     * element in the returned collection is a <tt>Map.Entry</tt>.  The
4310 <     * collection is backed by the map, so changes to the map are reflected in
815 <     * the collection, and vice-versa.  The collection supports element
816 <     * removal, which removes the corresponding mapping from the map, via the
817 <     * <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>,
818 <     * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations.
819 <     * It does not support the <tt>add</tt> or <tt>addAll</tt> operations.
820 <     * The returned <tt>iterator</tt> is a "weakly consistent" iterator that
821 <     * will never throw {@link java.util.ConcurrentModificationException},
822 <     * and guarantees to traverse elements as they existed upon
823 <     * construction of the iterator, and may (but is not guaranteed to)
824 <     * reflect any modifications subsequent to construction.
4308 >     * Returns the result of accumulating the given transformation
4309 >     * of all entries using the given reducer to combine values,
4310 >     * and the given basis as an identity value.
4311       *
4312 <     * @return a collection view of the mappings contained in this map.
4312 >     * @param parallelismThreshold the (estimated) number of elements
4313 >     * needed for this operation to be executed in parallel
4314 >     * @param transformer a function returning the transformation
4315 >     * for an element
4316 >     * @param basis the identity (initial default value) for the reduction
4317 >     * @param reducer a commutative associative combining function
4318 >     * @return the result of accumulating the given transformation
4319 >     * of all entries
4320 >     * @since 1.8
4321       */
4322 <    public Set<Map.Entry<K,V>> entrySet() {
4323 <        Set<Map.Entry<K,V>> es = entrySet;
4324 <        return (es != null) ? es : (entrySet = new EntrySet());
4322 >    public double reduceEntriesToDouble(long parallelismThreshold,
4323 >                                        ToDoubleFunction<Map.Entry<K,V>> transformer,
4324 >                                        double basis,
4325 >                                        DoubleBinaryOperator reducer) {
4326 >        if (transformer == null || reducer == null)
4327 >            throw new NullPointerException();
4328 >        return new MapReduceEntriesToDoubleTask<K,V>
4329 >            (null, batchFor(parallelismThreshold), 0, 0, table,
4330 >             null, transformer, basis, reducer).invoke();
4331      }
4332  
4333 +    /**
4334 +     * Returns the result of accumulating the given transformation
4335 +     * of all entries using the given reducer to combine values,
4336 +     * and the given basis as an identity value.
4337 +     *
4338 +     * @param parallelismThreshold the (estimated) number of elements
4339 +     * needed for this operation to be executed in parallel
4340 +     * @param transformer a function returning the transformation
4341 +     * for an element
4342 +     * @param basis the identity (initial default value) for the reduction
4343 +     * @param reducer a commutative associative combining function
4344 +     * @return the result of accumulating the given transformation
4345 +     * of all entries
4346 +     * @since 1.8
4347 +     */
4348 +    public long reduceEntriesToLong(long parallelismThreshold,
4349 +                                    ToLongFunction<Map.Entry<K,V>> transformer,
4350 +                                    long basis,
4351 +                                    LongBinaryOperator reducer) {
4352 +        if (transformer == null || reducer == null)
4353 +            throw new NullPointerException();
4354 +        return new MapReduceEntriesToLongTask<K,V>
4355 +            (null, batchFor(parallelismThreshold), 0, 0, table,
4356 +             null, transformer, basis, reducer).invoke();
4357 +    }
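
The long analogue, sketched as a sequential total of key lengths over the hypothetical prices map:

    long keyChars = prices.reduceEntriesToLong(Long.MAX_VALUE,
        e -> (long) e.getKey().length(), 0L, Long::sum);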
4358  
4359      /**
4360 <     * Returns an enumeration of the keys in this table.
4360 >     * Returns the result of accumulating the given transformation
4361 >     * of all entries using the given reducer to combine values,
4362 >     * and the given basis as an identity value.
4363       *
4364 <     * @return  an enumeration of the keys in this table.
4365 <     * @see     Enumeration
4366 <     * @see     #elements()
4367 <     * @see     #keySet()
4368 <     * @see     Map
4364 >     * @param parallelismThreshold the (estimated) number of elements
4365 >     * needed for this operation to be executed in parallel
4366 >     * @param transformer a function returning the transformation
4367 >     * for an element
4368 >     * @param basis the identity (initial default value) for the reduction
4369 >     * @param reducer a commutative associative combining function
4370 >     * @return the result of accumulating the given transformation
4371 >     * of all entries
4372 >     * @since 1.8
4373       */
4374 <    public Enumeration<K> keys() {
4375 <        return new KeyIterator();
4374 >    public int reduceEntriesToInt(long parallelismThreshold,
4375 >                                  ToIntFunction<Map.Entry<K,V>> transformer,
4376 >                                  int basis,
4377 >                                  IntBinaryOperator reducer) {
4378 >        if (transformer == null || reducer == null)
4379 >            throw new NullPointerException();
4380 >        return new MapReduceEntriesToIntTask<K,V>
4381 >            (null, batchFor(parallelismThreshold), 0, 0, table,
4382 >             null, transformer, basis, reducer).invoke();
4383      }
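
And the int analogue; counting every entry as 1 yields a (contrived) snapshot of the map's size, comparable in effect to mappingCount():

    int entryCount = prices.reduceEntriesToInt(1,
        e -> 1,                // every entry contributes 1
        0, Integer::sum);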
4384  
4385 +
4386 +    /* ----------------Views -------------- */
4387 +
4388      /**
4389 <     * Returns an enumeration of the values in this table.
4390 <     * Use the Enumeration methods on the returned object to fetch the elements
4391 <     * sequentially.
4389 >     * Base class for views.
4390 >     */
4391 >    abstract static class CollectionView<K,V,E>
4392 >        implements Collection<E>, java.io.Serializable {
4393 >        private static final long serialVersionUID = 7249069246763182397L;
4394 >        final ConcurrentHashMap<K,V> map;
4395 >        CollectionView(ConcurrentHashMap<K,V> map)  { this.map = map; }
4396 >
4397 >        /**
4398 >         * Returns the map backing this view.
4399 >         *
4400 >         * @return the map backing this view
4401 >         */
4402 >        public ConcurrentHashMap<K,V> getMap() { return map; }
4403 >
4404 >        /**
4405 >         * Removes all of the elements from this view, by removing all
4406 >         * the mappings from the map backing this view.
4407 >         */
4408 >        public final void clear()      { map.clear(); }
4409 >        public final int size()        { return map.size(); }
4410 >        public final boolean isEmpty() { return map.isEmpty(); }
4411 >
4412 >        // implementations below rely on concrete classes supplying these
4413 >        // abstract methods
4414 >        /**
4415 >         * Returns an iterator over the elements in this collection.
4416 >         *
4417 >         * <p>The returned iterator is
4418 >         * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
4419 >         *
4420 >         * @return an iterator over the elements in this collection
4421 >         */
4422 >        public abstract Iterator<E> iterator();
4423 >        public abstract boolean contains(Object o);
4424 >        public abstract boolean remove(Object o);
4425 >
4426 >        private static final String OOME_MSG = "Required array size too large";
4427 >
4428 >        public final Object[] toArray() {
4429 >            long sz = map.mappingCount();
4430 >            if (sz > MAX_ARRAY_SIZE)
4431 >                throw new OutOfMemoryError(OOME_MSG);
4432 >            int n = (int)sz;
4433 >            Object[] r = new Object[n];
4434 >            int i = 0;
4435 >            for (E e : this) {
4436 >                if (i == n) {
4437 >                    if (n >= MAX_ARRAY_SIZE)
4438 >                        throw new OutOfMemoryError(OOME_MSG);
4439 >                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
4440 >                        n = MAX_ARRAY_SIZE;
4441 >                    else
4442 >                        n += (n >>> 1) + 1;
4443 >                    r = Arrays.copyOf(r, n);
4444 >                }
4445 >                r[i++] = e;
4446 >            }
4447 >            return (i == n) ? r : Arrays.copyOf(r, i);
4448 >        }
4449 >
4450 >        @SuppressWarnings("unchecked")
4451 >        public final <T> T[] toArray(T[] a) {
4452 >            long sz = map.mappingCount();
4453 >            if (sz > MAX_ARRAY_SIZE)
4454 >                throw new OutOfMemoryError(OOME_MSG);
4455 >            int m = (int)sz;
4456 >            T[] r = (a.length >= m) ? a :
4457 >                (T[])java.lang.reflect.Array
4458 >                .newInstance(a.getClass().getComponentType(), m);
4459 >            int n = r.length;
4460 >            int i = 0;
4461 >            for (E e : this) {
4462 >                if (i == n) {
4463 >                    if (n >= MAX_ARRAY_SIZE)
4464 >                        throw new OutOfMemoryError(OOME_MSG);
4465 >                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
4466 >                        n = MAX_ARRAY_SIZE;
4467 >                    else
4468 >                        n += (n >>> 1) + 1;
4469 >                    r = Arrays.copyOf(r, n);
4470 >                }
4471 >                r[i++] = (T)e;
4472 >            }
4473 >            if (a == r && i < n) {
4474 >                r[i] = null; // null-terminate
4475 >                return r;
4476 >            }
4477 >            return (i == n) ? r : Arrays.copyOf(r, i);
4478 >        }
4479 >
4480 >        /**
4481 >         * Returns a string representation of this collection.
4482 >         * The string representation consists of the string representations
4483 >         * of the collection's elements in the order they are returned by
4484 >         * its iterator, enclosed in square brackets ({@code "[]"}).
4485 >         * Adjacent elements are separated by the characters {@code ", "}
4486 >         * (comma and space).  Elements are converted to strings as by
4487 >         * {@link String#valueOf(Object)}.
4488 >         *
4489 >         * @return a string representation of this collection
4490 >         */
4491 >        public final String toString() {
4492 >            StringBuilder sb = new StringBuilder();
4493 >            sb.append('[');
4494 >            Iterator<E> it = iterator();
4495 >            if (it.hasNext()) {
4496 >                for (;;) {
4497 >                    Object e = it.next();
4498 >                    sb.append(e == this ? "(this Collection)" : e);
4499 >                    if (!it.hasNext())
4500 >                        break;
4501 >                    sb.append(',').append(' ');
4502 >                }
4503 >            }
4504 >            return sb.append(']').toString();
4505 >        }
4506 >
4507 >        public final boolean containsAll(Collection<?> c) {
4508 >            if (c != this) {
4509 >                for (Object e : c) {
4510 >                    if (e == null || !contains(e))
4511 >                        return false;
4512 >                }
4513 >            }
4514 >            return true;
4515 >        }
4516 >
4517 >        public final boolean removeAll(Collection<?> c) {
4518 >            if (c == null) throw new NullPointerException();
4519 >            boolean modified = false;
4520 >            for (Iterator<E> it = iterator(); it.hasNext();) {
4521 >                if (c.contains(it.next())) {
4522 >                    it.remove();
4523 >                    modified = true;
4524 >                }
4525 >            }
4526 >            return modified;
4527 >        }
4528 >
4529 >        public final boolean retainAll(Collection<?> c) {
4530 >            if (c == null) throw new NullPointerException();
4531 >            boolean modified = false;
4532 >            for (Iterator<E> it = iterator(); it.hasNext();) {
4533 >                if (!c.contains(it.next())) {
4534 >                    it.remove();
4535 >                    modified = true;
4536 >                }
4537 >            }
4538 >            return modified;
4539 >        }
4540 >
4541 >    }
4542 >
4543 >    /**
4544 >     * A view of a ConcurrentHashMap as a {@link Set} of keys, in
4545 >     * which additions may optionally be enabled by mapping to a
4546 >     * common value.  This class cannot be directly instantiated.
4547 >     * See {@link #keySet() keySet()},
4548 >     * {@link #keySet(Object) keySet(V)},
4549 >     * {@link #newKeySet() newKeySet()},
4550 >     * {@link #newKeySet(int) newKeySet(int)}.
4551       *
4552 <     * @return  an enumeration of the values in this table.
853 <     * @see     java.util.Enumeration
854 <     * @see     #keys()
855 <     * @see     #values()
856 <     * @see     Map
4552 >     * @since 1.8
4553       */
4554 <    public Enumeration<V> elements() {
4555 <        return new ValueIterator();
4554 >    public static class KeySetView<K,V> extends CollectionView<K,V,K>
4555 >        implements Set<K>, java.io.Serializable {
4556 >        private static final long serialVersionUID = 7249069246763182397L;
4557 >        private final V value;
4558 >        KeySetView(ConcurrentHashMap<K,V> map, V value) {  // non-public
4559 >            super(map);
4560 >            this.value = value;
4561 >        }
4562 >
4563 >        /**
4564 >         * Returns the default mapped value for additions,
4565 >         * or {@code null} if additions are not supported.
4566 >         *
4567 >         * @return the default mapped value for additions, or {@code null}
4568 >         * if not supported
4569 >         */
4570 >        public V getMappedValue() { return value; }
4571 >
4572 >        /**
4573 >         * {@inheritDoc}
4574 >         * @throws NullPointerException if the specified key is null
4575 >         */
4576 >        public boolean contains(Object o) { return map.containsKey(o); }
4577 >
4578 >        /**
4579 >         * Removes the key from this map view, by removing the key (and its
4580 >         * corresponding value) from the backing map.  This method does
4581 >         * nothing if the key is not in the map.
4582 >         *
4583 >         * @param  o the key to be removed from the backing map
4584 >         * @return {@code true} if the backing map contained the specified key
4585 >         * @throws NullPointerException if the specified key is null
4586 >         */
4587 >        public boolean remove(Object o) { return map.remove(o) != null; }
4588 >
4589 >        /**
4590 >         * @return an iterator over the keys of the backing map
4591 >         */
4592 >        public Iterator<K> iterator() {
4593 >            Node<K,V>[] t;
4594 >            ConcurrentHashMap<K,V> m = map;
4595 >            int f = (t = m.table) == null ? 0 : t.length;
4596 >            return new KeyIterator<K,V>(t, f, 0, f, m);
4597 >        }
4598 >
4599 >        /**
4600 >         * Adds the specified key to this set view by mapping the key to
4601 >         * the default mapped value in the backing map, if defined.
4602 >         *
4603 >         * @param e key to be added
4604 >         * @return {@code true} if this set changed as a result of the call
4605 >         * @throws NullPointerException if the specified key is null
4606 >         * @throws UnsupportedOperationException if no default mapped value
4607 >         * for additions was provided
4608 >         */
4609 >        public boolean add(K e) {
4610 >            V v;
4611 >            if ((v = value) == null)
4612 >                throw new UnsupportedOperationException();
4613 >            return map.putVal(e, v, true) == null;
4614 >        }
4615 >
4616 >        /**
4617 >         * Adds all of the elements in the specified collection to this set,
4618 >         * as if by calling {@link #add} on each one.
4619 >         *
4620 >         * @param c the elements to be inserted into this set
4621 >         * @return {@code true} if this set changed as a result of the call
4622 >         * @throws NullPointerException if the collection or any of its
4623 >         * elements are {@code null}
4624 >         * @throws UnsupportedOperationException if no default mapped value
4625 >         * for additions was provided
4626 >         */
4627 >        public boolean addAll(Collection<? extends K> c) {
4628 >            boolean added = false;
4629 >            V v;
4630 >            if ((v = value) == null)
4631 >                throw new UnsupportedOperationException();
4632 >            for (K e : c) {
4633 >                if (map.putVal(e, v, true) == null)
4634 >                    added = true;
4635 >            }
4636 >            return added;
4637 >        }
4638 >
4639 >        public int hashCode() {
4640 >            int h = 0;
4641 >            for (K e : this)
4642 >                h += e.hashCode();
4643 >            return h;
4644 >        }
4645 >
4646 >        public boolean equals(Object o) {
4647 >            Set<?> c;
4648 >            return ((o instanceof Set) &&
4649 >                    ((c = (Set<?>)o) == this ||
4650 >                     (containsAll(c) && c.containsAll(this))));
4651 >        }
4652 >
4653 >        public Spliterator<K> spliterator() {
4654 >            Node<K,V>[] t;
4655 >            ConcurrentHashMap<K,V> m = map;
4656 >            long n = m.sumCount();
4657 >            int f = (t = m.table) == null ? 0 : t.length;
4658 >            return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
4659 >        }
4660 >
4661 >        public void forEach(Consumer<? super K> action) {
4662 >            if (action == null) throw new NullPointerException();
4663 >            Node<K,V>[] t;
4664 >            if ((t = map.table) != null) {
4665 >                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4666 >                for (Node<K,V> p; (p = it.advance()) != null; )
4667 >                    action.accept(p.key);
4668 >            }
4669 >        }
4670 >    }
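
A sketch of the two addable key-view flavors (all names hypothetical): a standalone concurrent set obtained from newKeySet(), and a keySet(V) view whose additions write the given default value through to the backing map.

    Set<String> visited = ConcurrentHashMap.newKeySet();
    visited.add("n1");                    // backed by a fresh private map

    ConcurrentHashMap<String, Integer> hits = new ConcurrentHashMap<>();
    Set<String> hitKeys = hits.keySet(0); // additions map keys to 0
    hitKeys.add("page");                  // now hits.get("page") == 0
    // hits.keySet() with no argument would reject add()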
4671 >
4672 >    /**
4673 >     * A view of a ConcurrentHashMap as a {@link Collection} of
4674 >     * values, in which additions are disabled. This class cannot be
4675 >     * directly instantiated. See {@link #values()}.
4676 >     */
4677 >    static final class ValuesView<K,V> extends CollectionView<K,V,V>
4678 >        implements Collection<V>, java.io.Serializable {
4679 >        private static final long serialVersionUID = 2249069246763182397L;
4680 >        ValuesView(ConcurrentHashMap<K,V> map) { super(map); }
4681 >        public final boolean contains(Object o) {
4682 >            return map.containsValue(o);
4683 >        }
4684 >
4685 >        public final boolean remove(Object o) {
4686 >            if (o != null) {
4687 >                for (Iterator<V> it = iterator(); it.hasNext();) {
4688 >                    if (o.equals(it.next())) {
4689 >                        it.remove();
4690 >                        return true;
4691 >                    }
4692 >                }
4693 >            }
4694 >            return false;
4695 >        }
4696 >
4697 >        public final Iterator<V> iterator() {
4698 >            ConcurrentHashMap<K,V> m = map;
4699 >            Node<K,V>[] t;
4700 >            int f = (t = m.table) == null ? 0 : t.length;
4701 >            return new ValueIterator<K,V>(t, f, 0, f, m);
4702 >        }
4703 >
4704 >        public final boolean add(V e) {
4705 >            throw new UnsupportedOperationException();
4706 >        }
4707 >        public final boolean addAll(Collection<? extends V> c) {
4708 >            throw new UnsupportedOperationException();
4709 >        }
4710 >
4711 >        public boolean removeIf(Predicate<? super V> filter) {
4712 >            return map.removeValueIf(filter);
4713 >        }
4714 >
4715 >        public Spliterator<V> spliterator() {
4716 >            Node<K,V>[] t;
4717 >            ConcurrentHashMap<K,V> m = map;
4718 >            long n = m.sumCount();
4719 >            int f = (t = m.table) == null ? 0 : t.length;
4720 >            return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
4721 >        }
4722 >
4723 >        public void forEach(Consumer<? super V> action) {
4724 >            if (action == null) throw new NullPointerException();
4725 >            Node<K,V>[] t;
4726 >            if ((t = map.table) != null) {
4727 >                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4728 >                for (Node<K,V> p; (p = it.advance()) != null; )
4729 >                    action.accept(p.val);
4730 >            }
4731 >        }
4732      }
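
The values view supports bulk removal but never addition; a sketch, continuing the hypothetical prices map:

    prices.values().removeIf(v -> v < 0); // drop negative-valued mappings
    // prices.values().add(7) would throw UnsupportedOperationException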
4733  
4734 <    /* ---------------- Iterator Support -------------- */
4734 >    /**
4735 >     * A view of a ConcurrentHashMap as a {@link Set} of (key, value)
4736 >     * entries.  This class cannot be directly instantiated. See
4737 >     * {@link #entrySet()}.
4738 >     */
4739 >    static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>>
4740 >        implements Set<Map.Entry<K,V>>, java.io.Serializable {
4741 >        private static final long serialVersionUID = 2249069246763182397L;
4742 >        EntrySetView(ConcurrentHashMap<K,V> map) { super(map); }
4743 >
4744 >        public boolean contains(Object o) {
4745 >            Object k, v, r; Map.Entry<?,?> e;
4746 >            return ((o instanceof Map.Entry) &&
4747 >                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4748 >                    (r = map.get(k)) != null &&
4749 >                    (v = e.getValue()) != null &&
4750 >                    (v == r || v.equals(r)));
4751 >        }
4752  
4753 <    private abstract class HashIterator {
4754 <        private int nextSegmentIndex;
4755 <        private int nextTableIndex;
4756 <        private HashEntry[] currentTable;
4757 <        private HashEntry<K, V> nextEntry;
4758 <        private HashEntry<K, V> lastReturned;
4753 >        public boolean remove(Object o) {
4754 >            Object k, v; Map.Entry<?,?> e;
4755 >            return ((o instanceof Map.Entry) &&
4756 >                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4757 >                    (v = e.getValue()) != null &&
4758 >                    map.remove(k, v));
4759 >        }
4760  
4761 <        private HashIterator() {
4762 <            nextSegmentIndex = segments.length - 1;
4763 <            nextTableIndex = -1;
4764 <            advance();
4761 >        /**
4762 >         * @return an iterator over the entries of the backing map
4763 >         */
4764 >        public Iterator<Map.Entry<K,V>> iterator() {
4765 >            ConcurrentHashMap<K,V> m = map;
4766 >            Node<K,V>[] t;
4767 >            int f = (t = m.table) == null ? 0 : t.length;
4768 >            return new EntryIterator<K,V>(t, f, 0, f, m);
4769          }
4770  
4771 <        public boolean hasMoreElements() { return hasNext(); }
4771 >        public boolean add(Entry<K,V> e) {
4772 >            return map.putVal(e.getKey(), e.getValue(), false) == null;
4773 >        }
4774  
4775 <        private void advance() {
4776 <            if (nextEntry != null && (nextEntry = nextEntry.next) != null)
4777 <                return;
4775 >        public boolean addAll(Collection<? extends Entry<K,V>> c) {
4776 >            boolean added = false;
4777 >            for (Entry<K,V> e : c) {
4778 >                if (add(e))
4779 >                    added = true;
4780 >            }
4781 >            return added;
4782 >        }
4783  
4784 <            while (nextTableIndex >= 0) {
4785 <                if ( (nextEntry = (HashEntry<K,V>)currentTable[nextTableIndex--]) != null)
4786 <                    return;
4784 >        public boolean removeIf(Predicate<? super Entry<K,V>> filter) {
4785 >            return map.removeEntryIf(filter);
4786 >        }
4787 >
4788 >        public final int hashCode() {
4789 >            int h = 0;
4790 >            Node<K,V>[] t;
4791 >            if ((t = map.table) != null) {
4792 >                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4793 >                for (Node<K,V> p; (p = it.advance()) != null; ) {
4794 >                    h += p.hashCode();
4795 >                }
4796              }
4797 +            return h;
4798 +        }
4799  
4800 <            while (nextSegmentIndex >= 0) {
4801 <                Segment<K,V> seg = (Segment<K,V>)segments[nextSegmentIndex--];
4802 <                if (seg.count != 0) {
4803 <                    currentTable = seg.table;
4804 <                    for (int j = currentTable.length - 1; j >= 0; --j) {
4805 <                        if ( (nextEntry = (HashEntry<K,V>)currentTable[j]) != null) {
4806 <                            nextTableIndex = j - 1;
4807 <                            return;
4808 <                        }
4800 >        public final boolean equals(Object o) {
4801 >            Set<?> c;
4802 >            return ((o instanceof Set) &&
4803 >                    ((c = (Set<?>)o) == this ||
4804 >                     (containsAll(c) && c.containsAll(this))));
4805 >        }
4806 >
4807 >        public Spliterator<Map.Entry<K,V>> spliterator() {
4808 >            Node<K,V>[] t;
4809 >            ConcurrentHashMap<K,V> m = map;
4810 >            long n = m.sumCount();
4811 >            int f = (t = m.table) == null ? 0 : t.length;
4812 >            return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m);
4813 >        }
4814 >
4815 >        public void forEach(Consumer<? super Map.Entry<K,V>> action) {
4816 >            if (action == null) throw new NullPointerException();
4817 >            Node<K,V>[] t;
4818 >            if ((t = map.table) != null) {
4819 >                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4820 >                for (Node<K,V> p; (p = it.advance()) != null; )
4821 >                    action.accept(new MapEntry<K,V>(p.key, p.val, map));
4822 >            }
4823 >        }
4824 >
4825 >    }
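
Unlike most Map.entrySet() views, this one accepts add, writing the mapping through with put semantics; a sketch on the hypothetical prices map:

    prices.entrySet().add(
        new java.util.AbstractMap.SimpleEntry<>("fig", 9));
    // behaves like prices.put("fig", 9); returns false if "fig" existed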
4826 >
4827 >    // -------------------------------------------------------
4828 >
4829 >    /**
4830 >     * Base class for bulk tasks. Repeats some fields and code from
4831 >     * class Traverser, because we need to subclass CountedCompleter.
4832 >     */
4833 >    @SuppressWarnings("serial")
4834 >    abstract static class BulkTask<K,V,R> extends CountedCompleter<R> {
4835 >        Node<K,V>[] tab;        // same as Traverser
4836 >        Node<K,V> next;
4837 >        TableStack<K,V> stack, spare;
4838 >        int index;
4839 >        int baseIndex;
4840 >        int baseLimit;
4841 >        final int baseSize;
4842 >        int batch;              // split control
4843 >
4844 >        BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) {
4845 >            super(par);
4846 >            this.batch = b;
4847 >            this.index = this.baseIndex = i;
4848 >            if ((this.tab = t) == null)
4849 >                this.baseSize = this.baseLimit = 0;
4850 >            else if (par == null)
4851 >                this.baseSize = this.baseLimit = t.length;
4852 >            else {
4853 >                this.baseLimit = f;
4854 >                this.baseSize = par.baseSize;
4855 >            }
4856 >        }
4857 >
4858 >        /**
4859 >         * Same as Traverser version.
4860 >         */
4861 >        final Node<K,V> advance() {
4862 >            Node<K,V> e;
4863 >            if ((e = next) != null)
4864 >                e = e.next;
4865 >            for (;;) {
4866 >                Node<K,V>[] t; int i, n;
4867 >                if (e != null)
4868 >                    return next = e;
4869 >                if (baseIndex >= baseLimit || (t = tab) == null ||
4870 >                    (n = t.length) <= (i = index) || i < 0)
4871 >                    return next = null;
4872 >                if ((e = tabAt(t, i)) != null && e.hash < 0) {
4873 >                    if (e instanceof ForwardingNode) {
4874 >                        tab = ((ForwardingNode<K,V>)e).nextTable;
4875 >                        e = null;
4876 >                        pushState(t, i, n);
4877 >                        continue;
4878                      }
4879 +                    else if (e instanceof TreeBin)
4880 +                        e = ((TreeBin<K,V>)e).first;
4881 +                    else
4882 +                        e = null;
4883                  }
4884 +                if (stack != null)
4885 +                    recoverState(n);
4886 +                else if ((index = i + baseSize) >= n)
4887 +                    index = ++baseIndex;
4888              }
4889          }
4890  
4891 <        public boolean hasNext() { return nextEntry != null; }
4891 >        private void pushState(Node<K,V>[] t, int i, int n) {
4892 >            TableStack<K,V> s = spare;
4893 >            if (s != null)
4894 >                spare = s.next;
4895 >            else
4896 >                s = new TableStack<K,V>();
4897 >            s.tab = t;
4898 >            s.length = n;
4899 >            s.index = i;
4900 >            s.next = stack;
4901 >            stack = s;
4902 >        }
4903  
4904 <        HashEntry<K,V> nextEntry() {
4905 <            if (nextEntry == null)
4906 <                throw new NoSuchElementException();
4907 <            lastReturned = nextEntry;
4908 <            advance();
4909 <            return lastReturned;
4904 >        private void recoverState(int n) {
4905 >            TableStack<K,V> s; int len;
4906 >            while ((s = stack) != null && (index += (len = s.length)) >= n) {
4907 >                n = len;
4908 >                index = s.index;
4909 >                tab = s.tab;
4910 >                s.tab = null;
4911 >                TableStack<K,V> next = s.next;
4912 >                s.next = spare; // save for reuse
4913 >                stack = next;
4914 >                spare = s;
4915 >            }
4916 >            if (s == null && (index += baseSize) >= n)
4917 >                index = ++baseIndex;
4918          }
4919 +    }
4920  
4921 <        public void remove() {
4922 <            if (lastReturned == null)
4923 <                throw new IllegalStateException();
4924 <            ConcurrentHashMap.this.remove(lastReturned.key);
4925 <            lastReturned = null;
4921 >    /*
4922 >     * Task classes. Coded in a regular but ugly format/style to
4923 >     * simplify checks that each variant differs in the right way from
4924 >     * others. The null screenings exist because compilers cannot tell
4925 >     * that we've already null-checked task arguments, so we force the
4926 >     * simplest hoisted bypass to help avoid convoluted traps.
4927 >     */
4928 >    @SuppressWarnings("serial")
4929 >    static final class ForEachKeyTask<K,V>
4930 >        extends BulkTask<K,V,Void> {
4931 >        final Consumer<? super K> action;
4932 >        ForEachKeyTask
4933 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4934 >             Consumer<? super K> action) {
4935 >            super(p, b, i, f, t);
4936 >            this.action = action;
4937 >        }
4938 >        public final void compute() {
4939 >            final Consumer<? super K> action;
4940 >            if ((action = this.action) != null) {
4941 >                for (int i = baseIndex, f, h; batch > 0 &&
4942 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4943 >                    addToPendingCount(1);
4944 >                    new ForEachKeyTask<K,V>
4945 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
4946 >                         action).fork();
4947 >                }
4948 >                for (Node<K,V> p; (p = advance()) != null;)
4949 >                    action.accept(p.key);
4950 >                propagateCompletion();
4951 >            }
4952          }
4953      }
4954  
4955 <    private class KeyIterator extends HashIterator implements Iterator<K>, Enumeration<K> {
4956 <        public K next() { return super.nextEntry().key; }
4957 <        public K nextElement() { return super.nextEntry().key; }
4955 >    @SuppressWarnings("serial")
4956 >    static final class ForEachValueTask<K,V>
4957 >        extends BulkTask<K,V,Void> {
4958 >        final Consumer<? super V> action;
4959 >        ForEachValueTask
4960 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4961 >             Consumer<? super V> action) {
4962 >            super(p, b, i, f, t);
4963 >            this.action = action;
4964 >        }
4965 >        public final void compute() {
4966 >            final Consumer<? super V> action;
4967 >            if ((action = this.action) != null) {
4968 >                for (int i = baseIndex, f, h; batch > 0 &&
4969 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4970 >                    addToPendingCount(1);
4971 >                    new ForEachValueTask<K,V>
4972 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
4973 >                         action).fork();
4974 >                }
4975 >                for (Node<K,V> p; (p = advance()) != null;)
4976 >                    action.accept(p.val);
4977 >                propagateCompletion();
4978 >            }
4979 >        }
4980      }
4981  
4982 <    private class ValueIterator extends HashIterator implements Iterator<V>, Enumeration<V> {
4983 <        public V next() { return super.nextEntry().value; }
4984 <        public V nextElement() { return super.nextEntry().value; }
4982 >    @SuppressWarnings("serial")
4983 >    static final class ForEachEntryTask<K,V>
4984 >        extends BulkTask<K,V,Void> {
4985 >        final Consumer<? super Entry<K,V>> action;
4986 >        ForEachEntryTask
4987 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4988 >             Consumer<? super Entry<K,V>> action) {
4989 >            super(p, b, i, f, t);
4990 >            this.action = action;
4991 >        }
4992 >        public final void compute() {
4993 >            final Consumer<? super Entry<K,V>> action;
4994 >            if ((action = this.action) != null) {
4995 >                for (int i = baseIndex, f, h; batch > 0 &&
4996 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
4997 >                    addToPendingCount(1);
4998 >                    new ForEachEntryTask<K,V>
4999 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
5000 >                         action).fork();
5001 >                }
5002 >                for (Node<K,V> p; (p = advance()) != null; )
5003 >                    action.accept(p);
5004 >                propagateCompletion();
5005 >            }
5006 >        }
5007      }
5008  
5009 <    private class EntryIterator extends HashIterator implements Iterator<Entry<K,V>> {
5010 <        public Map.Entry<K,V> next() { return super.nextEntry(); }
5009 >    @SuppressWarnings("serial")
5010 >    static final class ForEachMappingTask<K,V>
5011 >        extends BulkTask<K,V,Void> {
5012 >        final BiConsumer<? super K, ? super V> action;
5013 >        ForEachMappingTask
5014 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5015 >             BiConsumer<? super K,? super V> action) {
5016 >            super(p, b, i, f, t);
5017 >            this.action = action;
5018 >        }
5019 >        public final void compute() {
5020 >            final BiConsumer<? super K, ? super V> action;
5021 >            if ((action = this.action) != null) {
5022 >                for (int i = baseIndex, f, h; batch > 0 &&
5023 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5024 >                    addToPendingCount(1);
5025 >                    new ForEachMappingTask<K,V>
5026 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
5027 >                         action).fork();
5028 >                }
5029 >                for (Node<K,V> p; (p = advance()) != null; )
5030 >                    action.accept(p.key, p.val);
5031 >                propagateCompletion();
5032 >            }
5033 >        }
5034      }
5035  
5036 <    private class KeySet extends AbstractSet<K> {
5037 <        public Iterator<K> iterator() {
5038 <            return new KeyIterator();
5036 >    @SuppressWarnings("serial")
5037 >    static final class ForEachTransformedKeyTask<K,V,U>
5038 >        extends BulkTask<K,V,Void> {
5039 >        final Function<? super K, ? extends U> transformer;
5040 >        final Consumer<? super U> action;
5041 >        ForEachTransformedKeyTask
5042 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5043 >             Function<? super K, ? extends U> transformer, Consumer<? super U> action) {
5044 >            super(p, b, i, f, t);
5045 >            this.transformer = transformer; this.action = action;
5046          }
5047 <        public int size() {
5048 <            return ConcurrentHashMap.this.size();
5047 >        public final void compute() {
5048 >            final Function<? super K, ? extends U> transformer;
5049 >            final Consumer<? super U> action;
5050 >            if ((transformer = this.transformer) != null &&
5051 >                (action = this.action) != null) {
5052 >                for (int i = baseIndex, f, h; batch > 0 &&
5053 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5054 >                    addToPendingCount(1);
5055 >                    new ForEachTransformedKeyTask<K,V,U>
5056 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
5057 >                         transformer, action).fork();
5058 >                }
5059 >                for (Node<K,V> p; (p = advance()) != null; ) {
5060 >                    U u;
5061 >                    if ((u = transformer.apply(p.key)) != null)
5062 >                        action.accept(u);
5063 >                }
5064 >                propagateCompletion();
5065 >            }
5066          }
5067 <        public boolean contains(Object o) {
5068 <            return ConcurrentHashMap.this.containsKey(o);
5067 >    }
5068 >
5069 >    @SuppressWarnings("serial")
5070 >    static final class ForEachTransformedValueTask<K,V,U>
5071 >        extends BulkTask<K,V,Void> {
5072 >        final Function<? super V, ? extends U> transformer;
5073 >        final Consumer<? super U> action;
5074 >        ForEachTransformedValueTask
5075 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5076 >             Function<? super V, ? extends U> transformer, Consumer<? super U> action) {
5077 >            super(p, b, i, f, t);
5078 >            this.transformer = transformer; this.action = action;
5079          }
5080 <        public boolean remove(Object o) {
5081 <            return ConcurrentHashMap.this.remove(o) != null;
5080 >        public final void compute() {
5081 >            final Function<? super V, ? extends U> transformer;
5082 >            final Consumer<? super U> action;
5083 >            if ((transformer = this.transformer) != null &&
5084 >                (action = this.action) != null) {
5085 >                for (int i = baseIndex, f, h; batch > 0 &&
5086 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5087 >                    addToPendingCount(1);
5088 >                    new ForEachTransformedValueTask<K,V,U>
5089 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
5090 >                         transformer, action).fork();
5091 >                }
5092 >                for (Node<K,V> p; (p = advance()) != null; ) {
5093 >                    U u;
5094 >                    if ((u = transformer.apply(p.val)) != null)
5095 >                        action.accept(u);
5096 >                }
5097 >                propagateCompletion();
5098 >            }
5099          }
5100 <        public void clear() {
5101 <            ConcurrentHashMap.this.clear();
5100 >    }
5101 >
5102 >    @SuppressWarnings("serial")
5103 >    static final class ForEachTransformedEntryTask<K,V,U>
5104 >        extends BulkTask<K,V,Void> {
5105 >        final Function<Map.Entry<K,V>, ? extends U> transformer;
5106 >        final Consumer<? super U> action;
5107 >        ForEachTransformedEntryTask
5108 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5109 >             Function<Map.Entry<K,V>, ? extends U> transformer, Consumer<? super U> action) {
5110 >            super(p, b, i, f, t);
5111 >            this.transformer = transformer; this.action = action;
5112 >        }
5113 >        public final void compute() {
5114 >            final Function<Map.Entry<K,V>, ? extends U> transformer;
5115 >            final Consumer<? super U> action;
5116 >            if ((transformer = this.transformer) != null &&
5117 >                (action = this.action) != null) {
5118 >                for (int i = baseIndex, f, h; batch > 0 &&
5119 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5120 >                    addToPendingCount(1);
5121 >                    new ForEachTransformedEntryTask<K,V,U>
5122 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
5123 >                         transformer, action).fork();
5124 >                }
5125 >                for (Node<K,V> p; (p = advance()) != null; ) {
5126 >                    U u;
5127 >                    if ((u = transformer.apply(p)) != null)
5128 >                        action.accept(u);
5129 >                }
5130 >                propagateCompletion();
5131 >            }
5132          }
5133      }
5134  
5135 <    private class Values extends AbstractCollection<V> {
5136 <        public Iterator<V> iterator() {
5137 <            return new ValueIterator();
5135 >    @SuppressWarnings("serial")
5136 >    static final class ForEachTransformedMappingTask<K,V,U>
5137 >        extends BulkTask<K,V,Void> {
5138 >        final BiFunction<? super K, ? super V, ? extends U> transformer;
5139 >        final Consumer<? super U> action;
5140 >        ForEachTransformedMappingTask
5141 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5142 >             BiFunction<? super K, ? super V, ? extends U> transformer,
5143 >             Consumer<? super U> action) {
5144 >            super(p, b, i, f, t);
5145 >            this.transformer = transformer; this.action = action;
5146          }
5147 <        public int size() {
5148 <            return ConcurrentHashMap.this.size();
5147 >        public final void compute() {
5148 >            final BiFunction<? super K, ? super V, ? extends U> transformer;
5149 >            final Consumer<? super U> action;
5150 >            if ((transformer = this.transformer) != null &&
5151 >                (action = this.action) != null) {
5152 >                for (int i = baseIndex, f, h; batch > 0 &&
5153 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5154 >                    addToPendingCount(1);
5155 >                    new ForEachTransformedMappingTask<K,V,U>
5156 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
5157 >                         transformer, action).fork();
5158 >                }
5159 >                for (Node<K,V> p; (p = advance()) != null; ) {
5160 >                    U u;
5161 >                    if ((u = transformer.apply(p.key, p.val)) != null)
5162 >                        action.accept(u);
5163 >                }
5164 >                propagateCompletion();
5165 >            }
5166          }
5167 <        public boolean contains(Object o) {
5168 <            return ConcurrentHashMap.this.containsValue(o);
5167 >    }
5168 >
5169 >    @SuppressWarnings("serial")
5170 >    static final class SearchKeysTask<K,V,U>
5171 >        extends BulkTask<K,V,U> {
5172 >        final Function<? super K, ? extends U> searchFunction;
5173 >        final AtomicReference<U> result;
5174 >        SearchKeysTask
5175 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5176 >             Function<? super K, ? extends U> searchFunction,
5177 >             AtomicReference<U> result) {
5178 >            super(p, b, i, f, t);
5179 >            this.searchFunction = searchFunction; this.result = result;
5180          }
5181 <        public void clear() {
5182 <            ConcurrentHashMap.this.clear();
5181 >        public final U getRawResult() { return result.get(); }
5182 >        public final void compute() {
5183 >            final Function<? super K, ? extends U> searchFunction;
5184 >            final AtomicReference<U> result;
5185 >            if ((searchFunction = this.searchFunction) != null &&
5186 >                (result = this.result) != null) {
5187 >                for (int i = baseIndex, f, h; batch > 0 &&
5188 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5189 >                    if (result.get() != null)
5190 >                        return;
5191 >                    addToPendingCount(1);
5192 >                    new SearchKeysTask<K,V,U>
5193 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
5194 >                         searchFunction, result).fork();
5195 >                }
5196 >                while (result.get() == null) {
5197 >                    U u;
5198 >                    Node<K,V> p;
5199 >                    if ((p = advance()) == null) {
5200 >                        propagateCompletion();
5201 >                        break;
5202 >                    }
5203 >                    if ((u = searchFunction.apply(p.key)) != null) {
5204 >                        if (result.compareAndSet(null, u))
5205 >                            quietlyCompleteRoot();
5206 >                        break;
5207 >                    }
5208 >                }
5209 >            }
5210          }
5211      }
5212  
5213 <    private class EntrySet extends AbstractSet<Map.Entry<K,V>> {
5214 <        public Iterator<Map.Entry<K,V>> iterator() {
5215 <            return new EntryIterator();
5213 >    @SuppressWarnings("serial")
5214 >    static final class SearchValuesTask<K,V,U>
5215 >        extends BulkTask<K,V,U> {
5216 >        final Function<? super V, ? extends U> searchFunction;
5217 >        final AtomicReference<U> result;
5218 >        SearchValuesTask
5219 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5220 >             Function<? super V, ? extends U> searchFunction,
5221 >             AtomicReference<U> result) {
5222 >            super(p, b, i, f, t);
5223 >            this.searchFunction = searchFunction; this.result = result;
5224          }
5225 <        public boolean contains(Object o) {
5226 <            if (!(o instanceof Map.Entry))
5227 <                return false;
5228 <            Map.Entry<K,V> e = (Map.Entry<K,V>)o;
5229 <            V v = ConcurrentHashMap.this.get(e.getKey());
5230 <            return v != null && v.equals(e.getValue());
5225 >        public final U getRawResult() { return result.get(); }
5226 >        public final void compute() {
5227 >            final Function<? super V, ? extends U> searchFunction;
5228 >            final AtomicReference<U> result;
5229 >            if ((searchFunction = this.searchFunction) != null &&
5230 >                (result = this.result) != null) {
5231 >                for (int i = baseIndex, f, h; batch > 0 &&
5232 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5233 >                    if (result.get() != null)
5234 >                        return;
5235 >                    addToPendingCount(1);
5236 >                    new SearchValuesTask<K,V,U>
5237 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
5238 >                         searchFunction, result).fork();
5239 >                }
5240 >                while (result.get() == null) {
5241 >                    U u;
5242 >                    Node<K,V> p;
5243 >                    if ((p = advance()) == null) {
5244 >                        propagateCompletion();
5245 >                        break;
5246 >                    }
5247 >                    if ((u = searchFunction.apply(p.val)) != null) {
5248 >                        if (result.compareAndSet(null, u))
5249 >                            quietlyCompleteRoot();
5250 >                        break;
5251 >                    }
5252 >                }
5253 >            }
5254          }
5255 <        public boolean remove(Object o) {
5256 <            if (!(o instanceof Map.Entry))
5257 <                return false;
5258 <            Map.Entry<K,V> e = (Map.Entry<K,V>)o;
5259 <            return ConcurrentHashMap.this.remove(e.getKey(), e.getValue());
5255 >    }
5256 >
5257 >    @SuppressWarnings("serial")
5258 >    static final class SearchEntriesTask<K,V,U>
5259 >        extends BulkTask<K,V,U> {
5260 >        final Function<Entry<K,V>, ? extends U> searchFunction;
5261 >        final AtomicReference<U> result;
5262 >        SearchEntriesTask
5263 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5264 >             Function<Entry<K,V>, ? extends U> searchFunction,
5265 >             AtomicReference<U> result) {
5266 >            super(p, b, i, f, t);
5267 >            this.searchFunction = searchFunction; this.result = result;
5268 >        }
5269 >        public final U getRawResult() { return result.get(); }
5270 >        public final void compute() {
5271 >            final Function<Entry<K,V>, ? extends U> searchFunction;
5272 >            final AtomicReference<U> result;
5273 >            if ((searchFunction = this.searchFunction) != null &&
5274 >                (result = this.result) != null) {
5275 >                for (int i = baseIndex, f, h; batch > 0 &&
5276 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5277 >                    if (result.get() != null)
5278 >                        return;
5279 >                    addToPendingCount(1);
5280 >                    new SearchEntriesTask<K,V,U>
5281 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
5282 >                         searchFunction, result).fork();
5283 >                }
5284 >                while (result.get() == null) {
5285 >                    U u;
5286 >                    Node<K,V> p;
5287 >                    if ((p = advance()) == null) {
5288 >                        propagateCompletion();
5289 >                        break;
5290 >                    }
5291 >                    if ((u = searchFunction.apply(p)) != null) {
5292 >                        if (result.compareAndSet(null, u))
5293 >                            quietlyCompleteRoot();
5294 >                        return;
5295 >                    }
5296 >                }
5297 >            }
5298          }
5299 <        public int size() {
5300 <            return ConcurrentHashMap.this.size();
5299 >    }
5300 >
5301 >    @SuppressWarnings("serial")
5302 >    static final class SearchMappingsTask<K,V,U>
5303 >        extends BulkTask<K,V,U> {
5304 >        final BiFunction<? super K, ? super V, ? extends U> searchFunction;
5305 >        final AtomicReference<U> result;
5306 >        SearchMappingsTask
5307 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5308 >             BiFunction<? super K, ? super V, ? extends U> searchFunction,
5309 >             AtomicReference<U> result) {
5310 >            super(p, b, i, f, t);
5311 >            this.searchFunction = searchFunction; this.result = result;
5312          }
5313 <        public void clear() {
5314 <            ConcurrentHashMap.this.clear();
5313 >        public final U getRawResult() { return result.get(); }
5314 >        public final void compute() {
5315 >            final BiFunction<? super K, ? super V, ? extends U> searchFunction;
5316 >            final AtomicReference<U> result;
5317 >            if ((searchFunction = this.searchFunction) != null &&
5318 >                (result = this.result) != null) {
5319 >                for (int i = baseIndex, f, h; batch > 0 &&
5320 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5321 >                    if (result.get() != null)
5322 >                        return;
5323 >                    addToPendingCount(1);
5324 >                    new SearchMappingsTask<K,V,U>
5325 >                        (this, batch >>>= 1, baseLimit = h, f, tab,
5326 >                         searchFunction, result).fork();
5327 >                }
5328 >                while (result.get() == null) {
5329 >                    U u;
5330 >                    Node<K,V> p;
5331 >                    if ((p = advance()) == null) {
5332 >                        propagateCompletion();
5333 >                        break;
5334 >                    }
5335 >                    if ((u = searchFunction.apply(p.key, p.val)) != null) {
5336 >                        if (result.compareAndSet(null, u))
5337 >                            quietlyCompleteRoot();
5338 >                        break;
5339 >                    }
5340 >                }
5341 >            }
5342          }
5343      }
5344  
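The four Search*Task classes back ConcurrentHashMap's public search, searchKeys, searchValues and searchEntries methods: subtasks fork while the batch is still splittable, and the first non-null function result is CASed into the shared AtomicReference, after which the whole computation quiets. An illustrative usage sketch (editor's example, not part of the patch):

    import java.util.concurrent.ConcurrentHashMap;

    public class SearchDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
            map.put("apple", 3);
            map.put("banana", 11);
            map.put("cherry", 7);

            // Backed by SearchMappingsTask; if several entries match,
            // which one wins is nondeterministic.
            String over10 = map.search(1L, (k, v) -> v > 10 ? k : null);
            System.out.println(over10);   // banana

            // Key-only variant (SearchKeysTask). A parallelismThreshold of
            // Long.MAX_VALUE runs the search sequentially in the caller.
            String ch = map.searchKeys(Long.MAX_VALUE,
                                       k -> k.startsWith("ch") ? k : null);
            System.out.println(ch);       // cherry
        }
    }
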
5345 <    /* ---------------- Serialization Support -------------- */
5345 >    @SuppressWarnings("serial")
5346 >    static final class ReduceKeysTask<K,V>
5347 >        extends BulkTask<K,V,K> {
5348 >        final BiFunction<? super K, ? super K, ? extends K> reducer;
5349 >        K result;
5350 >        ReduceKeysTask<K,V> rights, nextRight;
5351 >        ReduceKeysTask
5352 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5353 >             ReduceKeysTask<K,V> nextRight,
5354 >             BiFunction<? super K, ? super K, ? extends K> reducer) {
5355 >            super(p, b, i, f, t); this.nextRight = nextRight;
5356 >            this.reducer = reducer;
5357 >        }
5358 >        public final K getRawResult() { return result; }
5359 >        public final void compute() {
5360 >            final BiFunction<? super K, ? super K, ? extends K> reducer;
5361 >            if ((reducer = this.reducer) != null) {
5362 >                for (int i = baseIndex, f, h; batch > 0 &&
5363 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5364 >                    addToPendingCount(1);
5365 >                    (rights = new ReduceKeysTask<K,V>
5366 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5367 >                      rights, reducer)).fork();
5368 >                }
5369 >                K r = null;
5370 >                for (Node<K,V> p; (p = advance()) != null; ) {
5371 >                    K u = p.key;
5372 >                    r = (r == null) ? u : u == null ? r : reducer.apply(r, u);
5373 >                }
5374 >                result = r;
5375 >                CountedCompleter<?> c;
5376 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5377 >                    @SuppressWarnings("unchecked")
5378 >                    ReduceKeysTask<K,V>
5379 >                        t = (ReduceKeysTask<K,V>)c,
5380 >                        s = t.rights;
5381 >                    while (s != null) {
5382 >                        K tr, sr;
5383 >                        if ((sr = s.result) != null)
5384 >                            t.result = (((tr = t.result) == null) ? sr :
5385 >                                        reducer.apply(tr, sr));
5386 >                        s = t.rights = s.nextRight;
5387 >                    }
5388 >                }
5389 >            }
5390 >        }
5391 >    }
5392  
5393 <    /**
5394 <     * Save the state of the <tt>ConcurrentHashMap</tt>
5395 <     * instance to a stream (i.e.,
5396 <     * serialize it).
5397 <     * @param s the stream
5398 <     * @serialData
5399 <     * the key (Object) and value (Object)
5400 <     * for each key-value mapping, followed by a null pair.
5401 <     * The key-value mappings are emitted in no particular order.
5402 <     */
5403 <    private void writeObject(java.io.ObjectOutputStream s) throws IOException  {
5404 <        s.defaultWriteObject();
5393 >    @SuppressWarnings("serial")
5394 >    static final class ReduceValuesTask<K,V>
5395 >        extends BulkTask<K,V,V> {
5396 >        final BiFunction<? super V, ? super V, ? extends V> reducer;
5397 >        V result;
5398 >        ReduceValuesTask<K,V> rights, nextRight;
5399 >        ReduceValuesTask
5400 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5401 >             ReduceValuesTask<K,V> nextRight,
5402 >             BiFunction<? super V, ? super V, ? extends V> reducer) {
5403 >            super(p, b, i, f, t); this.nextRight = nextRight;
5404 >            this.reducer = reducer;
5405 >        }
5406 >        public final V getRawResult() { return result; }
5407 >        public final void compute() {
5408 >            final BiFunction<? super V, ? super V, ? extends V> reducer;
5409 >            if ((reducer = this.reducer) != null) {
5410 >                for (int i = baseIndex, f, h; batch > 0 &&
5411 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5412 >                    addToPendingCount(1);
5413 >                    (rights = new ReduceValuesTask<K,V>
5414 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5415 >                      rights, reducer)).fork();
5416 >                }
5417 >                V r = null;
5418 >                for (Node<K,V> p; (p = advance()) != null; ) {
5419 >                    V v = p.val;
5420 >                    r = (r == null) ? v : reducer.apply(r, v);
5421 >                }
5422 >                result = r;
5423 >                CountedCompleter<?> c;
5424 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5425 >                    @SuppressWarnings("unchecked")
5426 >                    ReduceValuesTask<K,V>
5427 >                        t = (ReduceValuesTask<K,V>)c,
5428 >                        s = t.rights;
5429 >                    while (s != null) {
5430 >                        V tr, sr;
5431 >                        if ((sr = s.result) != null)
5432 >                            t.result = (((tr = t.result) == null) ? sr :
5433 >                                        reducer.apply(tr, sr));
5434 >                        s = t.rights = s.nextRight;
5435 >                    }
5436 >                }
5437 >            }
5438 >        }
5439 >    }
5440  
5441 <        for (int k = 0; k < segments.length; ++k) {
5442 <            Segment<K,V> seg = (Segment<K,V>)segments[k];
5443 <            seg.lock();
5444 <            try {
5445 <                HashEntry[] tab = seg.table;
5446 <                for (int i = 0; i < tab.length; ++i) {
5447 <                    for (HashEntry<K,V> e = (HashEntry<K,V>)tab[i]; e != null; e = e.next) {
5448 <                        s.writeObject(e.key);
5449 <                        s.writeObject(e.value);
5441 >    @SuppressWarnings("serial")
5442 >    static final class ReduceEntriesTask<K,V>
5443 >        extends BulkTask<K,V,Map.Entry<K,V>> {
5444 >        final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
5445 >        Map.Entry<K,V> result;
5446 >        ReduceEntriesTask<K,V> rights, nextRight;
5447 >        ReduceEntriesTask
5448 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5449 >             ReduceEntriesTask<K,V> nextRight,
5450 >             BiFunction<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
5451 >            super(p, b, i, f, t); this.nextRight = nextRight;
5452 >            this.reducer = reducer;
5453 >        }
5454 >        public final Map.Entry<K,V> getRawResult() { return result; }
5455 >        public final void compute() {
5456 >            final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
5457 >            if ((reducer = this.reducer) != null) {
5458 >                for (int i = baseIndex, f, h; batch > 0 &&
5459 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5460 >                    addToPendingCount(1);
5461 >                    (rights = new ReduceEntriesTask<K,V>
5462 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5463 >                      rights, reducer)).fork();
5464 >                }
5465 >                Map.Entry<K,V> r = null;
5466 >                for (Node<K,V> p; (p = advance()) != null; )
5467 >                    r = (r == null) ? p : reducer.apply(r, p);
5468 >                result = r;
5469 >                CountedCompleter<?> c;
5470 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5471 >                    @SuppressWarnings("unchecked")
5472 >                    ReduceEntriesTask<K,V>
5473 >                        t = (ReduceEntriesTask<K,V>)c,
5474 >                        s = t.rights;
5475 >                    while (s != null) {
5476 >                        Map.Entry<K,V> tr, sr;
5477 >                        if ((sr = s.result) != null)
5478 >                            t.result = (((tr = t.result) == null) ? sr :
5479 >                                        reducer.apply(tr, sr));
5480 >                        s = t.rights = s.nextRight;
5481 >                    }
5482 >                }
5483 >            }
5484 >        }
5485 >    }
5486 >
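ReduceKeysTask, ReduceValuesTask and ReduceEntriesTask implement the untransformed reduceKeys/reduceValues/reduceEntries methods: each leaf folds its slice left to right, and completed right-hand subtasks are merged into their parents in the firstComplete/nextComplete loop, so the reducer should be associative and side-effect-free. A minimal sketch (editor's example, not part of the patch):

    import java.util.concurrent.ConcurrentHashMap;

    public class ReduceDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
            map.put("a", 1);
            map.put("b", 2);
            map.put("c", 3);

            // ReduceValuesTask: pairwise-combines the values; the result
            // is null for an empty map.
            Integer sum = map.reduceValues(1L, Integer::sum);
            System.out.println(sum);      // 6

            // ReduceKeysTask: same shape over the keys.
            String joined = map.reduceKeys(1L, (a, b) -> a + b);
            System.out.println(joined);   // e.g. "abc"
        }
    }
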
5487 >    @SuppressWarnings("serial")
5488 >    static final class MapReduceKeysTask<K,V,U>
5489 >        extends BulkTask<K,V,U> {
5490 >        final Function<? super K, ? extends U> transformer;
5491 >        final BiFunction<? super U, ? super U, ? extends U> reducer;
5492 >        U result;
5493 >        MapReduceKeysTask<K,V,U> rights, nextRight;
5494 >        MapReduceKeysTask
5495 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5496 >             MapReduceKeysTask<K,V,U> nextRight,
5497 >             Function<? super K, ? extends U> transformer,
5498 >             BiFunction<? super U, ? super U, ? extends U> reducer) {
5499 >            super(p, b, i, f, t); this.nextRight = nextRight;
5500 >            this.transformer = transformer;
5501 >            this.reducer = reducer;
5502 >        }
5503 >        public final U getRawResult() { return result; }
5504 >        public final void compute() {
5505 >            final Function<? super K, ? extends U> transformer;
5506 >            final BiFunction<? super U, ? super U, ? extends U> reducer;
5507 >            if ((transformer = this.transformer) != null &&
5508 >                (reducer = this.reducer) != null) {
5509 >                for (int i = baseIndex, f, h; batch > 0 &&
5510 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5511 >                    addToPendingCount(1);
5512 >                    (rights = new MapReduceKeysTask<K,V,U>
5513 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5514 >                      rights, transformer, reducer)).fork();
5515 >                }
5516 >                U r = null;
5517 >                for (Node<K,V> p; (p = advance()) != null; ) {
5518 >                    U u;
5519 >                    if ((u = transformer.apply(p.key)) != null)
5520 >                        r = (r == null) ? u : reducer.apply(r, u);
5521 >                }
5522 >                result = r;
5523 >                CountedCompleter<?> c;
5524 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5525 >                    @SuppressWarnings("unchecked")
5526 >                    MapReduceKeysTask<K,V,U>
5527 >                        t = (MapReduceKeysTask<K,V,U>)c,
5528 >                        s = t.rights;
5529 >                    while (s != null) {
5530 >                        U tr, sr;
5531 >                        if ((sr = s.result) != null)
5532 >                            t.result = (((tr = t.result) == null) ? sr :
5533 >                                        reducer.apply(tr, sr));
5534 >                        s = t.rights = s.nextRight;
5535                      }
5536                  }
5537              }
5538 <            finally {
5539 <                seg.unlock();
5538 >        }
5539 >    }
5540 >
5541 >    @SuppressWarnings("serial")
5542 >    static final class MapReduceValuesTask<K,V,U>
5543 >        extends BulkTask<K,V,U> {
5544 >        final Function<? super V, ? extends U> transformer;
5545 >        final BiFunction<? super U, ? super U, ? extends U> reducer;
5546 >        U result;
5547 >        MapReduceValuesTask<K,V,U> rights, nextRight;
5548 >        MapReduceValuesTask
5549 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5550 >             MapReduceValuesTask<K,V,U> nextRight,
5551 >             Function<? super V, ? extends U> transformer,
5552 >             BiFunction<? super U, ? super U, ? extends U> reducer) {
5553 >            super(p, b, i, f, t); this.nextRight = nextRight;
5554 >            this.transformer = transformer;
5555 >            this.reducer = reducer;
5556 >        }
5557 >        public final U getRawResult() { return result; }
5558 >        public final void compute() {
5559 >            final Function<? super V, ? extends U> transformer;
5560 >            final BiFunction<? super U, ? super U, ? extends U> reducer;
5561 >            if ((transformer = this.transformer) != null &&
5562 >                (reducer = this.reducer) != null) {
5563 >                for (int i = baseIndex, f, h; batch > 0 &&
5564 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5565 >                    addToPendingCount(1);
5566 >                    (rights = new MapReduceValuesTask<K,V,U>
5567 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5568 >                      rights, transformer, reducer)).fork();
5569 >                }
5570 >                U r = null;
5571 >                for (Node<K,V> p; (p = advance()) != null; ) {
5572 >                    U u;
5573 >                    if ((u = transformer.apply(p.val)) != null)
5574 >                        r = (r == null) ? u : reducer.apply(r, u);
5575 >                }
5576 >                result = r;
5577 >                CountedCompleter<?> c;
5578 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5579 >                    @SuppressWarnings("unchecked")
5580 >                    MapReduceValuesTask<K,V,U>
5581 >                        t = (MapReduceValuesTask<K,V,U>)c,
5582 >                        s = t.rights;
5583 >                    while (s != null) {
5584 >                        U tr, sr;
5585 >                        if ((sr = s.result) != null)
5586 >                            t.result = (((tr = t.result) == null) ? sr :
5587 >                                        reducer.apply(tr, sr));
5588 >                        s = t.rights = s.nextRight;
5589 >                    }
5590 >                }
5591              }
5592          }
1023        s.writeObject(null);
1024        s.writeObject(null);
5593      }
5594  
5595 <    /**
5596 <     * Reconstitute the <tt>ConcurrentHashMap</tt>
5597 <     * instance from a stream (i.e.,
5598 <     * deserialize it).
5599 <     * @param s the stream
5600 <     */
5601 <    private void readObject(java.io.ObjectInputStream s)
5602 <        throws IOException, ClassNotFoundException  {
5603 <        s.defaultReadObject();
5595 >    @SuppressWarnings("serial")
5596 >    static final class MapReduceEntriesTask<K,V,U>
5597 >        extends BulkTask<K,V,U> {
5598 >        final Function<Map.Entry<K,V>, ? extends U> transformer;
5599 >        final BiFunction<? super U, ? super U, ? extends U> reducer;
5600 >        U result;
5601 >        MapReduceEntriesTask<K,V,U> rights, nextRight;
5602 >        MapReduceEntriesTask
5603 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5604 >             MapReduceEntriesTask<K,V,U> nextRight,
5605 >             Function<Map.Entry<K,V>, ? extends U> transformer,
5606 >             BiFunction<? super U, ? super U, ? extends U> reducer) {
5607 >            super(p, b, i, f, t); this.nextRight = nextRight;
5608 >            this.transformer = transformer;
5609 >            this.reducer = reducer;
5610 >        }
5611 >        public final U getRawResult() { return result; }
5612 >        public final void compute() {
5613 >            final Function<Map.Entry<K,V>, ? extends U> transformer;
5614 >            final BiFunction<? super U, ? super U, ? extends U> reducer;
5615 >            if ((transformer = this.transformer) != null &&
5616 >                (reducer = this.reducer) != null) {
5617 >                for (int i = baseIndex, f, h; batch > 0 &&
5618 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5619 >                    addToPendingCount(1);
5620 >                    (rights = new MapReduceEntriesTask<K,V,U>
5621 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5622 >                      rights, transformer, reducer)).fork();
5623 >                }
5624 >                U r = null;
5625 >                for (Node<K,V> p; (p = advance()) != null; ) {
5626 >                    U u;
5627 >                    if ((u = transformer.apply(p)) != null)
5628 >                        r = (r == null) ? u : reducer.apply(r, u);
5629 >                }
5630 >                result = r;
5631 >                CountedCompleter<?> c;
5632 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5633 >                    @SuppressWarnings("unchecked")
5634 >                    MapReduceEntriesTask<K,V,U>
5635 >                        t = (MapReduceEntriesTask<K,V,U>)c,
5636 >                        s = t.rights;
5637 >                    while (s != null) {
5638 >                        U tr, sr;
5639 >                        if ((sr = s.result) != null)
5640 >                            t.result = (((tr = t.result) == null) ? sr :
5641 >                                        reducer.apply(tr, sr));
5642 >                        s = t.rights = s.nextRight;
5643 >                    }
5644 >                }
5645 >            }
5646 >        }
5647 >    }
5648  
5649 <        // Initialize each segment to be minimally sized, and let grow.
5650 <        for (int i = 0; i < segments.length; ++i) {
5651 <            segments[i].setTable(new HashEntry[1]);
5649 >    @SuppressWarnings("serial")
5650 >    static final class MapReduceMappingsTask<K,V,U>
5651 >        extends BulkTask<K,V,U> {
5652 >        final BiFunction<? super K, ? super V, ? extends U> transformer;
5653 >        final BiFunction<? super U, ? super U, ? extends U> reducer;
5654 >        U result;
5655 >        MapReduceMappingsTask<K,V,U> rights, nextRight;
5656 >        MapReduceMappingsTask
5657 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5658 >             MapReduceMappingsTask<K,V,U> nextRight,
5659 >             BiFunction<? super K, ? super V, ? extends U> transformer,
5660 >             BiFunction<? super U, ? super U, ? extends U> reducer) {
5661 >            super(p, b, i, f, t); this.nextRight = nextRight;
5662 >            this.transformer = transformer;
5663 >            this.reducer = reducer;
5664          }
5665 +        public final U getRawResult() { return result; }
5666 +        public final void compute() {
5667 +            final BiFunction<? super K, ? super V, ? extends U> transformer;
5668 +            final BiFunction<? super U, ? super U, ? extends U> reducer;
5669 +            if ((transformer = this.transformer) != null &&
5670 +                (reducer = this.reducer) != null) {
5671 +                for (int i = baseIndex, f, h; batch > 0 &&
5672 +                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5673 +                    addToPendingCount(1);
5674 +                    (rights = new MapReduceMappingsTask<K,V,U>
5675 +                     (this, batch >>>= 1, baseLimit = h, f, tab,
5676 +                      rights, transformer, reducer)).fork();
5677 +                }
5678 +                U r = null;
5679 +                for (Node<K,V> p; (p = advance()) != null; ) {
5680 +                    U u;
5681 +                    if ((u = transformer.apply(p.key, p.val)) != null)
5682 +                        r = (r == null) ? u : reducer.apply(r, u);
5683 +                }
5684 +                result = r;
5685 +                CountedCompleter<?> c;
5686 +                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5687 +                    @SuppressWarnings("unchecked")
5688 +                    MapReduceMappingsTask<K,V,U>
5689 +                        t = (MapReduceMappingsTask<K,V,U>)c,
5690 +                        s = t.rights;
5691 +                    while (s != null) {
5692 +                        U tr, sr;
5693 +                        if ((sr = s.result) != null)
5694 +                            t.result = (((tr = t.result) == null) ? sr :
5695 +                                        reducer.apply(tr, sr));
5696 +                        s = t.rights = s.nextRight;
5697 +                    }
5698 +                }
5699 +            }
5700 +        }
5701 +    }
5702  
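The MapReduce*Task classes add a transformer stage in front of the reducer; the (u = transformer.apply(...)) != null guard means a null transformer result filters the element out before it ever reaches the reducer. A sketch using the corresponding public overloads (editor's example, not part of the patch):

    import java.util.concurrent.ConcurrentHashMap;

    public class MapReduceDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
            map.put("apple", 3);
            map.put("fig", 11);

            // MapReduceMappingsTask: transform each (key, value) pair,
            // then combine the non-null results.
            Integer weighted = map.reduce(1L,
                (k, v) -> k.length() * v,   // transformer
                Integer::sum);              // reducer
            System.out.println(weighted);   // 5*3 + 3*11 = 48

            // MapReduceValuesTask with filtering: null transformer results
            // are skipped, so only values over 10 are summed.
            Integer bigOnly = map.reduceValues(1L,
                v -> v > 10 ? v : null,
                Integer::sum);
            System.out.println(bigOnly);    // 11
        }
    }
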
5703 <        // Read the keys and values, and put the mappings in the table
5704 <        for (;;) {
5705 <            K key = (K) s.readObject();
5706 <            V value = (V) s.readObject();
5707 <            if (key == null)
5708 <                break;
5709 <            put(key, value);
5703 >    @SuppressWarnings("serial")
5704 >    static final class MapReduceKeysToDoubleTask<K,V>
5705 >        extends BulkTask<K,V,Double> {
5706 >        final ToDoubleFunction<? super K> transformer;
5707 >        final DoubleBinaryOperator reducer;
5708 >        final double basis;
5709 >        double result;
5710 >        MapReduceKeysToDoubleTask<K,V> rights, nextRight;
5711 >        MapReduceKeysToDoubleTask
5712 >            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5713 >             MapReduceKeysToDoubleTask<K,V> nextRight,
5714 >             ToDoubleFunction<? super K> transformer,
5715 >             double basis,
5716 >             DoubleBinaryOperator reducer) {
5717 >            super(p, b, i, f, t); this.nextRight = nextRight;
5718 >            this.transformer = transformer;
5719 >            this.basis = basis; this.reducer = reducer;
5720 >        }
5721 >        public final Double getRawResult() { return result; }
5722 >        public final void compute() {
5723 >            final ToDoubleFunction<? super K> transformer;
5724 >            final DoubleBinaryOperator reducer;
5725 >            if ((transformer = this.transformer) != null &&
5726 >                (reducer = this.reducer) != null) {
5727 >                double r = this.basis;
5728 >                for (int i = baseIndex, f, h; batch > 0 &&
5729 >                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5730 >                    addToPendingCount(1);
5731 >                    (rights = new MapReduceKeysToDoubleTask<K,V>
5732 >                     (this, batch >>>= 1, baseLimit = h, f, tab,
5733 >                      rights, transformer, r, reducer)).fork();
5734 >                }
5735 >                for (Node<K,V> p; (p = advance()) != null; )
5736 >                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key));
5737 >                result = r;
5738 >                CountedCompleter<?> c;
5739 >                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5740 >                    @SuppressWarnings("unchecked")
5741 >                    MapReduceKeysToDoubleTask<K,V>
5742 >                        t = (MapReduceKeysToDoubleTask<K,V>)c,
5743 >                        s = t.rights;
5744 >                    while (s != null) {
5745 >                        t.result = reducer.applyAsDouble(t.result, s.result);
5746 >                        s = t.rights = s.nextRight;
5747 >                    }
5748 >                }
5749 >            }
5750          }
5751      }
1051 }
5752  
5753 +    @SuppressWarnings("serial")
5754 +    static final class MapReduceValuesToDoubleTask<K,V>
5755 +        extends BulkTask<K,V,Double> {
5756 +        final ToDoubleFunction<? super V> transformer;
5757 +        final DoubleBinaryOperator reducer;
5758 +        final double basis;
5759 +        double result;
5760 +        MapReduceValuesToDoubleTask<K,V> rights, nextRight;
5761 +        MapReduceValuesToDoubleTask
5762 +            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5763 +             MapReduceValuesToDoubleTask<K,V> nextRight,
5764 +             ToDoubleFunction<? super V> transformer,
5765 +             double basis,
5766 +             DoubleBinaryOperator reducer) {
5767 +            super(p, b, i, f, t); this.nextRight = nextRight;
5768 +            this.transformer = transformer;
5769 +            this.basis = basis; this.reducer = reducer;
5770 +        }
5771 +        public final Double getRawResult() { return result; }
5772 +        public final void compute() {
5773 +            final ToDoubleFunction<? super V> transformer;
5774 +            final DoubleBinaryOperator reducer;
5775 +            if ((transformer = this.transformer) != null &&
5776 +                (reducer = this.reducer) != null) {
5777 +                double r = this.basis;
5778 +                for (int i = baseIndex, f, h; batch > 0 &&
5779 +                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5780 +                    addToPendingCount(1);
5781 +                    (rights = new MapReduceValuesToDoubleTask<K,V>
5782 +                     (this, batch >>>= 1, baseLimit = h, f, tab,
5783 +                      rights, transformer, r, reducer)).fork();
5784 +                }
5785 +                for (Node<K,V> p; (p = advance()) != null; )
5786 +                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.val));
5787 +                result = r;
5788 +                CountedCompleter<?> c;
5789 +                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5790 +                    @SuppressWarnings("unchecked")
5791 +                    MapReduceValuesToDoubleTask<K,V>
5792 +                        t = (MapReduceValuesToDoubleTask<K,V>)c,
5793 +                        s = t.rights;
5794 +                    while (s != null) {
5795 +                        t.result = reducer.applyAsDouble(t.result, s.result);
5796 +                        s = t.rights = s.nextRight;
5797 +                    }
5798 +                }
5799 +            }
5800 +        }
5801 +    }
5802 +
5803 +    @SuppressWarnings("serial")
5804 +    static final class MapReduceEntriesToDoubleTask<K,V>
5805 +        extends BulkTask<K,V,Double> {
5806 +        final ToDoubleFunction<Map.Entry<K,V>> transformer;
5807 +        final DoubleBinaryOperator reducer;
5808 +        final double basis;
5809 +        double result;
5810 +        MapReduceEntriesToDoubleTask<K,V> rights, nextRight;
5811 +        MapReduceEntriesToDoubleTask
5812 +            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5813 +             MapReduceEntriesToDoubleTask<K,V> nextRight,
5814 +             ToDoubleFunction<Map.Entry<K,V>> transformer,
5815 +             double basis,
5816 +             DoubleBinaryOperator reducer) {
5817 +            super(p, b, i, f, t); this.nextRight = nextRight;
5818 +            this.transformer = transformer;
5819 +            this.basis = basis; this.reducer = reducer;
5820 +        }
5821 +        public final Double getRawResult() { return result; }
5822 +        public final void compute() {
5823 +            final ToDoubleFunction<Map.Entry<K,V>> transformer;
5824 +            final DoubleBinaryOperator reducer;
5825 +            if ((transformer = this.transformer) != null &&
5826 +                (reducer = this.reducer) != null) {
5827 +                double r = this.basis;
5828 +                for (int i = baseIndex, f, h; batch > 0 &&
5829 +                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5830 +                    addToPendingCount(1);
5831 +                    (rights = new MapReduceEntriesToDoubleTask<K,V>
5832 +                     (this, batch >>>= 1, baseLimit = h, f, tab,
5833 +                      rights, transformer, r, reducer)).fork();
5834 +                }
5835 +                for (Node<K,V> p; (p = advance()) != null; )
5836 +                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p));
5837 +                result = r;
5838 +                CountedCompleter<?> c;
5839 +                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5840 +                    @SuppressWarnings("unchecked")
5841 +                    MapReduceEntriesToDoubleTask<K,V>
5842 +                        t = (MapReduceEntriesToDoubleTask<K,V>)c,
5843 +                        s = t.rights;
5844 +                    while (s != null) {
5845 +                        t.result = reducer.applyAsDouble(t.result, s.result);
5846 +                        s = t.rights = s.nextRight;
5847 +                    }
5848 +                }
5849 +            }
5850 +        }
5851 +    }
5852 +
5853 +    @SuppressWarnings("serial")
5854 +    static final class MapReduceMappingsToDoubleTask<K,V>
5855 +        extends BulkTask<K,V,Double> {
5856 +        final ToDoubleBiFunction<? super K, ? super V> transformer;
5857 +        final DoubleBinaryOperator reducer;
5858 +        final double basis;
5859 +        double result;
5860 +        MapReduceMappingsToDoubleTask<K,V> rights, nextRight;
5861 +        MapReduceMappingsToDoubleTask
5862 +            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5863 +             MapReduceMappingsToDoubleTask<K,V> nextRight,
5864 +             ToDoubleBiFunction<? super K, ? super V> transformer,
5865 +             double basis,
5866 +             DoubleBinaryOperator reducer) {
5867 +            super(p, b, i, f, t); this.nextRight = nextRight;
5868 +            this.transformer = transformer;
5869 +            this.basis = basis; this.reducer = reducer;
5870 +        }
5871 +        public final Double getRawResult() { return result; }
5872 +        public final void compute() {
5873 +            final ToDoubleBiFunction<? super K, ? super V> transformer;
5874 +            final DoubleBinaryOperator reducer;
5875 +            if ((transformer = this.transformer) != null &&
5876 +                (reducer = this.reducer) != null) {
5877 +                double r = this.basis;
5878 +                for (int i = baseIndex, f, h; batch > 0 &&
5879 +                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5880 +                    addToPendingCount(1);
5881 +                    (rights = new MapReduceMappingsToDoubleTask<K,V>
5882 +                     (this, batch >>>= 1, baseLimit = h, f, tab,
5883 +                      rights, transformer, r, reducer)).fork();
5884 +                }
5885 +                for (Node<K,V> p; (p = advance()) != null; )
5886 +                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key, p.val));
5887 +                result = r;
5888 +                CountedCompleter<?> c;
5889 +                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5890 +                    @SuppressWarnings("unchecked")
5891 +                    MapReduceMappingsToDoubleTask<K,V>
5892 +                        t = (MapReduceMappingsToDoubleTask<K,V>)c,
5893 +                        s = t.rights;
5894 +                    while (s != null) {
5895 +                        t.result = reducer.applyAsDouble(t.result, s.result);
5896 +                        s = t.rights = s.nextRight;
5897 +                    }
5898 +                }
5899 +            }
5900 +        }
5901 +    }
5902 +
5903 +    @SuppressWarnings("serial")
5904 +    static final class MapReduceKeysToLongTask<K,V>
5905 +        extends BulkTask<K,V,Long> {
5906 +        final ToLongFunction<? super K> transformer;
5907 +        final LongBinaryOperator reducer;
5908 +        final long basis;
5909 +        long result;
5910 +        MapReduceKeysToLongTask<K,V> rights, nextRight;
5911 +        MapReduceKeysToLongTask
5912 +            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5913 +             MapReduceKeysToLongTask<K,V> nextRight,
5914 +             ToLongFunction<? super K> transformer,
5915 +             long basis,
5916 +             LongBinaryOperator reducer) {
5917 +            super(p, b, i, f, t); this.nextRight = nextRight;
5918 +            this.transformer = transformer;
5919 +            this.basis = basis; this.reducer = reducer;
5920 +        }
5921 +        public final Long getRawResult() { return result; }
5922 +        public final void compute() {
5923 +            final ToLongFunction<? super K> transformer;
5924 +            final LongBinaryOperator reducer;
5925 +            if ((transformer = this.transformer) != null &&
5926 +                (reducer = this.reducer) != null) {
5927 +                long r = this.basis;
5928 +                for (int i = baseIndex, f, h; batch > 0 &&
5929 +                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5930 +                    addToPendingCount(1);
5931 +                    (rights = new MapReduceKeysToLongTask<K,V>
5932 +                     (this, batch >>>= 1, baseLimit = h, f, tab,
5933 +                      rights, transformer, r, reducer)).fork();
5934 +                }
5935 +                for (Node<K,V> p; (p = advance()) != null; )
5936 +                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.key));
5937 +                result = r;
5938 +                CountedCompleter<?> c;
5939 +                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5940 +                    @SuppressWarnings("unchecked")
5941 +                    MapReduceKeysToLongTask<K,V>
5942 +                        t = (MapReduceKeysToLongTask<K,V>)c,
5943 +                        s = t.rights;
5944 +                    while (s != null) {
5945 +                        t.result = reducer.applyAsLong(t.result, s.result);
5946 +                        s = t.rights = s.nextRight;
5947 +                    }
5948 +                }
5949 +            }
5950 +        }
5951 +    }
5952 +
5953 +    @SuppressWarnings("serial")
5954 +    static final class MapReduceValuesToLongTask<K,V>
5955 +        extends BulkTask<K,V,Long> {
5956 +        final ToLongFunction<? super V> transformer;
5957 +        final LongBinaryOperator reducer;
5958 +        final long basis;
5959 +        long result;
5960 +        MapReduceValuesToLongTask<K,V> rights, nextRight;
5961 +        MapReduceValuesToLongTask
5962 +            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5963 +             MapReduceValuesToLongTask<K,V> nextRight,
5964 +             ToLongFunction<? super V> transformer,
5965 +             long basis,
5966 +             LongBinaryOperator reducer) {
5967 +            super(p, b, i, f, t); this.nextRight = nextRight;
5968 +            this.transformer = transformer;
5969 +            this.basis = basis; this.reducer = reducer;
5970 +        }
5971 +        public final Long getRawResult() { return result; }
5972 +        public final void compute() {
5973 +            final ToLongFunction<? super V> transformer;
5974 +            final LongBinaryOperator reducer;
5975 +            if ((transformer = this.transformer) != null &&
5976 +                (reducer = this.reducer) != null) {
5977 +                long r = this.basis;
5978 +                for (int i = baseIndex, f, h; batch > 0 &&
5979 +                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
5980 +                    addToPendingCount(1);
5981 +                    (rights = new MapReduceValuesToLongTask<K,V>
5982 +                     (this, batch >>>= 1, baseLimit = h, f, tab,
5983 +                      rights, transformer, r, reducer)).fork();
5984 +                }
5985 +                for (Node<K,V> p; (p = advance()) != null; )
5986 +                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.val));
5987 +                result = r;
5988 +                CountedCompleter<?> c;
5989 +                for (c = firstComplete(); c != null; c = c.nextComplete()) {
5990 +                    @SuppressWarnings("unchecked")
5991 +                    MapReduceValuesToLongTask<K,V>
5992 +                        t = (MapReduceValuesToLongTask<K,V>)c,
5993 +                        s = t.rights;
5994 +                    while (s != null) {
5995 +                        t.result = reducer.applyAsLong(t.result, s.result);
5996 +                        s = t.rights = s.nextRight;
5997 +                    }
5998 +                }
5999 +            }
6000 +        }
6001 +    }
6002 +
6003 +    @SuppressWarnings("serial")
6004 +    static final class MapReduceEntriesToLongTask<K,V>
6005 +        extends BulkTask<K,V,Long> {
6006 +        final ToLongFunction<Map.Entry<K,V>> transformer;
6007 +        final LongBinaryOperator reducer;
6008 +        final long basis;
6009 +        long result;
6010 +        MapReduceEntriesToLongTask<K,V> rights, nextRight;
6011 +        MapReduceEntriesToLongTask
6012 +            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6013 +             MapReduceEntriesToLongTask<K,V> nextRight,
6014 +             ToLongFunction<Map.Entry<K,V>> transformer,
6015 +             long basis,
6016 +             LongBinaryOperator reducer) {
6017 +            super(p, b, i, f, t); this.nextRight = nextRight;
6018 +            this.transformer = transformer;
6019 +            this.basis = basis; this.reducer = reducer;
6020 +        }
6021 +        public final Long getRawResult() { return result; }
6022 +        public final void compute() {
6023 +            final ToLongFunction<Map.Entry<K,V>> transformer;
6024 +            final LongBinaryOperator reducer;
6025 +            if ((transformer = this.transformer) != null &&
6026 +                (reducer = this.reducer) != null) {
6027 +                long r = this.basis;
6028 +                for (int i = baseIndex, f, h; batch > 0 &&
6029 +                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
6030 +                    addToPendingCount(1);
6031 +                    (rights = new MapReduceEntriesToLongTask<K,V>
6032 +                     (this, batch >>>= 1, baseLimit = h, f, tab,
6033 +                      rights, transformer, r, reducer)).fork();
6034 +                }
6035 +                for (Node<K,V> p; (p = advance()) != null; )
6036 +                    r = reducer.applyAsLong(r, transformer.applyAsLong(p));
6037 +                result = r;
6038 +                CountedCompleter<?> c;
6039 +                for (c = firstComplete(); c != null; c = c.nextComplete()) {
6040 +                    @SuppressWarnings("unchecked")
6041 +                    MapReduceEntriesToLongTask<K,V>
6042 +                        t = (MapReduceEntriesToLongTask<K,V>)c,
6043 +                        s = t.rights;
6044 +                    while (s != null) {
6045 +                        t.result = reducer.applyAsLong(t.result, s.result);
6046 +                        s = t.rights = s.nextRight;
6047 +                    }
6048 +                }
6049 +            }
6050 +        }
6051 +    }
6052 +
6053 +    @SuppressWarnings("serial")
6054 +    static final class MapReduceMappingsToLongTask<K,V>
6055 +        extends BulkTask<K,V,Long> {
6056 +        final ToLongBiFunction<? super K, ? super V> transformer;
6057 +        final LongBinaryOperator reducer;
6058 +        final long basis;
6059 +        long result;
6060 +        MapReduceMappingsToLongTask<K,V> rights, nextRight;
6061 +        MapReduceMappingsToLongTask
6062 +            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6063 +             MapReduceMappingsToLongTask<K,V> nextRight,
6064 +             ToLongBiFunction<? super K, ? super V> transformer,
6065 +             long basis,
6066 +             LongBinaryOperator reducer) {
6067 +            super(p, b, i, f, t); this.nextRight = nextRight;
6068 +            this.transformer = transformer;
6069 +            this.basis = basis; this.reducer = reducer;
6070 +        }
6071 +        public final Long getRawResult() { return result; }
6072 +        public final void compute() {
6073 +            final ToLongBiFunction<? super K, ? super V> transformer;
6074 +            final LongBinaryOperator reducer;
6075 +            if ((transformer = this.transformer) != null &&
6076 +                (reducer = this.reducer) != null) {
6077 +                long r = this.basis;
6078 +                for (int i = baseIndex, f, h; batch > 0 &&
6079 +                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
6080 +                    addToPendingCount(1);
6081 +                    (rights = new MapReduceMappingsToLongTask<K,V>
6082 +                     (this, batch >>>= 1, baseLimit = h, f, tab,
6083 +                      rights, transformer, r, reducer)).fork();
6084 +                }
6085 +                for (Node<K,V> p; (p = advance()) != null; )
6086 +                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.key, p.val));
6087 +                result = r;
6088 +                CountedCompleter<?> c;
6089 +                for (c = firstComplete(); c != null; c = c.nextComplete()) {
6090 +                    @SuppressWarnings("unchecked")
6091 +                    MapReduceMappingsToLongTask<K,V>
6092 +                        t = (MapReduceMappingsToLongTask<K,V>)c,
6093 +                        s = t.rights;
6094 +                    while (s != null) {
6095 +                        t.result = reducer.applyAsLong(t.result, s.result);
6096 +                        s = t.rights = s.nextRight;
6097 +                    }
6098 +                }
6099 +            }
6100 +        }
6101 +    }
6102 +
6103 +    @SuppressWarnings("serial")
6104 +    static final class MapReduceKeysToIntTask<K,V>
6105 +        extends BulkTask<K,V,Integer> {
6106 +        final ToIntFunction<? super K> transformer;
6107 +        final IntBinaryOperator reducer;
6108 +        final int basis;
6109 +        int result;
6110 +        MapReduceKeysToIntTask<K,V> rights, nextRight;
6111 +        MapReduceKeysToIntTask
6112 +            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6113 +             MapReduceKeysToIntTask<K,V> nextRight,
6114 +             ToIntFunction<? super K> transformer,
6115 +             int basis,
6116 +             IntBinaryOperator reducer) {
6117 +            super(p, b, i, f, t); this.nextRight = nextRight;
6118 +            this.transformer = transformer;
6119 +            this.basis = basis; this.reducer = reducer;
6120 +        }
6121 +        public final Integer getRawResult() { return result; }
6122 +        public final void compute() {
6123 +            final ToIntFunction<? super K> transformer;
6124 +            final IntBinaryOperator reducer;
6125 +            if ((transformer = this.transformer) != null &&
6126 +                (reducer = this.reducer) != null) {
6127 +                int r = this.basis;
6128 +                for (int i = baseIndex, f, h; batch > 0 &&
6129 +                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
6130 +                    addToPendingCount(1);
6131 +                    (rights = new MapReduceKeysToIntTask<K,V>
6132 +                     (this, batch >>>= 1, baseLimit = h, f, tab,
6133 +                      rights, transformer, r, reducer)).fork();
6134 +                }
6135 +                for (Node<K,V> p; (p = advance()) != null; )
6136 +                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.key));
6137 +                result = r;
6138 +                CountedCompleter<?> c;
6139 +                for (c = firstComplete(); c != null; c = c.nextComplete()) {
6140 +                    @SuppressWarnings("unchecked")
6141 +                    MapReduceKeysToIntTask<K,V>
6142 +                        t = (MapReduceKeysToIntTask<K,V>)c,
6143 +                        s = t.rights;
6144 +                    while (s != null) {
6145 +                        t.result = reducer.applyAsInt(t.result, s.result);
6146 +                        s = t.rights = s.nextRight;
6147 +                    }
6148 +                }
6149 +            }
6150 +        }
6151 +    }
6152 +
6153 +    @SuppressWarnings("serial")
6154 +    static final class MapReduceValuesToIntTask<K,V>
6155 +        extends BulkTask<K,V,Integer> {
6156 +        final ToIntFunction<? super V> transformer;
6157 +        final IntBinaryOperator reducer;
6158 +        final int basis;
6159 +        int result;
6160 +        MapReduceValuesToIntTask<K,V> rights, nextRight;
6161 +        MapReduceValuesToIntTask
6162 +            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6163 +             MapReduceValuesToIntTask<K,V> nextRight,
6164 +             ToIntFunction<? super V> transformer,
6165 +             int basis,
6166 +             IntBinaryOperator reducer) {
6167 +            super(p, b, i, f, t); this.nextRight = nextRight;
6168 +            this.transformer = transformer;
6169 +            this.basis = basis; this.reducer = reducer;
6170 +        }
6171 +        public final Integer getRawResult() { return result; }
6172 +        public final void compute() {
6173 +            final ToIntFunction<? super V> transformer;
6174 +            final IntBinaryOperator reducer;
6175 +            if ((transformer = this.transformer) != null &&
6176 +                (reducer = this.reducer) != null) {
6177 +                int r = this.basis;
6178 +                for (int i = baseIndex, f, h; batch > 0 &&
6179 +                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
6180 +                    addToPendingCount(1);
6181 +                    (rights = new MapReduceValuesToIntTask<K,V>
6182 +                     (this, batch >>>= 1, baseLimit = h, f, tab,
6183 +                      rights, transformer, r, reducer)).fork();
6184 +                }
6185 +                for (Node<K,V> p; (p = advance()) != null; )
6186 +                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.val));
6187 +                result = r;
6188 +                CountedCompleter<?> c;
6189 +                for (c = firstComplete(); c != null; c = c.nextComplete()) {
6190 +                    @SuppressWarnings("unchecked")
6191 +                    MapReduceValuesToIntTask<K,V>
6192 +                        t = (MapReduceValuesToIntTask<K,V>)c,
6193 +                        s = t.rights;
6194 +                    while (s != null) {
6195 +                        t.result = reducer.applyAsInt(t.result, s.result);
6196 +                        s = t.rights = s.nextRight;
6197 +                    }
6198 +                }
6199 +            }
6200 +        }
6201 +    }
6202 +
6203 +    @SuppressWarnings("serial")
6204 +    static final class MapReduceEntriesToIntTask<K,V>
6205 +        extends BulkTask<K,V,Integer> {
6206 +        final ToIntFunction<Map.Entry<K,V>> transformer;
6207 +        final IntBinaryOperator reducer;
6208 +        final int basis;
6209 +        int result;
6210 +        MapReduceEntriesToIntTask<K,V> rights, nextRight;
6211 +        MapReduceEntriesToIntTask
6212 +            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6213 +             MapReduceEntriesToIntTask<K,V> nextRight,
6214 +             ToIntFunction<Map.Entry<K,V>> transformer,
6215 +             int basis,
6216 +             IntBinaryOperator reducer) {
6217 +            super(p, b, i, f, t); this.nextRight = nextRight;
6218 +            this.transformer = transformer;
6219 +            this.basis = basis; this.reducer = reducer;
6220 +        }
6221 +        public final Integer getRawResult() { return result; }
6222 +        public final void compute() {
6223 +            final ToIntFunction<Map.Entry<K,V>> transformer;
6224 +            final IntBinaryOperator reducer;
6225 +            if ((transformer = this.transformer) != null &&
6226 +                (reducer = this.reducer) != null) {
6227 +                int r = this.basis;
6228 +                for (int i = baseIndex, f, h; batch > 0 &&
6229 +                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
6230 +                    addToPendingCount(1);
6231 +                    (rights = new MapReduceEntriesToIntTask<K,V>
6232 +                     (this, batch >>>= 1, baseLimit = h, f, tab,
6233 +                      rights, transformer, r, reducer)).fork();
6234 +                }
6235 +                for (Node<K,V> p; (p = advance()) != null; )
6236 +                    r = reducer.applyAsInt(r, transformer.applyAsInt(p));
6237 +                result = r;
6238 +                CountedCompleter<?> c;
6239 +                for (c = firstComplete(); c != null; c = c.nextComplete()) {
6240 +                    @SuppressWarnings("unchecked")
6241 +                    MapReduceEntriesToIntTask<K,V>
6242 +                        t = (MapReduceEntriesToIntTask<K,V>)c,
6243 +                        s = t.rights;
6244 +                    while (s != null) {
6245 +                        t.result = reducer.applyAsInt(t.result, s.result);
6246 +                        s = t.rights = s.nextRight;
6247 +                    }
6248 +                }
6249 +            }
6250 +        }
6251 +    }
6252 +
6253 +    @SuppressWarnings("serial")
6254 +    static final class MapReduceMappingsToIntTask<K,V>
6255 +        extends BulkTask<K,V,Integer> {
6256 +        final ToIntBiFunction<? super K, ? super V> transformer;
6257 +        final IntBinaryOperator reducer;
6258 +        final int basis;
6259 +        int result;
6260 +        MapReduceMappingsToIntTask<K,V> rights, nextRight;
6261 +        MapReduceMappingsToIntTask
6262 +            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6263 +             MapReduceMappingsToIntTask<K,V> nextRight,
6264 +             ToIntBiFunction<? super K, ? super V> transformer,
6265 +             int basis,
6266 +             IntBinaryOperator reducer) {
6267 +            super(p, b, i, f, t); this.nextRight = nextRight;
6268 +            this.transformer = transformer;
6269 +            this.basis = basis; this.reducer = reducer;
6270 +        }
6271 +        public final Integer getRawResult() { return result; }
6272 +        public final void compute() {
6273 +            final ToIntBiFunction<? super K, ? super V> transformer;
6274 +            final IntBinaryOperator reducer;
6275 +            if ((transformer = this.transformer) != null &&
6276 +                (reducer = this.reducer) != null) {
6277 +                int r = this.basis;
6278 +                for (int i = baseIndex, f, h; batch > 0 &&
6279 +                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
6280 +                    addToPendingCount(1);
6281 +                    (rights = new MapReduceMappingsToIntTask<K,V>
6282 +                     (this, batch >>>= 1, baseLimit = h, f, tab,
6283 +                      rights, transformer, r, reducer)).fork();
6284 +                }
6285 +                for (Node<K,V> p; (p = advance()) != null; )
6286 +                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.key, p.val));
6287 +                result = r;
6288 +                CountedCompleter<?> c;
6289 +                for (c = firstComplete(); c != null; c = c.nextComplete()) {
6290 +                    @SuppressWarnings("unchecked")
6291 +                    MapReduceMappingsToIntTask<K,V>
6292 +                        t = (MapReduceMappingsToIntTask<K,V>)c,
6293 +                        s = t.rights;
6294 +                    while (s != null) {
6295 +                        t.result = reducer.applyAsInt(t.result, s.result);
6296 +                        s = t.rights = s.nextRight;
6297 +                    }
6298 +                }
6299 +            }
6300 +        }
6301 +    }
6302 +
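The ...ToDouble/...ToLong/...ToInt specializations keep the accumulator in a primitive seeded with basis; since the unchanged basis is also handed to every forked subtask, it is folded in once per leaf and must therefore be an identity for the reducer (0 for sum, MIN_VALUE for max, and so on). Illustrative usage (editor's example, not part of the patch):

    import java.util.concurrent.ConcurrentHashMap;

    public class PrimitiveReduceDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
            map.put("apple", 3);
            map.put("banana", 11);

            // MapReduceValuesToLongTask: no boxing on the reduction path.
            long total = map.reduceValuesToLong(1L, Integer::longValue,
                                                0L, Long::sum);
            System.out.println(total);    // 14

            // MapReduceKeysToIntTask: 0 is a valid identity for max over
            // non-negative string lengths.
            int longest = map.reduceKeysToInt(1L, String::length,
                                              0, Integer::max);
            System.out.println(longest);  // 6
        }
    }
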
6303 +    // Unsafe mechanics
6304 +    private static final sun.misc.Unsafe U = sun.misc.Unsafe.getUnsafe();
6305 +    private static final long SIZECTL;
6306 +    private static final long TRANSFERINDEX;
6307 +    private static final long BASECOUNT;
6308 +    private static final long CELLSBUSY;
6309 +    private static final long CELLVALUE;
6310 +    private static final int ABASE;
6311 +    private static final int ASHIFT;
6312 +
6313 +    static {
6314 +        try {
6315 +            SIZECTL = U.objectFieldOffset
6316 +                (ConcurrentHashMap.class.getDeclaredField("sizeCtl"));
6317 +            TRANSFERINDEX = U.objectFieldOffset
6318 +                (ConcurrentHashMap.class.getDeclaredField("transferIndex"));
6319 +            BASECOUNT = U.objectFieldOffset
6320 +                (ConcurrentHashMap.class.getDeclaredField("baseCount"));
6321 +            CELLSBUSY = U.objectFieldOffset
6322 +                (ConcurrentHashMap.class.getDeclaredField("cellsBusy"));
6323 +
6324 +            CELLVALUE = U.objectFieldOffset
6325 +                (CounterCell.class.getDeclaredField("value"));
6326 +
6327 +            ABASE = U.arrayBaseOffset(Node[].class);
6328 +            int scale = U.arrayIndexScale(Node[].class);
6329 +            if ((scale & (scale - 1)) != 0)
6330 +                throw new Error("array index scale not a power of two");
6331 +            ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
6332 +        } catch (ReflectiveOperationException e) {
6333 +            throw new Error(e);
6334 +        }
6335 +
6336 +        // Reduce the risk of rare disastrous classloading in first call to
6337 +        // LockSupport.park: https://bugs.openjdk.java.net/browse/JDK-8074773
6338 +        Class<?> ensureLoaded = LockSupport.class;
6339 +    }
6340 + }
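
The static initializer above derives ASHIFT as log2 of the array index scale, so that slot i of the table can be addressed as ((long)i << ASHIFT) + ABASE by the volatile tabAt/casTabAt accessors defined earlier in the file. A standalone sketch of that arithmetic (editor's example), assuming illustrative header and scale values; the real numbers come from Unsafe at runtime:

    public class OffsetDemo {
        public static void main(String[] args) {
            int abase = 16;   // assumed offset of element 0 past the header
            int scale = 8;    // assumed bytes per reference element

            // Same computation as the static block: a power-of-two scale
            // turns the per-element multiply into a shift.
            if ((scale & (scale - 1)) != 0)
                throw new Error("array index scale not a power of two");
            int ashift = 31 - Integer.numberOfLeadingZeros(scale);  // 3

            int i = 5;
            long offset = ((long) i << ashift) + abase;
            System.out.println(offset);   // 16 + 5*8 = 56
        }
    }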

Diff Legend

- Removed lines
+ Added lines
< Changed lines (old revision)
> Changed lines (new revision)