root/jsr166/jsr166/src/main/java/util/concurrent/ConcurrentHashMap.java
Revision: 1.148
Committed: Sat Dec 8 14:10:42 2012 UTC (11 years, 5 months ago) by dl
Branch: MAIN
Changes since 1.147: +19 -16 lines
Log Message:
fix typo; improve tied-hash processing

File Contents

1 /*
2 * Written by Doug Lea with assistance from members of JCP JSR-166
3 * Expert Group and released to the public domain, as explained at
4 * http://creativecommons.org/publicdomain/zero/1.0/
5 */
6
7 package java.util.concurrent;
8 import java.util.concurrent.atomic.LongAdder;
9 import java.util.concurrent.ForkJoinPool;
10 import java.util.concurrent.CountedCompleter;
11
12 import java.util.Comparator;
13 import java.util.Arrays;
14 import java.util.Map;
15 import java.util.Set;
16 import java.util.Collection;
17 import java.util.AbstractMap;
18 import java.util.AbstractSet;
19 import java.util.AbstractCollection;
20 import java.util.Hashtable;
21 import java.util.HashMap;
22 import java.util.Iterator;
23 import java.util.Enumeration;
24 import java.util.ConcurrentModificationException;
25 import java.util.NoSuchElementException;
26 import java.util.concurrent.ConcurrentMap;
27 import java.util.concurrent.ThreadLocalRandom;
28 import java.util.concurrent.locks.LockSupport;
29 import java.util.concurrent.locks.AbstractQueuedSynchronizer;
30 import java.util.concurrent.atomic.AtomicReference;
31
32 import java.io.Serializable;
33
34 /**
35 * A hash table supporting full concurrency of retrievals and
36 * high expected concurrency for updates. This class obeys the
37 * same functional specification as {@link java.util.Hashtable}, and
38 * includes versions of methods corresponding to each method of
39 * {@code Hashtable}. However, even though all operations are
40 * thread-safe, retrieval operations do <em>not</em> entail locking,
41 * and there is <em>not</em> any support for locking the entire table
42 * in a way that prevents all access. This class is fully
43 * interoperable with {@code Hashtable} in programs that rely on its
44 * thread safety but not on its synchronization details.
45 *
46 * <p>Retrieval operations (including {@code get}) generally do not
47 * block, so may overlap with update operations (including {@code put}
48 * and {@code remove}). Retrievals reflect the results of the most
49 * recently <em>completed</em> update operations holding upon their
50 * onset. (More formally, an update operation for a given key bears a
51 * <em>happens-before</em> relation with any (non-null) retrieval for
52 * that key reporting the updated value.) For aggregate operations
53 * such as {@code putAll} and {@code clear}, concurrent retrievals may
54 * reflect insertion or removal of only some entries. Similarly,
55 * Iterators and Enumerations return elements reflecting the state of
56 * the hash table at some point at or since the creation of the
57 * iterator/enumeration. They do <em>not</em> throw {@link
58 * ConcurrentModificationException}. However, iterators are designed
59 * to be used by only one thread at a time. Bear in mind that the
60 * results of aggregate status methods including {@code size}, {@code
61 * isEmpty}, and {@code containsValue} are typically useful only when
62 * a map is not undergoing concurrent updates in other threads.
63 * Otherwise the results of these methods reflect transient states
64 * that may be adequate for monitoring or estimation purposes, but not
65 * for program control.
66 *
67 * <p>The table is dynamically expanded when there are too many
68 * collisions (i.e., keys that have distinct hash codes but fall into
69 * the same slot modulo the table size), with the expected average
70 * effect of maintaining roughly two bins per mapping (corresponding
71 * to a 0.75 load factor threshold for resizing). There may be much
72 * variance around this average as mappings are added and removed, but
73 * overall, this maintains a commonly accepted time/space tradeoff for
74 * hash tables. However, resizing this or any other kind of hash
75 * table may be a relatively slow operation. When possible, it is a
76 * good idea to provide a size estimate as an optional {@code
77 * initialCapacity} constructor argument. An additional optional
78 * {@code loadFactor} constructor argument provides a further means of
79 * customizing initial table capacity by specifying the table density
80 * to be used in calculating the amount of space to allocate for the
81 * given number of elements. Also, for compatibility with previous
82 * versions of this class, constructors may optionally specify an
83 * expected {@code concurrencyLevel} as an additional hint for
84 * internal sizing. Note that using many keys with exactly the same
85 * {@code hashCode()} is a sure way to slow down performance of any
86 * hash table.
87 *
88 * <p>A {@link Set} projection of a ConcurrentHashMap may be created
89 * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
90 * (using {@link #keySet(Object)}) when only keys are of interest, and the
91 * mapped values are (perhaps transiently) not used or all take the
92 * same mapping value.
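A minimal sketch of the two projections, assuming the JDK 8 forms of newKeySet and keySet(Object) (the method names match the links above; the demo class name is illustrative):

    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    class KeySetDemo {
        public static void main(String[] args) {
            // A standalone concurrent Set backed by a ConcurrentHashMap.
            Set<String> created = ConcurrentHashMap.newKeySet();
            created.add("alpha");

            // A live key view over an existing map; additions use the given default value.
            ConcurrentHashMap<String, Boolean> map = new ConcurrentHashMap<>();
            Set<String> view = map.keySet(Boolean.TRUE);
            view.add("beta");                 // same effect as map.put("beta", Boolean.TRUE)
            System.out.println(map);          // {beta=true}
        }
    }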
93 *
94 * <p>A ConcurrentHashMap can be used as scalable frequency map (a
95 * form of histogram or multiset) by using {@link LongAdder} values
96 * and initializing via {@link #computeIfAbsent}. For example, to add
97 * a count to a {@code ConcurrentHashMap<String,LongAdder> freqs}, you
98 * can use {@code freqs.computeIfAbsent(k, k -> new
99 * LongAdder()).increment();}
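A self-contained sketch of this idiom, written with JDK 8 lambda syntax (under the transitional jsr166e API the mapping function is a nested Fun interface instead; FreqDemo is an illustrative name):

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.LongAdder;

    class FreqDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, LongAdder> freqs = new ConcurrentHashMap<>();
            for (String w : new String[] { "a", "b", "a" })
                freqs.computeIfAbsent(w, k -> new LongAdder()).increment();
            // Reads are safe during concurrent updates; sums reflect completed increments.
            freqs.forEach((k, v) -> System.out.println(k + " = " + v.sum()));
        }
    }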
100 *
101 * <p>This class and its views and iterators implement all of the
102 * <em>optional</em> methods of the {@link Map} and {@link Iterator}
103 * interfaces.
104 *
105 * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
106 * does <em>not</em> allow {@code null} to be used as a key or value.
107 *
108 * <p>ConcurrentHashMaps support parallel operations using the {@link
109 * ForkJoinPool#commonPool}. (Tasks that may be used in other contexts
110 * are available in class {@link ForkJoinTasks}). These operations are
111 * designed to be safely, and often sensibly, applied even with maps
112 * that are being concurrently updated by other threads; for example,
113 * when computing a snapshot summary of the values in a shared
114 * registry. There are three kinds of operation, each with four
115 * forms, accepting functions with Keys, Values, Entries, and (Key,
116 * Value) arguments and/or return values. (The first three forms are
117 * also available via the {@link #keySet()}, {@link #values()} and
118 * {@link #entrySet()} views). Because the elements of a
119 * ConcurrentHashMap are not ordered in any particular way, and may be
120 * processed in different orders in different parallel executions, the
121 * correctness of supplied functions should not depend on any
122 * ordering, or on any other objects or values that may transiently
123 * change while computation is in progress; and except for forEach
124 * actions, should ideally be side-effect-free.
125 *
126 * <ul>
127 * <li> forEach: Perform a given action on each element.
128 * A variant form applies a given transformation on each element
129 * before performing the action.</li>
130 *
131 * <li> search: Return the first available non-null result of
132 * applying a given function on each element; skipping further
133 * search when a result is found.</li>
134 *
135 * <li> reduce: Accumulate each element. The supplied reduction
136 * function cannot rely on ordering (more formally, it should be
137 * both associative and commutative). There are five variants:
138 *
139 * <ul>
140 *
141 * <li> Plain reductions. (There is not a form of this method for
142 * (key, value) function arguments since there is no corresponding
143 * return type.)</li>
144 *
145 * <li> Mapped reductions that accumulate the results of a given
146 * function applied to each element.</li>
147 *
148 * <li> Reductions to scalar doubles, longs, and ints, using a
149 * given basis value.</li>
150 *
151 *
152 * </ul>
153 * </ul>
154 *
155 * <p>The concurrency properties of bulk operations follow
156 * from those of ConcurrentHashMap: Any non-null result returned
157 * from {@code get(key)} and related access methods bears a
158 * happens-before relation with the associated insertion or
159 * update. The result of any bulk operation reflects the
160 * composition of these per-element relations (but is not
161 * necessarily atomic with respect to the map as a whole unless it
162 * is somehow known to be quiescent). Conversely, because keys
163 * and values in the map are never null, null serves as a reliable
164 * atomic indicator of the current lack of any result. To
165 * maintain this property, null serves as an implicit basis for
166 * all non-scalar reduction operations. For the double, long, and
167 * int versions, the basis should be one that, when combined with
168 * any other value, returns that other value (more formally, it
169 * should be the identity element for the reduction). Most common
170 * reductions have these properties; for example, computing a sum
171 * with basis 0 or a minimum with basis MAX_VALUE.
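As a concrete sketch of such identity bases, here are two scalar reductions in the JDK 8 form of this API (which adds a leading parallelism-threshold argument; the transitional jsr166e signatures differ): a sum with basis 0 and a minimum with basis Long.MAX_VALUE.

    import java.util.concurrent.ConcurrentHashMap;

    class ReduceDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Long> m = new ConcurrentHashMap<>();
            m.put("x", 3L); m.put("y", 5L);

            // Basis 0 is the identity for addition.
            long sum = m.reduceValuesToLong(1L, Long::longValue, 0L, Long::sum);

            // Basis MAX_VALUE is the identity for min.
            long min = m.reduceValuesToLong(1L, Long::longValue, Long.MAX_VALUE, Math::min);

            System.out.println(sum + " " + min);  // 8 3
        }
    }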
172 *
173 * <p>Search and transformation functions provided as arguments
174 * should similarly return null to indicate the lack of any result
175 * (in which case it is not used). In the case of mapped
176 * reductions, this also enables transformations to serve as
177 * filters, returning null (or, in the case of primitive
178 * specializations, the identity basis) if the element should not
179 * be combined. You can create compound transformations and
180 * filterings by composing them yourself under this "null means
181 * there is nothing there now" rule before using them in search or
182 * reduce operations.
183 *
184 * <p>Methods accepting and/or returning Entry arguments maintain
185 * key-value associations. They may be useful for example when
186 * finding the key for the greatest value. Note that "plain" Entry
187 * arguments can be supplied using {@code new
188 * AbstractMap.SimpleEntry(k,v)}.
189 *
190 * <p>Bulk operations may complete abruptly, throwing an
191 * exception encountered in the application of a supplied
192 * function. Bear in mind when handling such exceptions that other
193 * concurrently executing functions could also have thrown
194 * exceptions, or would have done so if the first exception had
195 * not occurred.
196 *
197 * <p>Parallel speedups for bulk operations compared to sequential
198 * processing are common but not guaranteed. Operations involving
199 * brief functions on small maps may execute more slowly than
200 * sequential loops if the underlying work to parallelize the
201 * computation is more expensive than the computation itself.
202 * Similarly, parallelization may not lead to much actual parallelism
203 * if all processors are busy performing unrelated tasks.
204 *
205 * <p>All arguments to all task methods must be non-null.
206 *
207 * <p><em>jsr166e note: During transition, this class
208 * uses nested functional interfaces with different names but the
209 * same forms as those expected for JDK8.</em>
210 *
211 * <p>This class is a member of the
212 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
213 * Java Collections Framework</a>.
214 *
215 * @since 1.5
216 * @author Doug Lea
217 * @param <K> the type of keys maintained by this map
218 * @param <V> the type of mapped values
219 */
220 public class ConcurrentHashMap<K, V>
221 implements ConcurrentMap<K, V>, Serializable {
222 private static final long serialVersionUID = 7249069246763182397L;
223
224 /**
225 * A partitionable iterator. A Spliterator can be traversed
226 * directly, but can also be partitioned (before traversal) by
227 * creating another Spliterator that covers a non-overlapping
228 * portion of the elements, and so may be amenable to parallel
229 * execution.
230 *
231 * <p>This interface exports a subset of expected JDK8
232 * functionality.
233 *
234 * <p>Sample usage: Here is one (of the several) ways to compute
235 * the sum of the values held in a map using the ForkJoin
236 * framework. As illustrated here, Spliterators are well suited to
237 * designs in which a task repeatedly splits off half its work
238 * into forked subtasks until small enough to process directly,
239 * and then joins these subtasks. Variants of this style can also
240 * be used in completion-based designs.
241 *
242 * <pre>
243 * {@code ConcurrentHashMap<String, Long> m = ...
244 * // split as if have 8 * parallelism, for load balance
245 * int n = m.size();
246 * int p = aForkJoinPool.getParallelism() * 8;
247 * int split = (n < p)? n : p;
248 * long sum = aForkJoinPool.invoke(new SumValues(m.valueSpliterator(), split, null));
249 * // ...
250 * static class SumValues extends RecursiveTask<Long> {
251 * final Spliterator<Long> s;
252 * final int split; // split while > 1
253 * final SumValues nextJoin; // records forked subtasks to join
254 * SumValues(Spliterator<Long> s, int split, SumValues nextJoin) {
255 * this.s = s; this.split = split; this.nextJoin = nextJoin;
256 * }
257 * public Long compute() {
258 * long sum = 0;
259 * SumValues subtasks = null; // fork subtasks
260 * for (int sp = split >>> 1; sp > 0; sp >>>= 1)
261 * (subtasks = new SumValues(s.split(), sp, subtasks)).fork();
262 * while (s.hasNext()) // directly process remaining elements
263 * sum += s.next();
264 * for (SumValues t = subtasks; t != null; t = t.nextJoin)
265 * sum += t.join(); // collect subtask results
266 * return sum;
267 * }
268 * }
269 * }</pre>
270 */
271 public static interface Spliterator<T> extends Iterator<T> {
272 /**
273 * Returns a Spliterator covering approximately half of the
274 * elements, guaranteed not to overlap with those subsequently
275 * returned by this Spliterator. After invoking this method,
276 * the current Spliterator will <em>not</em> produce any of
277 * the elements of the returned Spliterator, but the two
278 * Spliterators together will produce all of the elements that
279 * would have been produced by this Spliterator had this
280 * method not been called. The exact number of elements
281 * produced by the returned Spliterator is not guaranteed, and
282 * may be zero (i.e., with {@code hasNext()} reporting {@code
283 * false}) if this Spliterator cannot be further split.
284 *
285 * @return a Spliterator covering approximately half of the
286 * elements
287 * @throws IllegalStateException if this Spliterator has
288 * already commenced traversing elements
289 */
290 Spliterator<T> split();
291 }
292
293
294 /*
295 * Overview:
296 *
297 * The primary design goal of this hash table is to maintain
298 * concurrent readability (typically method get(), but also
299 * iterators and related methods) while minimizing update
300 * contention. Secondary goals are to keep space consumption about
301 * the same or better than java.util.HashMap, and to support high
302 * initial insertion rates on an empty table by many threads.
303 *
304 * Each key-value mapping is held in a Node. Because Node fields
305 * can contain special values, they are defined using plain Object
306 * types. Similarly in turn, all internal methods that use them
307 * work off Object types. And similarly, so do the internal
308 * methods of auxiliary iterator and view classes. All public
309 * generic typed methods relay in/out of these internal methods,
310 * supplying null-checks and casts as needed. This also allows
311 * many of the public methods to be factored into a smaller number
312 * of internal methods (although sadly not so for the five
313 * variants of put-related operations). The validation-based
314 * approach explained below leads to a lot of code sprawl because
315 * retry-control precludes factoring into smaller methods.
316 *
317 * The table is lazily initialized to a power-of-two size upon the
318 * first insertion. Each bin in the table normally contains a
319 * list of Nodes (most often, the list has only zero or one Node).
320 * Table accesses require volatile/atomic reads, writes, and
321 * CASes. Because there is no other way to arrange this without
322 * adding further indirections, we use intrinsics
323 * (sun.misc.Unsafe) operations. The lists of nodes within bins
324 * are always accurately traversable under volatile reads, so long
325 * as lookups check hash code and non-nullness of value before
326 * checking key equality.
327 *
328 * We use the top two bits of Node hash fields for control
329 * purposes -- they are available anyway because of addressing
330 * constraints. As explained further below, these top bits are
331 * used as follows:
332 * 00 - Normal
333 * 01 - Locked
334 * 11 - Locked and may have a thread waiting for lock
335 * 10 - Node is a forwarding node
336 *
337 * The lower 30 bits of each Node's hash field contain a
338 * transformation of the key's hash code, except for forwarding
339 * nodes, for which the lower bits are zero (and so always have
340 * hash field == MOVED).
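A small sketch of this encoding, using the constant values defined later in this file (the helper names are illustrative only):

    class HashBits {
        static final int MOVED     = 0x80000000; // 10 - forwarding node (lower bits zero)
        static final int LOCKED    = 0x40000000; // 01 - bin head locked
        static final int WAITING   = 0xc0000000; // 11 - locked, with a thread waiting
        static final int HASH_BITS = 0x3fffffff; // lower 30 bits: spread key hash

        static boolean isForwarding(int h) { return h == MOVED; }
        static boolean isLocked(int h)     { return (h & LOCKED) != 0; }
        static boolean hasWaiter(int h)    { return (h & WAITING) == WAITING; }
        static int     keyHash(int h)      { return h & HASH_BITS; }
    }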
341 *
342 * Insertion (via put or its variants) of the first node in an
343 * empty bin is performed by just CASing it to the bin. This is
344 * by far the most common case for put operations under most
345 * key/hash distributions. Other update operations (insert,
346 * delete, and replace) require locks. We do not want to waste
347 * the space required to associate a distinct lock object with
348 * each bin, so instead use the first node of a bin list itself as
349 * a lock. Blocking support for these locks relies on the builtin
350 * "synchronized" monitors. However, we also need a tryLock
351 * construction, so we overlay these by using bits of the Node
352 * hash field for lock control (see above), and so normally use
353 * builtin monitors only for blocking and signalling using
354 * wait/notifyAll constructions. See Node.tryAwaitLock.
355 *
356 * Using the first node of a list as a lock does not by itself
357 * suffice though: When a node is locked, any update must first
358 * validate that it is still the first node after locking it, and
359 * retry if not. Because new nodes are always appended to lists,
360 * once a node is first in a bin, it remains first until deleted
361 * or the bin becomes invalidated (upon resizing). However,
362 * operations that only conditionally update may inspect nodes
363 * until the point of update. This is a converse of sorts to the
364 * lazy locking technique described by Herlihy & Shavit.
365 *
366 * The main disadvantage of per-bin locks is that other update
367 * operations on other nodes in a bin list protected by the same
368 * lock can stall, for example when user equals() or mapping
369 * functions take a long time. However, statistically, under
370 * random hash codes, this is not a common problem. Ideally, the
371 * frequency of nodes in bins follows a Poisson distribution
372 * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
373 * parameter of about 0.5 on average, given the resizing threshold
374 * of 0.75, although with a large variance because of resizing
375 * granularity. Ignoring variance, the expected occurrences of
376 * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
377 * first values are:
378 *
379 * 0: 0.60653066
380 * 1: 0.30326533
381 * 2: 0.07581633
382 * 3: 0.01263606
383 * 4: 0.00157952
384 * 5: 0.00015795
385 * 6: 0.00001316
386 * 7: 0.00000094
387 * 8: 0.00000006
388 * more: less than 1 in ten million
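These figures follow directly from the Poisson formula above; a short sketch that reproduces the table:

    class PoissonBins {
        public static void main(String[] args) {
            double lambda = 0.5;
            double p = Math.exp(-lambda);         // k = 0 term: exp(-0.5)
            for (int k = 0; k <= 8; k++) {
                System.out.printf("%d: %.8f%n", k, p);
                p = p * lambda / (k + 1);         // next term of the series
            }
        }
    }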
389 *
390 * Lock contention probability for two threads accessing distinct
391 * elements is roughly 1 / (8 * #elements) under random hashes.
392 *
393 * Actual hash code distributions encountered in practice
394 * sometimes deviate significantly from uniform randomness. This
395 * includes the case when N > (1<<30), so some keys MUST collide.
396 * Similarly for dumb or hostile usages in which multiple keys are
397 * designed to have identical hash codes. Also, although we guard
398 * against the worst effects of this (see method spread), sets of
399 * hashes may differ only in bits that do not impact their bin
400 * index for a given power-of-two mask. So we use a secondary
401 * strategy that applies when the number of nodes in a bin exceeds
402 * a threshold, and at least one of the keys implements
403 * Comparable. These TreeBins use a balanced tree to hold nodes
404 * (a specialized form of red-black trees), bounding search time
405 * to O(log N). Each search step in a TreeBin is around twice as
406 * slow as in a regular list, but given that N cannot exceed
407 * (1<<64) (before running out of addresses) this bounds search
408 * steps, lock hold times, etc, to reasonable constants (roughly
409 * 100 nodes inspected per operation worst case) so long as keys
410 * are Comparable (which is very common -- String, Long, etc).
411 * TreeBin nodes (TreeNodes) also maintain the same "next"
412 * traversal pointers as regular nodes, so can be traversed in
413 * iterators in the same way.
414 *
415 * The table is resized when occupancy exceeds a percentage
416 * threshold (nominally, 0.75, but see below). Only a single
417 * thread performs the resize (using field "sizeCtl", to arrange
418 * exclusion), but the table otherwise remains usable for reads
419 * and updates. Resizing proceeds by transferring bins, one by
420 * one, from the table to the next table. Because we are using
421 * power-of-two expansion, the elements from each bin must either
422 * stay at same index, or move with a power of two offset. We
423 * eliminate unnecessary node creation by catching cases where old
424 * nodes can be reused because their next fields won't change. On
425 * average, only about one-sixth of them need cloning when a table
426 * doubles. The nodes they replace will be garbage collectable as
427 * soon as they are no longer referenced by any reader thread that
428 * may be in the midst of concurrently traversing table. Upon
429 * transfer, the old table bin contains only a special forwarding
430 * node (with hash field "MOVED") that contains the next table as
431 * its key. On encountering a forwarding node, access and update
432 * operations restart, using the new table.
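The index arithmetic behind "stay at same index, or move with a power of two offset" can be sketched as follows (an illustrative helper, not part of the class):

    class Reindex {
        // When a table doubles from oldLen to 2 * oldLen, a single bit of the
        // spread hash decides whether a node keeps its index or moves up by oldLen.
        static int newIndex(int hash, int oldLen) {
            int i = hash & (oldLen - 1);          // index in the old table
            return (hash & oldLen) == 0 ? i : i + oldLen;
        }
    }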
433 *
434 * Each bin transfer requires its bin lock. However, unlike other
435 * cases, a transfer can skip a bin if it fails to acquire its
436 * lock, and revisit it later (unless it is a TreeBin). Method
437 * rebuild maintains a buffer of TRANSFER_BUFFER_SIZE bins that
438 * have been skipped because of failure to acquire a lock, and
439 * blocks only if none are available (i.e., only very rarely).
440 * The transfer operation must also ensure that all accessible
441 * bins in both the old and new table are usable by any traversal.
442 * When there are no lock acquisition failures, this is arranged
443 * simply by proceeding from the last bin (table.length - 1) up
444 * towards the first. Upon seeing a forwarding node, traversals
445 * (see class Iter) arrange to move to the new table
446 * without revisiting nodes. However, when any node is skipped
447 * during a transfer, all earlier table bins may have become
448 * visible, so are initialized with a reverse-forwarding node back
449 * to the old table until the new ones are established. (This
450 * sometimes requires transiently locking a forwarding node, which
451 * is possible under the above encoding.) These more expensive
452 * mechanics trigger only when necessary.
453 *
454 * The traversal scheme also applies to partial traversals of
455 * ranges of bins (via an alternate Traverser constructor)
456 * to support partitioned aggregate operations. Also, read-only
457 * operations give up if ever forwarded to a null table, which
458 * provides support for shutdown-style clearing, which is also not
459 * currently implemented.
460 *
461 * Lazy table initialization minimizes footprint until first use,
462 * and also avoids resizings when the first operation is from a
463 * putAll, constructor with map argument, or deserialization.
464 * These cases attempt to override the initial capacity settings,
465 * but harmlessly fail to take effect in cases of races.
466 *
467 * The element count is maintained using a LongAdder, which avoids
468 * contention on updates but can encounter cache thrashing if read
469 * too frequently during concurrent access. To avoid reading so
470 * often, resizing is attempted either when a bin lock is
471 * contended, or upon adding to a bin already holding two or more
472 * nodes (checked before adding in the xIfAbsent methods, after
473 * adding in others). Under uniform hash distributions, the
474 * probability of this occurring at threshold is around 13%,
475 * meaning that only about 1 in 8 puts check threshold (and after
476 * resizing, many fewer do so). But this approximation has high
477 * variance for small table sizes, so we check on any collision
478 * for sizes <= 64. The bulk putAll operation further reduces
479 * contention by only committing count updates upon these size
480 * checks.
481 *
482 * Maintaining API and serialization compatibility with previous
483 * versions of this class introduces several oddities. Mainly: We
484 * leave untouched but unused constructor arguments referring to
485 * concurrencyLevel. We accept a loadFactor constructor argument,
486 * but apply it only to initial table capacity (which is the only
487 * time that we can guarantee to honor it.) We also declare an
488 * unused "Segment" class that is instantiated in minimal form
489 * only when serializing.
490 */
491
492 /* ---------------- Constants -------------- */
493
494 /**
495 * The largest possible table capacity. This value must be
496 * exactly 1<<30 to stay within Java array allocation and indexing
497 * bounds for power of two table sizes, and is further required
498 * because the top two bits of 32bit hash fields are used for
499 * control purposes.
500 */
501 private static final int MAXIMUM_CAPACITY = 1 << 30;
502
503 /**
504 * The default initial table capacity. Must be a power of 2
505 * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
506 */
507 private static final int DEFAULT_CAPACITY = 16;
508
509 /**
510 * The largest possible (non-power of two) array size.
511 * Needed by toArray and related methods.
512 */
513 static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
514
515 /**
516 * The default concurrency level for this table. Unused but
517 * defined for compatibility with previous versions of this class.
518 */
519 private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
520
521 /**
522 * The load factor for this table. Overrides of this value in
523 * constructors affect only the initial table capacity. The
524 * actual floating point value isn't normally used -- it is
525 * simpler to use expressions such as {@code n - (n >>> 2)} for
526 * the associated resizing threshold.
527 */
528 private static final float LOAD_FACTOR = 0.75f;
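For example, using the expression above, a table of capacity 16 is resized once it holds 12 elements; a sketch of the threshold computation (the helper name is illustrative):

    // n - n/4 == 0.75 * n, computed without floating point; e.g. 16 -> 12.
    static int resizeThreshold(int n) {
        return n - (n >>> 2);
    }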
529
530 /**
531 * The buffer size for skipped bins during transfers. The
532 * value is arbitrary but should be large enough to avoid
533 * most locking stalls during resizes.
534 */
535 private static final int TRANSFER_BUFFER_SIZE = 32;
536
537 /**
538 * The bin count threshold for using a tree rather than list for a
539 * bin. The value reflects the approximate break-even point for
540 * using tree-based operations.
541 */
542 private static final int TREE_THRESHOLD = 8;
543
544 /*
545 * Encodings for special uses of Node hash fields. See above for
546 * explanation.
547 */
548 static final int MOVED = 0x80000000; // hash field for forwarding nodes
549 static final int LOCKED = 0x40000000; // set/tested only as a bit
550 static final int WAITING = 0xc0000000; // both bits set/tested together
551 static final int HASH_BITS = 0x3fffffff; // usable bits of normal node hash
552
553 /* ---------------- Fields -------------- */
554
555 /**
556 * The array of bins. Lazily initialized upon first insertion.
557 * Size is always a power of two. Accessed directly by iterators.
558 */
559 transient volatile Node[] table;
560
561 /**
562 * The counter maintaining number of elements.
563 */
564 private transient final LongAdder counter;
565
566 /**
567 * Table initialization and resizing control. When negative, the
568 * table is being initialized or resized. Otherwise, when table is
569 * null, holds the initial table size to use upon creation, or 0
570 * for default. After initialization, holds the next element count
571 * value upon which to resize the table.
572 */
573 private transient volatile int sizeCtl;
574
575 // views
576 private transient KeySetView<K,V> keySet;
577 private transient ValuesView<K,V> values;
578 private transient EntrySetView<K,V> entrySet;
579
580 /** For serialization compatibility. Null unless serialized; see below */
581 private Segment<K,V>[] segments;
582
583 /* ---------------- Table element access -------------- */
584
585 /*
586 * Volatile access methods are used for table elements as well as
587 * elements of in-progress next table while resizing. Uses are
588 * null checked by callers, and implicitly bounds-checked, relying
589 * on the invariants that tab arrays have non-zero size, and all
590 * indices are masked with (tab.length - 1) which is never
591 * negative and always less than length. Note that, to be correct
592 * wrt arbitrary concurrency errors by users, bounds checks must
593 * operate on local variables, which accounts for some odd-looking
594 * inline assignments below.
595 */
596
597 static final Node tabAt(Node[] tab, int i) { // used by Iter
598 return (Node)UNSAFE.getObjectVolatile(tab, ((long)i<<ASHIFT)+ABASE);
599 }
600
601 private static final boolean casTabAt(Node[] tab, int i, Node c, Node v) {
602 return UNSAFE.compareAndSwapObject(tab, ((long)i<<ASHIFT)+ABASE, c, v);
603 }
604
605 private static final void setTabAt(Node[] tab, int i, Node v) {
606 UNSAFE.putObjectVolatile(tab, ((long)i<<ASHIFT)+ABASE, v);
607 }
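Conceptually (though not in implementation), these three accessors behave like the corresponding operations of an AtomicReferenceArray; a sketch of the equivalence, with illustrative names:

    import java.util.concurrent.atomic.AtomicReferenceArray;

    class VolatileBins<T> {
        final AtomicReferenceArray<T> bins;
        VolatileBins(int n) { bins = new AtomicReferenceArray<>(n); }
        T tabAt(int i)                    { return bins.get(i); }               // volatile read
        boolean casTabAt(int i, T c, T v) { return bins.compareAndSet(i, c, v); }
        void setTabAt(int i, T v)         { bins.set(i, v); }                   // volatile write
    }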
608
609 /* ---------------- Nodes -------------- */
610
611 /**
612 * Key-value entry. Note that this is never exported out as a
613 * user-visible Map.Entry (see MapEntry below). Nodes with a hash
614 * field of MOVED are special, and do not contain user keys or
615 * values. Otherwise, keys are never null, and null val fields
616 * indicate that a node is in the process of being deleted or
617 * created. For purposes of read-only access, a key may be read
618 * before a val, but can only be used after checking val to be
619 * non-null.
620 */
621 static class Node {
622 volatile int hash;
623 final Object key;
624 volatile Object val;
625 volatile Node next;
626
627 Node(int hash, Object key, Object val, Node next) {
628 this.hash = hash;
629 this.key = key;
630 this.val = val;
631 this.next = next;
632 }
633
634 /** CompareAndSet the hash field */
635 final boolean casHash(int cmp, int val) {
636 return UNSAFE.compareAndSwapInt(this, hashOffset, cmp, val);
637 }
638
639 /** The number of spins before blocking for a lock */
640 static final int MAX_SPINS =
641 Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1;
642
643 /**
644 * Spins a while if LOCKED bit set and this node is the first
645 * of its bin, and then sets WAITING bits on hash field and
646 * blocks (once) if they are still set. It is OK for this
647 * method to return even if lock is not available upon exit,
648 * which enables these simple single-wait mechanics.
649 *
650 * The corresponding signalling operation is performed within
651 * callers: Upon detecting that WAITING has been set when
652 * unlocking lock (via a failed CAS from non-waiting LOCKED
653 * state), unlockers acquire the sync lock and perform a
654 * notifyAll.
655 *
656 * The initial sanity check on tab and bounds is not currently
657 * necessary in the only usages of this method, but enables
658 * use in other future contexts.
659 */
660 final void tryAwaitLock(Node[] tab, int i) {
661 if (tab != null && i >= 0 && i < tab.length) { // sanity check
662 int r = ThreadLocalRandom.current().nextInt(); // randomize spins
663 int spins = MAX_SPINS, h;
664 while (tabAt(tab, i) == this && ((h = hash) & LOCKED) != 0) {
665 if (spins >= 0) {
666 r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift
667 if (r >= 0 && --spins == 0)
668 Thread.yield(); // yield before block
669 }
670 else if (casHash(h, h | WAITING)) {
671 synchronized (this) {
672 if (tabAt(tab, i) == this &&
673 (hash & WAITING) == WAITING) {
674 try {
675 wait();
676 } catch (InterruptedException ie) {
677 try {
678 Thread.currentThread().interrupt();
679 } catch (SecurityException ignore) {
680 }
681 }
682 }
683 else
684 notifyAll(); // possibly won race vs signaller
685 }
686 break;
687 }
688 }
689 }
690 }
691
692 // Unsafe mechanics for casHash
693 private static final sun.misc.Unsafe UNSAFE;
694 private static final long hashOffset;
695
696 static {
697 try {
698 UNSAFE = sun.misc.Unsafe.getUnsafe();
699 Class<?> k = Node.class;
700 hashOffset = UNSAFE.objectFieldOffset
701 (k.getDeclaredField("hash"));
702 } catch (Exception e) {
703 throw new Error(e);
704 }
705 }
706 }
707
708 /* ---------------- TreeBins -------------- */
709
710 /**
711 * Nodes for use in TreeBins
712 */
713 static final class TreeNode extends Node {
714 TreeNode parent; // red-black tree links
715 TreeNode left;
716 TreeNode right;
717 TreeNode prev; // needed to unlink next upon deletion
718 boolean red;
719
720 TreeNode(int hash, Object key, Object val, Node next, TreeNode parent) {
721 super(hash, key, val, next);
722 this.parent = parent;
723 }
724 }
725
726 /**
727 * A specialized form of red-black tree for use in bins
728 * whose size exceeds a threshold.
729 *
730 * TreeBins use a special form of comparison for search and
731 * related operations (which is the main reason we cannot use
732 * existing collections such as TreeMaps). TreeBins contain
733 * Comparable elements, but may contain others, as well as
734 * elements that are Comparable but not necessarily Comparable<T>
735 * for the same T, so we cannot invoke compareTo among them. To
736 * handle this, the tree is ordered primarily by hash value, then
737 * by getClass().getName() order, and then by Comparator order
738 * among elements of the same class. On lookup at a node, if
739 * elements are not comparable or compare as 0, both left and
740 * right children may need to be searched in the case of tied hash
741 * values. (This corresponds to the full list search that would be
742 * necessary if all elements were non-Comparable and had tied
743 * hashes.) The red-black balancing code is updated from
744 * pre-jdk-collections
745 * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
746 * based in turn on Cormen, Leiserson, and Rivest "Introduction to
747 * Algorithms" (CLR).
748 *
749 * TreeBins also maintain a separate locking discipline than
750 * regular bins. Because they are forwarded via special MOVED
751 * nodes at bin heads (which can never change once established),
752 * we cannot use those nodes as locks. Instead, TreeBin
753 * extends AbstractQueuedSynchronizer to support a simple form of
754 * read-write lock. For update operations and table validation,
755 * the exclusive form of lock behaves in the same way as bin-head
756 * locks. However, lookups use shared read-lock mechanics to allow
757 * multiple readers in the absence of writers. Additionally,
758 * these lookups do not ever block: While the lock is not
759 * available, they proceed along the slow traversal path (via
760 * next-pointers) until the lock becomes available or the list is
761 * exhausted, whichever comes first. (These cases are not fast,
762 * but maximize aggregate expected throughput.) The AQS mechanics
763 * for doing this are straightforward. The lock state is held as
764 * AQS getState(). Read counts are negative; the write count (1)
765 * is positive. There are no signalling preferences among readers
766 * and writers. Since we don't need to export full Lock API, we
767 * just override the minimal AQS methods and use them directly.
768 */
769 static final class TreeBin extends AbstractQueuedSynchronizer {
770 private static final long serialVersionUID = 2249069246763182397L;
771 transient TreeNode root; // root of tree
772 transient TreeNode first; // head of next-pointer list
773
774 /* AQS overrides */
775 public final boolean isHeldExclusively() { return getState() > 0; }
776 public final boolean tryAcquire(int ignore) {
777 if (compareAndSetState(0, 1)) {
778 setExclusiveOwnerThread(Thread.currentThread());
779 return true;
780 }
781 return false;
782 }
783 public final boolean tryRelease(int ignore) {
784 setExclusiveOwnerThread(null);
785 setState(0);
786 return true;
787 }
788 public final int tryAcquireShared(int ignore) {
789 for (int c;;) {
790 if ((c = getState()) > 0)
791 return -1;
792 if (compareAndSetState(c, c - 1))
793 return 1;
794 }
795 }
796 public final boolean tryReleaseShared(int ignore) {
797 int c;
798 do {} while (!compareAndSetState(c = getState(), c + 1));
799 return c == -1;
800 }
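A sketch of how these overrides are used elsewhere in this file: updates hold the exclusive lock via acquire/release around tree operations, while readers either CAS the shared count directly (as getValue does below) or fall back to list traversal. Illustrative only:

    // Mirrors the acquire(0)/release(0) pattern used by the internal put/replace
    // methods when a bin has been converted to a TreeBin.
    static <T> T updateLocked(TreeBin t, java.util.concurrent.Callable<T> action) throws Exception {
        t.acquire(0);             // exclusive write lock (state 0 -> 1)
        try {
            return action.call();
        } finally {
            t.release(0);         // state back to 0; unparks any waiting threads
        }
    }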
801
802 /** From CLR */
803 private void rotateLeft(TreeNode p) {
804 if (p != null) {
805 TreeNode r = p.right, pp, rl;
806 if ((rl = p.right = r.left) != null)
807 rl.parent = p;
808 if ((pp = r.parent = p.parent) == null)
809 root = r;
810 else if (pp.left == p)
811 pp.left = r;
812 else
813 pp.right = r;
814 r.left = p;
815 p.parent = r;
816 }
817 }
818
819 /** From CLR */
820 private void rotateRight(TreeNode p) {
821 if (p != null) {
822 TreeNode l = p.left, pp, lr;
823 if ((lr = p.left = l.right) != null)
824 lr.parent = p;
825 if ((pp = l.parent = p.parent) == null)
826 root = l;
827 else if (pp.right == p)
828 pp.right = l;
829 else
830 pp.left = l;
831 l.right = p;
832 p.parent = l;
833 }
834 }
835
836 /**
837 * Returns the TreeNode (or null if not found) for the given key
838 * starting at given root.
839 */
840 @SuppressWarnings("unchecked") final TreeNode getTreeNode
841 (int h, Object k, TreeNode p) {
842 Class<?> c = k.getClass();
843 while (p != null) {
844 int dir, ph; Object pk; Class<?> pc;
845 if ((ph = p.hash) == h) {
846 if ((pk = p.key) == k || k.equals(pk))
847 return p;
848 if (c != (pc = pk.getClass()) ||
849 !(k instanceof Comparable) ||
850 (dir = ((Comparable)k).compareTo((Comparable)pk)) == 0) {
851 if ((dir = (c == pc) ? 0 :
852 c.getName().compareTo(pc.getName())) == 0) {
853 TreeNode r = null, pl, pr; // check both sides
854 if ((pr = p.right) != null && h >= pr.hash &&
855 (r = getTreeNode(h, k, pr)) != null)
856 return r;
857 else if ((pl = p.left) != null && h <= pl.hash)
858 dir = -1;
859 else // nothing there
860 return null;
861 }
862 }
863 }
864 else
865 dir = (h < ph) ? -1 : 1;
866 p = (dir > 0) ? p.right : p.left;
867 }
868 return null;
869 }
870
871 /**
872 * Wrapper for getTreeNode used by CHM.get. Tries to obtain
873 * read-lock to call getTreeNode, but during failure to get
874 * lock, searches along next links.
875 */
876 final Object getValue(int h, Object k) {
877 Node r = null;
878 int c = getState(); // Must read lock state first
879 for (Node e = first; e != null; e = e.next) {
880 if (c <= 0 && compareAndSetState(c, c - 1)) {
881 try {
882 r = getTreeNode(h, k, root);
883 } finally {
884 releaseShared(0);
885 }
886 break;
887 }
888 else if ((e.hash & HASH_BITS) == h && k.equals(e.key)) {
889 r = e;
890 break;
891 }
892 else
893 c = getState();
894 }
895 return r == null ? null : r.val;
896 }
897
898 /**
899 * Finds or adds a node.
900 * @return null if added
901 */
902 @SuppressWarnings("unchecked") final TreeNode putTreeNode
903 (int h, Object k, Object v) {
904 Class<?> c = k.getClass();
905 TreeNode pp = root, p = null;
906 int dir = 0;
907 while (pp != null) { // find existing node or leaf to insert at
908 int ph; Object pk; Class<?> pc;
909 p = pp;
910 if ((ph = p.hash) == h) {
911 if ((pk = p.key) == k || k.equals(pk))
912 return p;
913 if (c != (pc = pk.getClass()) ||
914 !(k instanceof Comparable) ||
915 (dir = ((Comparable)k).compareTo((Comparable)pk)) == 0) {
916 TreeNode s = null, r = null, pr;
917 if ((dir = (c == pc) ? 0 :
918 c.getName().compareTo(pc.getName())) == 0) {
919 if ((pr = p.right) != null && h >= pr.hash &&
920 (r = getTreeNode(h, k, pr)) != null)
921 return r;
922 else // continue left
923 dir = -1;
924 }
925 else if ((pr = p.right) != null && h >= pr.hash)
926 s = pr;
927 if (s != null && (r = getTreeNode(h, k, s)) != null)
928 return r;
929 }
930 }
931 else
932 dir = (h < ph) ? -1 : 1;
933 pp = (dir > 0) ? p.right : p.left;
934 }
935
936 TreeNode f = first;
937 TreeNode x = first = new TreeNode(h, k, v, f, p);
938 if (p == null)
939 root = x;
940 else { // attach and rebalance; adapted from CLR
941 TreeNode xp, xpp;
942 if (f != null)
943 f.prev = x;
944 if (dir <= 0)
945 p.left = x;
946 else
947 p.right = x;
948 x.red = true;
949 while (x != null && (xp = x.parent) != null && xp.red &&
950 (xpp = xp.parent) != null) {
951 TreeNode xppl = xpp.left;
952 if (xp == xppl) {
953 TreeNode y = xpp.right;
954 if (y != null && y.red) {
955 y.red = false;
956 xp.red = false;
957 xpp.red = true;
958 x = xpp;
959 }
960 else {
961 if (x == xp.right) {
962 rotateLeft(x = xp);
963 xpp = (xp = x.parent) == null ? null : xp.parent;
964 }
965 if (xp != null) {
966 xp.red = false;
967 if (xpp != null) {
968 xpp.red = true;
969 rotateRight(xpp);
970 }
971 }
972 }
973 }
974 else {
975 TreeNode y = xppl;
976 if (y != null && y.red) {
977 y.red = false;
978 xp.red = false;
979 xpp.red = true;
980 x = xpp;
981 }
982 else {
983 if (x == xp.left) {
984 rotateRight(x = xp);
985 xpp = (xp = x.parent) == null ? null : xp.parent;
986 }
987 if (xp != null) {
988 xp.red = false;
989 if (xpp != null) {
990 xpp.red = true;
991 rotateLeft(xpp);
992 }
993 }
994 }
995 }
996 }
997 TreeNode r = root;
998 if (r != null && r.red)
999 r.red = false;
1000 }
1001 return null;
1002 }
1003
1004 /**
1005 * Removes the given node, that must be present before this
1006 * call. This is messier than typical red-black deletion code
1007 * because we cannot swap the contents of an interior node
1008 * with a leaf successor that is pinned by "next" pointers
1009 * that are accessible independently of lock. So instead we
1010 * swap the tree linkages.
1011 */
1012 final void deleteTreeNode(TreeNode p) {
1013 TreeNode next = (TreeNode)p.next; // unlink traversal pointers
1014 TreeNode pred = p.prev;
1015 if (pred == null)
1016 first = next;
1017 else
1018 pred.next = next;
1019 if (next != null)
1020 next.prev = pred;
1021 TreeNode replacement;
1022 TreeNode pl = p.left;
1023 TreeNode pr = p.right;
1024 if (pl != null && pr != null) {
1025 TreeNode s = pr, sl;
1026 while ((sl = s.left) != null) // find successor
1027 s = sl;
1028 boolean c = s.red; s.red = p.red; p.red = c; // swap colors
1029 TreeNode sr = s.right;
1030 TreeNode pp = p.parent;
1031 if (s == pr) { // p was s's direct parent
1032 p.parent = s;
1033 s.right = p;
1034 }
1035 else {
1036 TreeNode sp = s.parent;
1037 if ((p.parent = sp) != null) {
1038 if (s == sp.left)
1039 sp.left = p;
1040 else
1041 sp.right = p;
1042 }
1043 if ((s.right = pr) != null)
1044 pr.parent = s;
1045 }
1046 p.left = null;
1047 if ((p.right = sr) != null)
1048 sr.parent = p;
1049 if ((s.left = pl) != null)
1050 pl.parent = s;
1051 if ((s.parent = pp) == null)
1052 root = s;
1053 else if (p == pp.left)
1054 pp.left = s;
1055 else
1056 pp.right = s;
1057 replacement = sr;
1058 }
1059 else
1060 replacement = (pl != null) ? pl : pr;
1061 TreeNode pp = p.parent;
1062 if (replacement == null) {
1063 if (pp == null) {
1064 root = null;
1065 return;
1066 }
1067 replacement = p;
1068 }
1069 else {
1070 replacement.parent = pp;
1071 if (pp == null)
1072 root = replacement;
1073 else if (p == pp.left)
1074 pp.left = replacement;
1075 else
1076 pp.right = replacement;
1077 p.left = p.right = p.parent = null;
1078 }
1079 if (!p.red) { // rebalance, from CLR
1080 TreeNode x = replacement;
1081 while (x != null) {
1082 TreeNode xp, xpl;
1083 if (x.red || (xp = x.parent) == null) {
1084 x.red = false;
1085 break;
1086 }
1087 if (x == (xpl = xp.left)) {
1088 TreeNode sib = xp.right;
1089 if (sib != null && sib.red) {
1090 sib.red = false;
1091 xp.red = true;
1092 rotateLeft(xp);
1093 sib = (xp = x.parent) == null ? null : xp.right;
1094 }
1095 if (sib == null)
1096 x = xp;
1097 else {
1098 TreeNode sl = sib.left, sr = sib.right;
1099 if ((sr == null || !sr.red) &&
1100 (sl == null || !sl.red)) {
1101 sib.red = true;
1102 x = xp;
1103 }
1104 else {
1105 if (sr == null || !sr.red) {
1106 if (sl != null)
1107 sl.red = false;
1108 sib.red = true;
1109 rotateRight(sib);
1110 sib = (xp = x.parent) == null ? null : xp.right;
1111 }
1112 if (sib != null) {
1113 sib.red = (xp == null) ? false : xp.red;
1114 if ((sr = sib.right) != null)
1115 sr.red = false;
1116 }
1117 if (xp != null) {
1118 xp.red = false;
1119 rotateLeft(xp);
1120 }
1121 x = root;
1122 }
1123 }
1124 }
1125 else { // symmetric
1126 TreeNode sib = xpl;
1127 if (sib != null && sib.red) {
1128 sib.red = false;
1129 xp.red = true;
1130 rotateRight(xp);
1131 sib = (xp = x.parent) == null ? null : xp.left;
1132 }
1133 if (sib == null)
1134 x = xp;
1135 else {
1136 TreeNode sl = sib.left, sr = sib.right;
1137 if ((sl == null || !sl.red) &&
1138 (sr == null || !sr.red)) {
1139 sib.red = true;
1140 x = xp;
1141 }
1142 else {
1143 if (sl == null || !sl.red) {
1144 if (sr != null)
1145 sr.red = false;
1146 sib.red = true;
1147 rotateLeft(sib);
1148 sib = (xp = x.parent) == null ? null : xp.left;
1149 }
1150 if (sib != null) {
1151 sib.red = (xp == null) ? false : xp.red;
1152 if ((sl = sib.left) != null)
1153 sl.red = false;
1154 }
1155 if (xp != null) {
1156 xp.red = false;
1157 rotateRight(xp);
1158 }
1159 x = root;
1160 }
1161 }
1162 }
1163 }
1164 }
1165 if (p == replacement && (pp = p.parent) != null) {
1166 if (p == pp.left) // detach pointers
1167 pp.left = null;
1168 else if (p == pp.right)
1169 pp.right = null;
1170 p.parent = null;
1171 }
1172 }
1173 }
1174
1175 /* ---------------- Collision reduction methods -------------- */
1176
1177 /**
1178 * Spreads higher bits to lower, and also forces top 2 bits to 0.
1179 * Because the table uses power-of-two masking, sets of hashes
1180 * that vary only in bits above the current mask will always
1181 * collide. (Among known examples are sets of Float keys holding
1182 * consecutive whole numbers in small tables.) To counter this,
1183 * we apply a transform that spreads the impact of higher bits
1184 * downward. There is a tradeoff between speed, utility, and
1185 * quality of bit-spreading. Because many common sets of hashes
1186 * are already reasonably distributed across bits (so don't benefit
1187 * from spreading), and because we use trees to handle large sets
1188 * of collisions in bins, we don't need excessively high quality.
1189 */
1190 private static final int spread(int h) {
1191 h ^= (h >>> 18) ^ (h >>> 12);
1192 return (h ^ (h >>> 10)) & HASH_BITS;
1193 }
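For reference, a sketch of how a spread hash selects a bin in a power-of-two table (binIndex is an illustrative helper; the class inlines this masking at each use):

    static int spreadSketch(int h) {
        h ^= (h >>> 18) ^ (h >>> 12);
        return (h ^ (h >>> 10)) & 0x3fffffff;     // HASH_BITS: top two bits forced to zero
    }

    static int binIndex(Object key, int tableLength) {
        // tableLength is always a power of two, so the mask is a cheap modulus.
        return (tableLength - 1) & spreadSketch(key.hashCode());
    }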
1194
1195 /**
1196 * Replaces a list bin with a tree bin. Call only when locked.
1197 * Fails to replace if the given key is non-comparable or the table
1198 * is being, or needs to be, resized.
1199 */
1200 private final void replaceWithTreeBin(Node[] tab, int index, Object key) {
1201 if ((key instanceof Comparable) &&
1202 (tab.length >= MAXIMUM_CAPACITY || counter.sum() < (long)sizeCtl)) {
1203 TreeBin t = new TreeBin();
1204 for (Node e = tabAt(tab, index); e != null; e = e.next)
1205 t.putTreeNode(e.hash & HASH_BITS, e.key, e.val);
1206 setTabAt(tab, index, new Node(MOVED, t, null, null));
1207 }
1208 }
1209
1210 /* ---------------- Internal access and update methods -------------- */
1211
1212 /** Implementation for get and containsKey */
1213 private final Object internalGet(Object k) {
1214 int h = spread(k.hashCode());
1215 retry: for (Node[] tab = table; tab != null;) {
1216 Node e, p; Object ek, ev; int eh; // locals to read fields once
1217 for (e = tabAt(tab, (tab.length - 1) & h); e != null; e = e.next) {
1218 if ((eh = e.hash) == MOVED) {
1219 if ((ek = e.key) instanceof TreeBin) // search TreeBin
1220 return ((TreeBin)ek).getValue(h, k);
1221 else { // restart with new table
1222 tab = (Node[])ek;
1223 continue retry;
1224 }
1225 }
1226 else if ((eh & HASH_BITS) == h && (ev = e.val) != null &&
1227 ((ek = e.key) == k || k.equals(ek)))
1228 return ev;
1229 }
1230 break;
1231 }
1232 return null;
1233 }
1234
1235 /**
1236 * Implementation for the four public remove/replace methods:
1237 * Replaces node value with v, conditional upon match of cv if
1238 * non-null. If resulting value is null, delete.
1239 */
1240 private final Object internalReplace(Object k, Object v, Object cv) {
1241 int h = spread(k.hashCode());
1242 Object oldVal = null;
1243 for (Node[] tab = table;;) {
1244 Node f; int i, fh; Object fk;
1245 if (tab == null ||
1246 (f = tabAt(tab, i = (tab.length - 1) & h)) == null)
1247 break;
1248 else if ((fh = f.hash) == MOVED) {
1249 if ((fk = f.key) instanceof TreeBin) {
1250 TreeBin t = (TreeBin)fk;
1251 boolean validated = false;
1252 boolean deleted = false;
1253 t.acquire(0);
1254 try {
1255 if (tabAt(tab, i) == f) {
1256 validated = true;
1257 TreeNode p = t.getTreeNode(h, k, t.root);
1258 if (p != null) {
1259 Object pv = p.val;
1260 if (cv == null || cv == pv || cv.equals(pv)) {
1261 oldVal = pv;
1262 if ((p.val = v) == null) {
1263 deleted = true;
1264 t.deleteTreeNode(p);
1265 }
1266 }
1267 }
1268 }
1269 } finally {
1270 t.release(0);
1271 }
1272 if (validated) {
1273 if (deleted)
1274 counter.add(-1L);
1275 break;
1276 }
1277 }
1278 else
1279 tab = (Node[])fk;
1280 }
1281 else if ((fh & HASH_BITS) != h && f.next == null) // precheck
1282 break; // rules out possible existence
1283 else if ((fh & LOCKED) != 0) {
1284 checkForResize(); // try resizing if can't get lock
1285 f.tryAwaitLock(tab, i);
1286 }
1287 else if (f.casHash(fh, fh | LOCKED)) {
1288 boolean validated = false;
1289 boolean deleted = false;
1290 try {
1291 if (tabAt(tab, i) == f) {
1292 validated = true;
1293 for (Node e = f, pred = null;;) {
1294 Object ek, ev;
1295 if ((e.hash & HASH_BITS) == h &&
1296 ((ev = e.val) != null) &&
1297 ((ek = e.key) == k || k.equals(ek))) {
1298 if (cv == null || cv == ev || cv.equals(ev)) {
1299 oldVal = ev;
1300 if ((e.val = v) == null) {
1301 deleted = true;
1302 Node en = e.next;
1303 if (pred != null)
1304 pred.next = en;
1305 else
1306 setTabAt(tab, i, en);
1307 }
1308 }
1309 break;
1310 }
1311 pred = e;
1312 if ((e = e.next) == null)
1313 break;
1314 }
1315 }
1316 } finally {
1317 if (!f.casHash(fh | LOCKED, fh)) {
1318 f.hash = fh;
1319 synchronized (f) { f.notifyAll(); };
1320 }
1321 }
1322 if (validated) {
1323 if (deleted)
1324 counter.add(-1L);
1325 break;
1326 }
1327 }
1328 }
1329 return oldVal;
1330 }
1331
1332 /*
1333 * Internal versions of the six insertion methods, each a
1334 * little more complicated than the last. All have
1335 * the same basic structure as the first (internalPut):
1336 * 1. If table uninitialized, create
1337 * 2. If bin empty, try to CAS new node
1338 * 3. If bin stale, use new table
1339 * 4. if bin converted to TreeBin, validate and relay to TreeBin methods
1340 * 5. Lock and validate; if valid, scan and add or update
1341 *
1342 * The others interweave other checks and/or alternative actions:
1343 * * Plain put checks for and performs resize after insertion.
1344 * * putIfAbsent prescans for mapping without lock (and fails to add
1345 * if present), which also makes pre-emptive resize checks worthwhile.
1346 * * computeIfAbsent extends form used in putIfAbsent with additional
1347 * mechanics to deal with calls, potential exceptions, and null
1348 * returns from the function call.
1349 * * compute uses the same function-call mechanics, but without
1350 * the prescans.
1351 * * merge acts as putIfAbsent in the absent case, but invokes the
1352 * update function if present.
1353 * * putAll attempts to pre-allocate enough table space
1354 * and more lazily performs count updates and checks.
1355 *
1356 * Someday when details settle down a bit more, it might be worth
1357 * some factoring to reduce sprawl.
1358 */
1359
1360 /** Implementation for put */
1361 private final Object internalPut(Object k, Object v) {
1362 int h = spread(k.hashCode());
1363 int count = 0;
1364 for (Node[] tab = table;;) {
1365 int i; Node f; int fh; Object fk;
1366 if (tab == null)
1367 tab = initTable();
1368 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1369 if (casTabAt(tab, i, null, new Node(h, k, v, null)))
1370 break; // no lock when adding to empty bin
1371 }
1372 else if ((fh = f.hash) == MOVED) {
1373 if ((fk = f.key) instanceof TreeBin) {
1374 TreeBin t = (TreeBin)fk;
1375 Object oldVal = null;
1376 t.acquire(0);
1377 try {
1378 if (tabAt(tab, i) == f) {
1379 count = 2;
1380 TreeNode p = t.putTreeNode(h, k, v);
1381 if (p != null) {
1382 oldVal = p.val;
1383 p.val = v;
1384 }
1385 }
1386 } finally {
1387 t.release(0);
1388 }
1389 if (count != 0) {
1390 if (oldVal != null)
1391 return oldVal;
1392 break;
1393 }
1394 }
1395 else
1396 tab = (Node[])fk;
1397 }
1398 else if ((fh & LOCKED) != 0) {
1399 checkForResize();
1400 f.tryAwaitLock(tab, i);
1401 }
1402 else if (f.casHash(fh, fh | LOCKED)) {
1403 Object oldVal = null;
1404 try { // needed in case equals() throws
1405 if (tabAt(tab, i) == f) {
1406 count = 1;
1407 for (Node e = f;; ++count) {
1408 Object ek, ev;
1409 if ((e.hash & HASH_BITS) == h &&
1410 (ev = e.val) != null &&
1411 ((ek = e.key) == k || k.equals(ek))) {
1412 oldVal = ev;
1413 e.val = v;
1414 break;
1415 }
1416 Node last = e;
1417 if ((e = e.next) == null) {
1418 last.next = new Node(h, k, v, null);
1419 if (count >= TREE_THRESHOLD)
1420 replaceWithTreeBin(tab, i, k);
1421 break;
1422 }
1423 }
1424 }
1425 } finally { // unlock and signal if needed
1426 if (!f.casHash(fh | LOCKED, fh)) {
1427 f.hash = fh;
1428 synchronized (f) { f.notifyAll(); };
1429 }
1430 }
1431 if (count != 0) {
1432 if (oldVal != null)
1433 return oldVal;
1434 if (tab.length <= 64)
1435 count = 2;
1436 break;
1437 }
1438 }
1439 }
1440 counter.add(1L);
1441 if (count > 1)
1442 checkForResize();
1443 return null;
1444 }
1445
1446 /** Implementation for putIfAbsent */
1447 private final Object internalPutIfAbsent(Object k, Object v) {
1448 int h = spread(k.hashCode());
1449 int count = 0;
1450 for (Node[] tab = table;;) {
1451 int i; Node f; int fh; Object fk, fv;
1452 if (tab == null)
1453 tab = initTable();
1454 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1455 if (casTabAt(tab, i, null, new Node(h, k, v, null)))
1456 break;
1457 }
1458 else if ((fh = f.hash) == MOVED) {
1459 if ((fk = f.key) instanceof TreeBin) {
1460 TreeBin t = (TreeBin)fk;
1461 Object oldVal = null;
1462 t.acquire(0);
1463 try {
1464 if (tabAt(tab, i) == f) {
1465 count = 2;
1466 TreeNode p = t.putTreeNode(h, k, v);
1467 if (p != null)
1468 oldVal = p.val;
1469 }
1470 } finally {
1471 t.release(0);
1472 }
1473 if (count != 0) {
1474 if (oldVal != null)
1475 return oldVal;
1476 break;
1477 }
1478 }
1479 else
1480 tab = (Node[])fk;
1481 }
1482 else if ((fh & HASH_BITS) == h && (fv = f.val) != null &&
1483 ((fk = f.key) == k || k.equals(fk)))
1484 return fv;
1485 else {
1486 Node g = f.next;
1487 if (g != null) { // at least 2 nodes -- search and maybe resize
1488 for (Node e = g;;) {
1489 Object ek, ev;
1490 if ((e.hash & HASH_BITS) == h && (ev = e.val) != null &&
1491 ((ek = e.key) == k || k.equals(ek)))
1492 return ev;
1493 if ((e = e.next) == null) {
1494 checkForResize();
1495 break;
1496 }
1497 }
1498 }
1499 if (((fh = f.hash) & LOCKED) != 0) {
1500 checkForResize();
1501 f.tryAwaitLock(tab, i);
1502 }
1503 else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) {
1504 Object oldVal = null;
1505 try {
1506 if (tabAt(tab, i) == f) {
1507 count = 1;
1508 for (Node e = f;; ++count) {
1509 Object ek, ev;
1510 if ((e.hash & HASH_BITS) == h &&
1511 (ev = e.val) != null &&
1512 ((ek = e.key) == k || k.equals(ek))) {
1513 oldVal = ev;
1514 break;
1515 }
1516 Node last = e;
1517 if ((e = e.next) == null) {
1518 last.next = new Node(h, k, v, null);
1519 if (count >= TREE_THRESHOLD)
1520 replaceWithTreeBin(tab, i, k);
1521 break;
1522 }
1523 }
1524 }
1525 } finally {
1526 if (!f.casHash(fh | LOCKED, fh)) {
1527 f.hash = fh;
1528 synchronized (f) { f.notifyAll(); };
1529 }
1530 }
1531 if (count != 0) {
1532 if (oldVal != null)
1533 return oldVal;
1534 if (tab.length <= 64)
1535 count = 2;
1536 break;
1537 }
1538 }
1539 }
1540 }
1541 counter.add(1L);
1542 if (count > 1)
1543 checkForResize();
1544 return null;
1545 }
1546
1547 /** Implementation for computeIfAbsent */
1548 private final Object internalComputeIfAbsent(K k,
1549 Fun<? super K, ?> mf) {
1550 int h = spread(k.hashCode());
1551 Object val = null;
1552 int count = 0;
1553 for (Node[] tab = table;;) {
1554 Node f; int i, fh; Object fk, fv;
1555 if (tab == null)
1556 tab = initTable();
1557 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1558 Node node = new Node(fh = h | LOCKED, k, null, null);
1559 if (casTabAt(tab, i, null, node)) {
1560 count = 1;
1561 try {
1562 if ((val = mf.apply(k)) != null)
1563 node.val = val;
1564 } finally {
1565 if (val == null)
1566 setTabAt(tab, i, null);
1567 if (!node.casHash(fh, h)) {
1568 node.hash = h;
1569 synchronized (node) { node.notifyAll(); };
1570 }
1571 }
1572 }
1573 if (count != 0)
1574 break;
1575 }
1576 else if ((fh = f.hash) == MOVED) {
1577 if ((fk = f.key) instanceof TreeBin) {
1578 TreeBin t = (TreeBin)fk;
1579 boolean added = false;
1580 t.acquire(0);
1581 try {
1582 if (tabAt(tab, i) == f) {
1583 count = 1;
1584 TreeNode p = t.getTreeNode(h, k, t.root);
1585 if (p != null)
1586 val = p.val;
1587 else if ((val = mf.apply(k)) != null) {
1588 added = true;
1589 count = 2;
1590 t.putTreeNode(h, k, val);
1591 }
1592 }
1593 } finally {
1594 t.release(0);
1595 }
1596 if (count != 0) {
1597 if (!added)
1598 return val;
1599 break;
1600 }
1601 }
1602 else
1603 tab = (Node[])fk;
1604 }
1605 else if ((fh & HASH_BITS) == h && (fv = f.val) != null &&
1606 ((fk = f.key) == k || k.equals(fk)))
1607 return fv;
1608 else {
1609 Node g = f.next;
1610 if (g != null) {
1611 for (Node e = g;;) {
1612 Object ek, ev;
1613 if ((e.hash & HASH_BITS) == h && (ev = e.val) != null &&
1614 ((ek = e.key) == k || k.equals(ek)))
1615 return ev;
1616 if ((e = e.next) == null) {
1617 checkForResize();
1618 break;
1619 }
1620 }
1621 }
1622 if (((fh = f.hash) & LOCKED) != 0) {
1623 checkForResize();
1624 f.tryAwaitLock(tab, i);
1625 }
1626 else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) {
1627 boolean added = false;
1628 try {
1629 if (tabAt(tab, i) == f) {
1630 count = 1;
1631 for (Node e = f;; ++count) {
1632 Object ek, ev;
1633 if ((e.hash & HASH_BITS) == h &&
1634 (ev = e.val) != null &&
1635 ((ek = e.key) == k || k.equals(ek))) {
1636 val = ev;
1637 break;
1638 }
1639 Node last = e;
1640 if ((e = e.next) == null) {
1641 if ((val = mf.apply(k)) != null) {
1642 added = true;
1643 last.next = new Node(h, k, val, null);
1644 if (count >= TREE_THRESHOLD)
1645 replaceWithTreeBin(tab, i, k);
1646 }
1647 break;
1648 }
1649 }
1650 }
1651 } finally {
1652 if (!f.casHash(fh | LOCKED, fh)) {
1653 f.hash = fh;
1654 synchronized (f) { f.notifyAll(); };
1655 }
1656 }
1657 if (count != 0) {
1658 if (!added)
1659 return val;
1660 if (tab.length <= 64)
1661 count = 2;
1662 break;
1663 }
1664 }
1665 }
1666 }
1667 if (val != null) {
1668 counter.add(1L);
1669 if (count > 1)
1670 checkForResize();
1671 }
1672 return val;
1673 }
1674
1675 /** Implementation for compute */
1676 @SuppressWarnings("unchecked") private final Object internalCompute
1677 (K k, boolean onlyIfPresent, BiFun<? super K, ? super V, ? extends V> mf) {
1678 int h = spread(k.hashCode());
1679 Object val = null;
1680 int delta = 0;
1681 int count = 0;
1682 for (Node[] tab = table;;) {
1683 Node f; int i, fh; Object fk;
1684 if (tab == null)
1685 tab = initTable();
1686 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1687 if (onlyIfPresent)
1688 break;
1689 Node node = new Node(fh = h | LOCKED, k, null, null);
1690 if (casTabAt(tab, i, null, node)) {
1691 try {
1692 count = 1;
1693 if ((val = mf.apply(k, null)) != null) {
1694 node.val = val;
1695 delta = 1;
1696 }
1697 } finally {
1698 if (delta == 0)
1699 setTabAt(tab, i, null);
1700 if (!node.casHash(fh, h)) {
1701 node.hash = h;
1702 synchronized (node) { node.notifyAll(); };
1703 }
1704 }
1705 }
1706 if (count != 0)
1707 break;
1708 }
1709 else if ((fh = f.hash) == MOVED) {
1710 if ((fk = f.key) instanceof TreeBin) {
1711 TreeBin t = (TreeBin)fk;
1712 t.acquire(0);
1713 try {
1714 if (tabAt(tab, i) == f) {
1715 count = 1;
1716 TreeNode p = t.getTreeNode(h, k, t.root);
1717 Object pv = (p == null) ? null : p.val;
1718 if ((val = mf.apply(k, (V)pv)) != null) {
1719 if (p != null)
1720 p.val = val;
1721 else {
1722 count = 2;
1723 delta = 1;
1724 t.putTreeNode(h, k, val);
1725 }
1726 }
1727 else if (p != null) {
1728 delta = -1;
1729 t.deleteTreeNode(p);
1730 }
1731 }
1732 } finally {
1733 t.release(0);
1734 }
1735 if (count != 0)
1736 break;
1737 }
1738 else
1739 tab = (Node[])fk;
1740 }
1741 else if ((fh & LOCKED) != 0) {
1742 checkForResize();
1743 f.tryAwaitLock(tab, i);
1744 }
1745 else if (f.casHash(fh, fh | LOCKED)) {
1746 try {
1747 if (tabAt(tab, i) == f) {
1748 count = 1;
1749 for (Node e = f, pred = null;; ++count) {
1750 Object ek, ev;
1751 if ((e.hash & HASH_BITS) == h &&
1752 (ev = e.val) != null &&
1753 ((ek = e.key) == k || k.equals(ek))) {
1754 val = mf.apply(k, (V)ev);
1755 if (val != null)
1756 e.val = val;
1757 else {
1758 delta = -1;
1759 Node en = e.next;
1760 if (pred != null)
1761 pred.next = en;
1762 else
1763 setTabAt(tab, i, en);
1764 }
1765 break;
1766 }
1767 pred = e;
1768 if ((e = e.next) == null) {
1769 if (!onlyIfPresent && (val = mf.apply(k, null)) != null) {
1770 pred.next = new Node(h, k, val, null);
1771 delta = 1;
1772 if (count >= TREE_THRESHOLD)
1773 replaceWithTreeBin(tab, i, k);
1774 }
1775 break;
1776 }
1777 }
1778 }
1779 } finally {
1780 if (!f.casHash(fh | LOCKED, fh)) {
1781 f.hash = fh;
1782 synchronized (f) { f.notifyAll(); };
1783 }
1784 }
1785 if (count != 0) {
1786 if (tab.length <= 64)
1787 count = 2;
1788 break;
1789 }
1790 }
1791 }
1792 if (delta != 0) {
1793 counter.add((long)delta);
1794 if (count > 1)
1795 checkForResize();
1796 }
1797 return val;
1798 }
1799
1800 /** Implementation for merge */
1801 @SuppressWarnings("unchecked") private final Object internalMerge
1802 (K k, V v, BiFun<? super V, ? super V, ? extends V> mf) {
1803 int h = spread(k.hashCode());
1804 Object val = null;
1805 int delta = 0;
1806 int count = 0;
1807 for (Node[] tab = table;;) {
1808 int i; Node f; int fh; Object fk, fv;
1809 if (tab == null)
1810 tab = initTable();
1811 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1812 if (casTabAt(tab, i, null, new Node(h, k, v, null))) {
1813 delta = 1;
1814 val = v;
1815 break;
1816 }
1817 }
1818 else if ((fh = f.hash) == MOVED) {
1819 if ((fk = f.key) instanceof TreeBin) {
1820 TreeBin t = (TreeBin)fk;
1821 t.acquire(0);
1822 try {
1823 if (tabAt(tab, i) == f) {
1824 count = 1;
1825 TreeNode p = t.getTreeNode(h, k, t.root);
1826 val = (p == null) ? v : mf.apply((V)p.val, v);
1827 if (val != null) {
1828 if (p != null)
1829 p.val = val;
1830 else {
1831 count = 2;
1832 delta = 1;
1833 t.putTreeNode(h, k, val);
1834 }
1835 }
1836 else if (p != null) {
1837 delta = -1;
1838 t.deleteTreeNode(p);
1839 }
1840 }
1841 } finally {
1842 t.release(0);
1843 }
1844 if (count != 0)
1845 break;
1846 }
1847 else
1848 tab = (Node[])fk;
1849 }
1850 else if ((fh & LOCKED) != 0) {
1851 checkForResize();
1852 f.tryAwaitLock(tab, i);
1853 }
1854 else if (f.casHash(fh, fh | LOCKED)) {
1855 try {
1856 if (tabAt(tab, i) == f) {
1857 count = 1;
1858 for (Node e = f, pred = null;; ++count) {
1859 Object ek, ev;
1860 if ((e.hash & HASH_BITS) == h &&
1861 (ev = e.val) != null &&
1862 ((ek = e.key) == k || k.equals(ek))) {
1863 val = mf.apply((V)ev, v);
1864 if (val != null)
1865 e.val = val;
1866 else {
1867 delta = -1;
1868 Node en = e.next;
1869 if (pred != null)
1870 pred.next = en;
1871 else
1872 setTabAt(tab, i, en);
1873 }
1874 break;
1875 }
1876 pred = e;
1877 if ((e = e.next) == null) {
1878 val = v;
1879 pred.next = new Node(h, k, val, null);
1880 delta = 1;
1881 if (count >= TREE_THRESHOLD)
1882 replaceWithTreeBin(tab, i, k);
1883 break;
1884 }
1885 }
1886 }
1887 } finally {
1888 if (!f.casHash(fh | LOCKED, fh)) {
1889 f.hash = fh;
1890 synchronized (f) { f.notifyAll(); };
1891 }
1892 }
1893 if (count != 0) {
1894 if (tab.length <= 64)
1895 count = 2;
1896 break;
1897 }
1898 }
1899 }
1900 if (delta != 0) {
1901 counter.add((long)delta);
1902 if (count > 1)
1903 checkForResize();
1904 }
1905 return val;
1906 }
1907
1908 /** Implementation for putAll */
1909 private final void internalPutAll(Map<?, ?> m) {
1910 tryPresize(m.size());
1911 long delta = 0L; // number of uncommitted additions
1912 boolean npe = false; // to throw exception on exit for nulls
1913 try { // to clean up counts on other exceptions
1914 for (Map.Entry<?, ?> entry : m.entrySet()) {
1915 Object k, v;
1916 if (entry == null || (k = entry.getKey()) == null ||
1917 (v = entry.getValue()) == null) {
1918 npe = true;
1919 break;
1920 }
1921 int h = spread(k.hashCode());
1922 for (Node[] tab = table;;) {
1923 int i; Node f; int fh; Object fk;
1924 if (tab == null)
1925 tab = initTable();
1926 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null){
1927 if (casTabAt(tab, i, null, new Node(h, k, v, null))) {
1928 ++delta;
1929 break;
1930 }
1931 }
1932 else if ((fh = f.hash) == MOVED) {
1933 if ((fk = f.key) instanceof TreeBin) {
1934 TreeBin t = (TreeBin)fk;
1935 boolean validated = false;
1936 t.acquire(0);
1937 try {
1938 if (tabAt(tab, i) == f) {
1939 validated = true;
1940 TreeNode p = t.getTreeNode(h, k, t.root);
1941 if (p != null)
1942 p.val = v;
1943 else {
1944 t.putTreeNode(h, k, v);
1945 ++delta;
1946 }
1947 }
1948 } finally {
1949 t.release(0);
1950 }
1951 if (validated)
1952 break;
1953 }
1954 else
1955 tab = (Node[])fk;
1956 }
1957 else if ((fh & LOCKED) != 0) {
1958 counter.add(delta);
1959 delta = 0L;
1960 checkForResize();
1961 f.tryAwaitLock(tab, i);
1962 }
1963 else if (f.casHash(fh, fh | LOCKED)) {
1964 int count = 0;
1965 try {
1966 if (tabAt(tab, i) == f) {
1967 count = 1;
1968 for (Node e = f;; ++count) {
1969 Object ek, ev;
1970 if ((e.hash & HASH_BITS) == h &&
1971 (ev = e.val) != null &&
1972 ((ek = e.key) == k || k.equals(ek))) {
1973 e.val = v;
1974 break;
1975 }
1976 Node last = e;
1977 if ((e = e.next) == null) {
1978 ++delta;
1979 last.next = new Node(h, k, v, null);
1980 if (count >= TREE_THRESHOLD)
1981 replaceWithTreeBin(tab, i, k);
1982 break;
1983 }
1984 }
1985 }
1986 } finally {
1987 if (!f.casHash(fh | LOCKED, fh)) {
1988 f.hash = fh;
1989 synchronized (f) { f.notifyAll(); };
1990 }
1991 }
1992 if (count != 0) {
1993 if (count > 1) {
1994 counter.add(delta);
1995 delta = 0L;
1996 checkForResize();
1997 }
1998 break;
1999 }
2000 }
2001 }
2002 }
2003 } finally {
2004 if (delta != 0)
2005 counter.add(delta);
2006 }
2007 if (npe)
2008 throw new NullPointerException();
2009 }
2010
2011 /* ---------------- Table Initialization and Resizing -------------- */
2012
2013 /**
2014 * Returns a power of two table size for the given desired capacity.
2015 * See Hacker's Delight, sec 3.2
2016 */
2017 private static final int tableSizeFor(int c) {
2018 int n = c - 1;
2019 n |= n >>> 1;
2020 n |= n >>> 2;
2021 n |= n >>> 4;
2022 n |= n >>> 8;
2023 n |= n >>> 16;
2024 return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
2025 }
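
    /*
     * Illustrative arithmetic only (not used by the implementation):
     * tableSizeFor smears the highest set bit of (c - 1) downward and
     * adds one, yielding the smallest power of two >= c. For example,
     * with c = 100: n starts at 99 (binary 1100011); after the shifts
     * n is 127 (binary 1111111); the result is 127 + 1 = 128. A quick
     * sanity check, assuming plain assertions:
     *
     *   assert tableSizeFor(1) == 1;
     *   assert tableSizeFor(16) == 16;
     *   assert tableSizeFor(17) == 32;
     *   assert tableSizeFor(100) == 128;
     */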
2026
2027 /**
2028 * Initializes table, using the size recorded in sizeCtl.
2029 */
2030 private final Node[] initTable() {
2031 Node[] tab; int sc;
2032 while ((tab = table) == null) {
2033 if ((sc = sizeCtl) < 0)
2034 Thread.yield(); // lost initialization race; just spin
2035 else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
2036 try {
2037 if ((tab = table) == null) {
2038 int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
2039 tab = table = new Node[n];
2040 sc = n - (n >>> 2);
2041 }
2042 } finally {
2043 sizeCtl = sc;
2044 }
2045 break;
2046 }
2047 }
2048 return tab;
2049 }
2050
2051 /**
2052 * If table is too small and not already resizing, creates next
2053 * table and transfers bins. Rechecks occupancy after a transfer
2054 * to see if another resize is already needed because resizings
2055 * are lagging additions.
2056 */
2057 private final void checkForResize() {
2058 Node[] tab; int n, sc;
2059 while ((tab = table) != null &&
2060 (n = tab.length) < MAXIMUM_CAPACITY &&
2061 (sc = sizeCtl) >= 0 && counter.sum() >= (long)sc &&
2062 UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
2063 try {
2064 if (tab == table) {
2065 table = rebuild(tab);
2066 sc = (n << 1) - (n >>> 1);
2067 }
2068 } finally {
2069 sizeCtl = sc;
2070 }
2071 }
2072 }
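
    /*
     * Worked example of the sizeCtl bookkeeping above (illustration
     * only): initTable sets sc = n - (n >>> 2), i.e. 0.75 * n, so a
     * default 16-bin table is due for a resize once the element count
     * reaches 12. After rebuild doubles the table, checkForResize sets
     * sc = (n << 1) - (n >>> 1) = 1.5 * n, which is again 0.75 of the
     * new length (for n = 16: 32 - 8 = 24, and 0.75 * 32 = 24).
     */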
2073
2074 /**
2075 * Tries to presize table to accommodate the given number of elements.
2076 *
2077 * @param size number of elements (doesn't need to be perfectly accurate)
2078 */
2079 private final void tryPresize(int size) {
2080 int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
2081 tableSizeFor(size + (size >>> 1) + 1);
2082 int sc;
2083 while ((sc = sizeCtl) >= 0) {
2084 Node[] tab = table; int n;
2085 if (tab == null || (n = tab.length) == 0) {
2086 n = (sc > c) ? sc : c;
2087 if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
2088 try {
2089 if (table == tab) {
2090 table = new Node[n];
2091 sc = n - (n >>> 2);
2092 }
2093 } finally {
2094 sizeCtl = sc;
2095 }
2096 }
2097 }
2098 else if (c <= sc || n >= MAXIMUM_CAPACITY)
2099 break;
2100 else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
2101 try {
2102 if (table == tab) {
2103 table = rebuild(tab);
2104 sc = (n << 1) - (n >>> 1);
2105 }
2106 } finally {
2107 sizeCtl = sc;
2108 }
2109 }
2110 }
2111 }
2112
2113 /**
2114 * Moves and/or copies the nodes in each bin to new table. See
2115 * above for explanation.
2116 *
2117 * @return the new table
2118 */
2119 private static final Node[] rebuild(Node[] tab) {
2120 int n = tab.length;
2121 Node[] nextTab = new Node[n << 1];
2122 Node fwd = new Node(MOVED, nextTab, null, null);
2123 int[] buffer = null; // holds bins to revisit; null until needed
2124 Node rev = null; // reverse forwarder; null until needed
2125 int nbuffered = 0; // the number of bins in buffer list
2126 int bufferIndex = 0; // buffer index of current buffered bin
2127 int bin = n - 1; // current non-buffered bin or -1 if none
2128
2129 for (int i = bin;;) { // start upwards sweep
2130 int fh; Node f;
2131 if ((f = tabAt(tab, i)) == null) {
2132 if (bin >= 0) { // Unbuffered; no lock needed (or available)
2133 if (!casTabAt(tab, i, f, fwd))
2134 continue;
2135 }
2136 else { // transiently use a locked forwarding node
2137 Node g = new Node(MOVED|LOCKED, nextTab, null, null);
2138 if (!casTabAt(tab, i, f, g))
2139 continue;
2140 setTabAt(nextTab, i, null);
2141 setTabAt(nextTab, i + n, null);
2142 setTabAt(tab, i, fwd);
2143 if (!g.casHash(MOVED|LOCKED, MOVED)) {
2144 g.hash = MOVED;
2145 synchronized (g) { g.notifyAll(); }
2146 }
2147 }
2148 }
2149 else if ((fh = f.hash) == MOVED) {
2150 Object fk = f.key;
2151 if (fk instanceof TreeBin) {
2152 TreeBin t = (TreeBin)fk;
2153 boolean validated = false;
2154 t.acquire(0);
2155 try {
2156 if (tabAt(tab, i) == f) {
2157 validated = true;
2158 splitTreeBin(nextTab, i, t);
2159 setTabAt(tab, i, fwd);
2160 }
2161 } finally {
2162 t.release(0);
2163 }
2164 if (!validated)
2165 continue;
2166 }
2167 }
2168 else if ((fh & LOCKED) == 0 && f.casHash(fh, fh|LOCKED)) {
2169 boolean validated = false;
2170 try { // split to lo and hi lists; copying as needed
2171 if (tabAt(tab, i) == f) {
2172 validated = true;
2173 splitBin(nextTab, i, f);
2174 setTabAt(tab, i, fwd);
2175 }
2176 } finally {
2177 if (!f.casHash(fh | LOCKED, fh)) {
2178 f.hash = fh;
2179 synchronized (f) { f.notifyAll(); };
2180 }
2181 }
2182 if (!validated)
2183 continue;
2184 }
2185 else {
2186 if (buffer == null) // initialize buffer for revisits
2187 buffer = new int[TRANSFER_BUFFER_SIZE];
2188 if (bin < 0 && bufferIndex > 0) {
2189 int j = buffer[--bufferIndex];
2190 buffer[bufferIndex] = i;
2191 i = j; // swap with another bin
2192 continue;
2193 }
2194 if (bin < 0 || nbuffered >= TRANSFER_BUFFER_SIZE) {
2195 f.tryAwaitLock(tab, i);
2196 continue; // no other options -- block
2197 }
2198 if (rev == null) // initialize reverse-forwarder
2199 rev = new Node(MOVED, tab, null, null);
2200 if (tabAt(tab, i) != f || (f.hash & LOCKED) == 0)
2201 continue; // recheck before adding to list
2202 buffer[nbuffered++] = i;
2203 setTabAt(nextTab, i, rev); // install place-holders
2204 setTabAt(nextTab, i + n, rev);
2205 }
2206
2207 if (bin > 0)
2208 i = --bin;
2209 else if (buffer != null && nbuffered > 0) {
2210 bin = -1;
2211 i = buffer[bufferIndex = --nbuffered];
2212 }
2213 else
2214 return nextTab;
2215 }
2216 }
2217
2218 /**
2219 * Splits a normal bin with list headed by e into lo and hi parts;
2220 * installs in given table.
2221 */
2222 private static void splitBin(Node[] nextTab, int i, Node e) {
2223 int bit = nextTab.length >>> 1; // bit to split on
2224 int runBit = e.hash & bit;
2225 Node lastRun = e, lo = null, hi = null;
2226 for (Node p = e.next; p != null; p = p.next) {
2227 int b = p.hash & bit;
2228 if (b != runBit) {
2229 runBit = b;
2230 lastRun = p;
2231 }
2232 }
2233 if (runBit == 0)
2234 lo = lastRun;
2235 else
2236 hi = lastRun;
2237 for (Node p = e; p != lastRun; p = p.next) {
2238 int ph = p.hash & HASH_BITS;
2239 Object pk = p.key, pv = p.val;
2240 if ((ph & bit) == 0)
2241 lo = new Node(ph, pk, pv, lo);
2242 else
2243 hi = new Node(ph, pk, pv, hi);
2244 }
2245 setTabAt(nextTab, i, lo);
2246 setTabAt(nextTab, i + bit, hi);
2247 }
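
    /*
     * Illustrative example of the split above (not executable here):
     * when a 16-bin table grows to 32, bit = 16, so a node with hash h
     * that lived in old bin i stays at index i in the new table if
     * (h & 16) == 0, and moves to index i + 16 otherwise. The lastRun
     * pass reuses the trailing portion of the old list whose nodes all
     * land on the same side, so only the nodes before lastRun need to
     * be copied into new Node objects.
     */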
2248
2249 /**
2250 * Splits a tree bin into lo and hi parts; installs in given table.
2251 */
2252 private static void splitTreeBin(Node[] nextTab, int i, TreeBin t) {
2253 int bit = nextTab.length >>> 1;
2254 TreeBin lt = new TreeBin();
2255 TreeBin ht = new TreeBin();
2256 int lc = 0, hc = 0;
2257 for (Node e = t.first; e != null; e = e.next) {
2258 int h = e.hash & HASH_BITS;
2259 Object k = e.key, v = e.val;
2260 if ((h & bit) == 0) {
2261 ++lc;
2262 lt.putTreeNode(h, k, v);
2263 }
2264 else {
2265 ++hc;
2266 ht.putTreeNode(h, k, v);
2267 }
2268 }
2269 Node ln, hn; // throw away trees if too small
2270 if (lc <= (TREE_THRESHOLD >>> 1)) {
2271 ln = null;
2272 for (Node p = lt.first; p != null; p = p.next)
2273 ln = new Node(p.hash, p.key, p.val, ln);
2274 }
2275 else
2276 ln = new Node(MOVED, lt, null, null);
2277 setTabAt(nextTab, i, ln);
2278 if (hc <= (TREE_THRESHOLD >>> 1)) {
2279 hn = null;
2280 for (Node p = ht.first; p != null; p = p.next)
2281 hn = new Node(p.hash, p.key, p.val, hn);
2282 }
2283 else
2284 hn = new Node(MOVED, ht, null, null);
2285 setTabAt(nextTab, i + bit, hn);
2286 }
2287
2288 /**
2289 * Implementation for clear. Steps through each bin, removing all
2290 * nodes.
2291 */
2292 private final void internalClear() {
2293 long delta = 0L; // negative number of deletions
2294 int i = 0;
2295 Node[] tab = table;
2296 while (tab != null && i < tab.length) {
2297 int fh; Object fk;
2298 Node f = tabAt(tab, i);
2299 if (f == null)
2300 ++i;
2301 else if ((fh = f.hash) == MOVED) {
2302 if ((fk = f.key) instanceof TreeBin) {
2303 TreeBin t = (TreeBin)fk;
2304 t.acquire(0);
2305 try {
2306 if (tabAt(tab, i) == f) {
2307 for (Node p = t.first; p != null; p = p.next) {
2308 if (p.val != null) { // (currently always true)
2309 p.val = null;
2310 --delta;
2311 }
2312 }
2313 t.first = null;
2314 t.root = null;
2315 ++i;
2316 }
2317 } finally {
2318 t.release(0);
2319 }
2320 }
2321 else
2322 tab = (Node[])fk;
2323 }
2324 else if ((fh & LOCKED) != 0) {
2325 counter.add(delta); // opportunistically update count
2326 delta = 0L;
2327 f.tryAwaitLock(tab, i);
2328 }
2329 else if (f.casHash(fh, fh | LOCKED)) {
2330 try {
2331 if (tabAt(tab, i) == f) {
2332 for (Node e = f; e != null; e = e.next) {
2333 if (e.val != null) { // (currently always true)
2334 e.val = null;
2335 --delta;
2336 }
2337 }
2338 setTabAt(tab, i, null);
2339 ++i;
2340 }
2341 } finally {
2342 if (!f.casHash(fh | LOCKED, fh)) {
2343 f.hash = fh;
2344 synchronized (f) { f.notifyAll(); };
2345 }
2346 }
2347 }
2348 }
2349 if (delta != 0)
2350 counter.add(delta);
2351 }
2352
2353 /* ---------------- Table Traversal -------------- */
2354
2355 /**
2356 * Encapsulates traversal for methods such as containsValue; also
2357 * serves as a base class for other iterators and bulk tasks.
2358 *
2359 * At each step, the iterator snapshots the key ("nextKey") and
2360 * value ("nextVal") of a valid node (i.e., one that, at point of
2361 * snapshot, has a non-null user value). Because val fields can
2362 * change (including to null, indicating deletion), field nextVal
2363 * might not be accurate at point of use, but still maintains the
2364 * weak consistency property of holding a value that was once
2365 * valid. To support iterator.remove, the nextKey field is not
2366 * updated (nulled out) when the iterator cannot advance.
2367 *
2368 * Internal traversals directly access these fields, as in:
2369 * {@code while (it.advance() != null) { process(it.nextKey); }}
2370 *
2371 * Exported iterators must track whether the iterator has advanced
2372 * (in hasNext vs next) (by setting/checking/nulling field
2373 * nextVal), and then extract key, value, or key-value pairs as
2374 * return values of next().
2375 *
2376 * The iterator visits once each still-valid node that was
2377 * reachable upon iterator construction. It might miss some that
2378 * were added to a bin after the bin was visited, which is OK wrt
2379 * consistency guarantees. Maintaining this property in the face
2380 * of possible ongoing resizes requires a fair amount of
2381 * bookkeeping state that is difficult to optimize away amidst
2382 * volatile accesses. Even so, traversal maintains reasonable
2383 * throughput.
2384 *
2385 * Normally, iteration proceeds bin-by-bin traversing lists.
2386 * However, if the table has been resized, then all future steps
2387 * must traverse both the bin at the current index and the bin at
2388 * (index + baseSize); and so on for further resizings. To
2389 * paranoically cope with potential sharing by users of iterators
2390 * across threads, iteration terminates if a bounds check fails
2391 * for a table read.
2392 *
2393 * This class extends CountedCompleter to streamline parallel
2394 * iteration in bulk operations. This adds only a few fields of
2395 * space overhead, small enough not to matter in the cases where
2396 * it is not needed. Because CountedCompleter is
2397 * Serializable, but iterators need not be, we need to add warning
2398 * suppressions.
2399 */
2400 @SuppressWarnings("serial") static class Traverser<K,V,R> extends CountedCompleter<R> {
2401 final ConcurrentHashMap<K, V> map;
2402 Node next; // the next entry to use
2403 Object nextKey; // cached key field of next
2404 Object nextVal; // cached val field of next
2405 Node[] tab; // current table; updated if resized
2406 int index; // index of bin to use next
2407 int baseIndex; // current index of initial table
2408 int baseLimit; // index bound for initial table
2409 int baseSize; // initial table size
2410 int batch; // split control
2411
2412 /** Creates iterator for all entries in the table. */
2413 Traverser(ConcurrentHashMap<K, V> map) {
2414 this.map = map;
2415 }
2416
2417 /** Creates iterator for split() methods and task constructors */
2418 Traverser(ConcurrentHashMap<K,V> map, Traverser<K,V,?> it, int batch) {
2419 super(it);
2420 this.batch = batch;
2421 if ((this.map = map) != null && it != null) { // split parent
2422 Node[] t;
2423 if ((t = it.tab) == null &&
2424 (t = it.tab = map.table) != null)
2425 it.baseLimit = it.baseSize = t.length;
2426 this.tab = t;
2427 this.baseSize = it.baseSize;
2428 int hi = this.baseLimit = it.baseLimit;
2429 it.baseLimit = this.index = this.baseIndex =
2430 (hi + it.baseIndex + 1) >>> 1;
2431 }
2432 }
2433
2434 /**
2435 * Advances next; returns nextVal or null if terminated.
2436 * See above for explanation.
2437 */
2438 final Object advance() {
2439 Node e = next;
2440 Object ev = null;
2441 outer: do {
2442 if (e != null) // advance past used/skipped node
2443 e = e.next;
2444 while (e == null) { // get to next non-null bin
2445 ConcurrentHashMap<K, V> m;
2446 Node[] t; int b, i, n; Object ek; // checks must use locals
2447 if ((t = tab) != null)
2448 n = t.length;
2449 else if ((m = map) != null && (t = tab = m.table) != null)
2450 n = baseLimit = baseSize = t.length;
2451 else
2452 break outer;
2453 if ((b = baseIndex) >= baseLimit ||
2454 (i = index) < 0 || i >= n)
2455 break outer;
2456 if ((e = tabAt(t, i)) != null && e.hash == MOVED) {
2457 if ((ek = e.key) instanceof TreeBin)
2458 e = ((TreeBin)ek).first;
2459 else {
2460 tab = (Node[])ek;
2461 continue; // restarts due to null val
2462 }
2463 } // visit upper slots if present
2464 index = (i += baseSize) < n ? i : (baseIndex = b + 1);
2465 }
2466 nextKey = e.key;
2467 } while ((ev = e.val) == null); // skip deleted or special nodes
2468 next = e;
2469 return nextVal = ev;
2470 }
2471
2472 public final void remove() {
2473 Object k = nextKey;
2474 if (k == null && (advance() == null || (k = nextKey) == null))
2475 throw new IllegalStateException();
2476 map.internalReplace(k, null, null);
2477 }
2478
2479 public final boolean hasNext() {
2480 return nextVal != null || advance() != null;
2481 }
2482
2483 public final boolean hasMoreElements() { return hasNext(); }
2484
2485 public void compute() { } // default no-op CountedCompleter body
2486
2487 /**
2488 * Returns a batch value > 0 if this task should (and must) be
2489 * split; if so, adds to the pending count, and in any case
2490 * updates the batch value. The initial batch value is
2491 * approximately exp2 of the number of times (minus one) to
2492 * split the task in two before executing the leaf action. This
2493 * value is faster to compute and more convenient to use as a
2494 * guide to splitting than the depth, since it is used while
2495 * dividing by two anyway.
2496 */
2497 final int preSplit() {
2498 ConcurrentHashMap<K, V> m; int b; Node[] t; ForkJoinPool pool;
2499 if ((b = batch) < 0 && (m = map) != null) { // force initialization
2500 if ((t = tab) == null && (t = tab = m.table) != null)
2501 baseLimit = baseSize = t.length;
2502 if (t != null) {
2503 long n = m.counter.sum();
2504 int par = ((pool = getPool()) == null) ?
2505 ForkJoinPool.getCommonPoolParallelism() :
2506 pool.getParallelism();
2507 int sp = par << 3; // slack of 8
2508 b = (n <= 0L) ? 0 : (n < (long)sp) ? (int)n : sp;
2509 }
2510 }
2511 b = (b <= 1 || baseIndex == baseLimit) ? 0 : (b >>> 1);
2512 if ((batch = b) > 0)
2513 addToPendingCount(1);
2514 return b;
2515 }
2516
2517 }
2518
2519 /* ---------------- Public operations -------------- */
2520
2521 /**
2522 * Creates a new, empty map with the default initial table size (16).
2523 */
2524 public ConcurrentHashMap() {
2525 this.counter = new LongAdder();
2526 }
2527
2528 /**
2529 * Creates a new, empty map with an initial table size
2530 * accommodating the specified number of elements without the need
2531 * to dynamically resize.
2532 *
2533 * @param initialCapacity The implementation performs internal
2534 * sizing to accommodate this many elements.
2535 * @throws IllegalArgumentException if the initial capacity of
2536 * elements is negative
2537 */
2538 public ConcurrentHashMap(int initialCapacity) {
2539 if (initialCapacity < 0)
2540 throw new IllegalArgumentException();
2541 int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
2542 MAXIMUM_CAPACITY :
2543 tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
2544 this.counter = new LongAdder();
2545 this.sizeCtl = cap;
2546 }
2547
2548 /**
2549 * Creates a new map with the same mappings as the given map.
2550 *
2551 * @param m the map
2552 */
2553 public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
2554 this.counter = new LongAdder();
2555 this.sizeCtl = DEFAULT_CAPACITY;
2556 internalPutAll(m);
2557 }
2558
2559 /**
2560 * Creates a new, empty map with an initial table size based on
2561 * the given number of elements ({@code initialCapacity}) and
2562 * initial table density ({@code loadFactor}).
2563 *
2564 * @param initialCapacity the initial capacity. The implementation
2565 * performs internal sizing to accommodate this many elements,
2566 * given the specified load factor.
2567 * @param loadFactor the load factor (table density) for
2568 * establishing the initial table size
2569 * @throws IllegalArgumentException if the initial capacity of
2570 * elements is negative or the load factor is nonpositive
2571 *
2572 * @since 1.6
2573 */
2574 public ConcurrentHashMap(int initialCapacity, float loadFactor) {
2575 this(initialCapacity, loadFactor, 1);
2576 }
2577
2578 /**
2579 * Creates a new, empty map with an initial table size based on
2580 * the given number of elements ({@code initialCapacity}), table
2581 * density ({@code loadFactor}), and number of concurrently
2582 * updating threads ({@code concurrencyLevel}).
2583 *
2584 * @param initialCapacity the initial capacity. The implementation
2585 * performs internal sizing to accommodate this many elements,
2586 * given the specified load factor.
2587 * @param loadFactor the load factor (table density) for
2588 * establishing the initial table size
2589 * @param concurrencyLevel the estimated number of concurrently
2590 * updating threads. The implementation may use this value as
2591 * a sizing hint.
2592 * @throws IllegalArgumentException if the initial capacity is
2593 * negative or the load factor or concurrencyLevel are
2594 * nonpositive
2595 */
2596 public ConcurrentHashMap(int initialCapacity,
2597 float loadFactor, int concurrencyLevel) {
2598 if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
2599 throw new IllegalArgumentException();
2600 if (initialCapacity < concurrencyLevel) // Use at least as many bins
2601 initialCapacity = concurrencyLevel; // as estimated threads
2602 long size = (long)(1.0 + (long)initialCapacity / loadFactor);
2603 int cap = (size >= (long)MAXIMUM_CAPACITY) ?
2604 MAXIMUM_CAPACITY : tableSizeFor((int)size);
2605 this.counter = new LongAdder();
2606 this.sizeCtl = cap;
2607 }
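
    /*
     * Sizing example for the constructor above (illustration only):
     * new ConcurrentHashMap<String, String>(100, 0.75f, 16) keeps the
     * requested capacity of 100 (it already exceeds the concurrency
     * level), computes size = (long)(1.0 + 100 / 0.75f) = 134, and
     * tableSizeFor(134) = 256, so sizeCtl starts at 256.
     */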
2608
2609 /**
2610 * Creates a new {@link Set} backed by a ConcurrentHashMap
2611 * from the given type to {@code Boolean.TRUE}.
2612 *
2613 * @return the new set
2614 */
2615 public static <K> KeySetView<K,Boolean> newKeySet() {
2616 return new KeySetView<K,Boolean>(new ConcurrentHashMap<K,Boolean>(),
2617 Boolean.TRUE);
2618 }
2619
2620 /**
2621 * Creates a new {@link Set} backed by a ConcurrentHashMap
2622 * from the given type to {@code Boolean.TRUE}.
2623 *
2624 * @param initialCapacity The implementation performs internal
2625 * sizing to accommodate this many elements.
2626 * @throws IllegalArgumentException if the initial capacity of
2627 * elements is negative
2628 * @return the new set
2629 */
2630 public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
2631 return new KeySetView<K,Boolean>(new ConcurrentHashMap<K,Boolean>(initialCapacity),
2632 Boolean.TRUE);
2633 }
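
    /*
     * Hypothetical usage of the newKeySet factories; the variable
     * names and the crawl(...) caller are assumptions for
     * illustration:
     *
     *   KeySetView<String, Boolean> seen = ConcurrentHashMap.newKeySet();
     *   if (seen.add(url))     // true only the first time this key is added
     *       crawl(url);
     */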
2634
2635 /**
2636 * {@inheritDoc}
2637 */
2638 public boolean isEmpty() {
2639 return counter.sum() <= 0L; // ignore transient negative values
2640 }
2641
2642 /**
2643 * {@inheritDoc}
2644 */
2645 public int size() {
2646 long n = counter.sum();
2647 return ((n < 0L) ? 0 :
2648 (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
2649 (int)n);
2650 }
2651
2652 /**
2653 * Returns the number of mappings. This method should be used
2654 * instead of {@link #size} because a ConcurrentHashMap may
2655 * contain more mappings than can be represented as an int. The
2656 * value returned is an estimate; the actual count may differ if
2657 * there are concurrent insertions or removals.
2658 *
2659 * @return the number of mappings
2660 */
2661 public long mappingCount() {
2662 long n = counter.sum();
2663 return (n < 0L) ? 0L : n; // ignore transient negative values
2664 }
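
    /*
     * Usage note (illustrative; 'map' is an assumed ConcurrentHashMap):
     * prefer mappingCount() over size() when the map may exceed
     * Integer.MAX_VALUE entries, e.g.
     *
     *   long n = map.mappingCount();
     *   if (n > 1000000L)
     *       System.out.println("large map: approximately " + n + " entries");
     */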
2665
2666 /**
2667 * Returns the value to which the specified key is mapped,
2668 * or {@code null} if this map contains no mapping for the key.
2669 *
2670 * <p>More formally, if this map contains a mapping from a key
2671 * {@code k} to a value {@code v} such that {@code key.equals(k)},
2672 * then this method returns {@code v}; otherwise it returns
2673 * {@code null}. (There can be at most one such mapping.)
2674 *
2675 * @throws NullPointerException if the specified key is null
2676 */
2677 @SuppressWarnings("unchecked") public V get(Object key) {
2678 if (key == null)
2679 throw new NullPointerException();
2680 return (V)internalGet(key);
2681 }
2682
2683 /**
2684 * Returns the value to which the specified key is mapped,
2685 * or the given defaultValue if this map contains no mapping for the key.
2686 *
2687 * @param key the key
2688 * @param defaultValue the value to return if this map contains
2689 * no mapping for the given key
2690 * @return the mapping for the key, if present; else the defaultValue
2691 * @throws NullPointerException if the specified key is null
2692 */
2693 @SuppressWarnings("unchecked") public V getValueOrDefault(Object key, V defaultValue) {
2694 if (key == null)
2695 throw new NullPointerException();
2696 V v = (V) internalGet(key);
2697 return v == null ? defaultValue : v;
2698 }
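
    /*
     * Illustrative call (assumes a map from String to Integer):
     *
     *   int hits = map.getValueOrDefault("key", Integer.valueOf(0)).intValue();
     */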
2699
2700 /**
2701 * Tests if the specified object is a key in this table.
2702 *
2703 * @param key possible key
2704 * @return {@code true} if and only if the specified object
2705 * is a key in this table, as determined by the
2706 * {@code equals} method; {@code false} otherwise
2707 * @throws NullPointerException if the specified key is null
2708 */
2709 public boolean containsKey(Object key) {
2710 if (key == null)
2711 throw new NullPointerException();
2712 return internalGet(key) != null;
2713 }
2714
2715 /**
2716 * Returns {@code true} if this map maps one or more keys to the
2717 * specified value. Note: This method may require a full traversal
2718 * of the map, and is much slower than method {@code containsKey}.
2719 *
2720 * @param value value whose presence in this map is to be tested
2721 * @return {@code true} if this map maps one or more keys to the
2722 * specified value
2723 * @throws NullPointerException if the specified value is null
2724 */
2725 public boolean containsValue(Object value) {
2726 if (value == null)
2727 throw new NullPointerException();
2728 Object v;
2729 Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
2730 while ((v = it.advance()) != null) {
2731 if (v == value || value.equals(v))
2732 return true;
2733 }
2734 return false;
2735 }
2736
2737 /**
2738 * Legacy method testing if some key maps into the specified value
2739 * in this table. This method is identical in functionality to
2740 * {@link #containsValue}, and exists solely to ensure
2741 * full compatibility with class {@link java.util.Hashtable},
2742 * which supported this method prior to introduction of the
2743 * Java Collections framework.
2744 *
2745 * @param value a value to search for
2746 * @return {@code true} if and only if some key maps to the
2747 * {@code value} argument in this table as
2748 * determined by the {@code equals} method;
2749 * {@code false} otherwise
2750 * @throws NullPointerException if the specified value is null
2751 */
2752 public boolean contains(Object value) {
2753 return containsValue(value);
2754 }
2755
2756 /**
2757 * Maps the specified key to the specified value in this table.
2758 * Neither the key nor the value can be null.
2759 *
2760 * <p>The value can be retrieved by calling the {@code get} method
2761 * with a key that is equal to the original key.
2762 *
2763 * @param key key with which the specified value is to be associated
2764 * @param value value to be associated with the specified key
2765 * @return the previous value associated with {@code key}, or
2766 * {@code null} if there was no mapping for {@code key}
2767 * @throws NullPointerException if the specified key or value is null
2768 */
2769 @SuppressWarnings("unchecked") public V put(K key, V value) {
2770 if (key == null || value == null)
2771 throw new NullPointerException();
2772 return (V)internalPut(key, value);
2773 }
2774
2775 /**
2776 * {@inheritDoc}
2777 *
2778 * @return the previous value associated with the specified key,
2779 * or {@code null} if there was no mapping for the key
2780 * @throws NullPointerException if the specified key or value is null
2781 */
2782 @SuppressWarnings("unchecked") public V putIfAbsent(K key, V value) {
2783 if (key == null || value == null)
2784 throw new NullPointerException();
2785 return (V)internalPutIfAbsent(key, value);
2786 }
2787
2788 /**
2789 * Copies all of the mappings from the specified map to this one.
2790 * These mappings replace any mappings that this map had for any of the
2791 * keys currently in the specified map.
2792 *
2793 * @param m mappings to be stored in this map
2794 */
2795 public void putAll(Map<? extends K, ? extends V> m) {
2796 internalPutAll(m);
2797 }
2798
2799 /**
2800 * If the specified key is not already associated with a value,
2801 * computes its value using the given mappingFunction and enters
2802 * it into the map unless null. This is equivalent to
2803 * <pre> {@code
2804 * if (map.containsKey(key))
2805 * return map.get(key);
2806 * value = mappingFunction.apply(key);
2807 * if (value != null)
2808 * map.put(key, value);
2809 * return value;}</pre>
2810 *
2811 * except that the action is performed atomically. If the
2812 * function returns {@code null} no mapping is recorded. If the
2813 * function itself throws an (unchecked) exception, the exception
2814 * is rethrown to its caller, and no mapping is recorded. Some
2815 * attempted update operations on this map by other threads may be
2816 * blocked while computation is in progress, so the computation
2817 * should be short and simple, and must not attempt to update any
2818 * other mappings of this Map. The most appropriate usage is to
2819 * construct a new object serving as an initial mapped value, or
2820 * memoized result, as in:
2821 *
2822 * <pre> {@code
2823 * map.computeIfAbsent(key, new Fun<K, V>() {
2824 *   public V apply(K k) { return new Value(f(k)); }});</pre>
2825 *
2826 * @param key key with which the specified value is to be associated
2827 * @param mappingFunction the function to compute a value
2828 * @return the current (existing or computed) value associated with
2829 * the specified key, or null if the computed value is null
2830 * @throws NullPointerException if the specified key or mappingFunction
2831 * is null
2832 * @throws IllegalStateException if the computation detectably
2833 * attempts a recursive update to this map that would
2834 * otherwise never complete
2835 * @throws RuntimeException or Error if the mappingFunction does so,
2836 * in which case the mapping is left unestablished
2837 */
2838 @SuppressWarnings("unchecked") public V computeIfAbsent
2839 (K key, Fun<? super K, ? extends V> mappingFunction) {
2840 if (key == null || mappingFunction == null)
2841 throw new NullPointerException();
2842 return (V)internalComputeIfAbsent(key, mappingFunction);
2843 }
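
    /*
     * A minimal sketch of per-key lazy initialization with
     * computeIfAbsent, using the Fun interface declared near the end
     * of this file; the index/record names are assumptions:
     *
     *   final ConcurrentHashMap<String, java.util.List<String>> index =
     *       new ConcurrentHashMap<String, java.util.List<String>>();
     *
     *   void record(String word, String doc) {
     *       index.computeIfAbsent(word, new Fun<String, java.util.List<String>>() {
     *           public java.util.List<String> apply(String k) {
     *               // created at most once per key; list itself must be thread-safe
     *               return new java.util.concurrent.CopyOnWriteArrayList<String>();
     *           }
     *       }).add(doc);
     *   }
     */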
2844
2845 /**
2846 * If the given key is present, computes a new mapping value given a key and
2847 * its current mapped value. This is equivalent to
2848 * <pre> {@code
2849 * if (map.containsKey(key)) {
2850 * value = remappingFunction.apply(key, map.get(key));
2851 * if (value != null)
2852 * map.put(key, value);
2853 * else
2854 * map.remove(key);
2855 * }
2856 * }</pre>
2857 *
2858 * except that the action is performed atomically. If the
2859 * function returns {@code null}, the mapping is removed. If the
2860 * function itself throws an (unchecked) exception, the exception
2861 * is rethrown to its caller, and the current mapping is left
2862 * unchanged. Some attempted update operations on this map by
2863 * other threads may be blocked while computation is in progress,
2864 * so the computation should be short and simple, and must not
2865 * attempt to update any other mappings of this Map.
2866 *
2867 * @param key key with which the specified value is to be associated
2868 * @param remappingFunction the function to compute a value
2869 * @return the new value associated with the specified key, or null if none
2870 * @throws NullPointerException if the specified key or remappingFunction
2871 * is null
2872 * @throws IllegalStateException if the computation detectably
2873 * attempts a recursive update to this map that would
2874 * otherwise never complete
2875 * @throws RuntimeException or Error if the remappingFunction does so,
2876 * in which case the mapping is unchanged
2877 */
2878 @SuppressWarnings("unchecked") public V computeIfPresent
2879 (K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
2880 if (key == null || remappingFunction == null)
2881 throw new NullPointerException();
2882 return (V)internalCompute(key, true, remappingFunction);
2883 }
2884
2885 /**
2886 * Computes a new mapping value given a key and
2887 * its current mapped value (or {@code null} if there is no current
2888 * mapping). This is equivalent to
2889 * <pre> {@code
2890 * value = remappingFunction.apply(key, map.get(key));
2891 * if (value != null)
2892 * map.put(key, value);
2893 * else
2894 * map.remove(key);
2895 * }</pre>
2896 *
2897 * except that the action is performed atomically. If the
2898 * function returns {@code null}, the mapping is removed. If the
2899 * function itself throws an (unchecked) exception, the exception
2900 * is rethrown to its caller, and the current mapping is left
2901 * unchanged. Some attempted update operations on this map by
2902 * other threads may be blocked while computation is in progress,
2903 * so the computation should be short and simple, and must not
2904 * attempt to update any other mappings of this Map. For example,
2905 * to either create or append new messages to a value mapping:
2906 *
2907 * <pre> {@code
2908 * Map<Key, String> map = ...;
2909 * final String msg = ...;
2910 * map.compute(key, new BiFun<Key, String, String>() {
2911 * public String apply(Key k, String v) {
2912 *     return (v == null) ? msg : v + msg; }});</pre>
2913 *
2914 * @param key key with which the specified value is to be associated
2915 * @param remappingFunction the function to compute a value
2916 * @return the new value associated with the specified key, or null if none
2917 * @throws NullPointerException if the specified key or remappingFunction
2918 * is null
2919 * @throws IllegalStateException if the computation detectably
2920 * attempts a recursive update to this map that would
2921 * otherwise never complete
2922 * @throws RuntimeException or Error if the remappingFunction does so,
2923 * in which case the mapping is unchanged
2924 */
2925 @SuppressWarnings("unchecked") public V compute
2926 (K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
2927 if (key == null || remappingFunction == null)
2928 throw new NullPointerException();
2929 return (V)internalCompute(key, false, remappingFunction);
2930 }
2931
2932 /**
2933 * If the specified key is not already associated
2934 * with a value, associate it with the given value.
2935 * Otherwise, replace the value with the results of
2936 * the given remapping function. This is equivalent to:
2937 * <pre> {@code
2938 * if (!map.containsKey(key))
2939 *   map.put(key, value);
2940 * else {
2941 *   newValue = remappingFunction.apply(map.get(key), value);
2942 *   if (newValue != null)
2943 *     map.put(key, newValue);
2944 * else
2945 * map.remove(key);
2946 * }
2947 * }</pre>
2948 * except that the action is performed atomically. If the
2949 * function returns {@code null}, the mapping is removed. If the
2950 * function itself throws an (unchecked) exception, the exception
2951 * is rethrown to its caller, and the current mapping is left
2952 * unchanged. Some attempted update operations on this map by
2953 * other threads may be blocked while computation is in progress,
2954 * so the computation should be short and simple, and must not
2955 * attempt to update any other mappings of this Map.
2956 */
2957 @SuppressWarnings("unchecked") public V merge
2958 (K key, V value, BiFun<? super V, ? super V, ? extends V> remappingFunction) {
2959 if (key == null || value == null || remappingFunction == null)
2960 throw new NullPointerException();
2961 return (V)internalMerge(key, value, remappingFunction);
2962 }
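
    /*
     * A minimal word-count sketch using merge and the BiFun interface
     * declared near the end of this file (names are illustrative):
     *
     *   final ConcurrentHashMap<String, Integer> counts =
     *       new ConcurrentHashMap<String, Integer>();
     *   final BiFun<Integer, Integer, Integer> sum =
     *       new BiFun<Integer, Integer, Integer>() {
     *           public Integer apply(Integer a, Integer b) {
     *               return Integer.valueOf(a.intValue() + b.intValue());
     *           }
     *       };
     *
     *   void count(String word) {
     *       counts.merge(word, Integer.valueOf(1), sum);  // 1 if absent, else old + 1
     *   }
     */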
2963
2964 /**
2965 * Removes the key (and its corresponding value) from this map.
2966 * This method does nothing if the key is not in the map.
2967 *
2968 * @param key the key that needs to be removed
2969 * @return the previous value associated with {@code key}, or
2970 * {@code null} if there was no mapping for {@code key}
2971 * @throws NullPointerException if the specified key is null
2972 */
2973 @SuppressWarnings("unchecked") public V remove(Object key) {
2974 if (key == null)
2975 throw new NullPointerException();
2976 return (V)internalReplace(key, null, null);
2977 }
2978
2979 /**
2980 * {@inheritDoc}
2981 *
2982 * @throws NullPointerException if the specified key is null
2983 */
2984 public boolean remove(Object key, Object value) {
2985 if (key == null)
2986 throw new NullPointerException();
2987 if (value == null)
2988 return false;
2989 return internalReplace(key, null, value) != null;
2990 }
2991
2992 /**
2993 * {@inheritDoc}
2994 *
2995 * @throws NullPointerException if any of the arguments are null
2996 */
2997 public boolean replace(K key, V oldValue, V newValue) {
2998 if (key == null || oldValue == null || newValue == null)
2999 throw new NullPointerException();
3000 return internalReplace(key, newValue, oldValue) != null;
3001 }
3002
3003 /**
3004 * {@inheritDoc}
3005 *
3006 * @return the previous value associated with the specified key,
3007 * or {@code null} if there was no mapping for the key
3008 * @throws NullPointerException if the specified key or value is null
3009 */
3010 @SuppressWarnings("unchecked") public V replace(K key, V value) {
3011 if (key == null || value == null)
3012 throw new NullPointerException();
3013 return (V)internalReplace(key, value, null);
3014 }
3015
3016 /**
3017 * Removes all of the mappings from this map.
3018 */
3019 public void clear() {
3020 internalClear();
3021 }
3022
3023 /**
3024 * Returns a {@link Set} view of the keys contained in this map.
3025 * The set is backed by the map, so changes to the map are
3026 * reflected in the set, and vice-versa.
3027 *
3028 * @return the set view
3029 */
3030 public KeySetView<K,V> keySet() {
3031 KeySetView<K,V> ks = keySet;
3032 return (ks != null) ? ks : (keySet = new KeySetView<K,V>(this, null));
3033 }
3034
3035 /**
3036 * Returns a {@link Set} view of the keys in this map, using the
3037 * given common mapped value for any additions (i.e., {@link
3038 * Collection#add} and {@link Collection#addAll}). This is of
3039 * course only appropriate if it is acceptable to use the same
3040 * value for all additions from this view.
3041 *
3042 * @param mappedValue the mapped value to use for any
3043 * additions.
3044 * @return the set view
3045 * @throws NullPointerException if the mappedValue is null
3046 */
3047 public KeySetView<K,V> keySet(V mappedValue) {
3048 if (mappedValue == null)
3049 throw new NullPointerException();
3050 return new KeySetView<K,V>(this, mappedValue);
3051 }
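
    /*
     * Illustrative use of the mapped-value key set view (the field
     * names are assumptions): additions through the view insert the
     * common value into the backing map.
     *
     *   ConcurrentHashMap<String, Long> lastSeen =
     *       new ConcurrentHashMap<String, Long>();
     *   KeySetView<String, Long> names = lastSeen.keySet(Long.valueOf(0L));
     *   names.add("alice");   // adds "alice" -> 0L (Set.add semantics: only if absent)
     */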
3052
3053 /**
3054 * Returns a {@link Collection} view of the values contained in this map.
3055 * The collection is backed by the map, so changes to the map are
3056 * reflected in the collection, and vice-versa.
3057 */
3058 public ValuesView<K,V> values() {
3059 ValuesView<K,V> vs = values;
3060 return (vs != null) ? vs : (values = new ValuesView<K,V>(this));
3061 }
3062
3063 /**
3064 * Returns a {@link Set} view of the mappings contained in this map.
3065 * The set is backed by the map, so changes to the map are
3066 * reflected in the set, and vice-versa. The set supports element
3067 * removal, which removes the corresponding mapping from the map,
3068 * via the {@code Iterator.remove}, {@code Set.remove},
3069 * {@code removeAll}, {@code retainAll}, and {@code clear}
3070 * operations. It does not support the {@code add} or
3071 * {@code addAll} operations.
3072 *
3073 * <p>The view's {@code iterator} is a "weakly consistent" iterator
3074 * that will never throw {@link ConcurrentModificationException},
3075 * and guarantees to traverse elements as they existed upon
3076 * construction of the iterator, and may (but is not guaranteed to)
3077 * reflect any modifications subsequent to construction.
3078 */
3079 public Set<Map.Entry<K,V>> entrySet() {
3080 EntrySetView<K,V> es = entrySet;
3081 return (es != null) ? es : (entrySet = new EntrySetView<K,V>(this));
3082 }
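
    /*
     * Weakly consistent iteration sketch (assumes a map from String to
     * String named 'map'); no ConcurrentModificationException is thrown
     * even if other threads update the map during the loop:
     *
     *   for (Map.Entry<String, String> e : map.entrySet()) {
     *       if (e.getValue().isEmpty())
     *           map.remove(e.getKey(), e.getValue());  // remove only if still unchanged
     *   }
     */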
3083
3084 /**
3085 * Returns an enumeration of the keys in this table.
3086 *
3087 * @return an enumeration of the keys in this table
3088 * @see #keySet()
3089 */
3090 public Enumeration<K> keys() {
3091 return new KeyIterator<K,V>(this);
3092 }
3093
3094 /**
3095 * Returns an enumeration of the values in this table.
3096 *
3097 * @return an enumeration of the values in this table
3098 * @see #values()
3099 */
3100 public Enumeration<V> elements() {
3101 return new ValueIterator<K,V>(this);
3102 }
3103
3104 /**
3105 * Returns a partitionable iterator of the keys in this map.
3106 *
3107 * @return a partitionable iterator of the keys in this map
3108 */
3109 public Spliterator<K> keySpliterator() {
3110 return new KeyIterator<K,V>(this);
3111 }
3112
3113 /**
3114 * Returns a partitionable iterator of the values in this map.
3115 *
3116 * @return a partitionable iterator of the values in this map
3117 */
3118 public Spliterator<V> valueSpliterator() {
3119 return new ValueIterator<K,V>(this);
3120 }
3121
3122 /**
3123 * Returns a partitionable iterator of the entries in this map.
3124 *
3125 * @return a partitionable iterator of the entries in this map
3126 */
3127 public Spliterator<Map.Entry<K,V>> entrySpliterator() {
3128 return new EntryIterator<K,V>(this);
3129 }
3130
3131 /**
3132 * Returns the hash code value for this {@link Map}, i.e.,
3133 * the sum of, for each key-value pair in the map,
3134 * {@code key.hashCode() ^ value.hashCode()}.
3135 *
3136 * @return the hash code value for this map
3137 */
3138 public int hashCode() {
3139 int h = 0;
3140 Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3141 Object v;
3142 while ((v = it.advance()) != null) {
3143 h += it.nextKey.hashCode() ^ v.hashCode();
3144 }
3145 return h;
3146 }
3147
3148 /**
3149 * Returns a string representation of this map. The string
3150 * representation consists of a list of key-value mappings (in no
3151 * particular order) enclosed in braces ("{@code {}}"). Adjacent
3152 * mappings are separated by the characters {@code ", "} (comma
3153 * and space). Each key-value mapping is rendered as the key
3154 * followed by an equals sign ("{@code =}") followed by the
3155 * associated value.
3156 *
3157 * @return a string representation of this map
3158 */
3159 public String toString() {
3160 Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3161 StringBuilder sb = new StringBuilder();
3162 sb.append('{');
3163 Object v;
3164 if ((v = it.advance()) != null) {
3165 for (;;) {
3166 Object k = it.nextKey;
3167 sb.append(k == this ? "(this Map)" : k);
3168 sb.append('=');
3169 sb.append(v == this ? "(this Map)" : v);
3170 if ((v = it.advance()) == null)
3171 break;
3172 sb.append(',').append(' ');
3173 }
3174 }
3175 return sb.append('}').toString();
3176 }
3177
3178 /**
3179 * Compares the specified object with this map for equality.
3180 * Returns {@code true} if the given object is a map with the same
3181 * mappings as this map. This operation may return misleading
3182 * results if either map is concurrently modified during execution
3183 * of this method.
3184 *
3185 * @param o object to be compared for equality with this map
3186 * @return {@code true} if the specified object is equal to this map
3187 */
3188 public boolean equals(Object o) {
3189 if (o != this) {
3190 if (!(o instanceof Map))
3191 return false;
3192 Map<?,?> m = (Map<?,?>) o;
3193 Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3194 Object val;
3195 while ((val = it.advance()) != null) {
3196 Object v = m.get(it.nextKey);
3197 if (v == null || (v != val && !v.equals(val)))
3198 return false;
3199 }
3200 for (Map.Entry<?,?> e : m.entrySet()) {
3201 Object mk, mv, v;
3202 if ((mk = e.getKey()) == null ||
3203 (mv = e.getValue()) == null ||
3204 (v = internalGet(mk)) == null ||
3205 (mv != v && !mv.equals(v)))
3206 return false;
3207 }
3208 }
3209 return true;
3210 }
3211
3212 /* ---------------- Iterators -------------- */
3213
3214 @SuppressWarnings("serial") static final class KeyIterator<K,V> extends Traverser<K,V,Object>
3215 implements Spliterator<K>, Enumeration<K> {
3216 KeyIterator(ConcurrentHashMap<K, V> map) { super(map); }
3217 KeyIterator(ConcurrentHashMap<K, V> map, Traverser<K,V,Object> it) {
3218 super(map, it, -1);
3219 }
3220 public KeyIterator<K,V> split() {
3221 if (nextKey != null)
3222 throw new IllegalStateException();
3223 return new KeyIterator<K,V>(map, this);
3224 }
3225 @SuppressWarnings("unchecked") public final K next() {
3226 if (nextVal == null && advance() == null)
3227 throw new NoSuchElementException();
3228 Object k = nextKey;
3229 nextVal = null;
3230 return (K) k;
3231 }
3232
3233 public final K nextElement() { return next(); }
3234 }
3235
3236 @SuppressWarnings("serial") static final class ValueIterator<K,V> extends Traverser<K,V,Object>
3237 implements Spliterator<V>, Enumeration<V> {
3238 ValueIterator(ConcurrentHashMap<K, V> map) { super(map); }
3239 ValueIterator(ConcurrentHashMap<K, V> map, Traverser<K,V,Object> it) {
3240 super(map, it, -1);
3241 }
3242 public ValueIterator<K,V> split() {
3243 if (nextKey != null)
3244 throw new IllegalStateException();
3245 return new ValueIterator<K,V>(map, this);
3246 }
3247
3248 @SuppressWarnings("unchecked") public final V next() {
3249 Object v;
3250 if ((v = nextVal) == null && (v = advance()) == null)
3251 throw new NoSuchElementException();
3252 nextVal = null;
3253 return (V) v;
3254 }
3255
3256 public final V nextElement() { return next(); }
3257 }
3258
3259 @SuppressWarnings("serial") static final class EntryIterator<K,V> extends Traverser<K,V,Object>
3260 implements Spliterator<Map.Entry<K,V>> {
3261 EntryIterator(ConcurrentHashMap<K, V> map) { super(map); }
3262 EntryIterator(ConcurrentHashMap<K, V> map, Traverser<K,V,Object> it) {
3263 super(map, it, -1);
3264 }
3265 public EntryIterator<K,V> split() {
3266 if (nextKey != null)
3267 throw new IllegalStateException();
3268 return new EntryIterator<K,V>(map, this);
3269 }
3270
3271 @SuppressWarnings("unchecked") public final Map.Entry<K,V> next() {
3272 Object v;
3273 if ((v = nextVal) == null && (v = advance()) == null)
3274 throw new NoSuchElementException();
3275 Object k = nextKey;
3276 nextVal = null;
3277 return new MapEntry<K,V>((K)k, (V)v, map);
3278 }
3279 }
3280
3281 /**
3282 * Exported Entry for iterators
3283 */
3284 static final class MapEntry<K,V> implements Map.Entry<K, V> {
3285 final K key; // non-null
3286 V val; // non-null
3287 final ConcurrentHashMap<K, V> map;
3288 MapEntry(K key, V val, ConcurrentHashMap<K, V> map) {
3289 this.key = key;
3290 this.val = val;
3291 this.map = map;
3292 }
3293 public final K getKey() { return key; }
3294 public final V getValue() { return val; }
3295 public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
3296 public final String toString(){ return key + "=" + val; }
3297
3298 public final boolean equals(Object o) {
3299 Object k, v; Map.Entry<?,?> e;
3300 return ((o instanceof Map.Entry) &&
3301 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3302 (v = e.getValue()) != null &&
3303 (k == key || k.equals(key)) &&
3304 (v == val || v.equals(val)));
3305 }
3306
3307 /**
3308 * Sets our entry's value and writes through to the map. The
3309 * value to return is somewhat arbitrary here. Since we do not
3310 * necessarily track asynchronous changes, the most recent
3311 * "previous" value could be different from what we return (or
3312 * could even have been removed, in which case the put will
3313 * re-establish it). We do not and cannot guarantee more.
3314 */
3315 public final V setValue(V value) {
3316 if (value == null) throw new NullPointerException();
3317 V v = val;
3318 val = value;
3319 map.put(key, value);
3320 return v;
3321 }
3322 }
3323
3324 /**
3325 * Returns exportable snapshot entry for the given key and value
3326 * when write-through can't or shouldn't be used.
3327 */
3328 static <K,V> AbstractMap.SimpleEntry<K,V> entryFor(K k, V v) {
3329 return new AbstractMap.SimpleEntry<K,V>(k, v);
3330 }
3331
3332 /* ---------------- Serialization Support -------------- */
3333
3334 /**
3335 * Stripped-down version of helper class used in previous version,
3336 * declared for the sake of serialization compatibility
3337 */
3338 static class Segment<K,V> implements Serializable {
3339 private static final long serialVersionUID = 2249069246763182397L;
3340 final float loadFactor;
3341 Segment(float lf) { this.loadFactor = lf; }
3342 }
3343
3344 /**
3345 * Saves the state of the {@code ConcurrentHashMap} instance to a
3346 * stream (i.e., serializes it).
3347 * @param s the stream
3348 * @serialData
3349 * the key (Object) and value (Object)
3350 * for each key-value mapping, followed by a null pair.
3351 * The key-value mappings are emitted in no particular order.
3352 */
3353 @SuppressWarnings("unchecked") private void writeObject(java.io.ObjectOutputStream s)
3354 throws java.io.IOException {
3355 if (segments == null) { // for serialization compatibility
3356 segments = (Segment<K,V>[])
3357 new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
3358 for (int i = 0; i < segments.length; ++i)
3359 segments[i] = new Segment<K,V>(LOAD_FACTOR);
3360 }
3361 s.defaultWriteObject();
3362 Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3363 Object v;
3364 while ((v = it.advance()) != null) {
3365 s.writeObject(it.nextKey);
3366 s.writeObject(v);
3367 }
3368 s.writeObject(null);
3369 s.writeObject(null);
3370 segments = null; // throw away
3371 }
3372
3373 /**
3374 * Reconstitutes the instance from a stream (that is, deserializes it).
3375 * @param s the stream
3376 */
3377 @SuppressWarnings("unchecked") private void readObject(java.io.ObjectInputStream s)
3378 throws java.io.IOException, ClassNotFoundException {
3379 s.defaultReadObject();
3380 this.segments = null; // unneeded
3381 // initialize transient final field
3382 UNSAFE.putObjectVolatile(this, counterOffset, new LongAdder());
3383
3384 // Create all nodes, then place in table once size is known
3385 long size = 0L;
3386 Node p = null;
3387 for (;;) {
3388 K k = (K) s.readObject();
3389 V v = (V) s.readObject();
3390 if (k != null && v != null) {
3391 int h = spread(k.hashCode());
3392 p = new Node(h, k, v, p);
3393 ++size;
3394 }
3395 else
3396 break;
3397 }
3398 if (p != null) {
3399 boolean init = false;
3400 int n;
3401 if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
3402 n = MAXIMUM_CAPACITY;
3403 else {
3404 int sz = (int)size;
3405 n = tableSizeFor(sz + (sz >>> 1) + 1);
3406 }
3407 int sc = sizeCtl;
3408 boolean collide = false;
3409 if (n > sc &&
3410 UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
3411 try {
3412 if (table == null) {
3413 init = true;
3414 Node[] tab = new Node[n];
3415 int mask = n - 1;
3416 while (p != null) {
3417 int j = p.hash & mask;
3418 Node next = p.next;
3419 Node q = p.next = tabAt(tab, j);
3420 setTabAt(tab, j, p);
3421 if (!collide && q != null && q.hash == p.hash)
3422 collide = true;
3423 p = next;
3424 }
3425 table = tab;
3426 counter.add(size);
3427 sc = n - (n >>> 2);
3428 }
3429 } finally {
3430 sizeCtl = sc;
3431 }
3432 if (collide) { // rescan and convert to TreeBins
3433 Node[] tab = table;
3434 for (int i = 0; i < tab.length; ++i) {
3435 int c = 0;
3436 for (Node e = tabAt(tab, i); e != null; e = e.next) {
3437 if (++c > TREE_THRESHOLD &&
3438 (e.key instanceof Comparable)) {
3439 replaceWithTreeBin(tab, i, e.key);
3440 break;
3441 }
3442 }
3443 }
3444 }
3445 }
3446 if (!init) { // Can only happen if unsafely published.
3447 while (p != null) {
3448 internalPut(p.key, p.val);
3449 p = p.next;
3450 }
3451 }
3452 }
3453 }
3454
3455
3456 // -------------------------------------------------------
3457
3458     // SAMs: single-abstract-method interfaces used by the bulk operations below
3459 /** Interface describing a void action of one argument */
3460 public interface Action<A> { void apply(A a); }
3461 /** Interface describing a void action of two arguments */
3462 public interface BiAction<A,B> { void apply(A a, B b); }
3463 /** Interface describing a function of one argument */
3464 public interface Fun<A,T> { T apply(A a); }
3465 /** Interface describing a function of two arguments */
3466 public interface BiFun<A,B,T> { T apply(A a, B b); }
3467 /** Interface describing a function of no arguments */
3468 public interface Generator<T> { T apply(); }
3469 /** Interface describing a function mapping its argument to a double */
3470 public interface ObjectToDouble<A> { double apply(A a); }
3471 /** Interface describing a function mapping its argument to a long */
3472 public interface ObjectToLong<A> { long apply(A a); }
3473 /** Interface describing a function mapping its argument to an int */
3474     public interface ObjectToInt<A> { int apply(A a); }
3475 /** Interface describing a function mapping two arguments to a double */
3476 public interface ObjectByObjectToDouble<A,B> { double apply(A a, B b); }
3477 /** Interface describing a function mapping two arguments to a long */
3478 public interface ObjectByObjectToLong<A,B> { long apply(A a, B b); }
3479 /** Interface describing a function mapping two arguments to an int */
3480     public interface ObjectByObjectToInt<A,B> { int apply(A a, B b); }
3481 /** Interface describing a function mapping a double to a double */
3482 public interface DoubleToDouble { double apply(double a); }
3483 /** Interface describing a function mapping a long to a long */
3484 public interface LongToLong { long apply(long a); }
3485 /** Interface describing a function mapping an int to an int */
3486 public interface IntToInt { int apply(int a); }
3487 /** Interface describing a function mapping two doubles to a double */
3488 public interface DoubleByDoubleToDouble { double apply(double a, double b); }
3489 /** Interface describing a function mapping two longs to a long */
3490 public interface LongByLongToLong { long apply(long a, long b); }
3491 /** Interface describing a function mapping two ints to an int */
3492 public interface IntByIntToInt { int apply(int a, int b); }
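    // Hedged note (added commentary, not part of the original source): each of
    // these nested interfaces declares a single abstract method, so on a
    // pre-lambda JVM callers typically supply them as anonymous classes. The
    // String/Long element types below are assumptions used only for
    // illustration.
    //
    //   ConcurrentHashMap.BiFun<String, Long, Long> toValue =
    //       new ConcurrentHashMap.BiFun<String, Long, Long>() {
    //           public Long apply(String key, Long value) { return value; }
    //       };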
3493
3494
3495 // -------------------------------------------------------
3496
3497 /**
3498 * Performs the given action for each (key, value).
3499 *
3500 * @param action the action
3501 */
3502 public void forEach(BiAction<K,V> action) {
3503 ForkJoinTasks.forEach
3504 (this, action).invoke();
3505 }
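    // Hedged usage sketch (not part of the original source; "counts" is an
    // assumed ConcurrentHashMap<String, Long>): invoking the bulk forEach
    // above with an anonymous BiAction.
    //
    //   counts.forEach(new ConcurrentHashMap.BiAction<String, Long>() {
    //       public void apply(String key, Long value) {
    //           System.out.println(key + " -> " + value);
    //       }
    //   });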
3506
3507 /**
3508 * Performs the given action for each non-null transformation
3509 * of each (key, value).
3510 *
3511 * @param transformer a function returning the transformation
3512      * for an element, or null if there is no transformation (in
3513 * which case the action is not applied).
3514 * @param action the action
3515 */
3516 public <U> void forEach(BiFun<? super K, ? super V, ? extends U> transformer,
3517 Action<U> action) {
3518 ForkJoinTasks.forEach
3519 (this, transformer, action).invoke();
3520 }
3521
3522 /**
3523 * Returns a non-null result from applying the given search
3524 * function on each (key, value), or null if none. Upon
3525 * success, further element processing is suppressed and the
3526 * results of any other parallel invocations of the search
3527 * function are ignored.
3528 *
3529 * @param searchFunction a function returning a non-null
3530 * result on success, else null
3531 * @return a non-null result from applying the given search
3532 * function on each (key, value), or null if none
3533 */
3534 public <U> U search(BiFun<? super K, ? super V, ? extends U> searchFunction) {
3535 return ForkJoinTasks.search
3536 (this, searchFunction).invoke();
3537 }
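    // Hedged usage sketch (not part of the original source; "counts" is an
    // assumed ConcurrentHashMap<String, Long>): the search function returns a
    // non-null result to stop traversal, here the first key encountered whose
    // value exceeds a threshold.
    //
    //   String hot = counts.search(new ConcurrentHashMap.BiFun<String, Long, String>() {
    //       public String apply(String key, Long value) {
    //           return value.longValue() > 1000L ? key : null;
    //       }
    //   });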
3538
3539 /**
3540 * Returns the result of accumulating the given transformation
3541 * of all (key, value) pairs using the given reducer to
3542 * combine values, or null if none.
3543 *
3544 * @param transformer a function returning the transformation
3545      * for an element, or null if there is no transformation (in
3546 * which case it is not combined).
3547 * @param reducer a commutative associative combining function
3548 * @return the result of accumulating the given transformation
3549 * of all (key, value) pairs
3550 */
3551 public <U> U reduce(BiFun<? super K, ? super V, ? extends U> transformer,
3552 BiFun<? super U, ? super U, ? extends U> reducer) {
3553 return ForkJoinTasks.reduce
3554 (this, transformer, reducer).invoke();
3555 }
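    // Hedged usage sketch (not part of the original source; "counts" is an
    // assumed ConcurrentHashMap<String, Long>): mapping each entry to its
    // value and summing with a commutative, associative reducer; the result is
    // null if the map is empty.
    //
    //   Long total = counts.reduce(
    //       new ConcurrentHashMap.BiFun<String, Long, Long>() {
    //           public Long apply(String key, Long value) { return value; }
    //       },
    //       new ConcurrentHashMap.BiFun<Long, Long, Long>() {
    //           public Long apply(Long a, Long b) { return a + b; }
    //       });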
3556
3557 /**
3558 * Returns the result of accumulating the given transformation
3559 * of all (key, value) pairs using the given reducer to
3560 * combine values, and the given basis as an identity value.
3561 *
3562 * @param transformer a function returning the transformation
3563 * for an element
3564 * @param basis the identity (initial default value) for the reduction
3565 * @param reducer a commutative associative combining function
3566 * @return the result of accumulating the given transformation
3567 * of all (key, value) pairs
3568 */
3569 public double reduceToDouble(ObjectByObjectToDouble<? super K, ? super V> transformer,
3570 double basis,
3571 DoubleByDoubleToDouble reducer) {
3572 return ForkJoinTasks.reduceToDouble
3573 (this, transformer, basis, reducer).invoke();
3574 }
3575
3576 /**
3577 * Returns the result of accumulating the given transformation
3578 * of all (key, value) pairs using the given reducer to
3579 * combine values, and the given basis as an identity value.
3580 *
3581 * @param transformer a function returning the transformation
3582 * for an element
3583 * @param basis the identity (initial default value) for the reduction
3584 * @param reducer a commutative associative combining function
3585 * @return the result of accumulating the given transformation
3586 * of all (key, value) pairs
3587 */
3588 public long reduceToLong(ObjectByObjectToLong<? super K, ? super V> transformer,
3589 long basis,
3590 LongByLongToLong reducer) {
3591 return ForkJoinTasks.reduceToLong
3592 (this, transformer, basis, reducer).invoke();
3593 }
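    // Hedged usage sketch (not part of the original source; "counts" is an
    // assumed ConcurrentHashMap<String, Long>): a primitive reduction that
    // avoids boxing and uses the given basis (0L) as the identity value.
    //
    //   long sum = counts.reduceToLong(
    //       new ConcurrentHashMap.ObjectByObjectToLong<String, Long>() {
    //           public long apply(String key, Long value) { return value.longValue(); }
    //       },
    //       0L,
    //       new ConcurrentHashMap.LongByLongToLong() {
    //           public long apply(long a, long b) { return a + b; }
    //       });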
3594
3595 /**
3596 * Returns the result of accumulating the given transformation
3597 * of all (key, value) pairs using the given reducer to
3598 * combine values, and the given basis as an identity value.
3599 *
3600 * @param transformer a function returning the transformation
3601 * for an element
3602 * @param basis the identity (initial default value) for the reduction
3603 * @param reducer a commutative associative combining function
3604 * @return the result of accumulating the given transformation
3605 * of all (key, value) pairs
3606 */
3607 public int reduceToInt(ObjectByObjectToInt<? super K, ? super V> transformer,
3608 int basis,
3609 IntByIntToInt reducer) {
3610 return ForkJoinTasks.reduceToInt
3611 (this, transformer, basis, reducer).invoke();
3612 }
3613
3614 /**
3615 * Performs the given action for each key.
3616 *
3617 * @param action the action
3618 */
3619 public void forEachKey(Action<K> action) {
3620 ForkJoinTasks.forEachKey
3621 (this, action).invoke();
3622 }
3623
3624 /**
3625 * Performs the given action for each non-null transformation
3626 * of each key.
3627 *
3628 * @param transformer a function returning the transformation
3629      * for an element, or null if there is no transformation (in
3630 * which case the action is not applied).
3631 * @param action the action
3632 */
3633 public <U> void forEachKey(Fun<? super K, ? extends U> transformer,
3634 Action<U> action) {
3635 ForkJoinTasks.forEachKey
3636 (this, transformer, action).invoke();
3637 }
3638
3639 /**
3640 * Returns a non-null result from applying the given search
3641 * function on each key, or null if none. Upon success,
3642 * further element processing is suppressed and the results of
3643 * any other parallel invocations of the search function are
3644 * ignored.
3645 *
3646 * @param searchFunction a function returning a non-null
3647 * result on success, else null
3648 * @return a non-null result from applying the given search
3649 * function on each key, or null if none
3650 */
3651 public <U> U searchKeys(Fun<? super K, ? extends U> searchFunction) {
3652 return ForkJoinTasks.searchKeys
3653 (this, searchFunction).invoke();
3654 }
3655
3656 /**
3657 * Returns the result of accumulating all keys using the given
3658 * reducer to combine values, or null if none.
3659 *
3660 * @param reducer a commutative associative combining function
3661 * @return the result of accumulating all keys using the given
3662 * reducer to combine values, or null if none
3663 */
3664 public K reduceKeys(BiFun<? super K, ? super K, ? extends K> reducer) {
3665 return ForkJoinTasks.reduceKeys
3666 (this, reducer).invoke();
3667 }
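    // Hedged usage sketch (not part of the original source; "counts" is an
    // assumed ConcurrentHashMap<String, Long>): reducing keys to the
    // lexicographically smallest one; any commutative, associative choice
    // function works equally well.
    //
    //   String first = counts.reduceKeys(new ConcurrentHashMap.BiFun<String, String, String>() {
    //       public String apply(String a, String b) { return a.compareTo(b) <= 0 ? a : b; }
    //   });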
3668
3669 /**
3670 * Returns the result of accumulating the given transformation
3671 * of all keys using the given reducer to combine values, or
3672 * null if none.
3673 *
3674 * @param transformer a function returning the transformation
3675      * for an element, or null if there is no transformation (in
3676 * which case it is not combined).
3677 * @param reducer a commutative associative combining function
3678 * @return the result of accumulating the given transformation
3679 * of all keys
3680 */
3681 public <U> U reduceKeys(Fun<? super K, ? extends U> transformer,
3682 BiFun<? super U, ? super U, ? extends U> reducer) {
3683 return ForkJoinTasks.reduceKeys
3684 (this, transformer, reducer).invoke();
3685 }
3686
3687 /**
3688 * Returns the result of accumulating the given transformation
3689 * of all keys using the given reducer to combine values, and
3690 * the given basis as an identity value.
3691 *
3692 * @param transformer a function returning the transformation
3693 * for an element
3694 * @param basis the identity (initial default value) for the reduction
3695 * @param reducer a commutative associative combining function
3696 * @return the result of accumulating the given transformation
3697 * of all keys
3698 */
3699 public double reduceKeysToDouble(ObjectToDouble<? super K> transformer,
3700 double basis,
3701 DoubleByDoubleToDouble reducer) {
3702 return ForkJoinTasks.reduceKeysToDouble
3703 (this, transformer, basis, reducer).invoke();
3704 }
3705
3706 /**
3707 * Returns the result of accumulating the given transformation
3708 * of all keys using the given reducer to combine values, and
3709 * the given basis as an identity value.
3710 *
3711 * @param transformer a function returning the transformation
3712 * for an element
3713 * @param basis the identity (initial default value) for the reduction
3714 * @param reducer a commutative associative combining function
3715 * @return the result of accumulating the given transformation
3716 * of all keys
3717 */
3718 public long reduceKeysToLong(ObjectToLong<? super K> transformer,
3719 long basis,
3720 LongByLongToLong reducer) {
3721 return ForkJoinTasks.reduceKeysToLong
3722 (this, transformer, basis, reducer).invoke();
3723 }
3724
3725 /**
3726 * Returns the result of accumulating the given transformation
3727 * of all keys using the given reducer to combine values, and
3728 * the given basis as an identity value.
3729 *
3730 * @param transformer a function returning the transformation
3731 * for an element
3732 * @param basis the identity (initial default value) for the reduction
3733 * @param reducer a commutative associative combining function
3734 * @return the result of accumulating the given transformation
3735 * of all keys
3736 */
3737 public int reduceKeysToInt(ObjectToInt<? super K> transformer,
3738 int basis,
3739 IntByIntToInt reducer) {
3740 return ForkJoinTasks.reduceKeysToInt
3741 (this, transformer, basis, reducer).invoke();
3742 }
3743
3744 /**
3745 * Performs the given action for each value.
3746 *
3747 * @param action the action
3748 */
3749 public void forEachValue(Action<V> action) {
3750 ForkJoinTasks.forEachValue
3751 (this, action).invoke();
3752 }
3753
3754 /**
3755 * Performs the given action for each non-null transformation
3756 * of each value.
3757 *
3758 * @param transformer a function returning the transformation
3759      * for an element, or null if there is no transformation (in
3760      * which case the action is not applied).
     * @param action the action
3761      */
3762 public <U> void forEachValue(Fun<? super V, ? extends U> transformer,
3763 Action<U> action) {
3764 ForkJoinTasks.forEachValue
3765 (this, transformer, action).invoke();
3766 }
3767
3768 /**
3769 * Returns a non-null result from applying the given search
3770 * function on each value, or null if none. Upon success,
3771 * further element processing is suppressed and the results of
3772 * any other parallel invocations of the search function are
3773 * ignored.
3774 *
3775 * @param searchFunction a function returning a non-null
3776 * result on success, else null
3777 * @return a non-null result from applying the given search
3778 * function on each value, or null if none
3779 *
3780 */
3781 public <U> U searchValues(Fun<? super V, ? extends U> searchFunction) {
3782 return ForkJoinTasks.searchValues
3783 (this, searchFunction).invoke();
3784 }
3785
3786 /**
3787 * Returns the result of accumulating all values using the
3788 * given reducer to combine values, or null if none.
3789 *
3790 * @param reducer a commutative associative combining function
3791 * @return the result of accumulating all values
3792 */
3793 public V reduceValues(BiFun<? super V, ? super V, ? extends V> reducer) {
3794 return ForkJoinTasks.reduceValues
3795 (this, reducer).invoke();
3796 }
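    // Hedged usage sketch (not part of the original source; "counts" is an
    // assumed ConcurrentHashMap<String, Long>): reducing values to their
    // maximum, or null when the map is empty.
    //
    //   Long max = counts.reduceValues(new ConcurrentHashMap.BiFun<Long, Long, Long>() {
    //       public Long apply(Long a, Long b) { return a.longValue() >= b.longValue() ? a : b; }
    //   });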
3797
3798 /**
3799 * Returns the result of accumulating the given transformation
3800 * of all values using the given reducer to combine values, or
3801 * null if none.
3802 *
3803 * @param transformer a function returning the transformation
3804      * for an element, or null if there is no transformation (in
3805 * which case it is not combined).
3806 * @param reducer a commutative associative combining function
3807 * @return the result of accumulating the given transformation
3808 * of all values
3809 */
3810 public <U> U reduceValues(Fun<? super V, ? extends U> transformer,
3811 BiFun<? super U, ? super U, ? extends U> reducer) {
3812 return ForkJoinTasks.reduceValues
3813 (this, transformer, reducer).invoke();
3814 }
3815
3816 /**
3817 * Returns the result of accumulating the given transformation
3818 * of all values using the given reducer to combine values,
3819 * and the given basis as an identity value.
3820 *
3821 * @param transformer a function returning the transformation
3822 * for an element
3823 * @param basis the identity (initial default value) for the reduction
3824 * @param reducer a commutative associative combining function
3825 * @return the result of accumulating the given transformation
3826 * of all values
3827 */
3828 public double reduceValuesToDouble(ObjectToDouble<? super V> transformer,
3829 double basis,
3830 DoubleByDoubleToDouble reducer) {
3831 return ForkJoinTasks.reduceValuesToDouble
3832 (this, transformer, basis, reducer).invoke();
3833 }
3834
3835 /**
3836 * Returns the result of accumulating the given transformation
3837 * of all values using the given reducer to combine values,
3838 * and the given basis as an identity value.
3839 *
3840 * @param transformer a function returning the transformation
3841 * for an element
3842 * @param basis the identity (initial default value) for the reduction
3843 * @param reducer a commutative associative combining function
3844 * @return the result of accumulating the given transformation
3845 * of all values
3846 */
3847 public long reduceValuesToLong(ObjectToLong<? super V> transformer,
3848 long basis,
3849 LongByLongToLong reducer) {
3850 return ForkJoinTasks.reduceValuesToLong
3851 (this, transformer, basis, reducer).invoke();
3852 }
3853
3854 /**
3855 * Returns the result of accumulating the given transformation
3856 * of all values using the given reducer to combine values,
3857 * and the given basis as an identity value.
3858 *
3859 * @param transformer a function returning the transformation
3860 * for an element
3861 * @param basis the identity (initial default value) for the reduction
3862 * @param reducer a commutative associative combining function
3863 * @return the result of accumulating the given transformation
3864 * of all values
3865 */
3866 public int reduceValuesToInt(ObjectToInt<? super V> transformer,
3867 int basis,
3868 IntByIntToInt reducer) {
3869 return ForkJoinTasks.reduceValuesToInt
3870 (this, transformer, basis, reducer).invoke();
3871 }
3872
3873 /**
3874 * Performs the given action for each entry.
3875 *
3876 * @param action the action
3877 */
3878 public void forEachEntry(Action<Map.Entry<K,V>> action) {
3879 ForkJoinTasks.forEachEntry
3880 (this, action).invoke();
3881 }
3882
3883 /**
3884 * Performs the given action for each non-null transformation
3885 * of each entry.
3886 *
3887 * @param transformer a function returning the transformation
3888      * for an element, or null if there is no transformation (in
3889 * which case the action is not applied).
3890 * @param action the action
3891 */
3892 public <U> void forEachEntry(Fun<Map.Entry<K,V>, ? extends U> transformer,
3893 Action<U> action) {
3894 ForkJoinTasks.forEachEntry
3895 (this, transformer, action).invoke();
3896 }
3897
3898 /**
3899 * Returns a non-null result from applying the given search
3900 * function on each entry, or null if none. Upon success,
3901 * further element processing is suppressed and the results of
3902 * any other parallel invocations of the search function are
3903 * ignored.
3904 *
3905 * @param searchFunction a function returning a non-null
3906 * result on success, else null
3907 * @return a non-null result from applying the given search
3908 * function on each entry, or null if none
3909 */
3910 public <U> U searchEntries(Fun<Map.Entry<K,V>, ? extends U> searchFunction) {
3911 return ForkJoinTasks.searchEntries
3912 (this, searchFunction).invoke();
3913 }
3914
3915 /**
3916 * Returns the result of accumulating all entries using the
3917 * given reducer to combine values, or null if none.
3918 *
3919 * @param reducer a commutative associative combining function
3920 * @return the result of accumulating all entries
3921 */
3922 public Map.Entry<K,V> reduceEntries(BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
3923 return ForkJoinTasks.reduceEntries
3924 (this, reducer).invoke();
3925 }
3926
3927 /**
3928 * Returns the result of accumulating the given transformation
3929 * of all entries using the given reducer to combine values,
3930 * or null if none.
3931 *
3932 * @param transformer a function returning the transformation
3933      * for an element, or null if there is no transformation (in
3934 * which case it is not combined).
3935 * @param reducer a commutative associative combining function
3936 * @return the result of accumulating the given transformation
3937 * of all entries
3938 */
3939 public <U> U reduceEntries(Fun<Map.Entry<K,V>, ? extends U> transformer,
3940 BiFun<? super U, ? super U, ? extends U> reducer) {
3941 return ForkJoinTasks.reduceEntries
3942 (this, transformer, reducer).invoke();
3943 }
3944
3945 /**
3946 * Returns the result of accumulating the given transformation
3947 * of all entries using the given reducer to combine values,
3948 * and the given basis as an identity value.
3949 *
3950 * @param transformer a function returning the transformation
3951 * for an element
3952 * @param basis the identity (initial default value) for the reduction
3953 * @param reducer a commutative associative combining function
3954 * @return the result of accumulating the given transformation
3955 * of all entries
3956 */
3957 public double reduceEntriesToDouble(ObjectToDouble<Map.Entry<K,V>> transformer,
3958 double basis,
3959 DoubleByDoubleToDouble reducer) {
3960 return ForkJoinTasks.reduceEntriesToDouble
3961 (this, transformer, basis, reducer).invoke();
3962 }
3963
3964 /**
3965 * Returns the result of accumulating the given transformation
3966 * of all entries using the given reducer to combine values,
3967 * and the given basis as an identity value.
3968 *
3969 * @param transformer a function returning the transformation
3970 * for an element
3971 * @param basis the identity (initial default value) for the reduction
3972 * @param reducer a commutative associative combining function
3973 * @return the result of accumulating the given transformation
3974 * of all entries
3975 */
3976 public long reduceEntriesToLong(ObjectToLong<Map.Entry<K,V>> transformer,
3977 long basis,
3978 LongByLongToLong reducer) {
3979 return ForkJoinTasks.reduceEntriesToLong
3980 (this, transformer, basis, reducer).invoke();
3981 }
3982
3983 /**
3984 * Returns the result of accumulating the given transformation
3985 * of all entries using the given reducer to combine values,
3986 * and the given basis as an identity value.
3987 *
3988 * @param transformer a function returning the transformation
3989 * for an element
3990 * @param basis the identity (initial default value) for the reduction
3991 * @param reducer a commutative associative combining function
3992 * @return the result of accumulating the given transformation
3993 * of all entries
3994 */
3995 public int reduceEntriesToInt(ObjectToInt<Map.Entry<K,V>> transformer,
3996 int basis,
3997 IntByIntToInt reducer) {
3998 return ForkJoinTasks.reduceEntriesToInt
3999 (this, transformer, basis, reducer).invoke();
4000 }
4001
4002 /* ----------------Views -------------- */
4003
4004 /**
4005 * Base class for views.
4006 */
4007     abstract static class CHMView<K, V> {
4008 final ConcurrentHashMap<K, V> map;
4009 CHMView(ConcurrentHashMap<K, V> map) { this.map = map; }
4010
4011 /**
4012 * Returns the map backing this view.
4013 *
4014 * @return the map backing this view
4015 */
4016 public ConcurrentHashMap<K,V> getMap() { return map; }
4017
4018 public final int size() { return map.size(); }
4019 public final boolean isEmpty() { return map.isEmpty(); }
4020 public final void clear() { map.clear(); }
4021
4022 // implementations below rely on concrete classes supplying these
4023         public abstract Iterator<?> iterator();
4024         public abstract boolean contains(Object o);
4025         public abstract boolean remove(Object o);
4026
4027 private static final String oomeMsg = "Required array size too large";
4028
4029 public final Object[] toArray() {
4030 long sz = map.mappingCount();
4031 if (sz > (long)(MAX_ARRAY_SIZE))
4032 throw new OutOfMemoryError(oomeMsg);
4033 int n = (int)sz;
4034 Object[] r = new Object[n];
4035 int i = 0;
4036 Iterator<?> it = iterator();
4037 while (it.hasNext()) {
4038 if (i == n) {
4039 if (n >= MAX_ARRAY_SIZE)
4040 throw new OutOfMemoryError(oomeMsg);
4041 if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
4042 n = MAX_ARRAY_SIZE;
4043 else
4044 n += (n >>> 1) + 1;
4045 r = Arrays.copyOf(r, n);
4046 }
4047 r[i++] = it.next();
4048 }
4049 return (i == n) ? r : Arrays.copyOf(r, i);
4050 }
4051
4052 @SuppressWarnings("unchecked") public final <T> T[] toArray(T[] a) {
4053 long sz = map.mappingCount();
4054 if (sz > (long)(MAX_ARRAY_SIZE))
4055 throw new OutOfMemoryError(oomeMsg);
4056 int m = (int)sz;
4057 T[] r = (a.length >= m) ? a :
4058 (T[])java.lang.reflect.Array
4059 .newInstance(a.getClass().getComponentType(), m);
4060 int n = r.length;
4061 int i = 0;
4062 Iterator<?> it = iterator();
4063 while (it.hasNext()) {
4064 if (i == n) {
4065 if (n >= MAX_ARRAY_SIZE)
4066 throw new OutOfMemoryError(oomeMsg);
4067 if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
4068 n = MAX_ARRAY_SIZE;
4069 else
4070 n += (n >>> 1) + 1;
4071 r = Arrays.copyOf(r, n);
4072 }
4073 r[i++] = (T)it.next();
4074 }
4075 if (a == r && i < n) {
4076 r[i] = null; // null-terminate
4077 return r;
4078 }
4079 return (i == n) ? r : Arrays.copyOf(r, i);
4080 }
4081
4082 public final int hashCode() {
4083 int h = 0;
4084 for (Iterator<?> it = iterator(); it.hasNext();)
4085 h += it.next().hashCode();
4086 return h;
4087 }
4088
4089 public final String toString() {
4090 StringBuilder sb = new StringBuilder();
4091 sb.append('[');
4092 Iterator<?> it = iterator();
4093 if (it.hasNext()) {
4094 for (;;) {
4095 Object e = it.next();
4096 sb.append(e == this ? "(this Collection)" : e);
4097 if (!it.hasNext())
4098 break;
4099 sb.append(',').append(' ');
4100 }
4101 }
4102 return sb.append(']').toString();
4103 }
4104
4105 public final boolean containsAll(Collection<?> c) {
4106 if (c != this) {
4107 for (Iterator<?> it = c.iterator(); it.hasNext();) {
4108 Object e = it.next();
4109 if (e == null || !contains(e))
4110 return false;
4111 }
4112 }
4113 return true;
4114 }
4115
4116 public final boolean removeAll(Collection<?> c) {
4117 boolean modified = false;
4118 for (Iterator<?> it = iterator(); it.hasNext();) {
4119 if (c.contains(it.next())) {
4120 it.remove();
4121 modified = true;
4122 }
4123 }
4124 return modified;
4125 }
4126
4127 public final boolean retainAll(Collection<?> c) {
4128 boolean modified = false;
4129 for (Iterator<?> it = iterator(); it.hasNext();) {
4130 if (!c.contains(it.next())) {
4131 it.remove();
4132 modified = true;
4133 }
4134 }
4135 return modified;
4136 }
4137
4138 }
4139
4140 /**
4141 * A view of a ConcurrentHashMap as a {@link Set} of keys, in
4142 * which additions may optionally be enabled by mapping to a
4143 * common value. This class cannot be directly instantiated. See
4144 * {@link #keySet}, {@link #keySet(Object)}, {@link #newKeySet()},
4145 * {@link #newKeySet(int)}.
4146 */
4147 public static class KeySetView<K,V> extends CHMView<K,V> implements Set<K>, java.io.Serializable {
4148 private static final long serialVersionUID = 7249069246763182397L;
4149 private final V value;
4150 KeySetView(ConcurrentHashMap<K, V> map, V value) { // non-public
4151 super(map);
4152 this.value = value;
4153 }
4154
4155 /**
4156 * Returns the default mapped value for additions,
4157 * or {@code null} if additions are not supported.
4158 *
4159 * @return the default mapped value for additions, or {@code null}
4160 * if not supported.
4161 */
4162 public V getMappedValue() { return value; }
4163
4164 // implement Set API
4165
4166 public boolean contains(Object o) { return map.containsKey(o); }
4167 public boolean remove(Object o) { return map.remove(o) != null; }
4168
4169 /**
4170 * Returns a "weakly consistent" iterator that will never
4171 * throw {@link ConcurrentModificationException}, and
4172 * guarantees to traverse elements as they existed upon
4173 * construction of the iterator, and may (but is not
4174 * guaranteed to) reflect any modifications subsequent to
4175 * construction.
4176 *
4177 * @return an iterator over the keys of this map
4178 */
4179 public Iterator<K> iterator() { return new KeyIterator<K,V>(map); }
4180 public boolean add(K e) {
4181 V v;
4182 if ((v = value) == null)
4183 throw new UnsupportedOperationException();
4184 if (e == null)
4185 throw new NullPointerException();
4186 return map.internalPutIfAbsent(e, v) == null;
4187 }
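        // Hedged usage sketch (not part of the original source): a key set
        // view created with a default mapped value supports add/addAll; the
        // element type String and default value Boolean.TRUE are assumptions
        // made only for this example.
        //
        //   ConcurrentHashMap<String, Boolean> visited = new ConcurrentHashMap<String, Boolean>();
        //   Set<String> seen = visited.keySet(Boolean.TRUE);   // additions map to TRUE
        //   seen.add("home");   // roughly equivalent to visited.putIfAbsent("home", Boolean.TRUE)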
4188 public boolean addAll(Collection<? extends K> c) {
4189 boolean added = false;
4190 V v;
4191 if ((v = value) == null)
4192 throw new UnsupportedOperationException();
4193 for (K e : c) {
4194 if (e == null)
4195 throw new NullPointerException();
4196 if (map.internalPutIfAbsent(e, v) == null)
4197 added = true;
4198 }
4199 return added;
4200 }
4201 public boolean equals(Object o) {
4202 Set<?> c;
4203 return ((o instanceof Set) &&
4204 ((c = (Set<?>)o) == this ||
4205 (containsAll(c) && c.containsAll(this))));
4206 }
4207
4208 /**
4209 * Performs the given action for each key.
4210 *
4211 * @param action the action
4212 */
4213 public void forEach(Action<K> action) {
4214 ForkJoinTasks.forEachKey
4215 (map, action).invoke();
4216 }
4217
4218 /**
4219 * Performs the given action for each non-null transformation
4220 * of each key.
4221 *
4222 * @param transformer a function returning the transformation
4223          * for an element, or null if there is no transformation (in
4224 * which case the action is not applied).
4225 * @param action the action
4226 */
4227 public <U> void forEach(Fun<? super K, ? extends U> transformer,
4228 Action<U> action) {
4229 ForkJoinTasks.forEachKey
4230 (map, transformer, action).invoke();
4231 }
4232
4233 /**
4234 * Returns a non-null result from applying the given search
4235 * function on each key, or null if none. Upon success,
4236 * further element processing is suppressed and the results of
4237 * any other parallel invocations of the search function are
4238 * ignored.
4239 *
4240 * @param searchFunction a function returning a non-null
4241 * result on success, else null
4242 * @return a non-null result from applying the given search
4243 * function on each key, or null if none
4244 */
4245 public <U> U search(Fun<? super K, ? extends U> searchFunction) {
4246 return ForkJoinTasks.searchKeys
4247 (map, searchFunction).invoke();
4248 }
4249
4250 /**
4251 * Returns the result of accumulating all keys using the given
4252 * reducer to combine values, or null if none.
4253 *
4254 * @param reducer a commutative associative combining function
4255 * @return the result of accumulating all keys using the given
4256 * reducer to combine values, or null if none
4257 */
4258 public K reduce(BiFun<? super K, ? super K, ? extends K> reducer) {
4259 return ForkJoinTasks.reduceKeys
4260 (map, reducer).invoke();
4261 }
4262
4263 /**
4264 * Returns the result of accumulating the given transformation
4265 * of all keys using the given reducer to combine values, and
4266 * the given basis as an identity value.
4267 *
4268 * @param transformer a function returning the transformation
4269 * for an element
4270 * @param basis the identity (initial default value) for the reduction
4271 * @param reducer a commutative associative combining function
4272 * @return the result of accumulating the given transformation
4273 * of all keys
4274 */
4275 public double reduceToDouble(ObjectToDouble<? super K> transformer,
4276 double basis,
4277 DoubleByDoubleToDouble reducer) {
4278 return ForkJoinTasks.reduceKeysToDouble
4279 (map, transformer, basis, reducer).invoke();
4280 }
4281
4282
4283 /**
4284 * Returns the result of accumulating the given transformation
4285 * of all keys using the given reducer to combine values, and
4286 * the given basis as an identity value.
4287 *
4288 * @param transformer a function returning the transformation
4289 * for an element
4290 * @param basis the identity (initial default value) for the reduction
4291 * @param reducer a commutative associative combining function
4292 * @return the result of accumulating the given transformation
4293 * of all keys
4294 */
4295 public long reduceToLong(ObjectToLong<? super K> transformer,
4296 long basis,
4297 LongByLongToLong reducer) {
4298 return ForkJoinTasks.reduceKeysToLong
4299 (map, transformer, basis, reducer).invoke();
4300 }
4301
4302 /**
4303 * Returns the result of accumulating the given transformation
4304 * of all keys using the given reducer to combine values, and
4305 * the given basis as an identity value.
4306 *
4307 * @param transformer a function returning the transformation
4308 * for an element
4309 * @param basis the identity (initial default value) for the reduction
4310 * @param reducer a commutative associative combining function
4311 * @return the result of accumulating the given transformation
4312 * of all keys
4313 */
4314 public int reduceToInt(ObjectToInt<? super K> transformer,
4315 int basis,
4316 IntByIntToInt reducer) {
4317 return ForkJoinTasks.reduceKeysToInt
4318 (map, transformer, basis, reducer).invoke();
4319 }
4320
4321 }
4322
4323 /**
4324 * A view of a ConcurrentHashMap as a {@link Collection} of
4325 * values, in which additions are disabled. This class cannot be
4326      * directly instantiated. See {@link #values}.
4327 *
4328 * <p>The view's {@code iterator} is a "weakly consistent" iterator
4329 * that will never throw {@link ConcurrentModificationException},
4330 * and guarantees to traverse elements as they existed upon
4331 * construction of the iterator, and may (but is not guaranteed to)
4332 * reflect any modifications subsequent to construction.
4333 */
4334 public static final class ValuesView<K,V> extends CHMView<K,V>
4335 implements Collection<V> {
4336 ValuesView(ConcurrentHashMap<K, V> map) { super(map); }
4337 public final boolean contains(Object o) { return map.containsValue(o); }
4338 public final boolean remove(Object o) {
4339 if (o != null) {
4340 Iterator<V> it = new ValueIterator<K,V>(map);
4341 while (it.hasNext()) {
4342 if (o.equals(it.next())) {
4343 it.remove();
4344 return true;
4345 }
4346 }
4347 }
4348 return false;
4349 }
4350
4351 /**
4352 * Returns a "weakly consistent" iterator that will never
4353 * throw {@link ConcurrentModificationException}, and
4354 * guarantees to traverse elements as they existed upon
4355 * construction of the iterator, and may (but is not
4356 * guaranteed to) reflect any modifications subsequent to
4357 * construction.
4358 *
4359 * @return an iterator over the values of this map
4360 */
4361 public final Iterator<V> iterator() {
4362 return new ValueIterator<K,V>(map);
4363 }
4364 public final boolean add(V e) {
4365 throw new UnsupportedOperationException();
4366 }
4367 public final boolean addAll(Collection<? extends V> c) {
4368 throw new UnsupportedOperationException();
4369 }
4370
4371 /**
4372 * Performs the given action for each value.
4373 *
4374 * @param action the action
4375 */
4376 public void forEach(Action<V> action) {
4377 ForkJoinTasks.forEachValue
4378 (map, action).invoke();
4379 }
4380
4381 /**
4382 * Performs the given action for each non-null transformation
4383 * of each value.
4384 *
4385 * @param transformer a function returning the transformation
4386          * for an element, or null if there is no transformation (in
4387          * which case the action is not applied).
         * @param action the action
4388          */
4389 public <U> void forEach(Fun<? super V, ? extends U> transformer,
4390 Action<U> action) {
4391 ForkJoinTasks.forEachValue
4392 (map, transformer, action).invoke();
4393 }
4394
4395 /**
4396 * Returns a non-null result from applying the given search
4397 * function on each value, or null if none. Upon success,
4398 * further element processing is suppressed and the results of
4399 * any other parallel invocations of the search function are
4400 * ignored.
4401 *
4402 * @param searchFunction a function returning a non-null
4403 * result on success, else null
4404 * @return a non-null result from applying the given search
4405 * function on each value, or null if none
4406 *
4407 */
4408 public <U> U search(Fun<? super V, ? extends U> searchFunction) {
4409 return ForkJoinTasks.searchValues
4410 (map, searchFunction).invoke();
4411 }
4412
4413 /**
4414 * Returns the result of accumulating all values using the
4415 * given reducer to combine values, or null if none.
4416 *
4417 * @param reducer a commutative associative combining function
4418 * @return the result of accumulating all values
4419 */
4420 public V reduce(BiFun<? super V, ? super V, ? extends V> reducer) {
4421 return ForkJoinTasks.reduceValues
4422 (map, reducer).invoke();
4423 }
4424
4425 /**
4426 * Returns the result of accumulating the given transformation
4427 * of all values using the given reducer to combine values, or
4428 * null if none.
4429 *
4430 * @param transformer a function returning the transformation
4431          * for an element, or null if there is no transformation (in
4432 * which case it is not combined).
4433 * @param reducer a commutative associative combining function
4434 * @return the result of accumulating the given transformation
4435 * of all values
4436 */
4437 public <U> U reduce(Fun<? super V, ? extends U> transformer,
4438 BiFun<? super U, ? super U, ? extends U> reducer) {
4439 return ForkJoinTasks.reduceValues
4440 (map, transformer, reducer).invoke();
4441 }
4442
4443 /**
4444 * Returns the result of accumulating the given transformation
4445 * of all values using the given reducer to combine values,
4446 * and the given basis as an identity value.
4447 *
4448 * @param transformer a function returning the transformation
4449 * for an element
4450 * @param basis the identity (initial default value) for the reduction
4451 * @param reducer a commutative associative combining function
4452 * @return the result of accumulating the given transformation
4453 * of all values
4454 */
4455 public double reduceToDouble(ObjectToDouble<? super V> transformer,
4456 double basis,
4457 DoubleByDoubleToDouble reducer) {
4458 return ForkJoinTasks.reduceValuesToDouble
4459 (map, transformer, basis, reducer).invoke();
4460 }
4461
4462 /**
4463 * Returns the result of accumulating the given transformation
4464 * of all values using the given reducer to combine values,
4465 * and the given basis as an identity value.
4466 *
4467 * @param transformer a function returning the transformation
4468 * for an element
4469 * @param basis the identity (initial default value) for the reduction
4470 * @param reducer a commutative associative combining function
4471 * @return the result of accumulating the given transformation
4472 * of all values
4473 */
4474 public long reduceToLong(ObjectToLong<? super V> transformer,
4475 long basis,
4476 LongByLongToLong reducer) {
4477 return ForkJoinTasks.reduceValuesToLong
4478 (map, transformer, basis, reducer).invoke();
4479 }
4480
4481 /**
4482 * Returns the result of accumulating the given transformation
4483 * of all values using the given reducer to combine values,
4484 * and the given basis as an identity value.
4485 *
4486 * @param transformer a function returning the transformation
4487 * for an element
4488 * @param basis the identity (initial default value) for the reduction
4489 * @param reducer a commutative associative combining function
4490 * @return the result of accumulating the given transformation
4491 * of all values
4492 */
4493 public int reduceToInt(ObjectToInt<? super V> transformer,
4494 int basis,
4495 IntByIntToInt reducer) {
4496 return ForkJoinTasks.reduceValuesToInt
4497 (map, transformer, basis, reducer).invoke();
4498 }
4499
4500 }
4501
4502 /**
4503 * A view of a ConcurrentHashMap as a {@link Set} of (key, value)
4504 * entries. This class cannot be directly instantiated. See
4505 * {@link #entrySet}.
4506 */
4507 public static final class EntrySetView<K,V> extends CHMView<K,V>
4508 implements Set<Map.Entry<K,V>> {
4509 EntrySetView(ConcurrentHashMap<K, V> map) { super(map); }
4510 public final boolean contains(Object o) {
4511 Object k, v, r; Map.Entry<?,?> e;
4512 return ((o instanceof Map.Entry) &&
4513 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4514 (r = map.get(k)) != null &&
4515 (v = e.getValue()) != null &&
4516 (v == r || v.equals(r)));
4517 }
4518 public final boolean remove(Object o) {
4519 Object k, v; Map.Entry<?,?> e;
4520 return ((o instanceof Map.Entry) &&
4521 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4522 (v = e.getValue()) != null &&
4523 map.remove(k, v));
4524 }
4525
4526 /**
4527 * Returns a "weakly consistent" iterator that will never
4528 * throw {@link ConcurrentModificationException}, and
4529 * guarantees to traverse elements as they existed upon
4530 * construction of the iterator, and may (but is not
4531 * guaranteed to) reflect any modifications subsequent to
4532 * construction.
4533 *
4534 * @return an iterator over the entries of this map
4535 */
4536 public final Iterator<Map.Entry<K,V>> iterator() {
4537 return new EntryIterator<K,V>(map);
4538 }
4539
4540 public final boolean add(Entry<K,V> e) {
4541 K key = e.getKey();
4542 V value = e.getValue();
4543 if (key == null || value == null)
4544 throw new NullPointerException();
4545 return map.internalPut(key, value) == null;
4546 }
4547 public final boolean addAll(Collection<? extends Entry<K,V>> c) {
4548 boolean added = false;
4549 for (Entry<K,V> e : c) {
4550 if (add(e))
4551 added = true;
4552 }
4553 return added;
4554 }
4555 public boolean equals(Object o) {
4556 Set<?> c;
4557 return ((o instanceof Set) &&
4558 ((c = (Set<?>)o) == this ||
4559 (containsAll(c) && c.containsAll(this))));
4560 }
4561
4562 /**
4563 * Performs the given action for each entry.
4564 *
4565 * @param action the action
4566 */
4567 public void forEach(Action<Map.Entry<K,V>> action) {
4568 ForkJoinTasks.forEachEntry
4569 (map, action).invoke();
4570 }
4571
4572 /**
4573 * Performs the given action for each non-null transformation
4574 * of each entry.
4575 *
4576 * @param transformer a function returning the transformation
4577          * for an element, or null if there is no transformation (in
4578 * which case the action is not applied).
4579 * @param action the action
4580 */
4581 public <U> void forEach(Fun<Map.Entry<K,V>, ? extends U> transformer,
4582 Action<U> action) {
4583 ForkJoinTasks.forEachEntry
4584 (map, transformer, action).invoke();
4585 }
4586
4587 /**
4588 * Returns a non-null result from applying the given search
4589 * function on each entry, or null if none. Upon success,
4590 * further element processing is suppressed and the results of
4591 * any other parallel invocations of the search function are
4592 * ignored.
4593 *
4594 * @param searchFunction a function returning a non-null
4595 * result on success, else null
4596 * @return a non-null result from applying the given search
4597 * function on each entry, or null if none
4598 */
4599 public <U> U search(Fun<Map.Entry<K,V>, ? extends U> searchFunction) {
4600 return ForkJoinTasks.searchEntries
4601 (map, searchFunction).invoke();
4602 }
4603
4604 /**
4605 * Returns the result of accumulating all entries using the
4606 * given reducer to combine values, or null if none.
4607 *
4608 * @param reducer a commutative associative combining function
4609 * @return the result of accumulating all entries
4610 */
4611 public Map.Entry<K,V> reduce(BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4612 return ForkJoinTasks.reduceEntries
4613 (map, reducer).invoke();
4614 }
4615
4616 /**
4617 * Returns the result of accumulating the given transformation
4618 * of all entries using the given reducer to combine values,
4619 * or null if none.
4620 *
4621 * @param transformer a function returning the transformation
4622          * for an element, or null if there is no transformation (in
4623 * which case it is not combined).
4624 * @param reducer a commutative associative combining function
4625 * @return the result of accumulating the given transformation
4626 * of all entries
4627 */
4628 public <U> U reduce(Fun<Map.Entry<K,V>, ? extends U> transformer,
4629 BiFun<? super U, ? super U, ? extends U> reducer) {
4630 return ForkJoinTasks.reduceEntries
4631 (map, transformer, reducer).invoke();
4632 }
4633
4634 /**
4635 * Returns the result of accumulating the given transformation
4636 * of all entries using the given reducer to combine values,
4637 * and the given basis as an identity value.
4638 *
4639 * @param transformer a function returning the transformation
4640 * for an element
4641 * @param basis the identity (initial default value) for the reduction
4642 * @param reducer a commutative associative combining function
4643 * @return the result of accumulating the given transformation
4644 * of all entries
4645 */
4646 public double reduceToDouble(ObjectToDouble<Map.Entry<K,V>> transformer,
4647 double basis,
4648 DoubleByDoubleToDouble reducer) {
4649 return ForkJoinTasks.reduceEntriesToDouble
4650 (map, transformer, basis, reducer).invoke();
4651 }
4652
4653 /**
4654 * Returns the result of accumulating the given transformation
4655 * of all entries using the given reducer to combine values,
4656 * and the given basis as an identity value.
4657 *
4658 * @param transformer a function returning the transformation
4659 * for an element
4660 * @param basis the identity (initial default value) for the reduction
4661 * @param reducer a commutative associative combining function
4662 * @return the result of accumulating the given transformation
4663 * of all entries
4664 */
4665 public long reduceToLong(ObjectToLong<Map.Entry<K,V>> transformer,
4666 long basis,
4667 LongByLongToLong reducer) {
4668 return ForkJoinTasks.reduceEntriesToLong
4669 (map, transformer, basis, reducer).invoke();
4670 }
4671
4672 /**
4673 * Returns the result of accumulating the given transformation
4674 * of all entries using the given reducer to combine values,
4675 * and the given basis as an identity value.
4676 *
4677 * @param transformer a function returning the transformation
4678 * for an element
4679 * @param basis the identity (initial default value) for the reduction
4680 * @param reducer a commutative associative combining function
4681 * @return the result of accumulating the given transformation
4682 * of all entries
4683 */
4684 public int reduceToInt(ObjectToInt<Map.Entry<K,V>> transformer,
4685 int basis,
4686 IntByIntToInt reducer) {
4687 return ForkJoinTasks.reduceEntriesToInt
4688 (map, transformer, basis, reducer).invoke();
4689 }
4690
4691 }
4692
4693 // ---------------------------------------------------------------------
4694
4695 /**
4696 * Predefined tasks for performing bulk parallel operations on
4697 * ConcurrentHashMaps. These tasks follow the forms and rules used
4698 * for bulk operations. Each method has the same name, but returns
4699 * a task rather than invoking it. These methods may be useful in
4700 * custom applications such as submitting a task without waiting
4701 * for completion, using a custom pool, or combining with other
4702 * tasks.
4703 */
4704 public static class ForkJoinTasks {
4705 private ForkJoinTasks() {}
4706
4707 /**
4708 * Returns a task that when invoked, performs the given
4709 * action for each (key, value)
4710 *
4711 * @param map the map
4712 * @param action the action
4713 * @return the task
4714 */
4715 public static <K,V> ForkJoinTask<Void> forEach
4716 (ConcurrentHashMap<K,V> map,
4717 BiAction<K,V> action) {
4718 if (action == null) throw new NullPointerException();
4719 return new ForEachMappingTask<K,V>(map, null, -1, action);
4720 }
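        // Hedged usage sketch (not part of the original source): obtaining the
        // task without invoking it, so it can be submitted to a caller-chosen
        // ForkJoinPool; "counts" and "pool" are assumptions for illustration.
        //
        //   ForkJoinTask<Void> task = ConcurrentHashMap.ForkJoinTasks.forEach(
        //       counts,
        //       new ConcurrentHashMap.BiAction<String, Long>() {
        //           public void apply(String key, Long value) { /* ... */ }
        //       });
        //   pool.submit(task);   // or task.fork() from within a ForkJoinPool computation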
4721
4722 /**
4723 * Returns a task that when invoked, performs the given
4724 * action for each non-null transformation of each (key, value)
4725 *
4726 * @param map the map
4727 * @param transformer a function returning the transformation
4728 * for an element, or null if there is no transformation (in
4729 * which case the action is not applied)
4730 * @param action the action
4731 * @return the task
4732 */
4733 public static <K,V,U> ForkJoinTask<Void> forEach
4734 (ConcurrentHashMap<K,V> map,
4735 BiFun<? super K, ? super V, ? extends U> transformer,
4736 Action<U> action) {
4737 if (transformer == null || action == null)
4738 throw new NullPointerException();
4739 return new ForEachTransformedMappingTask<K,V,U>
4740 (map, null, -1, transformer, action);
4741 }
4742
4743 /**
4744 * Returns a task that when invoked, returns a non-null result
4745 * from applying the given search function on each (key,
4746 * value), or null if none. Upon success, further element
4747 * processing is suppressed and the results of any other
4748 * parallel invocations of the search function are ignored.
4749 *
4750 * @param map the map
4751 * @param searchFunction a function returning a non-null
4752 * result on success, else null
4753 * @return the task
4754 */
4755 public static <K,V,U> ForkJoinTask<U> search
4756 (ConcurrentHashMap<K,V> map,
4757 BiFun<? super K, ? super V, ? extends U> searchFunction) {
4758 if (searchFunction == null) throw new NullPointerException();
4759 return new SearchMappingsTask<K,V,U>
4760 (map, null, -1, searchFunction,
4761 new AtomicReference<U>());
4762 }
4763
4764 /**
4765 * Returns a task that when invoked, returns the result of
4766 * accumulating the given transformation of all (key, value) pairs
4767 * using the given reducer to combine values, or null if none.
4768 *
4769 * @param map the map
4770 * @param transformer a function returning the transformation
4771 * for an element, or null if there is no transformation (in
4772 * which case it is not combined).
4773 * @param reducer a commutative associative combining function
4774 * @return the task
4775 */
4776 public static <K,V,U> ForkJoinTask<U> reduce
4777 (ConcurrentHashMap<K,V> map,
4778 BiFun<? super K, ? super V, ? extends U> transformer,
4779 BiFun<? super U, ? super U, ? extends U> reducer) {
4780 if (transformer == null || reducer == null)
4781 throw new NullPointerException();
4782 return new MapReduceMappingsTask<K,V,U>
4783 (map, null, -1, null, transformer, reducer);
4784 }
4785
4786 /**
4787 * Returns a task that when invoked, returns the result of
4788 * accumulating the given transformation of all (key, value) pairs
4789 * using the given reducer to combine values, and the given
4790 * basis as an identity value.
4791 *
4792 * @param map the map
4793 * @param transformer a function returning the transformation
4794 * for an element
4795 * @param basis the identity (initial default value) for the reduction
4796 * @param reducer a commutative associative combining function
4797 * @return the task
4798 */
4799 public static <K,V> ForkJoinTask<Double> reduceToDouble
4800 (ConcurrentHashMap<K,V> map,
4801 ObjectByObjectToDouble<? super K, ? super V> transformer,
4802 double basis,
4803 DoubleByDoubleToDouble reducer) {
4804 if (transformer == null || reducer == null)
4805 throw new NullPointerException();
4806 return new MapReduceMappingsToDoubleTask<K,V>
4807 (map, null, -1, null, transformer, basis, reducer);
4808 }
4809
4810 /**
4811 * Returns a task that when invoked, returns the result of
4812 * accumulating the given transformation of all (key, value) pairs
4813 * using the given reducer to combine values, and the given
4814 * basis as an identity value.
4815 *
4816 * @param map the map
4817 * @param transformer a function returning the transformation
4818 * for an element
4819 * @param basis the identity (initial default value) for the reduction
4820 * @param reducer a commutative associative combining function
4821 * @return the task
4822 */
4823 public static <K,V> ForkJoinTask<Long> reduceToLong
4824 (ConcurrentHashMap<K,V> map,
4825 ObjectByObjectToLong<? super K, ? super V> transformer,
4826 long basis,
4827 LongByLongToLong reducer) {
4828 if (transformer == null || reducer == null)
4829 throw new NullPointerException();
4830 return new MapReduceMappingsToLongTask<K,V>
4831 (map, null, -1, null, transformer, basis, reducer);
4832 }
4833
4834 /**
4835 * Returns a task that when invoked, returns the result of
4836 * accumulating the given transformation of all (key, value) pairs
4837 * using the given reducer to combine values, and the given
4838 * basis as an identity value.
4839 *
4840 * @param transformer a function returning the transformation
4841 * for an element
4842 * @param basis the identity (initial default value) for the reduction
4843 * @param reducer a commutative associative combining function
4844 * @return the task
4845 */
4846 public static <K,V> ForkJoinTask<Integer> reduceToInt
4847 (ConcurrentHashMap<K,V> map,
4848 ObjectByObjectToInt<? super K, ? super V> transformer,
4849 int basis,
4850 IntByIntToInt reducer) {
4851 if (transformer == null || reducer == null)
4852 throw new NullPointerException();
4853 return new MapReduceMappingsToIntTask<K,V>
4854 (map, null, -1, null, transformer, basis, reducer);
4855 }
4856
4857 /**
4858 * Returns a task that when invoked, performs the given action
4859 * for each key.
4860 *
4861 * @param map the map
4862 * @param action the action
4863 * @return the task
4864 */
4865 public static <K,V> ForkJoinTask<Void> forEachKey
4866 (ConcurrentHashMap<K,V> map,
4867 Action<K> action) {
4868 if (action == null) throw new NullPointerException();
4869 return new ForEachKeyTask<K,V>(map, null, -1, action);
4870 }
4871
4872 /**
4873 * Returns a task that when invoked, performs the given action
4874 * for each non-null transformation of each key.
4875 *
4876 * @param map the map
4877 * @param transformer a function returning the transformation
4878 * for an element, or null if there is no transformation (in
4879 * which case the action is not applied)
4880 * @param action the action
4881 * @return the task
4882 */
4883 public static <K,V,U> ForkJoinTask<Void> forEachKey
4884 (ConcurrentHashMap<K,V> map,
4885 Fun<? super K, ? extends U> transformer,
4886 Action<U> action) {
4887 if (transformer == null || action == null)
4888 throw new NullPointerException();
4889 return new ForEachTransformedKeyTask<K,V,U>
4890 (map, null, -1, transformer, action);
4891 }
4892
4893 /**
4894 * Returns a task that when invoked, returns a non-null result
4895 * from applying the given search function on each key, or
4896 * null if none. Upon success, further element processing is
4897 * suppressed and the results of any other parallel
4898 * invocations of the search function are ignored.
4899 *
4900 * @param map the map
4901 * @param searchFunction a function returning a non-null
4902 * result on success, else null
4903 * @return the task
4904 */
4905 public static <K,V,U> ForkJoinTask<U> searchKeys
4906 (ConcurrentHashMap<K,V> map,
4907 Fun<? super K, ? extends U> searchFunction) {
4908 if (searchFunction == null) throw new NullPointerException();
4909 return new SearchKeysTask<K,V,U>
4910 (map, null, -1, searchFunction,
4911 new AtomicReference<U>());
4912 }
4913
4914 /**
4915 * Returns a task that when invoked, returns the result of
4916 * accumulating all keys using the given reducer to combine
4917 * values, or null if none.
4918 *
4919 * @param map the map
4920 * @param reducer a commutative associative combining function
4921 * @return the task
4922 */
4923 public static <K,V> ForkJoinTask<K> reduceKeys
4924 (ConcurrentHashMap<K,V> map,
4925 BiFun<? super K, ? super K, ? extends K> reducer) {
4926 if (reducer == null) throw new NullPointerException();
4927 return new ReduceKeysTask<K,V>
4928 (map, null, -1, null, reducer);
4929 }
4930
4931 /**
4932 * Returns a task that when invoked, returns the result of
4933 * accumulating the given transformation of all keys using the given
4934 * reducer to combine values, or null if none.
4935 *
4936 * @param map the map
4937 * @param transformer a function returning the transformation
4938 * for an element, or null if there is no transformation (in
4939 * which case it is not combined).
4940 * @param reducer a commutative associative combining function
4941 * @return the task
4942 */
4943 public static <K,V,U> ForkJoinTask<U> reduceKeys
4944 (ConcurrentHashMap<K,V> map,
4945 Fun<? super K, ? extends U> transformer,
4946 BiFun<? super U, ? super U, ? extends U> reducer) {
4947 if (transformer == null || reducer == null)
4948 throw new NullPointerException();
4949 return new MapReduceKeysTask<K,V,U>
4950 (map, null, -1, null, transformer, reducer);
4951 }
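        /*
         * A minimal usage sketch, assuming the illustrative "scores" map and
         * "pool": keys are transformed to their lengths and the lengths are
         * reduced to a maximum; a null transformation would drop the element.
         *
         *   ForkJoinTask<Integer> longest = ForkJoinTasks.reduceKeys(
         *       scores,
         *       new Fun<String,Integer>() {
         *           public Integer apply(String k) { return k.length(); }
         *       },
         *       new BiFun<Integer,Integer,Integer>() {
         *           public Integer apply(Integer a, Integer b) { return a >= b ? a : b; }
         *       });
         *   Integer maxLen = pool.invoke(longest);   // 5 here; null if the map is empty
         */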
4952
4953 /**
4954 * Returns a task that when invoked, returns the result of
4955 * accumulating the given transformation of all keys using the given
4956 * reducer to combine values, and the given basis as an
4957 * identity value.
4958 *
4959 * @param map the map
4960 * @param transformer a function returning the transformation
4961 * for an element
4962 * @param basis the identity (initial default value) for the reduction
4963 * @param reducer a commutative associative combining function
4964 * @return the task
4965 */
4966 public static <K,V> ForkJoinTask<Double> reduceKeysToDouble
4967 (ConcurrentHashMap<K,V> map,
4968 ObjectToDouble<? super K> transformer,
4969 double basis,
4970 DoubleByDoubleToDouble reducer) {
4971 if (transformer == null || reducer == null)
4972 throw new NullPointerException();
4973 return new MapReduceKeysToDoubleTask<K,V>
4974 (map, null, -1, null, transformer, basis, reducer);
4975 }
4976
4977 /**
4978 * Returns a task that when invoked, returns the result of
4979 * accumulating the given transformation of all keys using the given
4980 * reducer to combine values, and the given basis as an
4981 * identity value.
4982 *
4983 * @param map the map
4984 * @param transformer a function returning the transformation
4985 * for an element
4986 * @param basis the identity (initial default value) for the reduction
4987 * @param reducer a commutative associative combining function
4988 * @return the task
4989 */
4990 public static <K,V> ForkJoinTask<Long> reduceKeysToLong
4991 (ConcurrentHashMap<K,V> map,
4992 ObjectToLong<? super K> transformer,
4993 long basis,
4994 LongByLongToLong reducer) {
4995 if (transformer == null || reducer == null)
4996 throw new NullPointerException();
4997 return new MapReduceKeysToLongTask<K,V>
4998 (map, null, -1, null, transformer, basis, reducer);
4999 }
5000
5001 /**
5002 * Returns a task that when invoked, returns the result of
5003 * accumulating the given transformation of all keys using the given
5004 * reducer to combine values, and the given basis as an
5005 * identity value.
5006 *
5007 * @param map the map
5008 * @param transformer a function returning the transformation
5009 * for an element
5010 * @param basis the identity (initial default value) for the reduction
5011 * @param reducer a commutative associative combining function
5012 * @return the task
5013 */
5014 public static <K,V> ForkJoinTask<Integer> reduceKeysToInt
5015 (ConcurrentHashMap<K,V> map,
5016 ObjectToInt<? super K> transformer,
5017 int basis,
5018 IntByIntToInt reducer) {
5019 if (transformer == null || reducer == null)
5020 throw new NullPointerException();
5021 return new MapReduceKeysToIntTask<K,V>
5022 (map, null, -1, null, transformer, basis, reducer);
5023 }
5024
5025 /**
5026 * Returns a task that when invoked, performs the given action
5027 * for each value.
5028 *
5029 * @param map the map
5030      * @param action the action
          * @return the task
5031 */
5032 public static <K,V> ForkJoinTask<Void> forEachValue
5033 (ConcurrentHashMap<K,V> map,
5034 Action<V> action) {
5035 if (action == null) throw new NullPointerException();
5036 return new ForEachValueTask<K,V>(map, null, -1, action);
5037 }
5038
5039 /**
5040 * Returns a task that when invoked, performs the given action
5041 * for each non-null transformation of each value.
5042 *
5043 * @param map the map
5044 * @param transformer a function returning the transformation
5045 * for an element, or null if there is no transformation (in
5046 * which case the action is not applied)
5047      * @param action the action
          * @return the task
5048 */
5049 public static <K,V,U> ForkJoinTask<Void> forEachValue
5050 (ConcurrentHashMap<K,V> map,
5051 Fun<? super V, ? extends U> transformer,
5052 Action<U> action) {
5053 if (transformer == null || action == null)
5054 throw new NullPointerException();
5055 return new ForEachTransformedValueTask<K,V,U>
5056 (map, null, -1, transformer, action);
5057 }
5058
5059 /**
5060 * Returns a task that when invoked, returns a non-null result
5061 * from applying the given search function on each value, or
5062 * null if none. Upon success, further element processing is
5063 * suppressed and the results of any other parallel
5064 * invocations of the search function are ignored.
5065 *
5066 * @param map the map
5067 * @param searchFunction a function returning a non-null
5068 * result on success, else null
5069 * @return the task
5070 */
5071 public static <K,V,U> ForkJoinTask<U> searchValues
5072 (ConcurrentHashMap<K,V> map,
5073 Fun<? super V, ? extends U> searchFunction) {
5074 if (searchFunction == null) throw new NullPointerException();
5075 return new SearchValuesTask<K,V,U>
5076 (map, null, -1, searchFunction,
5077 new AtomicReference<U>());
5078 }
5079
5080 /**
5081 * Returns a task that when invoked, returns the result of
5082 * accumulating all values using the given reducer to combine
5083 * values, or null if none.
5084 *
5085 * @param map the map
5086 * @param reducer a commutative associative combining function
5087 * @return the task
5088 */
5089 public static <K,V> ForkJoinTask<V> reduceValues
5090 (ConcurrentHashMap<K,V> map,
5091 BiFun<? super V, ? super V, ? extends V> reducer) {
5092 if (reducer == null) throw new NullPointerException();
5093 return new ReduceValuesTask<K,V>
5094 (map, null, -1, null, reducer);
5095 }
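        /*
         * A minimal usage sketch, assuming the illustrative "scores" map and
         * "pool": values are combined pairwise with the reducer alone, with
         * no transformation or basis.
         *
         *   ForkJoinTask<Integer> maxTask = ForkJoinTasks.reduceValues(
         *       scores,
         *       new BiFun<Integer,Integer,Integer>() {
         *           public Integer apply(Integer a, Integer b) { return a >= b ? a : b; }
         *       });
         *   Integer max = pool.invoke(maxTask);   // 5 here; null if the map is empty
         */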
5096
5097 /**
5098 * Returns a task that when invoked, returns the result of
5099 * accumulating the given transformation of all values using the
5100 * given reducer to combine values, or null if none.
5101 *
5102 * @param map the map
5103 * @param transformer a function returning the transformation
5104 * for an element, or null if there is no transformation (in
5105 * which case it is not combined).
5106 * @param reducer a commutative associative combining function
5107 * @return the task
5108 */
5109 public static <K,V,U> ForkJoinTask<U> reduceValues
5110 (ConcurrentHashMap<K,V> map,
5111 Fun<? super V, ? extends U> transformer,
5112 BiFun<? super U, ? super U, ? extends U> reducer) {
5113 if (transformer == null || reducer == null)
5114 throw new NullPointerException();
5115 return new MapReduceValuesTask<K,V,U>
5116 (map, null, -1, null, transformer, reducer);
5117 }
5118
5119 /**
5120 * Returns a task that when invoked, returns the result of
5121 * accumulating the given transformation of all values using the
5122 * given reducer to combine values, and the given basis as an
5123 * identity value.
5124 *
5125 * @param map the map
5126 * @param transformer a function returning the transformation
5127 * for an element
5128 * @param basis the identity (initial default value) for the reduction
5129 * @param reducer a commutative associative combining function
5130 * @return the task
5131 */
5132 public static <K,V> ForkJoinTask<Double> reduceValuesToDouble
5133 (ConcurrentHashMap<K,V> map,
5134 ObjectToDouble<? super V> transformer,
5135 double basis,
5136 DoubleByDoubleToDouble reducer) {
5137 if (transformer == null || reducer == null)
5138 throw new NullPointerException();
5139 return new MapReduceValuesToDoubleTask<K,V>
5140 (map, null, -1, null, transformer, basis, reducer);
5141 }
5142
5143 /**
5144 * Returns a task that when invoked, returns the result of
5145 * accumulating the given transformation of all values using the
5146 * given reducer to combine values, and the given basis as an
5147 * identity value.
5148 *
5149 * @param map the map
5150 * @param transformer a function returning the transformation
5151 * for an element
5152 * @param basis the identity (initial default value) for the reduction
5153 * @param reducer a commutative associative combining function
5154 * @return the task
5155 */
5156 public static <K,V> ForkJoinTask<Long> reduceValuesToLong
5157 (ConcurrentHashMap<K,V> map,
5158 ObjectToLong<? super V> transformer,
5159 long basis,
5160 LongByLongToLong reducer) {
5161 if (transformer == null || reducer == null)
5162 throw new NullPointerException();
5163 return new MapReduceValuesToLongTask<K,V>
5164 (map, null, -1, null, transformer, basis, reducer);
5165 }
5166
5167 /**
5168 * Returns a task that when invoked, returns the result of
5169 * accumulating the given transformation of all values using the
5170 * given reducer to combine values, and the given basis as an
5171 * identity value.
5172 *
5173 * @param map the map
5174 * @param transformer a function returning the transformation
5175 * for an element
5176 * @param basis the identity (initial default value) for the reduction
5177 * @param reducer a commutative associative combining function
5178 * @return the task
5179 */
5180 public static <K,V> ForkJoinTask<Integer> reduceValuesToInt
5181 (ConcurrentHashMap<K,V> map,
5182 ObjectToInt<? super V> transformer,
5183 int basis,
5184 IntByIntToInt reducer) {
5185 if (transformer == null || reducer == null)
5186 throw new NullPointerException();
5187 return new MapReduceValuesToIntTask<K,V>
5188 (map, null, -1, null, transformer, basis, reducer);
5189 }
5190
5191 /**
5192      * Returns a task that when invoked, performs the given action
5193 * for each entry.
5194 *
5195 * @param map the map
5196      * @param action the action
          * @return the task
5197 */
5198 public static <K,V> ForkJoinTask<Void> forEachEntry
5199 (ConcurrentHashMap<K,V> map,
5200 Action<Map.Entry<K,V>> action) {
5201 if (action == null) throw new NullPointerException();
5202 return new ForEachEntryTask<K,V>(map, null, -1, action);
5203 }
5204
5205 /**
5206      * Returns a task that when invoked, performs the given action
5207 * for each non-null transformation of each entry.
5208 *
5209 * @param map the map
5210 * @param transformer a function returning the transformation
5211 * for an element, or null if there is no transformation (in
5212 * which case the action is not applied)
5213      * @param action the action
          * @return the task
5214 */
5215 public static <K,V,U> ForkJoinTask<Void> forEachEntry
5216 (ConcurrentHashMap<K,V> map,
5217 Fun<Map.Entry<K,V>, ? extends U> transformer,
5218 Action<U> action) {
5219 if (transformer == null || action == null)
5220 throw new NullPointerException();
5221 return new ForEachTransformedEntryTask<K,V,U>
5222 (map, null, -1, transformer, action);
5223 }
5224
5225 /**
5226 * Returns a task that when invoked, returns a non-null result
5227 * from applying the given search function on each entry, or
5228 * null if none. Upon success, further element processing is
5229 * suppressed and the results of any other parallel
5230 * invocations of the search function are ignored.
5231 *
5232 * @param map the map
5233 * @param searchFunction a function returning a non-null
5234 * result on success, else null
5235 * @return the task
5236 */
5237 public static <K,V,U> ForkJoinTask<U> searchEntries
5238 (ConcurrentHashMap<K,V> map,
5239 Fun<Map.Entry<K,V>, ? extends U> searchFunction) {
5240 if (searchFunction == null) throw new NullPointerException();
5241 return new SearchEntriesTask<K,V,U>
5242 (map, null, -1, searchFunction,
5243 new AtomicReference<U>());
5244 }
5245
5246 /**
5247 * Returns a task that when invoked, returns the result of
5248 * accumulating all entries using the given reducer to combine
5249 * values, or null if none.
5250 *
5251 * @param map the map
5252 * @param reducer a commutative associative combining function
5253 * @return the task
5254 */
5255 public static <K,V> ForkJoinTask<Map.Entry<K,V>> reduceEntries
5256 (ConcurrentHashMap<K,V> map,
5257 BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
5258 if (reducer == null) throw new NullPointerException();
5259 return new ReduceEntriesTask<K,V>
5260 (map, null, -1, null, reducer);
5261 }
5262
5263 /**
5264 * Returns a task that when invoked, returns the result of
5265 * accumulating the given transformation of all entries using the
5266 * given reducer to combine values, or null if none.
5267 *
5268 * @param map the map
5269 * @param transformer a function returning the transformation
5270 * for an element, or null if there is no transformation (in
5271 * which case it is not combined).
5272 * @param reducer a commutative associative combining function
5273 * @return the task
5274 */
5275 public static <K,V,U> ForkJoinTask<U> reduceEntries
5276 (ConcurrentHashMap<K,V> map,
5277 Fun<Map.Entry<K,V>, ? extends U> transformer,
5278 BiFun<? super U, ? super U, ? extends U> reducer) {
5279 if (transformer == null || reducer == null)
5280 throw new NullPointerException();
5281 return new MapReduceEntriesTask<K,V,U>
5282 (map, null, -1, null, transformer, reducer);
5283 }
5284
5285 /**
5286 * Returns a task that when invoked, returns the result of
5287 * accumulating the given transformation of all entries using the
5288 * given reducer to combine values, and the given basis as an
5289 * identity value.
5290 *
5291 * @param map the map
5292 * @param transformer a function returning the transformation
5293 * for an element
5294 * @param basis the identity (initial default value) for the reduction
5295 * @param reducer a commutative associative combining function
5296 * @return the task
5297 */
5298 public static <K,V> ForkJoinTask<Double> reduceEntriesToDouble
5299 (ConcurrentHashMap<K,V> map,
5300 ObjectToDouble<Map.Entry<K,V>> transformer,
5301 double basis,
5302 DoubleByDoubleToDouble reducer) {
5303 if (transformer == null || reducer == null)
5304 throw new NullPointerException();
5305 return new MapReduceEntriesToDoubleTask<K,V>
5306 (map, null, -1, null, transformer, basis, reducer);
5307 }
5308
5309 /**
5310 * Returns a task that when invoked, returns the result of
5311 * accumulating the given transformation of all entries using the
5312 * given reducer to combine values, and the given basis as an
5313 * identity value.
5314 *
5315 * @param map the map
5316 * @param transformer a function returning the transformation
5317 * for an element
5318 * @param basis the identity (initial default value) for the reduction
5319 * @param reducer a commutative associative combining function
5320 * @return the task
5321 */
5322 public static <K,V> ForkJoinTask<Long> reduceEntriesToLong
5323 (ConcurrentHashMap<K,V> map,
5324 ObjectToLong<Map.Entry<K,V>> transformer,
5325 long basis,
5326 LongByLongToLong reducer) {
5327 if (transformer == null || reducer == null)
5328 throw new NullPointerException();
5329 return new MapReduceEntriesToLongTask<K,V>
5330 (map, null, -1, null, transformer, basis, reducer);
5331 }
5332
5333 /**
5334 * Returns a task that when invoked, returns the result of
5335 * accumulating the given transformation of all entries using the
5336 * given reducer to combine values, and the given basis as an
5337 * identity value.
5338 *
5339 * @param map the map
5340 * @param transformer a function returning the transformation
5341 * for an element
5342 * @param basis the identity (initial default value) for the reduction
5343 * @param reducer a commutative associative combining function
5344 * @return the task
5345 */
5346 public static <K,V> ForkJoinTask<Integer> reduceEntriesToInt
5347 (ConcurrentHashMap<K,V> map,
5348 ObjectToInt<Map.Entry<K,V>> transformer,
5349 int basis,
5350 IntByIntToInt reducer) {
5351 if (transformer == null || reducer == null)
5352 throw new NullPointerException();
5353 return new MapReduceEntriesToIntTask<K,V>
5354 (map, null, -1, null, transformer, basis, reducer);
5355 }
5356 }
5357
5358 // -------------------------------------------------------
5359
5360 /*
5361 * Task classes. Coded in a regular but ugly format/style to
5362 * simplify checks that each variant differs in the right way from
5363 * others.
5364 */
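    /*
     * Shared structure (a descriptive note; the details are visible in the
     * classes below): each compute() method first loops calling preSplit(),
     * forking additional subtasks while the returned batch estimate stays
     * positive, then consumes its own portion via advance()/nextKey. The
     * ForEach and Search variants finish by calling propagateCompletion();
     * the Reduce and MapReduce variants instead record a local result and,
     * in the firstComplete()/nextComplete() loop, fold in the results of
     * completed right-hand subtasks linked through the rights/nextRight
     * chain.
     */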
5365
5366 @SuppressWarnings("serial") static final class ForEachKeyTask<K,V>
5367 extends Traverser<K,V,Void> {
5368 final Action<K> action;
5369 ForEachKeyTask
5370 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5371 Action<K> action) {
5372 super(m, p, b);
5373 this.action = action;
5374 }
5375 @SuppressWarnings("unchecked") public final void compute() {
5376 final Action<K> action;
5377 if ((action = this.action) == null)
5378 throw new NullPointerException();
5379 for (int b; (b = preSplit()) > 0;)
5380 new ForEachKeyTask<K,V>(map, this, b, action).fork();
5381 while (advance() != null)
5382 action.apply((K)nextKey);
5383 propagateCompletion();
5384 }
5385 }
5386
5387 @SuppressWarnings("serial") static final class ForEachValueTask<K,V>
5388 extends Traverser<K,V,Void> {
5389 final Action<V> action;
5390 ForEachValueTask
5391 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5392 Action<V> action) {
5393 super(m, p, b);
5394 this.action = action;
5395 }
5396 @SuppressWarnings("unchecked") public final void compute() {
5397 final Action<V> action;
5398 if ((action = this.action) == null)
5399 throw new NullPointerException();
5400 for (int b; (b = preSplit()) > 0;)
5401 new ForEachValueTask<K,V>(map, this, b, action).fork();
5402 Object v;
5403 while ((v = advance()) != null)
5404 action.apply((V)v);
5405 propagateCompletion();
5406 }
5407 }
5408
5409 @SuppressWarnings("serial") static final class ForEachEntryTask<K,V>
5410 extends Traverser<K,V,Void> {
5411 final Action<Entry<K,V>> action;
5412 ForEachEntryTask
5413 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5414 Action<Entry<K,V>> action) {
5415 super(m, p, b);
5416 this.action = action;
5417 }
5418 @SuppressWarnings("unchecked") public final void compute() {
5419 final Action<Entry<K,V>> action;
5420 if ((action = this.action) == null)
5421 throw new NullPointerException();
5422 for (int b; (b = preSplit()) > 0;)
5423 new ForEachEntryTask<K,V>(map, this, b, action).fork();
5424 Object v;
5425 while ((v = advance()) != null)
5426 action.apply(entryFor((K)nextKey, (V)v));
5427 propagateCompletion();
5428 }
5429 }
5430
5431 @SuppressWarnings("serial") static final class ForEachMappingTask<K,V>
5432 extends Traverser<K,V,Void> {
5433 final BiAction<K,V> action;
5434 ForEachMappingTask
5435 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5436 BiAction<K,V> action) {
5437 super(m, p, b);
5438 this.action = action;
5439 }
5440 @SuppressWarnings("unchecked") public final void compute() {
5441 final BiAction<K,V> action;
5442 if ((action = this.action) == null)
5443 throw new NullPointerException();
5444 for (int b; (b = preSplit()) > 0;)
5445 new ForEachMappingTask<K,V>(map, this, b, action).fork();
5446 Object v;
5447 while ((v = advance()) != null)
5448 action.apply((K)nextKey, (V)v);
5449 propagateCompletion();
5450 }
5451 }
5452
5453 @SuppressWarnings("serial") static final class ForEachTransformedKeyTask<K,V,U>
5454 extends Traverser<K,V,Void> {
5455 final Fun<? super K, ? extends U> transformer;
5456 final Action<U> action;
5457 ForEachTransformedKeyTask
5458 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5459 Fun<? super K, ? extends U> transformer, Action<U> action) {
5460 super(m, p, b);
5461 this.transformer = transformer; this.action = action;
5462 }
5463 @SuppressWarnings("unchecked") public final void compute() {
5464 final Fun<? super K, ? extends U> transformer;
5465 final Action<U> action;
5466 if ((transformer = this.transformer) == null ||
5467 (action = this.action) == null)
5468 throw new NullPointerException();
5469 for (int b; (b = preSplit()) > 0;)
5470 new ForEachTransformedKeyTask<K,V,U>
5471 (map, this, b, transformer, action).fork();
5472 U u;
5473 while (advance() != null) {
5474 if ((u = transformer.apply((K)nextKey)) != null)
5475 action.apply(u);
5476 }
5477 propagateCompletion();
5478 }
5479 }
5480
5481 @SuppressWarnings("serial") static final class ForEachTransformedValueTask<K,V,U>
5482 extends Traverser<K,V,Void> {
5483 final Fun<? super V, ? extends U> transformer;
5484 final Action<U> action;
5485 ForEachTransformedValueTask
5486 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5487 Fun<? super V, ? extends U> transformer, Action<U> action) {
5488 super(m, p, b);
5489 this.transformer = transformer; this.action = action;
5490 }
5491 @SuppressWarnings("unchecked") public final void compute() {
5492 final Fun<? super V, ? extends U> transformer;
5493 final Action<U> action;
5494 if ((transformer = this.transformer) == null ||
5495 (action = this.action) == null)
5496 throw new NullPointerException();
5497 for (int b; (b = preSplit()) > 0;)
5498 new ForEachTransformedValueTask<K,V,U>
5499 (map, this, b, transformer, action).fork();
5500 Object v; U u;
5501 while ((v = advance()) != null) {
5502 if ((u = transformer.apply((V)v)) != null)
5503 action.apply(u);
5504 }
5505 propagateCompletion();
5506 }
5507 }
5508
5509 @SuppressWarnings("serial") static final class ForEachTransformedEntryTask<K,V,U>
5510 extends Traverser<K,V,Void> {
5511 final Fun<Map.Entry<K,V>, ? extends U> transformer;
5512 final Action<U> action;
5513 ForEachTransformedEntryTask
5514 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5515 Fun<Map.Entry<K,V>, ? extends U> transformer, Action<U> action) {
5516 super(m, p, b);
5517 this.transformer = transformer; this.action = action;
5518 }
5519 @SuppressWarnings("unchecked") public final void compute() {
5520 final Fun<Map.Entry<K,V>, ? extends U> transformer;
5521 final Action<U> action;
5522 if ((transformer = this.transformer) == null ||
5523 (action = this.action) == null)
5524 throw new NullPointerException();
5525 for (int b; (b = preSplit()) > 0;)
5526 new ForEachTransformedEntryTask<K,V,U>
5527 (map, this, b, transformer, action).fork();
5528 Object v; U u;
5529 while ((v = advance()) != null) {
5530 if ((u = transformer.apply(entryFor((K)nextKey, (V)v))) != null)
5531 action.apply(u);
5532 }
5533 propagateCompletion();
5534 }
5535 }
5536
5537 @SuppressWarnings("serial") static final class ForEachTransformedMappingTask<K,V,U>
5538 extends Traverser<K,V,Void> {
5539 final BiFun<? super K, ? super V, ? extends U> transformer;
5540 final Action<U> action;
5541 ForEachTransformedMappingTask
5542 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5543 BiFun<? super K, ? super V, ? extends U> transformer,
5544 Action<U> action) {
5545 super(m, p, b);
5546 this.transformer = transformer; this.action = action;
5547 }
5548 @SuppressWarnings("unchecked") public final void compute() {
5549 final BiFun<? super K, ? super V, ? extends U> transformer;
5550 final Action<U> action;
5551 if ((transformer = this.transformer) == null ||
5552 (action = this.action) == null)
5553 throw new NullPointerException();
5554 for (int b; (b = preSplit()) > 0;)
5555 new ForEachTransformedMappingTask<K,V,U>
5556 (map, this, b, transformer, action).fork();
5557 Object v; U u;
5558 while ((v = advance()) != null) {
5559 if ((u = transformer.apply((K)nextKey, (V)v)) != null)
5560 action.apply(u);
5561 }
5562 propagateCompletion();
5563 }
5564 }
5565
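    /*
     * Note on the Search*Task variants below: they share a short-circuit
     * scheme. Subtasks stop splitting and scanning as soon as the shared
     * AtomicReference already holds a result; the first subtask whose
     * search function returns non-null publishes that value with
     * compareAndSet and then calls quietlyCompleteRoot(), so the whole
     * task tree completes without visiting further elements.
     */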
5566 @SuppressWarnings("serial") static final class SearchKeysTask<K,V,U>
5567 extends Traverser<K,V,U> {
5568 final Fun<? super K, ? extends U> searchFunction;
5569 final AtomicReference<U> result;
5570 SearchKeysTask
5571 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5572 Fun<? super K, ? extends U> searchFunction,
5573 AtomicReference<U> result) {
5574 super(m, p, b);
5575 this.searchFunction = searchFunction; this.result = result;
5576 }
5577 public final U getRawResult() { return result.get(); }
5578 @SuppressWarnings("unchecked") public final void compute() {
5579 final Fun<? super K, ? extends U> searchFunction;
5580 final AtomicReference<U> result;
5581 if ((searchFunction = this.searchFunction) == null ||
5582 (result = this.result) == null)
5583 throw new NullPointerException();
5584 for (int b;;) {
5585 if (result.get() != null)
5586 return;
5587 if ((b = preSplit()) <= 0)
5588 break;
5589 new SearchKeysTask<K,V,U>
5590 (map, this, b, searchFunction, result).fork();
5591 }
5592 while (result.get() == null) {
5593 U u;
5594 if (advance() == null) {
5595 propagateCompletion();
5596 break;
5597 }
5598 if ((u = searchFunction.apply((K)nextKey)) != null) {
5599 if (result.compareAndSet(null, u))
5600 quietlyCompleteRoot();
5601 break;
5602 }
5603 }
5604 }
5605 }
5606
5607 @SuppressWarnings("serial") static final class SearchValuesTask<K,V,U>
5608 extends Traverser<K,V,U> {
5609 final Fun<? super V, ? extends U> searchFunction;
5610 final AtomicReference<U> result;
5611 SearchValuesTask
5612 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5613 Fun<? super V, ? extends U> searchFunction,
5614 AtomicReference<U> result) {
5615 super(m, p, b);
5616 this.searchFunction = searchFunction; this.result = result;
5617 }
5618 public final U getRawResult() { return result.get(); }
5619 @SuppressWarnings("unchecked") public final void compute() {
5620 final Fun<? super V, ? extends U> searchFunction;
5621 final AtomicReference<U> result;
5622 if ((searchFunction = this.searchFunction) == null ||
5623 (result = this.result) == null)
5624 throw new NullPointerException();
5625 for (int b;;) {
5626 if (result.get() != null)
5627 return;
5628 if ((b = preSplit()) <= 0)
5629 break;
5630 new SearchValuesTask<K,V,U>
5631 (map, this, b, searchFunction, result).fork();
5632 }
5633 while (result.get() == null) {
5634 Object v; U u;
5635 if ((v = advance()) == null) {
5636 propagateCompletion();
5637 break;
5638 }
5639 if ((u = searchFunction.apply((V)v)) != null) {
5640 if (result.compareAndSet(null, u))
5641 quietlyCompleteRoot();
5642 break;
5643 }
5644 }
5645 }
5646 }
5647
5648 @SuppressWarnings("serial") static final class SearchEntriesTask<K,V,U>
5649 extends Traverser<K,V,U> {
5650 final Fun<Entry<K,V>, ? extends U> searchFunction;
5651 final AtomicReference<U> result;
5652 SearchEntriesTask
5653 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5654 Fun<Entry<K,V>, ? extends U> searchFunction,
5655 AtomicReference<U> result) {
5656 super(m, p, b);
5657 this.searchFunction = searchFunction; this.result = result;
5658 }
5659 public final U getRawResult() { return result.get(); }
5660 @SuppressWarnings("unchecked") public final void compute() {
5661 final Fun<Entry<K,V>, ? extends U> searchFunction;
5662 final AtomicReference<U> result;
5663 if ((searchFunction = this.searchFunction) == null ||
5664 (result = this.result) == null)
5665 throw new NullPointerException();
5666 for (int b;;) {
5667 if (result.get() != null)
5668 return;
5669 if ((b = preSplit()) <= 0)
5670 break;
5671 new SearchEntriesTask<K,V,U>
5672 (map, this, b, searchFunction, result).fork();
5673 }
5674 while (result.get() == null) {
5675 Object v; U u;
5676 if ((v = advance()) == null) {
5677 propagateCompletion();
5678 break;
5679 }
5680 if ((u = searchFunction.apply(entryFor((K)nextKey, (V)v))) != null) {
5681 if (result.compareAndSet(null, u))
5682 quietlyCompleteRoot();
5683 return;
5684 }
5685 }
5686 }
5687 }
5688
5689 @SuppressWarnings("serial") static final class SearchMappingsTask<K,V,U>
5690 extends Traverser<K,V,U> {
5691 final BiFun<? super K, ? super V, ? extends U> searchFunction;
5692 final AtomicReference<U> result;
5693 SearchMappingsTask
5694 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5695 BiFun<? super K, ? super V, ? extends U> searchFunction,
5696 AtomicReference<U> result) {
5697 super(m, p, b);
5698 this.searchFunction = searchFunction; this.result = result;
5699 }
5700 public final U getRawResult() { return result.get(); }
5701 @SuppressWarnings("unchecked") public final void compute() {
5702 final BiFun<? super K, ? super V, ? extends U> searchFunction;
5703 final AtomicReference<U> result;
5704 if ((searchFunction = this.searchFunction) == null ||
5705 (result = this.result) == null)
5706 throw new NullPointerException();
5707 for (int b;;) {
5708 if (result.get() != null)
5709 return;
5710 if ((b = preSplit()) <= 0)
5711 break;
5712 new SearchMappingsTask<K,V,U>
5713 (map, this, b, searchFunction, result).fork();
5714 }
5715 while (result.get() == null) {
5716 Object v; U u;
5717 if ((v = advance()) == null) {
5718 propagateCompletion();
5719 break;
5720 }
5721 if ((u = searchFunction.apply((K)nextKey, (V)v)) != null) {
5722 if (result.compareAndSet(null, u))
5723 quietlyCompleteRoot();
5724 break;
5725 }
5726 }
5727 }
5728 }
5729
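    /*
     * Note on the Reduce*Task and MapReduce*Task variants below: each forked
     * subtask is threaded onto its parent's "rights" list via nextRight.
     * After a task finishes its own traversal it stores a local result, and
     * the completion loop (firstComplete()/nextComplete()) merges each
     * right-hand subtask's result into its parent with the reducer; null
     * partial results are skipped in the object-based variants, while the
     * primitive variants start from the supplied basis.
     */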
5730 @SuppressWarnings("serial") static final class ReduceKeysTask<K,V>
5731 extends Traverser<K,V,K> {
5732 final BiFun<? super K, ? super K, ? extends K> reducer;
5733 K result;
5734 ReduceKeysTask<K,V> rights, nextRight;
5735 ReduceKeysTask
5736 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5737 ReduceKeysTask<K,V> nextRight,
5738 BiFun<? super K, ? super K, ? extends K> reducer) {
5739 super(m, p, b); this.nextRight = nextRight;
5740 this.reducer = reducer;
5741 }
5742 public final K getRawResult() { return result; }
5743 @SuppressWarnings("unchecked") public final void compute() {
5744 final BiFun<? super K, ? super K, ? extends K> reducer =
5745 this.reducer;
5746 if (reducer == null)
5747 throw new NullPointerException();
5748 for (int b; (b = preSplit()) > 0;)
5749 (rights = new ReduceKeysTask<K,V>
5750 (map, this, b, rights, reducer)).fork();
5751 K r = null;
5752 while (advance() != null) {
5753 K u = (K)nextKey;
5754 r = (r == null) ? u : reducer.apply(r, u);
5755 }
5756 result = r;
5757 CountedCompleter<?> c;
5758 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5759 ReduceKeysTask<K,V>
5760 t = (ReduceKeysTask<K,V>)c,
5761 s = t.rights;
5762 while (s != null) {
5763 K tr, sr;
5764 if ((sr = s.result) != null)
5765 t.result = (((tr = t.result) == null) ? sr :
5766 reducer.apply(tr, sr));
5767 s = t.rights = s.nextRight;
5768 }
5769 }
5770 }
5771 }
5772
5773 @SuppressWarnings("serial") static final class ReduceValuesTask<K,V>
5774 extends Traverser<K,V,V> {
5775 final BiFun<? super V, ? super V, ? extends V> reducer;
5776 V result;
5777 ReduceValuesTask<K,V> rights, nextRight;
5778 ReduceValuesTask
5779 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5780 ReduceValuesTask<K,V> nextRight,
5781 BiFun<? super V, ? super V, ? extends V> reducer) {
5782 super(m, p, b); this.nextRight = nextRight;
5783 this.reducer = reducer;
5784 }
5785 public final V getRawResult() { return result; }
5786 @SuppressWarnings("unchecked") public final void compute() {
5787 final BiFun<? super V, ? super V, ? extends V> reducer =
5788 this.reducer;
5789 if (reducer == null)
5790 throw new NullPointerException();
5791 for (int b; (b = preSplit()) > 0;)
5792 (rights = new ReduceValuesTask<K,V>
5793 (map, this, b, rights, reducer)).fork();
5794 V r = null;
5795 Object v;
5796 while ((v = advance()) != null) {
5797 V u = (V)v;
5798 r = (r == null) ? u : reducer.apply(r, u);
5799 }
5800 result = r;
5801 CountedCompleter<?> c;
5802 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5803 ReduceValuesTask<K,V>
5804 t = (ReduceValuesTask<K,V>)c,
5805 s = t.rights;
5806 while (s != null) {
5807 V tr, sr;
5808 if ((sr = s.result) != null)
5809 t.result = (((tr = t.result) == null) ? sr :
5810 reducer.apply(tr, sr));
5811 s = t.rights = s.nextRight;
5812 }
5813 }
5814 }
5815 }
5816
5817 @SuppressWarnings("serial") static final class ReduceEntriesTask<K,V>
5818 extends Traverser<K,V,Map.Entry<K,V>> {
5819 final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
5820 Map.Entry<K,V> result;
5821 ReduceEntriesTask<K,V> rights, nextRight;
5822 ReduceEntriesTask
5823 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5824 ReduceEntriesTask<K,V> nextRight,
5825 BiFun<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
5826 super(m, p, b); this.nextRight = nextRight;
5827 this.reducer = reducer;
5828 }
5829 public final Map.Entry<K,V> getRawResult() { return result; }
5830 @SuppressWarnings("unchecked") public final void compute() {
5831 final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer =
5832 this.reducer;
5833 if (reducer == null)
5834 throw new NullPointerException();
5835 for (int b; (b = preSplit()) > 0;)
5836 (rights = new ReduceEntriesTask<K,V>
5837 (map, this, b, rights, reducer)).fork();
5838 Map.Entry<K,V> r = null;
5839 Object v;
5840 while ((v = advance()) != null) {
5841 Map.Entry<K,V> u = entryFor((K)nextKey, (V)v);
5842 r = (r == null) ? u : reducer.apply(r, u);
5843 }
5844 result = r;
5845 CountedCompleter<?> c;
5846 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5847 ReduceEntriesTask<K,V>
5848 t = (ReduceEntriesTask<K,V>)c,
5849 s = t.rights;
5850 while (s != null) {
5851 Map.Entry<K,V> tr, sr;
5852 if ((sr = s.result) != null)
5853 t.result = (((tr = t.result) == null) ? sr :
5854 reducer.apply(tr, sr));
5855 s = t.rights = s.nextRight;
5856 }
5857 }
5858 }
5859 }
5860
5861 @SuppressWarnings("serial") static final class MapReduceKeysTask<K,V,U>
5862 extends Traverser<K,V,U> {
5863 final Fun<? super K, ? extends U> transformer;
5864 final BiFun<? super U, ? super U, ? extends U> reducer;
5865 U result;
5866 MapReduceKeysTask<K,V,U> rights, nextRight;
5867 MapReduceKeysTask
5868 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5869 MapReduceKeysTask<K,V,U> nextRight,
5870 Fun<? super K, ? extends U> transformer,
5871 BiFun<? super U, ? super U, ? extends U> reducer) {
5872 super(m, p, b); this.nextRight = nextRight;
5873 this.transformer = transformer;
5874 this.reducer = reducer;
5875 }
5876 public final U getRawResult() { return result; }
5877 @SuppressWarnings("unchecked") public final void compute() {
5878 final Fun<? super K, ? extends U> transformer =
5879 this.transformer;
5880 final BiFun<? super U, ? super U, ? extends U> reducer =
5881 this.reducer;
5882 if (transformer == null || reducer == null)
5883 throw new NullPointerException();
5884 for (int b; (b = preSplit()) > 0;)
5885 (rights = new MapReduceKeysTask<K,V,U>
5886 (map, this, b, rights, transformer, reducer)).fork();
5887 U r = null, u;
5888 while (advance() != null) {
5889 if ((u = transformer.apply((K)nextKey)) != null)
5890 r = (r == null) ? u : reducer.apply(r, u);
5891 }
5892 result = r;
5893 CountedCompleter<?> c;
5894 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5895 MapReduceKeysTask<K,V,U>
5896 t = (MapReduceKeysTask<K,V,U>)c,
5897 s = t.rights;
5898 while (s != null) {
5899 U tr, sr;
5900 if ((sr = s.result) != null)
5901 t.result = (((tr = t.result) == null) ? sr :
5902 reducer.apply(tr, sr));
5903 s = t.rights = s.nextRight;
5904 }
5905 }
5906 }
5907 }
5908
5909 @SuppressWarnings("serial") static final class MapReduceValuesTask<K,V,U>
5910 extends Traverser<K,V,U> {
5911 final Fun<? super V, ? extends U> transformer;
5912 final BiFun<? super U, ? super U, ? extends U> reducer;
5913 U result;
5914 MapReduceValuesTask<K,V,U> rights, nextRight;
5915 MapReduceValuesTask
5916 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5917 MapReduceValuesTask<K,V,U> nextRight,
5918 Fun<? super V, ? extends U> transformer,
5919 BiFun<? super U, ? super U, ? extends U> reducer) {
5920 super(m, p, b); this.nextRight = nextRight;
5921 this.transformer = transformer;
5922 this.reducer = reducer;
5923 }
5924 public final U getRawResult() { return result; }
5925 @SuppressWarnings("unchecked") public final void compute() {
5926 final Fun<? super V, ? extends U> transformer =
5927 this.transformer;
5928 final BiFun<? super U, ? super U, ? extends U> reducer =
5929 this.reducer;
5930 if (transformer == null || reducer == null)
5931 throw new NullPointerException();
5932 for (int b; (b = preSplit()) > 0;)
5933 (rights = new MapReduceValuesTask<K,V,U>
5934 (map, this, b, rights, transformer, reducer)).fork();
5935 U r = null, u;
5936 Object v;
5937 while ((v = advance()) != null) {
5938 if ((u = transformer.apply((V)v)) != null)
5939 r = (r == null) ? u : reducer.apply(r, u);
5940 }
5941 result = r;
5942 CountedCompleter<?> c;
5943 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5944 MapReduceValuesTask<K,V,U>
5945 t = (MapReduceValuesTask<K,V,U>)c,
5946 s = t.rights;
5947 while (s != null) {
5948 U tr, sr;
5949 if ((sr = s.result) != null)
5950 t.result = (((tr = t.result) == null) ? sr :
5951 reducer.apply(tr, sr));
5952 s = t.rights = s.nextRight;
5953 }
5954 }
5955 }
5956 }
5957
5958 @SuppressWarnings("serial") static final class MapReduceEntriesTask<K,V,U>
5959 extends Traverser<K,V,U> {
5960 final Fun<Map.Entry<K,V>, ? extends U> transformer;
5961 final BiFun<? super U, ? super U, ? extends U> reducer;
5962 U result;
5963 MapReduceEntriesTask<K,V,U> rights, nextRight;
5964 MapReduceEntriesTask
5965 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5966 MapReduceEntriesTask<K,V,U> nextRight,
5967 Fun<Map.Entry<K,V>, ? extends U> transformer,
5968 BiFun<? super U, ? super U, ? extends U> reducer) {
5969 super(m, p, b); this.nextRight = nextRight;
5970 this.transformer = transformer;
5971 this.reducer = reducer;
5972 }
5973 public final U getRawResult() { return result; }
5974 @SuppressWarnings("unchecked") public final void compute() {
5975 final Fun<Map.Entry<K,V>, ? extends U> transformer =
5976 this.transformer;
5977 final BiFun<? super U, ? super U, ? extends U> reducer =
5978 this.reducer;
5979 if (transformer == null || reducer == null)
5980 throw new NullPointerException();
5981 for (int b; (b = preSplit()) > 0;)
5982 (rights = new MapReduceEntriesTask<K,V,U>
5983 (map, this, b, rights, transformer, reducer)).fork();
5984 U r = null, u;
5985 Object v;
5986 while ((v = advance()) != null) {
5987 if ((u = transformer.apply(entryFor((K)nextKey, (V)v))) != null)
5988 r = (r == null) ? u : reducer.apply(r, u);
5989 }
5990 result = r;
5991 CountedCompleter<?> c;
5992 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5993 MapReduceEntriesTask<K,V,U>
5994 t = (MapReduceEntriesTask<K,V,U>)c,
5995 s = t.rights;
5996 while (s != null) {
5997 U tr, sr;
5998 if ((sr = s.result) != null)
5999 t.result = (((tr = t.result) == null) ? sr :
6000 reducer.apply(tr, sr));
6001 s = t.rights = s.nextRight;
6002 }
6003 }
6004 }
6005 }
6006
6007 @SuppressWarnings("serial") static final class MapReduceMappingsTask<K,V,U>
6008 extends Traverser<K,V,U> {
6009 final BiFun<? super K, ? super V, ? extends U> transformer;
6010 final BiFun<? super U, ? super U, ? extends U> reducer;
6011 U result;
6012 MapReduceMappingsTask<K,V,U> rights, nextRight;
6013 MapReduceMappingsTask
6014 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
6015 MapReduceMappingsTask<K,V,U> nextRight,
6016 BiFun<? super K, ? super V, ? extends U> transformer,
6017 BiFun<? super U, ? super U, ? extends U> reducer) {
6018 super(m, p, b); this.nextRight = nextRight;
6019 this.transformer = transformer;
6020 this.reducer = reducer;
6021 }
6022 public final U getRawResult() { return result; }
6023 @SuppressWarnings("unchecked") public final void compute() {
6024 final BiFun<? super K, ? super V, ? extends U> transformer =
6025 this.transformer;
6026 final BiFun<? super U, ? super U, ? extends U> reducer =
6027 this.reducer;
6028 if (transformer == null || reducer == null)
6029 throw new NullPointerException();
6030 for (int b; (b = preSplit()) > 0;)
6031 (rights = new MapReduceMappingsTask<K,V,U>
6032 (map, this, b, rights, transformer, reducer)).fork();
6033 U r = null, u;
6034 Object v;
6035 while ((v = advance()) != null) {
6036 if ((u = transformer.apply((K)nextKey, (V)v)) != null)
6037 r = (r == null) ? u : reducer.apply(r, u);
6038 }
6039 result = r;
6040 CountedCompleter<?> c;
6041 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6042 MapReduceMappingsTask<K,V,U>
6043 t = (MapReduceMappingsTask<K,V,U>)c,
6044 s = t.rights;
6045 while (s != null) {
6046 U tr, sr;
6047 if ((sr = s.result) != null)
6048 t.result = (((tr = t.result) == null) ? sr :
6049 reducer.apply(tr, sr));
6050 s = t.rights = s.nextRight;
6051 }
6052 }
6053 }
6054 }
6055
6056 @SuppressWarnings("serial") static final class MapReduceKeysToDoubleTask<K,V>
6057 extends Traverser<K,V,Double> {
6058 final ObjectToDouble<? super K> transformer;
6059 final DoubleByDoubleToDouble reducer;
6060 final double basis;
6061 double result;
6062 MapReduceKeysToDoubleTask<K,V> rights, nextRight;
6063 MapReduceKeysToDoubleTask
6064 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
6065 MapReduceKeysToDoubleTask<K,V> nextRight,
6066 ObjectToDouble<? super K> transformer,
6067 double basis,
6068 DoubleByDoubleToDouble reducer) {
6069 super(m, p, b); this.nextRight = nextRight;
6070 this.transformer = transformer;
6071 this.basis = basis; this.reducer = reducer;
6072 }
6073 public final Double getRawResult() { return result; }
6074 @SuppressWarnings("unchecked") public final void compute() {
6075 final ObjectToDouble<? super K> transformer =
6076 this.transformer;
6077 final DoubleByDoubleToDouble reducer = this.reducer;
6078 if (transformer == null || reducer == null)
6079 throw new NullPointerException();
6080 double r = this.basis;
6081 for (int b; (b = preSplit()) > 0;)
6082 (rights = new MapReduceKeysToDoubleTask<K,V>
6083 (map, this, b, rights, transformer, r, reducer)).fork();
6084 while (advance() != null)
6085 r = reducer.apply(r, transformer.apply((K)nextKey));
6086 result = r;
6087 CountedCompleter<?> c;
6088 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6089 MapReduceKeysToDoubleTask<K,V>
6090 t = (MapReduceKeysToDoubleTask<K,V>)c,
6091 s = t.rights;
6092 while (s != null) {
6093 t.result = reducer.apply(t.result, s.result);
6094 s = t.rights = s.nextRight;
6095 }
6096 }
6097 }
6098 }
6099
6100 @SuppressWarnings("serial") static final class MapReduceValuesToDoubleTask<K,V>
6101 extends Traverser<K,V,Double> {
6102 final ObjectToDouble<? super V> transformer;
6103 final DoubleByDoubleToDouble reducer;
6104 final double basis;
6105 double result;
6106 MapReduceValuesToDoubleTask<K,V> rights, nextRight;
6107 MapReduceValuesToDoubleTask
6108 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
6109 MapReduceValuesToDoubleTask<K,V> nextRight,
6110 ObjectToDouble<? super V> transformer,
6111 double basis,
6112 DoubleByDoubleToDouble reducer) {
6113 super(m, p, b); this.nextRight = nextRight;
6114 this.transformer = transformer;
6115 this.basis = basis; this.reducer = reducer;
6116 }
6117 public final Double getRawResult() { return result; }
6118 @SuppressWarnings("unchecked") public final void compute() {
6119 final ObjectToDouble<? super V> transformer =
6120 this.transformer;
6121 final DoubleByDoubleToDouble reducer = this.reducer;
6122 if (transformer == null || reducer == null)
6123 throw new NullPointerException();
6124 double r = this.basis;
6125 for (int b; (b = preSplit()) > 0;)
6126 (rights = new MapReduceValuesToDoubleTask<K,V>
6127 (map, this, b, rights, transformer, r, reducer)).fork();
6128 Object v;
6129 while ((v = advance()) != null)
6130 r = reducer.apply(r, transformer.apply((V)v));
6131 result = r;
6132 CountedCompleter<?> c;
6133 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6134 MapReduceValuesToDoubleTask<K,V>
6135 t = (MapReduceValuesToDoubleTask<K,V>)c,
6136 s = t.rights;
6137 while (s != null) {
6138 t.result = reducer.apply(t.result, s.result);
6139 s = t.rights = s.nextRight;
6140 }
6141 }
6142 }
6143 }
6144
6145 @SuppressWarnings("serial") static final class MapReduceEntriesToDoubleTask<K,V>
6146 extends Traverser<K,V,Double> {
6147 final ObjectToDouble<Map.Entry<K,V>> transformer;
6148 final DoubleByDoubleToDouble reducer;
6149 final double basis;
6150 double result;
6151 MapReduceEntriesToDoubleTask<K,V> rights, nextRight;
6152 MapReduceEntriesToDoubleTask
6153 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
6154 MapReduceEntriesToDoubleTask<K,V> nextRight,
6155 ObjectToDouble<Map.Entry<K,V>> transformer,
6156 double basis,
6157 DoubleByDoubleToDouble reducer) {
6158 super(m, p, b); this.nextRight = nextRight;
6159 this.transformer = transformer;
6160 this.basis = basis; this.reducer = reducer;
6161 }
6162 public final Double getRawResult() { return result; }
6163 @SuppressWarnings("unchecked") public final void compute() {
6164 final ObjectToDouble<Map.Entry<K,V>> transformer =
6165 this.transformer;
6166 final DoubleByDoubleToDouble reducer = this.reducer;
6167 if (transformer == null || reducer == null)
6168 throw new NullPointerException();
6169 double r = this.basis;
6170 for (int b; (b = preSplit()) > 0;)
6171 (rights = new MapReduceEntriesToDoubleTask<K,V>
6172 (map, this, b, rights, transformer, r, reducer)).fork();
6173 Object v;
6174 while ((v = advance()) != null)
6175 r = reducer.apply(r, transformer.apply(entryFor((K)nextKey, (V)v)));
6176 result = r;
6177 CountedCompleter<?> c;
6178 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6179 MapReduceEntriesToDoubleTask<K,V>
6180 t = (MapReduceEntriesToDoubleTask<K,V>)c,
6181 s = t.rights;
6182 while (s != null) {
6183 t.result = reducer.apply(t.result, s.result);
6184 s = t.rights = s.nextRight;
6185 }
6186 }
6187 }
6188 }
6189
6190 @SuppressWarnings("serial") static final class MapReduceMappingsToDoubleTask<K,V>
6191 extends Traverser<K,V,Double> {
6192 final ObjectByObjectToDouble<? super K, ? super V> transformer;
6193 final DoubleByDoubleToDouble reducer;
6194 final double basis;
6195 double result;
6196 MapReduceMappingsToDoubleTask<K,V> rights, nextRight;
6197 MapReduceMappingsToDoubleTask
6198 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
6199 MapReduceMappingsToDoubleTask<K,V> nextRight,
6200 ObjectByObjectToDouble<? super K, ? super V> transformer,
6201 double basis,
6202 DoubleByDoubleToDouble reducer) {
6203 super(m, p, b); this.nextRight = nextRight;
6204 this.transformer = transformer;
6205 this.basis = basis; this.reducer = reducer;
6206 }
6207 public final Double getRawResult() { return result; }
6208 @SuppressWarnings("unchecked") public final void compute() {
6209 final ObjectByObjectToDouble<? super K, ? super V> transformer =
6210 this.transformer;
6211 final DoubleByDoubleToDouble reducer = this.reducer;
6212 if (transformer == null || reducer == null)
6213 throw new NullPointerException();
6214 double r = this.basis;
6215 for (int b; (b = preSplit()) > 0;)
6216 (rights = new MapReduceMappingsToDoubleTask<K,V>
6217 (map, this, b, rights, transformer, r, reducer)).fork();
6218 Object v;
6219 while ((v = advance()) != null)
6220 r = reducer.apply(r, transformer.apply((K)nextKey, (V)v));
6221 result = r;
6222 CountedCompleter<?> c;
6223 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6224 MapReduceMappingsToDoubleTask<K,V>
6225 t = (MapReduceMappingsToDoubleTask<K,V>)c,
6226 s = t.rights;
6227 while (s != null) {
6228 t.result = reducer.apply(t.result, s.result);
6229 s = t.rights = s.nextRight;
6230 }
6231 }
6232 }
6233 }
6234
6235 @SuppressWarnings("serial") static final class MapReduceKeysToLongTask<K,V>
6236 extends Traverser<K,V,Long> {
6237 final ObjectToLong<? super K> transformer;
6238 final LongByLongToLong reducer;
6239 final long basis;
6240 long result;
6241 MapReduceKeysToLongTask<K,V> rights, nextRight;
6242 MapReduceKeysToLongTask
6243 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
6244 MapReduceKeysToLongTask<K,V> nextRight,
6245 ObjectToLong<? super K> transformer,
6246 long basis,
6247 LongByLongToLong reducer) {
6248 super(m, p, b); this.nextRight = nextRight;
6249 this.transformer = transformer;
6250 this.basis = basis; this.reducer = reducer;
6251 }
6252 public final Long getRawResult() { return result; }
6253 @SuppressWarnings("unchecked") public final void compute() {
6254 final ObjectToLong<? super K> transformer =
6255 this.transformer;
6256 final LongByLongToLong reducer = this.reducer;
6257 if (transformer == null || reducer == null)
6258 throw new NullPointerException();
6259 long r = this.basis;
6260 for (int b; (b = preSplit()) > 0;)
6261 (rights = new MapReduceKeysToLongTask<K,V>
6262 (map, this, b, rights, transformer, r, reducer)).fork();
6263 while (advance() != null)
6264 r = reducer.apply(r, transformer.apply((K)nextKey));
6265 result = r;
6266 CountedCompleter<?> c;
6267 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6268 MapReduceKeysToLongTask<K,V>
6269 t = (MapReduceKeysToLongTask<K,V>)c,
6270 s = t.rights;
6271 while (s != null) {
6272 t.result = reducer.apply(t.result, s.result);
6273 s = t.rights = s.nextRight;
6274 }
6275 }
6276 }
6277 }
6278
6279 @SuppressWarnings("serial") static final class MapReduceValuesToLongTask<K,V>
6280 extends Traverser<K,V,Long> {
6281 final ObjectToLong<? super V> transformer;
6282 final LongByLongToLong reducer;
6283 final long basis;
6284 long result;
6285 MapReduceValuesToLongTask<K,V> rights, nextRight;
6286 MapReduceValuesToLongTask
6287 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
6288 MapReduceValuesToLongTask<K,V> nextRight,
6289 ObjectToLong<? super V> transformer,
6290 long basis,
6291 LongByLongToLong reducer) {
6292 super(m, p, b); this.nextRight = nextRight;
6293 this.transformer = transformer;
6294 this.basis = basis; this.reducer = reducer;
6295 }
6296 public final Long getRawResult() { return result; }
6297 @SuppressWarnings("unchecked") public final void compute() {
6298 final ObjectToLong<? super V> transformer =
6299 this.transformer;
6300 final LongByLongToLong reducer = this.reducer;
6301 if (transformer == null || reducer == null)
6302 throw new NullPointerException();
6303 long r = this.basis;
6304 for (int b; (b = preSplit()) > 0;)
6305 (rights = new MapReduceValuesToLongTask<K,V>
6306 (map, this, b, rights, transformer, r, reducer)).fork();
6307 Object v;
6308 while ((v = advance()) != null)
6309 r = reducer.apply(r, transformer.apply((V)v));
6310 result = r;
6311 CountedCompleter<?> c;
6312 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6313 MapReduceValuesToLongTask<K,V>
6314 t = (MapReduceValuesToLongTask<K,V>)c,
6315 s = t.rights;
6316 while (s != null) {
6317 t.result = reducer.apply(t.result, s.result);
6318 s = t.rights = s.nextRight;
6319 }
6320 }
6321 }
6322 }
6323
6324 @SuppressWarnings("serial") static final class MapReduceEntriesToLongTask<K,V>
6325 extends Traverser<K,V,Long> {
6326 final ObjectToLong<Map.Entry<K,V>> transformer;
6327 final LongByLongToLong reducer;
6328 final long basis;
6329 long result;
6330 MapReduceEntriesToLongTask<K,V> rights, nextRight;
6331 MapReduceEntriesToLongTask
6332 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
6333 MapReduceEntriesToLongTask<K,V> nextRight,
6334 ObjectToLong<Map.Entry<K,V>> transformer,
6335 long basis,
6336 LongByLongToLong reducer) {
6337 super(m, p, b); this.nextRight = nextRight;
6338 this.transformer = transformer;
6339 this.basis = basis; this.reducer = reducer;
6340 }
6341 public final Long getRawResult() { return result; }
6342 @SuppressWarnings("unchecked") public final void compute() {
6343 final ObjectToLong<Map.Entry<K,V>> transformer =
6344 this.transformer;
6345 final LongByLongToLong reducer = this.reducer;
6346 if (transformer == null || reducer == null)
6347 throw new NullPointerException();
6348 long r = this.basis;
6349 for (int b; (b = preSplit()) > 0;)
6350 (rights = new MapReduceEntriesToLongTask<K,V>
6351 (map, this, b, rights, transformer, r, reducer)).fork();
6352 Object v;
6353 while ((v = advance()) != null)
6354 r = reducer.apply(r, transformer.apply(entryFor((K)nextKey, (V)v)));
6355 result = r;
6356 CountedCompleter<?> c;
6357 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6358 MapReduceEntriesToLongTask<K,V>
6359 t = (MapReduceEntriesToLongTask<K,V>)c,
6360 s = t.rights;
6361 while (s != null) {
6362 t.result = reducer.apply(t.result, s.result);
6363 s = t.rights = s.nextRight;
6364 }
6365 }
6366 }
6367 }
6368
6369 @SuppressWarnings("serial") static final class MapReduceMappingsToLongTask<K,V>
6370 extends Traverser<K,V,Long> {
6371 final ObjectByObjectToLong<? super K, ? super V> transformer;
6372 final LongByLongToLong reducer;
6373 final long basis;
6374 long result;
6375 MapReduceMappingsToLongTask<K,V> rights, nextRight;
6376 MapReduceMappingsToLongTask
6377 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
6378 MapReduceMappingsToLongTask<K,V> nextRight,
6379 ObjectByObjectToLong<? super K, ? super V> transformer,
6380 long basis,
6381 LongByLongToLong reducer) {
6382 super(m, p, b); this.nextRight = nextRight;
6383 this.transformer = transformer;
6384 this.basis = basis; this.reducer = reducer;
6385 }
6386 public final Long getRawResult() { return result; }
6387 @SuppressWarnings("unchecked") public final void compute() {
6388 final ObjectByObjectToLong<? super K, ? super V> transformer =
6389 this.transformer;
6390 final LongByLongToLong reducer = this.reducer;
6391 if (transformer == null || reducer == null)
6392 throw new NullPointerException();
6393 long r = this.basis;
6394 for (int b; (b = preSplit()) > 0;)
6395 (rights = new MapReduceMappingsToLongTask<K,V>
6396 (map, this, b, rights, transformer, r, reducer)).fork();
6397 Object v;
6398 while ((v = advance()) != null)
6399 r = reducer.apply(r, transformer.apply((K)nextKey, (V)v));
6400 result = r;
6401 CountedCompleter<?> c;
6402 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6403 MapReduceMappingsToLongTask<K,V>
6404 t = (MapReduceMappingsToLongTask<K,V>)c,
6405 s = t.rights;
6406 while (s != null) {
6407 t.result = reducer.apply(t.result, s.result);
6408 s = t.rights = s.nextRight;
6409 }
6410 }
6411 }
6412 }
6413
6414 @SuppressWarnings("serial") static final class MapReduceKeysToIntTask<K,V>
6415 extends Traverser<K,V,Integer> {
6416 final ObjectToInt<? super K> transformer;
6417 final IntByIntToInt reducer;
6418 final int basis;
6419 int result;
6420 MapReduceKeysToIntTask<K,V> rights, nextRight;
6421 MapReduceKeysToIntTask
6422 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
6423 MapReduceKeysToIntTask<K,V> nextRight,
6424 ObjectToInt<? super K> transformer,
6425 int basis,
6426 IntByIntToInt reducer) {
6427 super(m, p, b); this.nextRight = nextRight;
6428 this.transformer = transformer;
6429 this.basis = basis; this.reducer = reducer;
6430 }
6431 public final Integer getRawResult() { return result; }
6432 @SuppressWarnings("unchecked") public final void compute() {
6433 final ObjectToInt<? super K> transformer =
6434 this.transformer;
6435 final IntByIntToInt reducer = this.reducer;
6436 if (transformer == null || reducer == null)
6437 throw new NullPointerException();
6438 int r = this.basis;
6439 for (int b; (b = preSplit()) > 0;)
6440 (rights = new MapReduceKeysToIntTask<K,V>
6441 (map, this, b, rights, transformer, r, reducer)).fork();
6442 while (advance() != null)
6443 r = reducer.apply(r, transformer.apply((K)nextKey));
6444 result = r;
6445 CountedCompleter<?> c;
6446 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6447 MapReduceKeysToIntTask<K,V>
6448 t = (MapReduceKeysToIntTask<K,V>)c,
6449 s = t.rights;
6450 while (s != null) {
6451 t.result = reducer.apply(t.result, s.result);
6452 s = t.rights = s.nextRight;
6453 }
6454 }
6455 }
6456 }
6457
6458 @SuppressWarnings("serial") static final class MapReduceValuesToIntTask<K,V>
6459 extends Traverser<K,V,Integer> {
6460 final ObjectToInt<? super V> transformer;
6461 final IntByIntToInt reducer;
6462 final int basis;
6463 int result;
6464 MapReduceValuesToIntTask<K,V> rights, nextRight;
6465 MapReduceValuesToIntTask
6466 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
6467 MapReduceValuesToIntTask<K,V> nextRight,
6468 ObjectToInt<? super V> transformer,
6469 int basis,
6470 IntByIntToInt reducer) {
6471 super(m, p, b); this.nextRight = nextRight;
6472 this.transformer = transformer;
6473 this.basis = basis; this.reducer = reducer;
6474 }
6475 public final Integer getRawResult() { return result; }
6476 @SuppressWarnings("unchecked") public final void compute() {
6477 final ObjectToInt<? super V> transformer =
6478 this.transformer;
6479 final IntByIntToInt reducer = this.reducer;
6480 if (transformer == null || reducer == null)
6481 throw new NullPointerException();
6482 int r = this.basis;
6483 for (int b; (b = preSplit()) > 0;)
6484 (rights = new MapReduceValuesToIntTask<K,V>
6485 (map, this, b, rights, transformer, r, reducer)).fork();
6486 Object v;
6487 while ((v = advance()) != null)
6488 r = reducer.apply(r, transformer.apply((V)v));
6489 result = r;
6490 CountedCompleter<?> c;
6491 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6492 MapReduceValuesToIntTask<K,V>
6493 t = (MapReduceValuesToIntTask<K,V>)c,
6494 s = t.rights;
6495 while (s != null) {
6496 t.result = reducer.apply(t.result, s.result);
6497 s = t.rights = s.nextRight;
6498 }
6499 }
6500 }
6501 }
6502
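     // As above, but the transformer maps each entry to an int.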
6503 @SuppressWarnings("serial") static final class MapReduceEntriesToIntTask<K,V>
6504 extends Traverser<K,V,Integer> {
6505 final ObjectToInt<Map.Entry<K,V>> transformer;
6506 final IntByIntToInt reducer;
6507 final int basis;
6508 int result;
6509 MapReduceEntriesToIntTask<K,V> rights, nextRight;
6510 MapReduceEntriesToIntTask
6511 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
6512 MapReduceEntriesToIntTask<K,V> nextRight,
6513 ObjectToInt<Map.Entry<K,V>> transformer,
6514 int basis,
6515 IntByIntToInt reducer) {
6516 super(m, p, b); this.nextRight = nextRight;
6517 this.transformer = transformer;
6518 this.basis = basis; this.reducer = reducer;
6519 }
6520 public final Integer getRawResult() { return result; }
6521 @SuppressWarnings("unchecked") public final void compute() {
6522 final ObjectToInt<Map.Entry<K,V>> transformer =
6523 this.transformer;
6524 final IntByIntToInt reducer = this.reducer;
6525 if (transformer == null || reducer == null)
6526 throw new NullPointerException();
6527 int r = this.basis;
6528 for (int b; (b = preSplit()) > 0;)
6529 (rights = new MapReduceEntriesToIntTask<K,V>
6530 (map, this, b, rights, transformer, r, reducer)).fork();
6531 Object v;
6532 while ((v = advance()) != null)
6533 r = reducer.apply(r, transformer.apply(entryFor((K)nextKey, (V)v)));
6534 result = r;
6535 CountedCompleter<?> c;
6536 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6537 MapReduceEntriesToIntTask<K,V>
6538 t = (MapReduceEntriesToIntTask<K,V>)c,
6539 s = t.rights;
6540 while (s != null) {
6541 t.result = reducer.apply(t.result, s.result);
6542 s = t.rights = s.nextRight;
6543 }
6544 }
6545 }
6546 }
6547
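     // As above, but the transformer maps each (key, value) pair to an int.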
6548 @SuppressWarnings("serial") static final class MapReduceMappingsToIntTask<K,V>
6549 extends Traverser<K,V,Integer> {
6550 final ObjectByObjectToInt<? super K, ? super V> transformer;
6551 final IntByIntToInt reducer;
6552 final int basis;
6553 int result;
6554 MapReduceMappingsToIntTask<K,V> rights, nextRight;
6555 MapReduceMappingsToIntTask
6556 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
6557 MapReduceMappingsToIntTask<K,V> nextRight,
6558 ObjectByObjectToInt<? super K, ? super V> transformer,
6559 int basis,
6560 IntByIntToInt reducer) {
6561 super(m, p, b); this.nextRight = nextRight;
6562 this.transformer = transformer;
6563 this.basis = basis; this.reducer = reducer;
6564 }
6565 public final Integer getRawResult() { return result; }
6566 @SuppressWarnings("unchecked") public final void compute() {
6567 final ObjectByObjectToInt<? super K, ? super V> transformer =
6568 this.transformer;
6569 final IntByIntToInt reducer = this.reducer;
6570 if (transformer == null || reducer == null)
6571 throw new NullPointerException();
6572 int r = this.basis;
6573 for (int b; (b = preSplit()) > 0;)
6574 (rights = new MapReduceMappingsToIntTask<K,V>
6575 (map, this, b, rights, transformer, r, reducer)).fork();
6576 Object v;
6577 while ((v = advance()) != null)
6578 r = reducer.apply(r, transformer.apply((K)nextKey, (V)v));
6579 result = r;
6580 CountedCompleter<?> c;
6581 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6582 MapReduceMappingsToIntTask<K,V>
6583 t = (MapReduceMappingsToIntTask<K,V>)c,
6584 s = t.rights;
6585 while (s != null) {
6586 t.result = reducer.apply(t.result, s.result);
6587 s = t.rights = s.nextRight;
6588 }
6589 }
6590 }
6591 }
6592
6593
6594 // Unsafe mechanics
6595 private static final sun.misc.Unsafe UNSAFE;
6596 private static final long counterOffset;
6597 private static final long sizeCtlOffset;
6598 private static final long ABASE;
6599 private static final int ASHIFT;
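     // ABASE and ASHIFT let a table slot be addressed directly: the byte
     // offset of tab[i] is ((long)i << ASHIFT) + ABASE, the form used by the
     // volatile per-slot accessors (tabAt/casTabAt/setTabAt) earlier in this file.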
6600
6601 static {
6602 int ss;
6603 try {
6604 UNSAFE = sun.misc.Unsafe.getUnsafe();
6605 Class<?> k = ConcurrentHashMap.class;
6606 counterOffset = UNSAFE.objectFieldOffset
6607 (k.getDeclaredField("counter"));
6608 sizeCtlOffset = UNSAFE.objectFieldOffset
6609 (k.getDeclaredField("sizeCtl"));
6610 Class<?> sc = Node[].class;
6611 ABASE = UNSAFE.arrayBaseOffset(sc);
6612 ss = UNSAFE.arrayIndexScale(sc);
6613 } catch (Exception e) {
6614 throw new Error(e);
6615 }
6616 if ((ss & (ss-1)) != 0)
6617 throw new Error("data type scale not a power of two");
6618 ASHIFT = 31 - Integer.numberOfLeadingZeros(ss);
6619 }
6620 }
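
The MapReduce*Task classes above all follow the same CountedCompleter idiom: repeatedly fork off right-hand subtasks, chain them on a "rights" list, compute the local leaf result, then fold subtask results back in during the firstComplete()/nextComplete() pass. Below is a minimal standalone sketch of that idiom over a plain array. It is illustrative only and not part of this file: the class name ArrayMapReduceToInt is hypothetical, and java.util.function.ToIntFunction and IntBinaryOperator (which postdate this 2012 revision) stand in for the ObjectToInt and IntByIntToInt shapes used above.

import java.util.concurrent.CountedCompleter;
import java.util.function.IntBinaryOperator;
import java.util.function.ToIntFunction;

// Sketch only: sums transformer(a[i]) over an array using the same
// right-spine splitting and completion-folding pattern as the tasks above.
@SuppressWarnings("serial")
class ArrayMapReduceToInt<T> extends CountedCompleter<Integer> {
    final T[] array;
    final ToIntFunction<? super T> transformer;   // stand-in for ObjectToInt
    final IntBinaryOperator reducer;              // stand-in for IntByIntToInt
    final int basis, lo, hi;
    int result;
    ArrayMapReduceToInt<T> rights, nextRight;

    ArrayMapReduceToInt(CountedCompleter<?> parent, T[] array,
                        ToIntFunction<? super T> transformer,
                        IntBinaryOperator reducer, int basis,
                        int lo, int hi, ArrayMapReduceToInt<T> nextRight) {
        super(parent);
        this.array = array; this.transformer = transformer;
        this.reducer = reducer; this.basis = basis;
        this.lo = lo; this.hi = hi; this.nextRight = nextRight;
    }

    public Integer getRawResult() { return result; }

    public void compute() {
        int l = lo, h = hi, r = basis;
        while (h - l >= 2) {                      // analogue of preSplit()
            int mid = (l + h) >>> 1;
            addToPendingCount(1);                 // one more pending subtask
            (rights = new ArrayMapReduceToInt<T>(this, array, transformer,
                reducer, basis, mid, h, rights)).fork();
            h = mid;                              // keep the left half locally
        }
        for (int i = l; i < h; ++i)               // analogue of advance()
            r = reducer.applyAsInt(r, transformer.applyAsInt(array[i]));
        result = r;
        // Fold completed right-hand subtasks into each task whose pending
        // count reaches zero, mirroring the loop in the classes above.
        for (CountedCompleter<?> c = firstComplete(); c != null; c = c.nextComplete()) {
            @SuppressWarnings("unchecked")
            ArrayMapReduceToInt<T> t = (ArrayMapReduceToInt<T>) c, s = t.rights;
            while (s != null) {
                t.result = reducer.applyAsInt(t.result, s.result);
                s = t.rights = s.nextRight;
            }
        }
    }

    static <T> int mapReduce(T[] a, ToIntFunction<? super T> f,
                             int basis, IntBinaryOperator op) {
        return new ArrayMapReduceToInt<T>(null, a, f, op, basis,
                                          0, a.length, null).invoke();
    }
}

For example, ArrayMapReduceToInt.mapReduce(words, String::length, 0, Integer::sum) would sum the lengths of the strings in words in parallel. As with the tasks above, basis should be an identity for the reducer, since every subtask starts its local reduction from it.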