ViewVC Help
View File | Revision Log | Show Annotations | Download File | Root Listing
root/jsr166/jsr166/src/main/java/util/concurrent/ConcurrentHashMap.java
Revision: 1.136
Committed: Sun Oct 21 06:14:11 2012 UTC (11 years, 7 months ago) by jsr166
Branch: MAIN
Changes since 1.135: +0 -3 lines
Log Message:
delete trailing empty lines of javadoc

File Contents

# Content
1 /*
2 * Written by Doug Lea with assistance from members of JCP JSR-166
3 * Expert Group and released to the public domain, as explained at
4 * http://creativecommons.org/publicdomain/zero/1.0/
5 */
6
7 package java.util.concurrent;
8 import java.util.concurrent.atomic.LongAdder;
9 import java.util.concurrent.ForkJoinPool;
10 import java.util.concurrent.ForkJoinTask;
11
12 import java.util.Comparator;
13 import java.util.Arrays;
14 import java.util.Map;
15 import java.util.Set;
16 import java.util.Collection;
17 import java.util.AbstractMap;
18 import java.util.AbstractSet;
19 import java.util.AbstractCollection;
20 import java.util.Hashtable;
21 import java.util.HashMap;
22 import java.util.Iterator;
23 import java.util.Enumeration;
24 import java.util.ConcurrentModificationException;
25 import java.util.NoSuchElementException;
26 import java.util.concurrent.ConcurrentMap;
27 import java.util.concurrent.ThreadLocalRandom;
28 import java.util.concurrent.locks.LockSupport;
29 import java.util.concurrent.locks.AbstractQueuedSynchronizer;
30 import java.util.concurrent.atomic.AtomicReference;
31
32 import java.io.Serializable;
33
34 /**
35 * A hash table supporting full concurrency of retrievals and
36 * high expected concurrency for updates. This class obeys the
37 * same functional specification as {@link java.util.Hashtable}, and
38 * includes versions of methods corresponding to each method of
39 * {@code Hashtable}. However, even though all operations are
40 * thread-safe, retrieval operations do <em>not</em> entail locking,
41 * and there is <em>not</em> any support for locking the entire table
42 * in a way that prevents all access. This class is fully
43 * interoperable with {@code Hashtable} in programs that rely on its
44 * thread safety but not on its synchronization details.
45 *
46 * <p> Retrieval operations (including {@code get}) generally do not
47 * block, so may overlap with update operations (including {@code put}
48 * and {@code remove}). Retrievals reflect the results of the most
49 * recently <em>completed</em> update operations holding upon their
50 * onset. (More formally, an update operation for a given key bears a
51 * <em>happens-before</em> relation with any (non-null) retrieval for
52 * that key reporting the updated value.) For aggregate operations
53 * such as {@code putAll} and {@code clear}, concurrent retrievals may
54 * reflect insertion or removal of only some entries. Similarly,
55 * Iterators and Enumerations return elements reflecting the state of
56 * the hash table at some point at or since the creation of the
57 * iterator/enumeration. They do <em>not</em> throw {@link
58 * ConcurrentModificationException}. However, iterators are designed
59 * to be used by only one thread at a time. Bear in mind that the
60 * results of aggregate status methods including {@code size}, {@code
61 * isEmpty}, and {@code containsValue} are typically useful only when
62 * a map is not undergoing concurrent updates in other threads.
63 * Otherwise the results of these methods reflect transient states
64 * that may be adequate for monitoring or estimation purposes, but not
65 * for program control.
66 *
67 * <p> The table is dynamically expanded when there are too many
68 * collisions (i.e., keys that have distinct hash codes but fall into
69 * the same slot modulo the table size), with the expected average
70 * effect of maintaining roughly two bins per mapping (corresponding
71 * to a 0.75 load factor threshold for resizing). There may be much
72 * variance around this average as mappings are added and removed, but
73 * overall, this maintains a commonly accepted time/space tradeoff for
74 * hash tables. However, resizing this or any other kind of hash
75 * table may be a relatively slow operation. When possible, it is a
76 * good idea to provide a size estimate as an optional {@code
77 * initialCapacity} constructor argument. An additional optional
78 * {@code loadFactor} constructor argument provides a further means of
79 * customizing initial table capacity by specifying the table density
80 * to be used in calculating the amount of space to allocate for the
81 * given number of elements. Also, for compatibility with previous
82 * versions of this class, constructors may optionally specify an
83 * expected {@code concurrencyLevel} as an additional hint for
84 * internal sizing. Note that using many keys with exactly the same
85 * {@code hashCode()} is a sure way to slow down performance of any
86 * hash table.
87 *
88 * <p>This class and its views and iterators implement all of the
89 * <em>optional</em> methods of the {@link Map} and {@link Iterator}
90 * interfaces.
91 *
92 * <p> Like {@link Hashtable} but unlike {@link HashMap}, this class
93 * does <em>not</em> allow {@code null} to be used as a key or value.
94 *
95 * <p>This class is a member of the
96 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
97 * Java Collections Framework</a>.
98 *
 * <p><em>During transition to JDK8, this
 * class declares and uses nested functional interfaces with different
 * names but the same forms as those expected for JDK8.</em>
102 *
103 * @since 1.5
104 * @author Doug Lea
105 * @param <K> the type of keys maintained by this map
106 * @param <V> the type of mapped values
107 */
108 public class ConcurrentHashMap<K, V>
109 implements ConcurrentMap<K, V>, Serializable {
110 private static final long serialVersionUID = 7249069246763182397L;
111
    /**
     * A partitionable iterator. A Spliterator can be traversed
     * directly, but can also be partitioned (before traversal) by
     * creating another Spliterator that covers a non-overlapping
     * portion of the elements, and so may be amenable to parallel
     * execution.
     *
     * <p>This interface exports a subset of expected JDK8
     * functionality.
     *
     * <p>Sample usage: Here is one (of the several) ways to compute
     * the sum of the values held in a map using the ForkJoin
     * framework. As illustrated here, Spliterators are well suited to
     * designs in which a task repeatedly splits off half its work
     * into forked subtasks until small enough to process directly,
     * and then joins these subtasks. Variants of this style can also
     * be used in completion-based designs.
     *
     * <pre>
     * {@code ConcurrentHashMap<String, Long> m = ...
     * // split as if have 8 * parallelism, for load balance
     * int n = m.size();
     * int p = aForkJoinPool.getParallelism() * 8;
     * int split = (n < p)? n : p;
     * long sum = aForkJoinPool.invoke(new SumValues(m.valueSpliterator(), split, null));
     * // ...
     * static class SumValues extends RecursiveTask<Long> {
     *   final Spliterator<Long> s;
     *   final int split;             // split while > 1
     *   final SumValues nextJoin;    // records forked subtasks to join
     *   SumValues(Spliterator<Long> s, int split, SumValues nextJoin) {
     *     this.s = s; this.split = split; this.nextJoin = nextJoin;
     *   }
     *   public Long compute() {
     *     long sum = 0;
     *     SumValues subtasks = null; // fork subtasks
     *     for (int h = split >>> 1; h > 0; h >>>= 1)
     *       (subtasks = new SumValues(s.split(), h, subtasks)).fork();
     *     while (s.hasNext())        // directly process remaining elements
     *       sum += s.next();
     *     for (SumValues t = subtasks; t != null; t = t.nextJoin)
     *       sum += t.join();         // collect subtask results
     *     return sum;
     *   }
     * }
     * }</pre>
     */
    public static interface Spliterator<T> extends Iterator<T> {
        /**
         * Returns a Spliterator covering approximately half of the
         * elements, guaranteed not to overlap with those subsequently
         * returned by this Spliterator.  After invoking this method,
         * the current Spliterator will <em>not</em> produce any of
         * the elements of the returned Spliterator, but the two
         * Spliterators together will produce all of the elements that
         * would have been produced by this Spliterator had this
         * method not been called. The exact number of elements
         * produced by the returned Spliterator is not guaranteed, and
         * may be zero (i.e., with {@code hasNext()} reporting {@code
         * false}) if this Spliterator cannot be further split.
         *
         * @return a Spliterator covering approximately half of the
         * elements
         * @throws IllegalStateException if this Spliterator has
         * already commenced traversing elements
         */
        Spliterator<T> split();
    }
180
181 /*
182 * Overview:
183 *
184 * The primary design goal of this hash table is to maintain
185 * concurrent readability (typically method get(), but also
186 * iterators and related methods) while minimizing update
187 * contention. Secondary goals are to keep space consumption about
188 * the same or better than java.util.HashMap, and to support high
189 * initial insertion rates on an empty table by many threads.
190 *
191 * Each key-value mapping is held in a Node. Because Node fields
192 * can contain special values, they are defined using plain Object
193 * types. Similarly in turn, all internal methods that use them
194 * work off Object types. And similarly, so do the internal
195 * methods of auxiliary iterator and view classes. All public
196 * generic typed methods relay in/out of these internal methods,
197 * supplying null-checks and casts as needed. This also allows
198 * many of the public methods to be factored into a smaller number
199 * of internal methods (although sadly not so for the five
200 * variants of put-related operations). The validation-based
201 * approach explained below leads to a lot of code sprawl because
202 * retry-control precludes factoring into smaller methods.
203 *
204 * The table is lazily initialized to a power-of-two size upon the
205 * first insertion. Each bin in the table normally contains a
206 * list of Nodes (most often, the list has only zero or one Node).
207 * Table accesses require volatile/atomic reads, writes, and
208 * CASes. Because there is no other way to arrange this without
209 * adding further indirections, we use intrinsics
210 * (sun.misc.Unsafe) operations. The lists of nodes within bins
211 * are always accurately traversable under volatile reads, so long
212 * as lookups check hash code and non-nullness of value before
213 * checking key equality.
214 *
215 * We use the top two bits of Node hash fields for control
216 * purposes -- they are available anyway because of addressing
217 * constraints. As explained further below, these top bits are
218 * used as follows:
219 * 00 - Normal
220 * 01 - Locked
221 * 11 - Locked and may have a thread waiting for lock
222 * 10 - Node is a forwarding node
223 *
224 * The lower 30 bits of each Node's hash field contain a
225 * transformation of the key's hash code, except for forwarding
226 * nodes, for which the lower bits are zero (and so always have
227 * hash field == MOVED).
228 *
229 * Insertion (via put or its variants) of the first node in an
230 * empty bin is performed by just CASing it to the bin. This is
231 * by far the most common case for put operations under most
232 * key/hash distributions. Other update operations (insert,
233 * delete, and replace) require locks. We do not want to waste
234 * the space required to associate a distinct lock object with
235 * each bin, so instead use the first node of a bin list itself as
236 * a lock. Blocking support for these locks relies on the builtin
237 * "synchronized" monitors. However, we also need a tryLock
238 * construction, so we overlay these by using bits of the Node
239 * hash field for lock control (see above), and so normally use
240 * builtin monitors only for blocking and signalling using
241 * wait/notifyAll constructions. See Node.tryAwaitLock.
242 *
243 * Using the first node of a list as a lock does not by itself
244 * suffice though: When a node is locked, any update must first
245 * validate that it is still the first node after locking it, and
246 * retry if not. Because new nodes are always appended to lists,
247 * once a node is first in a bin, it remains first until deleted
248 * or the bin becomes invalidated (upon resizing). However,
249 * operations that only conditionally update may inspect nodes
250 * until the point of update. This is a converse of sorts to the
251 * lazy locking technique described by Herlihy & Shavit.
252 *
253 * The main disadvantage of per-bin locks is that other update
254 * operations on other nodes in a bin list protected by the same
255 * lock can stall, for example when user equals() or mapping
256 * functions take a long time. However, statistically, under
257 * random hash codes, this is not a common problem. Ideally, the
258 * frequency of nodes in bins follows a Poisson distribution
259 * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
260 * parameter of about 0.5 on average, given the resizing threshold
261 * of 0.75, although with a large variance because of resizing
262 * granularity. Ignoring variance, the expected occurrences of
263 * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
264 * first values are:
265 *
266 * 0: 0.60653066
267 * 1: 0.30326533
268 * 2: 0.07581633
269 * 3: 0.01263606
270 * 4: 0.00157952
271 * 5: 0.00015795
272 * 6: 0.00001316
273 * 7: 0.00000094
274 * 8: 0.00000006
275 * more: less than 1 in ten million
276 *
277 * Lock contention probability for two threads accessing distinct
278 * elements is roughly 1 / (8 * #elements) under random hashes.
279 *
280 * Actual hash code distributions encountered in practice
281 * sometimes deviate significantly from uniform randomness. This
282 * includes the case when N > (1<<30), so some keys MUST collide.
283 * Similarly for dumb or hostile usages in which multiple keys are
284 * designed to have identical hash codes. Also, although we guard
285 * against the worst effects of this (see method spread), sets of
286 * hashes may differ only in bits that do not impact their bin
287 * index for a given power-of-two mask. So we use a secondary
288 * strategy that applies when the number of nodes in a bin exceeds
289 * a threshold, and at least one of the keys implements
290 * Comparable. These TreeBins use a balanced tree to hold nodes
291 * (a specialized form of red-black trees), bounding search time
292 * to O(log N). Each search step in a TreeBin is around twice as
293 * slow as in a regular list, but given that N cannot exceed
294 * (1<<64) (before running out of addresses) this bounds search
295 * steps, lock hold times, etc, to reasonable constants (roughly
296 * 100 nodes inspected per operation worst case) so long as keys
297 * are Comparable (which is very common -- String, Long, etc).
298 * TreeBin nodes (TreeNodes) also maintain the same "next"
299 * traversal pointers as regular nodes, so can be traversed in
300 * iterators in the same way.
301 *
302 * The table is resized when occupancy exceeds a percentage
303 * threshold (nominally, 0.75, but see below). Only a single
304 * thread performs the resize (using field "sizeCtl", to arrange
305 * exclusion), but the table otherwise remains usable for reads
306 * and updates. Resizing proceeds by transferring bins, one by
307 * one, from the table to the next table. Because we are using
308 * power-of-two expansion, the elements from each bin must either
309 * stay at same index, or move with a power of two offset. We
310 * eliminate unnecessary node creation by catching cases where old
311 * nodes can be reused because their next fields won't change. On
312 * average, only about one-sixth of them need cloning when a table
313 * doubles. The nodes they replace will be garbage collectable as
314 * soon as they are no longer referenced by any reader thread that
315 * may be in the midst of concurrently traversing table. Upon
316 * transfer, the old table bin contains only a special forwarding
317 * node (with hash field "MOVED") that contains the next table as
318 * its key. On encountering a forwarding node, access and update
319 * operations restart, using the new table.
320 *
321 * Each bin transfer requires its bin lock. However, unlike other
322 * cases, a transfer can skip a bin if it fails to acquire its
323 * lock, and revisit it later (unless it is a TreeBin). Method
324 * rebuild maintains a buffer of TRANSFER_BUFFER_SIZE bins that
325 * have been skipped because of failure to acquire a lock, and
326 * blocks only if none are available (i.e., only very rarely).
327 * The transfer operation must also ensure that all accessible
328 * bins in both the old and new table are usable by any traversal.
329 * When there are no lock acquisition failures, this is arranged
330 * simply by proceeding from the last bin (table.length - 1) up
331 * towards the first. Upon seeing a forwarding node, traversals
332 * (see class Iter) arrange to move to the new table
333 * without revisiting nodes. However, when any node is skipped
334 * during a transfer, all earlier table bins may have become
335 * visible, so are initialized with a reverse-forwarding node back
336 * to the old table until the new ones are established. (This
337 * sometimes requires transiently locking a forwarding node, which
338 * is possible under the above encoding.) These more expensive
339 * mechanics trigger only when necessary.
340 *
341 * The traversal scheme also applies to partial traversals of
342 * ranges of bins (via an alternate Traverser constructor)
343 * to support partitioned aggregate operations. Also, read-only
344 * operations give up if ever forwarded to a null table, which
345 * provides support for shutdown-style clearing, which is also not
346 * currently implemented.
347 *
348 * Lazy table initialization minimizes footprint until first use,
349 * and also avoids resizings when the first operation is from a
350 * putAll, constructor with map argument, or deserialization.
351 * These cases attempt to override the initial capacity settings,
352 * but harmlessly fail to take effect in cases of races.
353 *
354 * The element count is maintained using a LongAdder, which avoids
355 * contention on updates but can encounter cache thrashing if read
356 * too frequently during concurrent access. To avoid reading so
357 * often, resizing is attempted either when a bin lock is
358 * contended, or upon adding to a bin already holding two or more
359 * nodes (checked before adding in the xIfAbsent methods, after
360 * adding in others). Under uniform hash distributions, the
361 * probability of this occurring at threshold is around 13%,
362 * meaning that only about 1 in 8 puts check threshold (and after
363 * resizing, many fewer do so). But this approximation has high
364 * variance for small table sizes, so we check on any collision
365 * for sizes <= 64. The bulk putAll operation further reduces
366 * contention by only committing count updates upon these size
367 * checks.
368 *
369 * Maintaining API and serialization compatibility with previous
370 * versions of this class introduces several oddities. Mainly: We
 * leave untouched but unused constructor arguments referring to
372 * concurrencyLevel. We accept a loadFactor constructor argument,
373 * but apply it only to initial table capacity (which is the only
374 * time that we can guarantee to honor it.) We also declare an
375 * unused "Segment" class that is instantiated in minimal form
376 * only when serializing.
377 */
378
    /* ---------------- Constants -------------- */

    /**
     * The largest possible table capacity.  This value must be
     * exactly 1<<30 to stay within Java array allocation and indexing
     * bounds for power of two table sizes, and is further required
     * because the top two bits of 32bit hash fields are used for
     * control purposes.
     */
    private static final int MAXIMUM_CAPACITY = 1 << 30;

    /**
     * The default initial table capacity.  Must be a power of 2
     * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
     */
    private static final int DEFAULT_CAPACITY = 16;

    /**
     * The largest possible (non-power of two) array size.
     * Needed by toArray and related methods.
     */
    static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;

    /**
     * The default concurrency level for this table. Unused but
     * defined for compatibility with previous versions of this class.
     */
    private static final int DEFAULT_CONCURRENCY_LEVEL = 16;

    /**
     * The load factor for this table. Overrides of this value in
     * constructors affect only the initial table capacity.  The
     * actual floating point value isn't normally used -- it is
     * simpler to use expressions such as {@code n - (n >>> 2)} for
     * the associated resizing threshold.
     */
    private static final float LOAD_FACTOR = 0.75f;

    /**
     * The buffer size for skipped bins during transfers. The
     * value is arbitrary but should be large enough to avoid
     * most locking stalls during resizes.
     */
    private static final int TRANSFER_BUFFER_SIZE = 32;

    /**
     * The bin count threshold for using a tree rather than list for a
     * bin.  The value reflects the approximate break-even point for
     * using tree-based operations.
     */
    private static final int TREE_THRESHOLD = 8;

    /*
     * Encodings for special uses of Node hash fields. See the class
     * overview above for explanation: the two high bits distinguish
     * normal (00), locked (01), locked-with-waiter (11), and
     * forwarding (10) nodes; the low 30 bits hold the spread hash.
     */
    static final int MOVED   = 0x80000000; // hash field for forwarding nodes
    static final int LOCKED  = 0x40000000; // set/tested only as a bit
    static final int WAITING = 0xc0000000; // both bits set/tested together
    static final int HASH_BITS = 0x3fffffff; // usable bits of normal node hash
439
    /* ---------------- Fields -------------- */

    /**
     * The array of bins. Lazily initialized upon first insertion.
     * Size is always a power of two. Accessed directly by iterators.
     */
    transient volatile Node[] table;

    /**
     * The counter maintaining number of elements. A LongAdder is used
     * rather than a plain field to avoid update contention (see the
     * class overview comment).
     */
    private transient final LongAdder counter;

    /**
     * Table initialization and resizing control.  When negative, the
     * table is being initialized or resized. Otherwise, when table is
     * null, holds the initial table size to use upon creation, or 0
     * for default. After initialization, holds the next element count
     * value upon which to resize the table.
     */
    private transient volatile int sizeCtl;

    // views -- lazily created by the corresponding view methods
    private transient KeySet<K,V> keySet;
    private transient Values<K,V> values;
    private transient EntrySet<K,V> entrySet;

    /** For serialization compatibility. Null unless serialized; see below */
    private Segment<K,V>[] segments;
469
    /* ---------------- Table element access -------------- */

    /*
     * Volatile access methods are used for table elements as well as
     * elements of in-progress next table while resizing.  Uses are
     * null checked by callers, and implicitly bounds-checked, relying
     * on the invariants that tab arrays have non-zero size, and all
     * indices are masked with (tab.length - 1) which is never
     * negative and always less than length. Note that, to be correct
     * wrt arbitrary concurrency errors by users, bounds checks must
     * operate on local variables, which accounts for some odd-looking
     * inline assignments below.
     *
     * ABASE/ASHIFT are the Unsafe array base offset and index shift;
     * they are initialized elsewhere in this class (not visible in
     * this chunk).
     */

    /** Volatile read of the Node at index i of tab. */
    static final Node tabAt(Node[] tab, int i) { // used by Iter
        return (Node)UNSAFE.getObjectVolatile(tab, ((long)i<<ASHIFT)+ABASE);
    }

    /** CAS of the Node at index i of tab from c to v; false if lost a race. */
    private static final boolean casTabAt(Node[] tab, int i, Node c, Node v) {
        return UNSAFE.compareAndSwapObject(tab, ((long)i<<ASHIFT)+ABASE, c, v);
    }

    /** Volatile write of v into index i of tab. */
    private static final void setTabAt(Node[] tab, int i, Node v) {
        UNSAFE.putObjectVolatile(tab, ((long)i<<ASHIFT)+ABASE, v);
    }
495
496 /* ---------------- Nodes -------------- */
497
    /**
     * Key-value entry. Note that this is never exported out as a
     * user-visible Map.Entry (see MapEntry below). Nodes with a hash
     * field of MOVED are special, and do not contain user keys or
     * values. Otherwise, keys are never null, and null val fields
     * indicate that a node is in the process of being deleted or
     * created. For purposes of read-only access, a key may be read
     * before a val, but can only be used after checking val to be
     * non-null.
     */
    static class Node {
        volatile int hash;   // spread hash plus LOCKED/WAITING/MOVED control bits
        final Object key;
        volatile Object val; // null while the node is being created or deleted
        volatile Node next;

        Node(int hash, Object key, Object val, Node next) {
            this.hash = hash;
            this.key = key;
            this.val = val;
            this.next = next;
        }

        /** CompareAndSet the hash field */
        final boolean casHash(int cmp, int val) {
            return UNSAFE.compareAndSwapInt(this, hashOffset, cmp, val);
        }

        /** The number of spins before blocking for a lock */
        static final int MAX_SPINS =
            Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1;

        /**
         * Spins a while if LOCKED bit set and this node is the first
         * of its bin, and then sets WAITING bits on hash field and
         * blocks (once) if they are still set.  It is OK for this
         * method to return even if lock is not available upon exit,
         * which enables these simple single-wait mechanics.
         *
         * The corresponding signalling operation is performed within
         * callers: Upon detecting that WAITING has been set when
         * unlocking lock (via a failed CAS from non-waiting LOCKED
         * state), unlockers acquire the sync lock and perform a
         * notifyAll.
         *
         * The initial sanity check on tab and bounds is not currently
         * necessary in the only usages of this method, but enables
         * use in other future contexts.
         */
        final void tryAwaitLock(Node[] tab, int i) {
            if (tab != null && i >= 0 && i < tab.length) { // sanity check
                int r = ThreadLocalRandom.current().nextInt(); // randomize spins
                int spins = MAX_SPINS, h;
                // Keep spinning only while this node is still the bin head
                // and the lock bit remains set.
                while (tabAt(tab, i) == this && ((h = hash) & LOCKED) != 0) {
                    if (spins >= 0) {
                        r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift
                        // spins is decremented only when r is nonnegative
                        // (~half the time), randomizing spin duration
                        if (r >= 0 && --spins == 0)
                            Thread.yield(); // yield before block
                    }
                    else if (casHash(h, h | WAITING)) {
                        // Successfully advertised a waiter; block (once)
                        // unless the state changed before we got the monitor.
                        synchronized (this) {
                            if (tabAt(tab, i) == this &&
                                (hash & WAITING) == WAITING) {
                                try {
                                    wait();
                                } catch (InterruptedException ie) {
                                    // preserve interrupt status for caller
                                    Thread.currentThread().interrupt();
                                }
                            }
                            else
                                notifyAll(); // possibly won race vs signaller
                        }
                        break;
                    }
                }
            }
        }

        // Unsafe mechanics for casHash
        private static final sun.misc.Unsafe UNSAFE;
        private static final long hashOffset;

        static {
            try {
                UNSAFE = sun.misc.Unsafe.getUnsafe();
                Class<?> k = Node.class;
                hashOffset = UNSAFE.objectFieldOffset
                    (k.getDeclaredField("hash"));
            } catch (Exception e) {
                throw new Error(e);
            }
        }
    }
591
592 /* ---------------- TreeBins -------------- */
593
    /**
     * Nodes for use in TreeBins. Extends Node with red-black tree
     * links; the inherited next field keeps TreeNodes traversable by
     * iterators in the same way as regular list nodes.
     */
    static final class TreeNode extends Node {
        TreeNode parent;  // red-black tree links
        TreeNode left;
        TreeNode right;
        TreeNode prev;    // needed to unlink next upon deletion
        boolean red;

        TreeNode(int hash, Object key, Object val, Node next, TreeNode parent) {
            super(hash, key, val, next);
            this.parent = parent;
        }
    }
609
610 /**
611 * A specialized form of red-black tree for use in bins
612 * whose size exceeds a threshold.
613 *
614 * TreeBins use a special form of comparison for search and
615 * related operations (which is the main reason we cannot use
616 * existing collections such as TreeMaps). TreeBins contain
617 * Comparable elements, but may contain others, as well as
618 * elements that are Comparable but not necessarily Comparable<T>
619 * for the same T, so we cannot invoke compareTo among them. To
620 * handle this, the tree is ordered primarily by hash value, then
621 * by getClass().getName() order, and then by Comparator order
622 * among elements of the same class. On lookup at a node, if
623 * elements are not comparable or compare as 0, both left and
624 * right children may need to be searched in the case of tied hash
625 * values. (This corresponds to the full list search that would be
626 * necessary if all elements were non-Comparable and had tied
627 * hashes.) The red-black balancing code is updated from
628 * pre-jdk-collections
629 * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
630 * based in turn on Cormen, Leiserson, and Rivest "Introduction to
631 * Algorithms" (CLR).
632 *
 * TreeBins also maintain a different locking discipline from
634 * regular bins. Because they are forwarded via special MOVED
635 * nodes at bin heads (which can never change once established),
636 * we cannot use those nodes as locks. Instead, TreeBin
637 * extends AbstractQueuedSynchronizer to support a simple form of
638 * read-write lock. For update operations and table validation,
639 * the exclusive form of lock behaves in the same way as bin-head
640 * locks. However, lookups use shared read-lock mechanics to allow
641 * multiple readers in the absence of writers. Additionally,
642 * these lookups do not ever block: While the lock is not
643 * available, they proceed along the slow traversal path (via
644 * next-pointers) until the lock becomes available or the list is
645 * exhausted, whichever comes first. (These cases are not fast,
646 * but maximize aggregate expected throughput.) The AQS mechanics
647 * for doing this are straightforward. The lock state is held as
648 * AQS getState(). Read counts are negative; the write count (1)
649 * is positive. There are no signalling preferences among readers
650 * and writers. Since we don't need to export full Lock API, we
651 * just override the minimal AQS methods and use them directly.
652 */
653 static final class TreeBin extends AbstractQueuedSynchronizer {
654 private static final long serialVersionUID = 2249069246763182397L;
655 transient TreeNode root; // root of tree
656 transient TreeNode first; // head of next-pointer list
657
        /* AQS overrides */
        // Lock state encoding (see class comment): 0 = free,
        // 1 = writer holds, negative = -(number of readers).
        public final boolean isHeldExclusively() { return getState() > 0; }
660 public final boolean tryAcquire(int ignore) {
661 if (compareAndSetState(0, 1)) {
662 setExclusiveOwnerThread(Thread.currentThread());
663 return true;
664 }
665 return false;
666 }
        // Exclusive (write) release. Owner is cleared before the state
        // write; the setState(0) is the action that actually publishes
        // the release to other threads.
        public final boolean tryRelease(int ignore) {
            setExclusiveOwnerThread(null);
            setState(0);
            return true;
        }
672 public final int tryAcquireShared(int ignore) {
673 for (int c;;) {
674 if ((c = getState()) > 0)
675 return -1;
676 if (compareAndSetState(c, c -1))
677 return 1;
678 }
679 }
        // Shared (read) release: CAS-increment the negative reader count
        // toward zero. Returns true (propagating the release) only when
        // this was the last reader (count went from -1 to 0).
        public final boolean tryReleaseShared(int ignore) {
            int c;
            do {} while (!compareAndSetState(c = getState(), c + 1));
            return c == -1;
        }
685
        /**
         * From CLR. Standard red-black left rotation: p's right child r
         * takes p's place and p becomes r's left child. The embedded
         * assignments update the parent/child links in a single pass.
         */
        private void rotateLeft(TreeNode p) {
            if (p != null) {
                TreeNode r = p.right, pp, rl;
                if ((rl = p.right = r.left) != null)
                    rl.parent = p;
                if ((pp = r.parent = p.parent) == null)
                    root = r;  // rotated node was the root
                else if (pp.left == p)
                    pp.left = r;
                else
                    pp.right = r;
                r.left = p;
                p.parent = r;
            }
        }
702
        /**
         * From CLR. Mirror image of rotateLeft: p's left child l takes
         * p's place and p becomes l's right child.
         */
        private void rotateRight(TreeNode p) {
            if (p != null) {
                TreeNode l = p.left, pp, lr;
                if ((lr = p.left = l.right) != null)
                    lr.parent = p;
                if ((pp = l.parent = p.parent) == null)
                    root = l;  // rotated node was the root
                else if (pp.right == p)
                    pp.right = l;
                else
                    pp.left = l;
                l.right = p;
                p.parent = l;
            }
        }
719
        /**
         * Returns the TreeNode (or null if not found) for the given key
         * starting at given root.
         *
         * Ordering (per the TreeBin class comment): primarily by hash,
         * then by class name, then by compareTo among elements of the
         * same Comparable class. On ties that cannot be broken, both
         * subtrees may need to be searched recursively.
         */
        @SuppressWarnings("unchecked") final TreeNode getTreeNode
            (int h, Object k, TreeNode p) {
            Class<?> c = k.getClass();
            while (p != null) {
                int dir, ph;  Object pk; Class<?> pc;
                if ((ph = p.hash) == h) {
                    if ((pk = p.key) == k || k.equals(pk))
                        return p;  // exact match
                    // Hash tie that compareTo cannot break: fall back to
                    // class-name order, possibly searching the other subtree.
                    if (c != (pc = pk.getClass()) ||
                        !(k instanceof Comparable) ||
                        (dir = ((Comparable)k).compareTo((Comparable)pk)) == 0) {
                        dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName());
                        TreeNode r = null, s = null, pl, pr;
                        // Pick the subtree NOT chosen by dir whose hash range
                        // could still contain h, and search it recursively.
                        if (dir >= 0) {
                            if ((pl = p.left) != null && h <= pl.hash)
                                s = pl;
                        }
                        else if ((pr = p.right) != null && h >= pr.hash)
                            s = pr;
                        if (s != null && (r = getTreeNode(h, k, s)) != null)
                            return r;
                    }
                }
                else
                    dir = (h < ph) ? -1 : 1;  // unequal hashes order directly
                p = (dir > 0) ? p.right : p.left;
            }
            return null;  // not present
        }
753
754 /**
755 * Wrapper for getTreeNode used by CHM.get. Tries to obtain
756 * read-lock to call getTreeNode, but during failure to get
757 * lock, searches along next links.
758 */
759 final Object getValue(int h, Object k) {
760 Node r = null;
761 int c = getState(); // Must read lock state first
762 for (Node e = first; e != null; e = e.next) {
763 if (c <= 0 && compareAndSetState(c, c - 1)) {
764 try {
765 r = getTreeNode(h, k, root);
766 } finally {
767 releaseShared(0);
768 }
769 break;
770 }
771 else if ((e.hash & HASH_BITS) == h && k.equals(e.key)) {
772 r = e;
773 break;
774 }
775 else
776 c = getState();
777 }
778 return r == null ? null : r.val;
779 }
780
781 /**
782 * Finds or adds a node.
783 * @return null if added
784 */
785 @SuppressWarnings("unchecked") final TreeNode putTreeNode
786 (int h, Object k, Object v) {
787 Class<?> c = k.getClass();
788 TreeNode pp = root, p = null;
789 int dir = 0;
790 while (pp != null) { // find existing node or leaf to insert at
791 int ph; Object pk; Class<?> pc;
792 p = pp;
793 if ((ph = p.hash) == h) {
794 if ((pk = p.key) == k || k.equals(pk))
795 return p;
796 if (c != (pc = pk.getClass()) ||
797 !(k instanceof Comparable) ||
798 (dir = ((Comparable)k).compareTo((Comparable)pk)) == 0) {
799 dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName());
800 TreeNode r = null, s = null, pl, pr;
801 if (dir >= 0) {
802 if ((pl = p.left) != null && h <= pl.hash)
803 s = pl;
804 }
805 else if ((pr = p.right) != null && h >= pr.hash)
806 s = pr;
807 if (s != null && (r = getTreeNode(h, k, s)) != null)
808 return r;
809 }
810 }
811 else
812 dir = (h < ph) ? -1 : 1;
813 pp = (dir > 0) ? p.right : p.left;
814 }
815
816 TreeNode f = first;
817 TreeNode x = first = new TreeNode(h, k, v, f, p);
818 if (p == null)
819 root = x;
820 else { // attach and rebalance; adapted from CLR
821 TreeNode xp, xpp;
822 if (f != null)
823 f.prev = x;
824 if (dir <= 0)
825 p.left = x;
826 else
827 p.right = x;
828 x.red = true;
829 while (x != null && (xp = x.parent) != null && xp.red &&
830 (xpp = xp.parent) != null) {
831 TreeNode xppl = xpp.left;
832 if (xp == xppl) {
833 TreeNode y = xpp.right;
834 if (y != null && y.red) {
835 y.red = false;
836 xp.red = false;
837 xpp.red = true;
838 x = xpp;
839 }
840 else {
841 if (x == xp.right) {
842 rotateLeft(x = xp);
843 xpp = (xp = x.parent) == null ? null : xp.parent;
844 }
845 if (xp != null) {
846 xp.red = false;
847 if (xpp != null) {
848 xpp.red = true;
849 rotateRight(xpp);
850 }
851 }
852 }
853 }
854 else {
855 TreeNode y = xppl;
856 if (y != null && y.red) {
857 y.red = false;
858 xp.red = false;
859 xpp.red = true;
860 x = xpp;
861 }
862 else {
863 if (x == xp.left) {
864 rotateRight(x = xp);
865 xpp = (xp = x.parent) == null ? null : xp.parent;
866 }
867 if (xp != null) {
868 xp.red = false;
869 if (xpp != null) {
870 xpp.red = true;
871 rotateLeft(xpp);
872 }
873 }
874 }
875 }
876 }
877 TreeNode r = root;
878 if (r != null && r.red)
879 r.red = false;
880 }
881 return null;
882 }
883
884 /**
885 * Removes the given node, that must be present before this
886 * call. This is messier than typical red-black deletion code
887 * because we cannot swap the contents of an interior node
888 * with a leaf successor that is pinned by "next" pointers
889 * that are accessible independently of lock. So instead we
890 * swap the tree linkages.
891 */
892 final void deleteTreeNode(TreeNode p) {
893 TreeNode next = (TreeNode)p.next; // unlink traversal pointers
894 TreeNode pred = p.prev;
895 if (pred == null)
896 first = next;
897 else
898 pred.next = next;
899 if (next != null)
900 next.prev = pred;
901 TreeNode replacement;
902 TreeNode pl = p.left;
903 TreeNode pr = p.right;
904 if (pl != null && pr != null) {
905 TreeNode s = pr, sl;
906 while ((sl = s.left) != null) // find successor
907 s = sl;
908 boolean c = s.red; s.red = p.red; p.red = c; // swap colors
909 TreeNode sr = s.right;
910 TreeNode pp = p.parent;
911 if (s == pr) { // p was s's direct parent
912 p.parent = s;
913 s.right = p;
914 }
915 else {
916 TreeNode sp = s.parent;
917 if ((p.parent = sp) != null) {
918 if (s == sp.left)
919 sp.left = p;
920 else
921 sp.right = p;
922 }
923 if ((s.right = pr) != null)
924 pr.parent = s;
925 }
926 p.left = null;
927 if ((p.right = sr) != null)
928 sr.parent = p;
929 if ((s.left = pl) != null)
930 pl.parent = s;
931 if ((s.parent = pp) == null)
932 root = s;
933 else if (p == pp.left)
934 pp.left = s;
935 else
936 pp.right = s;
937 replacement = sr;
938 }
939 else
940 replacement = (pl != null) ? pl : pr;
941 TreeNode pp = p.parent;
942 if (replacement == null) {
943 if (pp == null) {
944 root = null;
945 return;
946 }
947 replacement = p;
948 }
949 else {
950 replacement.parent = pp;
951 if (pp == null)
952 root = replacement;
953 else if (p == pp.left)
954 pp.left = replacement;
955 else
956 pp.right = replacement;
957 p.left = p.right = p.parent = null;
958 }
959 if (!p.red) { // rebalance, from CLR
960 TreeNode x = replacement;
961 while (x != null) {
962 TreeNode xp, xpl;
963 if (x.red || (xp = x.parent) == null) {
964 x.red = false;
965 break;
966 }
967 if (x == (xpl = xp.left)) {
968 TreeNode sib = xp.right;
969 if (sib != null && sib.red) {
970 sib.red = false;
971 xp.red = true;
972 rotateLeft(xp);
973 sib = (xp = x.parent) == null ? null : xp.right;
974 }
975 if (sib == null)
976 x = xp;
977 else {
978 TreeNode sl = sib.left, sr = sib.right;
979 if ((sr == null || !sr.red) &&
980 (sl == null || !sl.red)) {
981 sib.red = true;
982 x = xp;
983 }
984 else {
985 if (sr == null || !sr.red) {
986 if (sl != null)
987 sl.red = false;
988 sib.red = true;
989 rotateRight(sib);
990 sib = (xp = x.parent) == null ? null : xp.right;
991 }
992 if (sib != null) {
993 sib.red = (xp == null) ? false : xp.red;
994 if ((sr = sib.right) != null)
995 sr.red = false;
996 }
997 if (xp != null) {
998 xp.red = false;
999 rotateLeft(xp);
1000 }
1001 x = root;
1002 }
1003 }
1004 }
1005 else { // symmetric
1006 TreeNode sib = xpl;
1007 if (sib != null && sib.red) {
1008 sib.red = false;
1009 xp.red = true;
1010 rotateRight(xp);
1011 sib = (xp = x.parent) == null ? null : xp.left;
1012 }
1013 if (sib == null)
1014 x = xp;
1015 else {
1016 TreeNode sl = sib.left, sr = sib.right;
1017 if ((sl == null || !sl.red) &&
1018 (sr == null || !sr.red)) {
1019 sib.red = true;
1020 x = xp;
1021 }
1022 else {
1023 if (sl == null || !sl.red) {
1024 if (sr != null)
1025 sr.red = false;
1026 sib.red = true;
1027 rotateLeft(sib);
1028 sib = (xp = x.parent) == null ? null : xp.left;
1029 }
1030 if (sib != null) {
1031 sib.red = (xp == null) ? false : xp.red;
1032 if ((sl = sib.left) != null)
1033 sl.red = false;
1034 }
1035 if (xp != null) {
1036 xp.red = false;
1037 rotateRight(xp);
1038 }
1039 x = root;
1040 }
1041 }
1042 }
1043 }
1044 }
1045 if (p == replacement && (pp = p.parent) != null) {
1046 if (p == pp.left) // detach pointers
1047 pp.left = null;
1048 else if (p == pp.right)
1049 pp.right = null;
1050 p.parent = null;
1051 }
1052 }
1053 }
1054
1055 /* ---------------- Collision reduction methods -------------- */
1056
1057 /**
1058 * Spreads higher bits to lower, and also forces top 2 bits to 0.
1059 * Because the table uses power-of-two masking, sets of hashes
1060 * that vary only in bits above the current mask will always
1061 * collide. (Among known examples are sets of Float keys holding
1062 * consecutive whole numbers in small tables.) To counter this,
1063 * we apply a transform that spreads the impact of higher bits
1064 * downward. There is a tradeoff between speed, utility, and
1065 * quality of bit-spreading. Because many common sets of hashes
1066 * are already reasonably distributed across bits (so don't benefit
1067 * from spreading), and because we use trees to handle large sets
1068 * of collisions in bins, we don't need excessively high quality.
1069 */
1070 private static final int spread(int h) {
1071 h ^= (h >>> 18) ^ (h >>> 12);
1072 return (h ^ (h >>> 10)) & HASH_BITS;
1073 }
1074
1075 /**
1076 * Replaces a list bin with a tree bin. Call only when locked.
1077 * Fails to replace if the given key is non-comparable or table
1078 * is, or needs, resizing.
1079 */
1080 private final void replaceWithTreeBin(Node[] tab, int index, Object key) {
1081 if ((key instanceof Comparable) &&
1082 (tab.length >= MAXIMUM_CAPACITY || counter.sum() < (long)sizeCtl)) {
1083 TreeBin t = new TreeBin();
1084 for (Node e = tabAt(tab, index); e != null; e = e.next)
1085 t.putTreeNode(e.hash & HASH_BITS, e.key, e.val);
1086 setTabAt(tab, index, new Node(MOVED, t, null, null));
1087 }
1088 }
1089
1090 /* ---------------- Internal access and update methods -------------- */
1091
    /**
     * Implementation for get and containsKey: lock-free traversal of
     * the bin for the spread hash. A node with hash == MOVED is a
     * forwarder: its key is either a TreeBin (search it under its own
     * read-lock protocol) or the next table built by a resize, in
     * which case the whole lookup restarts against that table.
     */
    private final Object internalGet(Object k) {
        int h = spread(k.hashCode());
        retry: for (Node[] tab = table; tab != null;) {
            Node e, p; Object ek, ev; int eh; // locals to read fields once
            for (e = tabAt(tab, (tab.length - 1) & h); e != null; e = e.next) {
                if ((eh = e.hash) == MOVED) {
                    if ((ek = e.key) instanceof TreeBin) // search TreeBin
                        return ((TreeBin)ek).getValue(h, k);
                    else { // restart with new table
                        tab = (Node[])ek;
                        continue retry;
                    }
                }
                // val is read before the key check; a null val means the
                // node is mid-update/removal and must not match.
                else if ((eh & HASH_BITS) == h && (ev = e.val) != null &&
                         ((ek = e.key) == k || k.equals(ek)))
                    return ev;
            }
            break;
        }
        return null;
    }
1114
1115 /**
1116 * Implementation for the four public remove/replace methods:
1117 * Replaces node value with v, conditional upon match of cv if
1118 * non-null. If resulting value is null, delete.
1119 */
1120 private final Object internalReplace(Object k, Object v, Object cv) {
1121 int h = spread(k.hashCode());
1122 Object oldVal = null;
1123 for (Node[] tab = table;;) {
1124 Node f; int i, fh; Object fk;
1125 if (tab == null ||
1126 (f = tabAt(tab, i = (tab.length - 1) & h)) == null)
1127 break;
1128 else if ((fh = f.hash) == MOVED) {
1129 if ((fk = f.key) instanceof TreeBin) {
1130 TreeBin t = (TreeBin)fk;
1131 boolean validated = false;
1132 boolean deleted = false;
1133 t.acquire(0);
1134 try {
1135 if (tabAt(tab, i) == f) {
1136 validated = true;
1137 TreeNode p = t.getTreeNode(h, k, t.root);
1138 if (p != null) {
1139 Object pv = p.val;
1140 if (cv == null || cv == pv || cv.equals(pv)) {
1141 oldVal = pv;
1142 if ((p.val = v) == null) {
1143 deleted = true;
1144 t.deleteTreeNode(p);
1145 }
1146 }
1147 }
1148 }
1149 } finally {
1150 t.release(0);
1151 }
1152 if (validated) {
1153 if (deleted)
1154 counter.add(-1L);
1155 break;
1156 }
1157 }
1158 else
1159 tab = (Node[])fk;
1160 }
1161 else if ((fh & HASH_BITS) != h && f.next == null) // precheck
1162 break; // rules out possible existence
1163 else if ((fh & LOCKED) != 0) {
1164 checkForResize(); // try resizing if can't get lock
1165 f.tryAwaitLock(tab, i);
1166 }
1167 else if (f.casHash(fh, fh | LOCKED)) {
1168 boolean validated = false;
1169 boolean deleted = false;
1170 try {
1171 if (tabAt(tab, i) == f) {
1172 validated = true;
1173 for (Node e = f, pred = null;;) {
1174 Object ek, ev;
1175 if ((e.hash & HASH_BITS) == h &&
1176 ((ev = e.val) != null) &&
1177 ((ek = e.key) == k || k.equals(ek))) {
1178 if (cv == null || cv == ev || cv.equals(ev)) {
1179 oldVal = ev;
1180 if ((e.val = v) == null) {
1181 deleted = true;
1182 Node en = e.next;
1183 if (pred != null)
1184 pred.next = en;
1185 else
1186 setTabAt(tab, i, en);
1187 }
1188 }
1189 break;
1190 }
1191 pred = e;
1192 if ((e = e.next) == null)
1193 break;
1194 }
1195 }
1196 } finally {
1197 if (!f.casHash(fh | LOCKED, fh)) {
1198 f.hash = fh;
1199 synchronized (f) { f.notifyAll(); };
1200 }
1201 }
1202 if (validated) {
1203 if (deleted)
1204 counter.add(-1L);
1205 break;
1206 }
1207 }
1208 }
1209 return oldVal;
1210 }
1211
1212 /*
1213 * Internal versions of the six insertion methods, each a
1214 * little more complicated than the last. All have
1215 * the same basic structure as the first (internalPut):
1216 * 1. If table uninitialized, create
1217 * 2. If bin empty, try to CAS new node
1218 * 3. If bin stale, use new table
1219 * 4. if bin converted to TreeBin, validate and relay to TreeBin methods
1220 * 5. Lock and validate; if valid, scan and add or update
1221 *
1222 * The others interweave other checks and/or alternative actions:
1223 * * Plain put checks for and performs resize after insertion.
1224 * * putIfAbsent prescans for mapping without lock (and fails to add
1225 * if present), which also makes pre-emptive resize checks worthwhile.
     * * computeIfAbsent extends the form used in putIfAbsent with
     *   additional mechanics to deal with calls, potential exceptions,
     *   and null returns from the mapping function.
1229 * * compute uses the same function-call mechanics, but without
1230 * the prescans
1231 * * merge acts as putIfAbsent in the absent case, but invokes the
1232 * update function if present
1233 * * putAll attempts to pre-allocate enough table space
1234 * and more lazily performs count updates and checks.
1235 *
1236 * Someday when details settle down a bit more, it might be worth
1237 * some factoring to reduce sprawl.
1238 */
1239
    /**
     * Implementation for put. Follows the common insertion structure:
     * initialize table if needed; CAS a new node into an empty bin
     * (no lock); relay MOVED bins to their TreeBin or the forwarded
     * table; otherwise lock the list bin, overwrite a matching node
     * or append, and treeify when the chain reaches TREE_THRESHOLD.
     * On a fresh insertion, bumps the counter and may trigger a resize
     * check (count > 1 means the bin held >= 2 nodes, or the table is
     * still small, i.e. length <= 64).
     *
     * @return the previous value, or null if the key was absent
     */
    private final Object internalPut(Object k, Object v) {
        int h = spread(k.hashCode());
        int count = 0;
        for (Node[] tab = table;;) {
            int i; Node f; int fh; Object fk;
            if (tab == null)
                tab = initTable();
            else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
                if (casTabAt(tab, i, null, new Node(h, k, v, null)))
                    break; // no lock when adding to empty bin
            }
            else if ((fh = f.hash) == MOVED) {
                if ((fk = f.key) instanceof TreeBin) {
                    TreeBin t = (TreeBin)fk;
                    Object oldVal = null;
                    t.acquire(0); // exclusive tree-bin lock
                    try {
                        if (tabAt(tab, i) == f) { // revalidate bin head
                            count = 2;
                            TreeNode p = t.putTreeNode(h, k, v);
                            if (p != null) { // existing node: overwrite
                                oldVal = p.val;
                                p.val = v;
                            }
                        }
                    } finally {
                        t.release(0);
                    }
                    if (count != 0) {
                        if (oldVal != null)
                            return oldVal;
                        break;
                    }
                }
                else
                    tab = (Node[])fk; // forwarded: retry in new table
            }
            else if ((fh & LOCKED) != 0) {
                checkForResize();
                f.tryAwaitLock(tab, i);
            }
            else if (f.casHash(fh, fh | LOCKED)) { // lock the list bin
                Object oldVal = null;
                try { // needed in case equals() throws
                    if (tabAt(tab, i) == f) {
                        count = 1;
                        for (Node e = f;; ++count) {
                            Object ek, ev;
                            if ((e.hash & HASH_BITS) == h &&
                                (ev = e.val) != null &&
                                ((ek = e.key) == k || k.equals(ek))) {
                                oldVal = ev;
                                e.val = v;
                                break;
                            }
                            Node last = e;
                            if ((e = e.next) == null) { // append at tail
                                last.next = new Node(h, k, v, null);
                                if (count >= TREE_THRESHOLD)
                                    replaceWithTreeBin(tab, i, k);
                                break;
                            }
                        }
                    }
                } finally { // unlock and signal if needed
                    if (!f.casHash(fh | LOCKED, fh)) {
                        f.hash = fh;
                        synchronized (f) { f.notifyAll(); };
                    }
                }
                if (count != 0) {
                    if (oldVal != null)
                        return oldVal;
                    if (tab.length <= 64) // small table: always recheck size
                        count = 2;
                    break;
                }
            }
        }
        counter.add(1L);
        if (count > 1)
            checkForResize();
        return null;
    }
1325
    /**
     * Implementation for putIfAbsent. Same shape as internalPut, but
     * first prescans the bin without locking: if the key is already
     * present its value is returned immediately, and a full-chain miss
     * triggers a pre-emptive resize check before taking the bin lock.
     *
     * @return the existing value, or null if this call inserted
     */
    private final Object internalPutIfAbsent(Object k, Object v) {
        int h = spread(k.hashCode());
        int count = 0;
        for (Node[] tab = table;;) {
            int i; Node f; int fh; Object fk, fv;
            if (tab == null)
                tab = initTable();
            else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
                if (casTabAt(tab, i, null, new Node(h, k, v, null)))
                    break; // lock-free insert into empty bin
            }
            else if ((fh = f.hash) == MOVED) {
                if ((fk = f.key) instanceof TreeBin) {
                    TreeBin t = (TreeBin)fk;
                    Object oldVal = null;
                    t.acquire(0);
                    try {
                        if (tabAt(tab, i) == f) {
                            count = 2;
                            TreeNode p = t.putTreeNode(h, k, v);
                            if (p != null) // already present: keep old value
                                oldVal = p.val;
                        }
                    } finally {
                        t.release(0);
                    }
                    if (count != 0) {
                        if (oldVal != null)
                            return oldVal;
                        break;
                    }
                }
                else
                    tab = (Node[])fk; // forwarded: retry in new table
            }
            // Lock-free fast path: bin head itself matches.
            else if ((fh & HASH_BITS) == h && (fv = f.val) != null &&
                     ((fk = f.key) == k || k.equals(fk)))
                return fv;
            else {
                Node g = f.next;
                if (g != null) { // at least 2 nodes -- search and maybe resize
                    for (Node e = g;;) {
                        Object ek, ev;
                        if ((e.hash & HASH_BITS) == h && (ev = e.val) != null &&
                            ((ek = e.key) == k || k.equals(ek)))
                            return ev;
                        if ((e = e.next) == null) {
                            checkForResize();
                            break;
                        }
                    }
                }
                if (((fh = f.hash) & LOCKED) != 0) {
                    checkForResize();
                    f.tryAwaitLock(tab, i);
                }
                // Extra tabAt recheck: head may have changed during prescan.
                else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) {
                    Object oldVal = null;
                    try {
                        if (tabAt(tab, i) == f) {
                            count = 1;
                            for (Node e = f;; ++count) {
                                Object ek, ev;
                                if ((e.hash & HASH_BITS) == h &&
                                    (ev = e.val) != null &&
                                    ((ek = e.key) == k || k.equals(ek))) {
                                    oldVal = ev; // raced in since prescan
                                    break;
                                }
                                Node last = e;
                                if ((e = e.next) == null) {
                                    last.next = new Node(h, k, v, null);
                                    if (count >= TREE_THRESHOLD)
                                        replaceWithTreeBin(tab, i, k);
                                    break;
                                }
                            }
                        }
                    } finally {
                        if (!f.casHash(fh | LOCKED, fh)) {
                            f.hash = fh;
                            synchronized (f) { f.notifyAll(); };
                        }
                    }
                    if (count != 0) {
                        if (oldVal != null)
                            return oldVal;
                        if (tab.length <= 64)
                            count = 2;
                        break;
                    }
                }
            }
        }
        counter.add(1L);
        if (count > 1)
            checkForResize();
        return null;
    }
1426
    /**
     * Implementation for computeIfAbsent. Like putIfAbsent, but the
     * value comes from invoking the mapping function only when the key
     * is absent. For an empty bin, a placeholder node is CASed in with
     * its LOCKED bit set so other threads block on it while the
     * function runs; if the function throws or returns null the
     * placeholder is removed again in the finally block.
     *
     * @return the existing or newly computed value, or null if the
     *         function returned null
     */
    private final Object internalComputeIfAbsent(K k,
                                                 Fun<? super K, ?> mf) {
        int h = spread(k.hashCode());
        Object val = null;
        int count = 0;
        for (Node[] tab = table;;) {
            Node f; int i, fh; Object fk, fv;
            if (tab == null)
                tab = initTable();
            else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
                // Reserve the bin with a locked placeholder before calling mf.
                Node node = new Node(fh = h | LOCKED, k, null, null);
                if (casTabAt(tab, i, null, node)) {
                    count = 1;
                    try {
                        if ((val = mf.apply(k)) != null)
                            node.val = val;
                    } finally {
                        if (val == null) // mf returned null or threw: undo
                            setTabAt(tab, i, null);
                        if (!node.casHash(fh, h)) { // unlock placeholder
                            node.hash = h;
                            synchronized (node) { node.notifyAll(); };
                        }
                    }
                }
                if (count != 0)
                    break;
            }
            else if ((fh = f.hash) == MOVED) {
                if ((fk = f.key) instanceof TreeBin) {
                    TreeBin t = (TreeBin)fk;
                    boolean added = false;
                    t.acquire(0);
                    try {
                        if (tabAt(tab, i) == f) {
                            count = 1;
                            TreeNode p = t.getTreeNode(h, k, t.root);
                            if (p != null) // present: do not invoke mf
                                val = p.val;
                            else if ((val = mf.apply(k)) != null) {
                                added = true;
                                count = 2;
                                t.putTreeNode(h, k, val);
                            }
                        }
                    } finally {
                        t.release(0);
                    }
                    if (count != 0) {
                        if (!added)
                            return val;
                        break;
                    }
                }
                else
                    tab = (Node[])fk; // forwarded: retry in new table
            }
            // Lock-free prescan: bin head matches.
            else if ((fh & HASH_BITS) == h && (fv = f.val) != null &&
                     ((fk = f.key) == k || k.equals(fk)))
                return fv;
            else {
                Node g = f.next;
                if (g != null) { // prescan rest of chain without locking
                    for (Node e = g;;) {
                        Object ek, ev;
                        if ((e.hash & HASH_BITS) == h && (ev = e.val) != null &&
                            ((ek = e.key) == k || k.equals(ek)))
                            return ev;
                        if ((e = e.next) == null) {
                            checkForResize();
                            break;
                        }
                    }
                }
                if (((fh = f.hash) & LOCKED) != 0) {
                    checkForResize();
                    f.tryAwaitLock(tab, i);
                }
                else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) {
                    boolean added = false;
                    try {
                        if (tabAt(tab, i) == f) {
                            count = 1;
                            for (Node e = f;; ++count) {
                                Object ek, ev;
                                if ((e.hash & HASH_BITS) == h &&
                                    (ev = e.val) != null &&
                                    ((ek = e.key) == k || k.equals(ek))) {
                                    val = ev; // raced in since prescan
                                    break;
                                }
                                Node last = e;
                                if ((e = e.next) == null) {
                                    // Absent: invoke mf under the bin lock.
                                    if ((val = mf.apply(k)) != null) {
                                        added = true;
                                        last.next = new Node(h, k, val, null);
                                        if (count >= TREE_THRESHOLD)
                                            replaceWithTreeBin(tab, i, k);
                                    }
                                    break;
                                }
                            }
                        }
                    } finally {
                        if (!f.casHash(fh | LOCKED, fh)) {
                            f.hash = fh;
                            synchronized (f) { f.notifyAll(); };
                        }
                    }
                    if (count != 0) {
                        if (!added)
                            return val;
                        if (tab.length <= 64)
                            count = 2;
                        break;
                    }
                }
            }
        }
        if (val != null) { // only count an actual insertion
            counter.add(1L);
            if (count > 1)
                checkForResize();
        }
        return val;
    }
1554
1555 /** Implementation for compute */
1556 @SuppressWarnings("unchecked") private final Object internalCompute
1557 (K k, boolean onlyIfPresent, BiFun<? super K, ? super V, ? extends V> mf) {
1558 int h = spread(k.hashCode());
1559 Object val = null;
1560 int delta = 0;
1561 int count = 0;
1562 for (Node[] tab = table;;) {
1563 Node f; int i, fh; Object fk;
1564 if (tab == null)
1565 tab = initTable();
1566 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1567 if (onlyIfPresent)
1568 break;
1569 Node node = new Node(fh = h | LOCKED, k, null, null);
1570 if (casTabAt(tab, i, null, node)) {
1571 try {
1572 count = 1;
1573 if ((val = mf.apply(k, null)) != null) {
1574 node.val = val;
1575 delta = 1;
1576 }
1577 } finally {
1578 if (delta == 0)
1579 setTabAt(tab, i, null);
1580 if (!node.casHash(fh, h)) {
1581 node.hash = h;
1582 synchronized (node) { node.notifyAll(); };
1583 }
1584 }
1585 }
1586 if (count != 0)
1587 break;
1588 }
1589 else if ((fh = f.hash) == MOVED) {
1590 if ((fk = f.key) instanceof TreeBin) {
1591 TreeBin t = (TreeBin)fk;
1592 t.acquire(0);
1593 try {
1594 if (tabAt(tab, i) == f) {
1595 count = 1;
1596 TreeNode p = t.getTreeNode(h, k, t.root);
1597 Object pv = (p == null) ? null : p.val;
1598 if ((val = mf.apply(k, (V)pv)) != null) {
1599 if (p != null)
1600 p.val = val;
1601 else {
1602 count = 2;
1603 delta = 1;
1604 t.putTreeNode(h, k, val);
1605 }
1606 }
1607 else if (p != null) {
1608 delta = -1;
1609 t.deleteTreeNode(p);
1610 }
1611 }
1612 } finally {
1613 t.release(0);
1614 }
1615 if (count != 0)
1616 break;
1617 }
1618 else
1619 tab = (Node[])fk;
1620 }
1621 else if ((fh & LOCKED) != 0) {
1622 checkForResize();
1623 f.tryAwaitLock(tab, i);
1624 }
1625 else if (f.casHash(fh, fh | LOCKED)) {
1626 try {
1627 if (tabAt(tab, i) == f) {
1628 count = 1;
1629 for (Node e = f, pred = null;; ++count) {
1630 Object ek, ev;
1631 if ((e.hash & HASH_BITS) == h &&
1632 (ev = e.val) != null &&
1633 ((ek = e.key) == k || k.equals(ek))) {
1634 val = mf.apply(k, (V)ev);
1635 if (val != null)
1636 e.val = val;
1637 else {
1638 delta = -1;
1639 Node en = e.next;
1640 if (pred != null)
1641 pred.next = en;
1642 else
1643 setTabAt(tab, i, en);
1644 }
1645 break;
1646 }
1647 pred = e;
1648 if ((e = e.next) == null) {
1649 if (!onlyIfPresent && (val = mf.apply(k, null)) != null) {
1650 pred.next = new Node(h, k, val, null);
1651 delta = 1;
1652 if (count >= TREE_THRESHOLD)
1653 replaceWithTreeBin(tab, i, k);
1654 }
1655 break;
1656 }
1657 }
1658 }
1659 } finally {
1660 if (!f.casHash(fh | LOCKED, fh)) {
1661 f.hash = fh;
1662 synchronized (f) { f.notifyAll(); };
1663 }
1664 }
1665 if (count != 0) {
1666 if (tab.length <= 64)
1667 count = 2;
1668 break;
1669 }
1670 }
1671 }
1672 if (delta != 0) {
1673 counter.add((long)delta);
1674 if (count > 1)
1675 checkForResize();
1676 }
1677 return val;
1678 }
1679
    /**
     * Implementation for merge. Behaves as putIfAbsent when the key is
     * absent (val = v); when present, replaces the value with
     * mf.apply(oldValue, v), removing the node if the function returns
     * null. delta tracks the net size change for the counter.
     *
     * @return the resulting value, or null if the mapping was removed
     */
    @SuppressWarnings("unchecked") private final Object internalMerge
        (K k, V v, BiFun<? super V, ? super V, ? extends V> mf) {
        int h = spread(k.hashCode());
        Object val = null;
        int delta = 0;
        int count = 0;
        for (Node[] tab = table;;) {
            int i; Node f; int fh; Object fk, fv;
            if (tab == null)
                tab = initTable();
            else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
                // Absent and bin empty: plain CAS insert of v, no mf call.
                if (casTabAt(tab, i, null, new Node(h, k, v, null))) {
                    delta = 1;
                    val = v;
                    break;
                }
            }
            else if ((fh = f.hash) == MOVED) {
                if ((fk = f.key) instanceof TreeBin) {
                    TreeBin t = (TreeBin)fk;
                    t.acquire(0);
                    try {
                        if (tabAt(tab, i) == f) {
                            count = 1;
                            TreeNode p = t.getTreeNode(h, k, t.root);
                            // mf only runs when a previous value exists.
                            val = (p == null) ? v : mf.apply((V)p.val, v);
                            if (val != null) {
                                if (p != null)
                                    p.val = val;
                                else {
                                    count = 2;
                                    delta = 1;
                                    t.putTreeNode(h, k, val);
                                }
                            }
                            else if (p != null) { // merge to null: remove
                                delta = -1;
                                t.deleteTreeNode(p);
                            }
                        }
                    } finally {
                        t.release(0);
                    }
                    if (count != 0)
                        break;
                }
                else
                    tab = (Node[])fk; // forwarded: retry in new table
            }
            else if ((fh & LOCKED) != 0) {
                checkForResize();
                f.tryAwaitLock(tab, i);
            }
            else if (f.casHash(fh, fh | LOCKED)) { // lock the list bin
                try {
                    if (tabAt(tab, i) == f) {
                        count = 1;
                        for (Node e = f, pred = null;; ++count) {
                            Object ek, ev;
                            if ((e.hash & HASH_BITS) == h &&
                                (ev = e.val) != null &&
                                ((ek = e.key) == k || k.equals(ek))) {
                                val = mf.apply(v, (V)ev);
                                if (val != null)
                                    e.val = val;
                                else { // merge produced null: unlink
                                    delta = -1;
                                    Node en = e.next;
                                    if (pred != null)
                                        pred.next = en;
                                    else
                                        setTabAt(tab, i, en);
                                }
                                break;
                            }
                            pred = e;
                            if ((e = e.next) == null) { // absent: append v
                                val = v;
                                pred.next = new Node(h, k, val, null);
                                delta = 1;
                                if (count >= TREE_THRESHOLD)
                                    replaceWithTreeBin(tab, i, k);
                                break;
                            }
                        }
                    }
                } finally {
                    if (!f.casHash(fh | LOCKED, fh)) {
                        f.hash = fh;
                        synchronized (f) { f.notifyAll(); };
                    }
                }
                if (count != 0) {
                    if (tab.length <= 64)
                        count = 2;
                    break;
                }
            }
        }
        if (delta != 0) {
            counter.add((long)delta);
            if (count > 1)
                checkForResize();
        }
        return val;
    }
1787
    /**
     * Implementation for putAll. Presizes the table for the source
     * map's size, then inserts each entry using the same per-bin
     * protocol as internalPut. Counter updates are batched in delta
     * and flushed lazily (before blocking on a locked bin, when a bin
     * grows large, and once at the end) to reduce contention. A null
     * entry, key, or value aborts the loop and throws NPE after the
     * pending count has been committed.
     */
    private final void internalPutAll(Map<?, ?> m) {
        tryPresize(m.size());
        long delta = 0L; // number of uncommitted additions
        boolean npe = false; // to throw exception on exit for nulls
        try { // to clean up counts on other exceptions
            for (Map.Entry<?, ?> entry : m.entrySet()) {
                Object k, v;
                if (entry == null || (k = entry.getKey()) == null ||
                    (v = entry.getValue()) == null) {
                    npe = true;
                    break;
                }
                int h = spread(k.hashCode());
                for (Node[] tab = table;;) {
                    int i; Node f; int fh; Object fk;
                    if (tab == null)
                        tab = initTable();
                    else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null){
                        if (casTabAt(tab, i, null, new Node(h, k, v, null))) {
                            ++delta; // lock-free insert into empty bin
                            break;
                        }
                    }
                    else if ((fh = f.hash) == MOVED) {
                        if ((fk = f.key) instanceof TreeBin) {
                            TreeBin t = (TreeBin)fk;
                            boolean validated = false;
                            t.acquire(0);
                            try {
                                if (tabAt(tab, i) == f) {
                                    validated = true;
                                    TreeNode p = t.getTreeNode(h, k, t.root);
                                    if (p != null)
                                        p.val = v; // overwrite existing
                                    else {
                                        t.putTreeNode(h, k, v);
                                        ++delta;
                                    }
                                }
                            } finally {
                                t.release(0);
                            }
                            if (validated)
                                break;
                        }
                        else
                            tab = (Node[])fk; // forwarded: retry in new table
                    }
                    else if ((fh & LOCKED) != 0) {
                        // Commit pending count before possibly blocking so a
                        // concurrent resize sees an accurate size.
                        counter.add(delta);
                        delta = 0L;
                        checkForResize();
                        f.tryAwaitLock(tab, i);
                    }
                    else if (f.casHash(fh, fh | LOCKED)) { // lock list bin
                        int count = 0;
                        try {
                            if (tabAt(tab, i) == f) {
                                count = 1;
                                for (Node e = f;; ++count) {
                                    Object ek, ev;
                                    if ((e.hash & HASH_BITS) == h &&
                                        (ev = e.val) != null &&
                                        ((ek = e.key) == k || k.equals(ek))) {
                                        e.val = v; // overwrite existing
                                        break;
                                    }
                                    Node last = e;
                                    if ((e = e.next) == null) {
                                        ++delta;
                                        last.next = new Node(h, k, v, null);
                                        if (count >= TREE_THRESHOLD)
                                            replaceWithTreeBin(tab, i, k);
                                        break;
                                    }
                                }
                            }
                        } finally {
                            if (!f.casHash(fh | LOCKED, fh)) {
                                f.hash = fh;
                                synchronized (f) { f.notifyAll(); };
                            }
                        }
                        if (count != 0) {
                            if (count > 1) { // long bin: flush and recheck
                                counter.add(delta);
                                delta = 0L;
                                checkForResize();
                            }
                            break;
                        }
                    }
                }
            }
        } finally {
            if (delta != 0) // commit remaining additions even on exception
                counter.add(delta);
        }
        if (npe)
            throw new NullPointerException();
    }
1890
1891 /* ---------------- Table Initialization and Resizing -------------- */
1892
1893 /**
1894 * Returns a power of two table size for the given desired capacity.
1895 * See Hackers Delight, sec 3.2
1896 */
1897 private static final int tableSizeFor(int c) {
1898 int n = c - 1;
1899 n |= n >>> 1;
1900 n |= n >>> 2;
1901 n |= n >>> 4;
1902 n |= n >>> 8;
1903 n |= n >>> 16;
1904 return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
1905 }
1906
1907 /**
1908 * Initializes table, using the size recorded in sizeCtl.
1909 */
1910 private final Node[] initTable() {
1911 Node[] tab; int sc;
1912 while ((tab = table) == null) {
1913 if ((sc = sizeCtl) < 0)
1914 Thread.yield(); // lost initialization race; just spin
1915 else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
1916 try {
1917 if ((tab = table) == null) {
1918 int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
1919 tab = table = new Node[n];
1920 sc = n - (n >>> 2);
1921 }
1922 } finally {
1923 sizeCtl = sc;
1924 }
1925 break;
1926 }
1927 }
1928 return tab;
1929 }
1930
1931 /**
1932 * If table is too small and not already resizing, creates next
1933 * table and transfers bins. Rechecks occupancy after a transfer
1934 * to see if another resize is already needed because resizings
1935 * are lagging additions.
1936 */
1937 private final void checkForResize() {
1938 Node[] tab; int n, sc;
1939 while ((tab = table) != null &&
1940 (n = tab.length) < MAXIMUM_CAPACITY &&
1941 (sc = sizeCtl) >= 0 && counter.sum() >= (long)sc &&
1942 UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
1943 try {
1944 if (tab == table) {
1945 table = rebuild(tab);
1946 sc = (n << 1) - (n >>> 1);
1947 }
1948 } finally {
1949 sizeCtl = sc;
1950 }
1951 }
1952 }
1953
1954 /**
1955 * Tries to presize table to accommodate the given number of elements.
1956 *
1957 * @param size number of elements (doesn't need to be perfectly accurate)
1958 */
1959 private final void tryPresize(int size) {
1960 int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
1961 tableSizeFor(size + (size >>> 1) + 1);
1962 int sc;
1963 while ((sc = sizeCtl) >= 0) {
1964 Node[] tab = table; int n;
1965 if (tab == null || (n = tab.length) == 0) {
1966 n = (sc > c) ? sc : c;
1967 if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
1968 try {
1969 if (table == tab) {
1970 table = new Node[n];
1971 sc = n - (n >>> 2);
1972 }
1973 } finally {
1974 sizeCtl = sc;
1975 }
1976 }
1977 }
1978 else if (c <= sc || n >= MAXIMUM_CAPACITY)
1979 break;
1980 else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
1981 try {
1982 if (table == tab) {
1983 table = rebuild(tab);
1984 sc = (n << 1) - (n >>> 1);
1985 }
1986 } finally {
1987 sizeCtl = sc;
1988 }
1989 }
1990 }
1991 }
1992
    /**
     * Moves and/or copies the nodes in each bin to new table. See
     * above for explanation.
     *
     * @param tab the current table
     * @return the new table
     */
    private static final Node[] rebuild(Node[] tab) {
        int n = tab.length;
        Node[] nextTab = new Node[n << 1];
        Node fwd = new Node(MOVED, nextTab, null, null);
        int[] buffer = null;       // holds bins to revisit; null until needed
        Node rev = null;           // reverse forwarder; null until needed
        int nbuffered = 0;         // the number of bins in buffer list
        int bufferIndex = 0;       // buffer index of current buffered bin
        int bin = n - 1;           // current non-buffered bin or -1 if none

        for (int i = bin;;) {      // start upwards sweep
            int fh; Node f;
            if ((f = tabAt(tab, i)) == null) {
                if (bin >= 0) {    // Unbuffered; no lock needed (or available)
                    if (!casTabAt(tab, i, f, fwd))
                        continue;
                }
                else {             // transiently use a locked forwarding node
                    Node g = new Node(MOVED|LOCKED, nextTab, null, null);
                    if (!casTabAt(tab, i, f, g))
                        continue;
                    setTabAt(nextTab, i, null);
                    setTabAt(nextTab, i + n, null);
                    setTabAt(tab, i, fwd);
                    if (!g.casHash(MOVED|LOCKED, MOVED)) {
                        // a waiter arrived; hash store + notify wakes it
                        g.hash = MOVED;
                        synchronized (g) { g.notifyAll(); }
                    }
                }
            }
            else if ((fh = f.hash) == MOVED) {
                Object fk = f.key;
                if (fk instanceof TreeBin) {
                    TreeBin t = (TreeBin)fk;
                    boolean validated = false;
                    t.acquire(0); // exclusive-lock the tree bin while splitting
                    try {
                        if (tabAt(tab, i) == f) {
                            validated = true;
                            splitTreeBin(nextTab, i, t);
                            setTabAt(tab, i, fwd);
                        }
                    } finally {
                        t.release(0);
                    }
                    if (!validated)
                        continue;
                }
            }
            else if ((fh & LOCKED) == 0 && f.casHash(fh, fh|LOCKED)) {
                boolean validated = false;
                try {              // split to lo and hi lists; copying as needed
                    if (tabAt(tab, i) == f) {
                        validated = true;
                        splitBin(nextTab, i, f);
                        setTabAt(tab, i, fwd);
                    }
                } finally {
                    if (!f.casHash(fh | LOCKED, fh)) {
                        f.hash = fh;
                        synchronized (f) { f.notifyAll(); };
                    }
                }
                if (!validated)
                    continue;
            }
            else {                 // bin is locked by some other thread
                if (buffer == null) // initialize buffer for revisits
                    buffer = new int[TRANSFER_BUFFER_SIZE];
                if (bin < 0 && bufferIndex > 0) {
                    int j = buffer[--bufferIndex];
                    buffer[bufferIndex] = i;
                    i = j;          // swap with another bin
                    continue;
                }
                if (bin < 0 || nbuffered >= TRANSFER_BUFFER_SIZE) {
                    f.tryAwaitLock(tab, i);
                    continue;       // no other options -- block
                }
                if (rev == null)    // initialize reverse-forwarder
                    rev = new Node(MOVED, tab, null, null);
                if (tabAt(tab, i) != f || (f.hash & LOCKED) == 0)
                    continue;       // recheck before adding to list
                buffer[nbuffered++] = i;
                setTabAt(nextTab, i, rev); // install place-holders
                setTabAt(nextTab, i + n, rev);
            }

            if (bin > 0)            // continue downward sweep
                i = --bin;
            else if (buffer != null && nbuffered > 0) {
                bin = -1;           // sweep done; drain buffered bins
                i = buffer[bufferIndex = --nbuffered];
            }
            else
                return nextTab;
        }
    }
2097
2098 /**
2099 * Splits a normal bin with list headed by e into lo and hi parts;
2100 * installs in given table.
2101 */
2102 private static void splitBin(Node[] nextTab, int i, Node e) {
2103 int bit = nextTab.length >>> 1; // bit to split on
2104 int runBit = e.hash & bit;
2105 Node lastRun = e, lo = null, hi = null;
2106 for (Node p = e.next; p != null; p = p.next) {
2107 int b = p.hash & bit;
2108 if (b != runBit) {
2109 runBit = b;
2110 lastRun = p;
2111 }
2112 }
2113 if (runBit == 0)
2114 lo = lastRun;
2115 else
2116 hi = lastRun;
2117 for (Node p = e; p != lastRun; p = p.next) {
2118 int ph = p.hash & HASH_BITS;
2119 Object pk = p.key, pv = p.val;
2120 if ((ph & bit) == 0)
2121 lo = new Node(ph, pk, pv, lo);
2122 else
2123 hi = new Node(ph, pk, pv, hi);
2124 }
2125 setTabAt(nextTab, i, lo);
2126 setTabAt(nextTab, i + bit, hi);
2127 }
2128
2129 /**
2130 * Splits a tree bin into lo and hi parts; installs in given table.
2131 */
2132 private static void splitTreeBin(Node[] nextTab, int i, TreeBin t) {
2133 int bit = nextTab.length >>> 1;
2134 TreeBin lt = new TreeBin();
2135 TreeBin ht = new TreeBin();
2136 int lc = 0, hc = 0;
2137 for (Node e = t.first; e != null; e = e.next) {
2138 int h = e.hash & HASH_BITS;
2139 Object k = e.key, v = e.val;
2140 if ((h & bit) == 0) {
2141 ++lc;
2142 lt.putTreeNode(h, k, v);
2143 }
2144 else {
2145 ++hc;
2146 ht.putTreeNode(h, k, v);
2147 }
2148 }
2149 Node ln, hn; // throw away trees if too small
2150 if (lc <= (TREE_THRESHOLD >>> 1)) {
2151 ln = null;
2152 for (Node p = lt.first; p != null; p = p.next)
2153 ln = new Node(p.hash, p.key, p.val, ln);
2154 }
2155 else
2156 ln = new Node(MOVED, lt, null, null);
2157 setTabAt(nextTab, i, ln);
2158 if (hc <= (TREE_THRESHOLD >>> 1)) {
2159 hn = null;
2160 for (Node p = ht.first; p != null; p = p.next)
2161 hn = new Node(p.hash, p.key, p.val, hn);
2162 }
2163 else
2164 hn = new Node(MOVED, ht, null, null);
2165 setTabAt(nextTab, i + bit, hn);
2166 }
2167
    /**
     * Implementation for clear. Steps through each bin, removing all
     * nodes. Count updates are batched into a local delta and pushed
     * to the shared counter opportunistically and at the end.
     */
    private final void internalClear() {
        long delta = 0L; // negative number of deletions
        int i = 0;
        Node[] tab = table;
        while (tab != null && i < tab.length) {
            int fh; Object fk;
            Node f = tabAt(tab, i);
            if (f == null)
                ++i;                    // empty bin; move on
            else if ((fh = f.hash) == MOVED) {
                if ((fk = f.key) instanceof TreeBin) {
                    TreeBin t = (TreeBin)fk;
                    t.acquire(0);       // exclusive-lock the tree bin
                    try {
                        if (tabAt(tab, i) == f) {
                            for (Node p = t.first; p != null; p = p.next) {
                                if (p.val != null) { // (currently always true)
                                    p.val = null;
                                    --delta;
                                }
                            }
                            t.first = null;
                            t.root = null;
                            ++i;
                        }
                    } finally {
                        t.release(0);
                    }
                }
                else                    // forwarding node: follow to new table
                    tab = (Node[])fk;
            }
            else if ((fh & LOCKED) != 0) {
                counter.add(delta); // opportunistically update count
                delta = 0L;
                f.tryAwaitLock(tab, i); // wait for bin lock, then retry bin
            }
            else if (f.casHash(fh, fh | LOCKED)) {
                try {
                    if (tabAt(tab, i) == f) {
                        for (Node e = f; e != null; e = e.next) {
                            if (e.val != null) { // (currently always true)
                                e.val = null;
                                --delta;
                            }
                        }
                        setTabAt(tab, i, null);
                        ++i;
                    }
                } finally {
                    // unlock; if CAS fails a waiter arrived, so notify
                    if (!f.casHash(fh | LOCKED, fh)) {
                        f.hash = fh;
                        synchronized (f) { f.notifyAll(); };
                    }
                }
            }
        }
        if (delta != 0)
            counter.add(delta);
    }
2232
2233 /* ----------------Table Traversal -------------- */
2234
2235 /**
2236 * Encapsulates traversal for methods such as containsValue; also
2237 * serves as a base class for other iterators and bulk tasks.
2238 *
2239 * At each step, the iterator snapshots the key ("nextKey") and
2240 * value ("nextVal") of a valid node (i.e., one that, at point of
2241 * snapshot, has a non-null user value). Because val fields can
2242 * change (including to null, indicating deletion), field nextVal
2243 * might not be accurate at point of use, but still maintains the
2244 * weak consistency property of holding a value that was once
2245 * valid.
2246 *
2247 * Internal traversals directly access these fields, as in:
2248 * {@code while (it.advance() != null) { process(it.nextKey); }}
2249 *
2250 * Exported iterators must track whether the iterator has advanced
2251 * (in hasNext vs next) (by setting/checking/nulling field
2252 * nextVal), and then extract key, value, or key-value pairs as
2253 * return values of next().
2254 *
2255 * The iterator visits once each still-valid node that was
2256 * reachable upon iterator construction. It might miss some that
2257 * were added to a bin after the bin was visited, which is OK wrt
2258 * consistency guarantees. Maintaining this property in the face
2259 * of possible ongoing resizes requires a fair amount of
2260 * bookkeeping state that is difficult to optimize away amidst
2261 * volatile accesses. Even so, traversal maintains reasonable
2262 * throughput.
2263 *
2264 * Normally, iteration proceeds bin-by-bin traversing lists.
2265 * However, if the table has been resized, then all future steps
2266 * must traverse both the bin at the current index as well as at
2267 * (index + baseSize); and so on for further resizings. To
2268 * paranoically cope with potential sharing by users of iterators
2269 * across threads, iteration terminates if a bounds checks fails
2270 * for a table read.
2271 *
2272 * This class extends ForkJoinTask to streamline parallel
2273 * iteration in bulk operations (see BulkTask). This adds only an
2274 * int of space overhead, which is close enough to negligible in
2275 * cases where it is not needed to not worry about it. Because
2276 * ForkJoinTask is Serializable, but iterators need not be, we
2277 * need to add warning suppressions.
2278 */
    @SuppressWarnings("serial") static class Traverser<K,V,R> extends ForkJoinTask<R> {
        final ConcurrentHashMap<K, V> map;
        Node next;           // the next entry to use
        Node last;           // the last entry used
        Object nextKey;      // cached key field of next
        Object nextVal;      // cached val field of next
        Node[] tab;          // current table; updated if resized
        int index;           // index of bin to use next
        int baseIndex;       // current index of initial table
        int baseLimit;       // index bound for initial table
        int baseSize;        // initial table size

        /** Creates iterator for all entries in the table. */
        Traverser(ConcurrentHashMap<K, V> map) {
            this.map = map;
        }

        /**
         * Creates iterator for split() methods.
         * Halves the parent's remaining range: the parent keeps
         * [baseIndex, mid) and this traverser takes [mid, baseLimit).
         */
        Traverser(Traverser<K,V,?> it) {
            ConcurrentHashMap<K, V> m; Node[] t;
            if ((m = this.map = it.map) == null)
                t = null;
            else if ((t = it.tab) == null && // force parent tab initialization
                     (t = it.tab = m.table) != null)
                it.baseLimit = it.baseSize = t.length;
            this.tab = t;
            this.baseSize = it.baseSize;
            it.baseLimit = this.index = this.baseIndex =
                ((this.baseLimit = it.baseLimit) + it.baseIndex + 1) >>> 1;
        }

        /**
         * Advances next; returns nextVal or null if terminated.
         * See above for explanation.
         */
        final Object advance() {
            Node e = last = next;
            Object ev = null;
            outer: do {
                if (e != null)                  // advance past used/skipped node
                    e = e.next;
                while (e == null) {             // get to next non-null bin
                    ConcurrentHashMap<K, V> m;
                    Node[] t; int b, i, n; Object ek; // checks must use locals
                    if ((t = tab) != null)
                        n = t.length;
                    else if ((m = map) != null && (t = tab = m.table) != null)
                        n = baseLimit = baseSize = t.length; // lazy init
                    else
                        break outer;
                    if ((b = baseIndex) >= baseLimit ||
                        (i = index) < 0 || i >= n)
                        break outer;            // exhausted (or stale bounds)
                    if ((e = tabAt(t, i)) != null && e.hash == MOVED) {
                        if ((ek = e.key) instanceof TreeBin)
                            e = ((TreeBin)ek).first; // walk tree as a list
                        else {
                            tab = (Node[])ek;   // forwarded; follow new table
                            continue; // restarts due to null val
                        }
                    }                           // visit upper slots if present
                    index = (i += baseSize) < n ? i : (baseIndex = b + 1);
                }
                nextKey = e.key;
            } while ((ev = e.val) == null);    // skip deleted or special nodes
            next = e;
            return nextVal = ev;
        }

        public final void remove() {
            // Remove the entry most recently returned by advance()
            if (nextVal == null && last == null)
                advance();
            Node e = last;
            if (e == null)
                throw new IllegalStateException();
            last = null;
            map.remove(e.key);
        }

        public final boolean hasNext() {
            return nextVal != null || advance() != null;
        }

        public final boolean hasMoreElements() { return hasNext(); }

        // ForkJoinTask plumbing; meaningful only in bulk-task subclasses
        public final void setRawResult(Object x) { }
        public R getRawResult() { return null; }
        public boolean exec() { return true; }
    }
2367
2368 /* ---------------- Public operations -------------- */
2369
2370 /**
2371 * Creates a new, empty map with the default initial table size (16).
2372 */
2373 public ConcurrentHashMap() {
2374 this.counter = new LongAdder();
2375 }
2376
2377 /**
2378 * Creates a new, empty map with an initial table size
2379 * accommodating the specified number of elements without the need
2380 * to dynamically resize.
2381 *
2382 * @param initialCapacity The implementation performs internal
2383 * sizing to accommodate this many elements.
2384 * @throws IllegalArgumentException if the initial capacity of
2385 * elements is negative
2386 */
2387 public ConcurrentHashMap(int initialCapacity) {
2388 if (initialCapacity < 0)
2389 throw new IllegalArgumentException();
2390 int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
2391 MAXIMUM_CAPACITY :
2392 tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
2393 this.counter = new LongAdder();
2394 this.sizeCtl = cap;
2395 }
2396
2397 /**
2398 * Creates a new map with the same mappings as the given map.
2399 *
2400 * @param m the map
2401 */
2402 public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
2403 this.counter = new LongAdder();
2404 this.sizeCtl = DEFAULT_CAPACITY;
2405 internalPutAll(m);
2406 }
2407
2408 /**
2409 * Creates a new, empty map with an initial table size based on
2410 * the given number of elements ({@code initialCapacity}) and
2411 * initial table density ({@code loadFactor}).
2412 *
2413 * @param initialCapacity the initial capacity. The implementation
2414 * performs internal sizing to accommodate this many elements,
2415 * given the specified load factor.
2416 * @param loadFactor the load factor (table density) for
2417 * establishing the initial table size
2418 * @throws IllegalArgumentException if the initial capacity of
2419 * elements is negative or the load factor is nonpositive
2420 *
2421 * @since 1.6
2422 */
2423 public ConcurrentHashMap(int initialCapacity, float loadFactor) {
2424 this(initialCapacity, loadFactor, 1);
2425 }
2426
2427 /**
2428 * Creates a new, empty map with an initial table size based on
2429 * the given number of elements ({@code initialCapacity}), table
2430 * density ({@code loadFactor}), and number of concurrently
2431 * updating threads ({@code concurrencyLevel}).
2432 *
2433 * @param initialCapacity the initial capacity. The implementation
2434 * performs internal sizing to accommodate this many elements,
2435 * given the specified load factor.
2436 * @param loadFactor the load factor (table density) for
2437 * establishing the initial table size
2438 * @param concurrencyLevel the estimated number of concurrently
2439 * updating threads. The implementation may use this value as
2440 * a sizing hint.
2441 * @throws IllegalArgumentException if the initial capacity is
2442 * negative or the load factor or concurrencyLevel are
2443 * nonpositive
2444 */
2445 public ConcurrentHashMap(int initialCapacity,
2446 float loadFactor, int concurrencyLevel) {
2447 if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
2448 throw new IllegalArgumentException();
2449 if (initialCapacity < concurrencyLevel) // Use at least as many bins
2450 initialCapacity = concurrencyLevel; // as estimated threads
2451 long size = (long)(1.0 + (long)initialCapacity / loadFactor);
2452 int cap = (size >= (long)MAXIMUM_CAPACITY) ?
2453 MAXIMUM_CAPACITY : tableSizeFor((int)size);
2454 this.counter = new LongAdder();
2455 this.sizeCtl = cap;
2456 }
2457
2458 /**
2459 * {@inheritDoc}
2460 */
2461 public boolean isEmpty() {
2462 return counter.sum() <= 0L; // ignore transient negative values
2463 }
2464
2465 /**
2466 * {@inheritDoc}
2467 */
2468 public int size() {
2469 long n = counter.sum();
2470 return ((n < 0L) ? 0 :
2471 (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
2472 (int)n);
2473 }
2474
2475 /**
2476 * Returns the number of mappings. This method should be used
2477 * instead of {@link #size} because a ConcurrentHashMap may
2478 * contain more mappings than can be represented as an int. The
2479 * value returned is a snapshot; the actual count may differ if
2480 * there are ongoing concurrent insertions or removals.
2481 *
2482 * @return the number of mappings
2483 */
2484 public long mappingCount() {
2485 long n = counter.sum();
2486 return (n < 0L) ? 0L : n; // ignore transient negative values
2487 }
2488
2489 /**
2490 * Returns the value to which the specified key is mapped,
2491 * or {@code null} if this map contains no mapping for the key.
2492 *
2493 * <p>More formally, if this map contains a mapping from a key
2494 * {@code k} to a value {@code v} such that {@code key.equals(k)},
2495 * then this method returns {@code v}; otherwise it returns
2496 * {@code null}. (There can be at most one such mapping.)
2497 *
2498 * @throws NullPointerException if the specified key is null
2499 */
2500 @SuppressWarnings("unchecked") public V get(Object key) {
2501 if (key == null)
2502 throw new NullPointerException();
2503 return (V)internalGet(key);
2504 }
2505
2506 /**
2507 * Returns the value to which the specified key is mapped,
2508 * or the given defaultValue if this map contains no mapping for the key.
2509 *
2510 * @param key the key
2511 * @param defaultValue the value to return if this map contains
2512 * no mapping for the given key
2513 * @return the mapping for the key, if present; else the defaultValue
2514 * @throws NullPointerException if the specified key is null
2515 */
2516 @SuppressWarnings("unchecked") public V getValueOrDefault(Object key, V defaultValue) {
2517 if (key == null)
2518 throw new NullPointerException();
2519 V v = (V) internalGet(key);
2520 return v == null ? defaultValue : v;
2521 }
2522
2523 /**
2524 * Tests if the specified object is a key in this table.
2525 *
2526 * @param key possible key
2527 * @return {@code true} if and only if the specified object
2528 * is a key in this table, as determined by the
2529 * {@code equals} method; {@code false} otherwise
2530 * @throws NullPointerException if the specified key is null
2531 */
2532 public boolean containsKey(Object key) {
2533 if (key == null)
2534 throw new NullPointerException();
2535 return internalGet(key) != null;
2536 }
2537
2538 /**
2539 * Returns {@code true} if this map maps one or more keys to the
2540 * specified value. Note: This method may require a full traversal
2541 * of the map, and is much slower than method {@code containsKey}.
2542 *
2543 * @param value value whose presence in this map is to be tested
2544 * @return {@code true} if this map maps one or more keys to the
2545 * specified value
2546 * @throws NullPointerException if the specified value is null
2547 */
2548 public boolean containsValue(Object value) {
2549 if (value == null)
2550 throw new NullPointerException();
2551 Object v;
2552 Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
2553 while ((v = it.advance()) != null) {
2554 if (v == value || value.equals(v))
2555 return true;
2556 }
2557 return false;
2558 }
2559
2560 /**
2561 * Legacy method testing if some key maps into the specified value
2562 * in this table. This method is identical in functionality to
2563 * {@link #containsValue}, and exists solely to ensure
2564 * full compatibility with class {@link java.util.Hashtable},
2565 * which supported this method prior to introduction of the
2566 * Java Collections framework.
2567 *
2568 * @param value a value to search for
2569 * @return {@code true} if and only if some key maps to the
2570 * {@code value} argument in this table as
2571 * determined by the {@code equals} method;
2572 * {@code false} otherwise
2573 * @throws NullPointerException if the specified value is null
2574 */
2575 public boolean contains(Object value) {
2576 return containsValue(value);
2577 }
2578
2579 /**
2580 * Maps the specified key to the specified value in this table.
2581 * Neither the key nor the value can be null.
2582 *
2583 * <p> The value can be retrieved by calling the {@code get} method
2584 * with a key that is equal to the original key.
2585 *
2586 * @param key key with which the specified value is to be associated
2587 * @param value value to be associated with the specified key
2588 * @return the previous value associated with {@code key}, or
2589 * {@code null} if there was no mapping for {@code key}
2590 * @throws NullPointerException if the specified key or value is null
2591 */
2592 @SuppressWarnings("unchecked") public V put(K key, V value) {
2593 if (key == null || value == null)
2594 throw new NullPointerException();
2595 return (V)internalPut(key, value);
2596 }
2597
2598 /**
2599 * {@inheritDoc}
2600 *
2601 * @return the previous value associated with the specified key,
2602 * or {@code null} if there was no mapping for the key
2603 * @throws NullPointerException if the specified key or value is null
2604 */
2605 @SuppressWarnings("unchecked") public V putIfAbsent(K key, V value) {
2606 if (key == null || value == null)
2607 throw new NullPointerException();
2608 return (V)internalPutIfAbsent(key, value);
2609 }
2610
2611 /**
2612 * Copies all of the mappings from the specified map to this one.
2613 * These mappings replace any mappings that this map had for any of the
2614 * keys currently in the specified map.
2615 *
2616 * @param m mappings to be stored in this map
2617 */
2618 public void putAll(Map<? extends K, ? extends V> m) {
2619 internalPutAll(m);
2620 }
2621
2622 /**
2623 * If the specified key is not already associated with a value,
2624 * computes its value using the given mappingFunction and enters
2625 * it into the map unless null. This is equivalent to
2626 * <pre> {@code
2627 * if (map.containsKey(key))
2628 * return map.get(key);
2629 * value = mappingFunction.apply(key);
2630 * if (value != null)
2631 * map.put(key, value);
2632 * return value;}</pre>
2633 *
2634 * except that the action is performed atomically. If the
2635 * function returns {@code null} no mapping is recorded. If the
2636 * function itself throws an (unchecked) exception, the exception
2637 * is rethrown to its caller, and no mapping is recorded. Some
2638 * attempted update operations on this map by other threads may be
2639 * blocked while computation is in progress, so the computation
2640 * should be short and simple, and must not attempt to update any
2641 * other mappings of this Map. The most appropriate usage is to
2642 * construct a new object serving as an initial mapped value, or
2643 * memoized result, as in:
2644 *
2645 * <pre> {@code
2646 * map.computeIfAbsent(key, new Fun<K, V>() {
2647 * public V map(K k) { return new Value(f(k)); }});}</pre>
2648 *
2649 * @param key key with which the specified value is to be associated
2650 * @param mappingFunction the function to compute a value
2651 * @return the current (existing or computed) value associated with
2652 * the specified key, or null if the computed value is null
2653 * @throws NullPointerException if the specified key or mappingFunction
2654 * is null
2655 * @throws IllegalStateException if the computation detectably
2656 * attempts a recursive update to this map that would
2657 * otherwise never complete
2658 * @throws RuntimeException or Error if the mappingFunction does so,
2659 * in which case the mapping is left unestablished
2660 */
2661 @SuppressWarnings("unchecked") public V computeIfAbsent
2662 (K key, Fun<? super K, ? extends V> mappingFunction) {
2663 if (key == null || mappingFunction == null)
2664 throw new NullPointerException();
2665 return (V)internalComputeIfAbsent(key, mappingFunction);
2666 }
2667
    /**
     * If the given key is present, computes a new mapping value given a key and
     * its current mapped value. This is equivalent to
     * <pre> {@code
     * if (map.containsKey(key)) {
     *   value = remappingFunction.apply(key, map.get(key));
     *   if (value != null)
     *     map.put(key, value);
     *   else
     *     map.remove(key);
     * }
     * }</pre>
     *
     * except that the action is performed atomically. If the
     * function returns {@code null}, the mapping is removed. If the
     * function itself throws an (unchecked) exception, the exception
     * is rethrown to its caller, and the current mapping is left
     * unchanged. Some attempted update operations on this map by
     * other threads may be blocked while computation is in progress,
     * so the computation should be short and simple, and must not
     * attempt to update any other mappings of this Map.
     *
     * @param key key with which the specified value is to be associated
     * @param remappingFunction the function to compute a value
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key or remappingFunction
     * is null
     * @throws IllegalStateException if the computation detectably
     * attempts a recursive update to this map that would
     * otherwise never complete
     * @throws RuntimeException or Error if the remappingFunction does so,
     * in which case the mapping is unchanged
     */
    @SuppressWarnings("unchecked") public V computeIfPresent
        (K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
        if (key == null || remappingFunction == null)
            throw new NullPointerException();
        return (V)internalCompute(key, true, remappingFunction);
    }
2708
2709 /**
2710 * Computes a new mapping value given a key and
2711 * its current mapped value (or {@code null} if there is no current
2712 * mapping). This is equivalent to
2713 * <pre> {@code
2714 * value = remappingFunction.apply(key, map.get(key));
2715 * if (value != null)
2716 * map.put(key, value);
2717 * else
2718 * map.remove(key);
2719 * }</pre>
2720 *
2721 * except that the action is performed atomically. If the
2722 * function returns {@code null}, the mapping is removed. If the
2723 * function itself throws an (unchecked) exception, the exception
2724 * is rethrown to its caller, and the current mapping is left
2725 * unchanged. Some attempted update operations on this map by
2726 * other threads may be blocked while computation is in progress,
2727 * so the computation should be short and simple, and must not
2728 * attempt to update any other mappings of this Map. For example,
2729 * to either create or append new messages to a value mapping:
2730 *
2731 * <pre> {@code
2732 * Map<Key, String> map = ...;
2733 * final String msg = ...;
2734 * map.compute(key, new BiFun<Key, String, String>() {
2735 * public String apply(Key k, String v) {
2736 * return (v == null) ? msg : v + msg;});}}</pre>
2737 *
2738 * @param key key with which the specified value is to be associated
2739 * @param remappingFunction the function to compute a value
2740 * @return the new value associated with the specified key, or null if none
2741 * @throws NullPointerException if the specified key or remappingFunction
2742 * is null
2743 * @throws IllegalStateException if the computation detectably
2744 * attempts a recursive update to this map that would
2745 * otherwise never complete
2746 * @throws RuntimeException or Error if the remappingFunction does so,
2747 * in which case the mapping is unchanged
2748 */
2749 @SuppressWarnings("unchecked") public V compute
2750 (K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
2751 if (key == null || remappingFunction == null)
2752 throw new NullPointerException();
2753 return (V)internalCompute(key, false, remappingFunction);
2754 }
2755
    /**
     * If the specified key is not already associated
     * with a value, associate it with the given value.
     * Otherwise, replace the value with the results of
     * the given remapping function. This is equivalent to:
     * <pre> {@code
     * if (!map.containsKey(key))
     *   map.put(key, value);
     * else {
     *   newValue = remappingFunction.apply(map.get(key), value);
     *   if (newValue != null)
     *     map.put(key, newValue);
     *   else
     *     map.remove(key);
     * }
     * }</pre>
     * except that the action is performed atomically. If the
     * function returns {@code null}, the mapping is removed. If the
     * function itself throws an (unchecked) exception, the exception
     * is rethrown to its caller, and the current mapping is left
     * unchanged. Some attempted update operations on this map by
     * other threads may be blocked while computation is in progress,
     * so the computation should be short and simple, and must not
     * attempt to update any other mappings of this Map.
     *
     * @param key key with which the specified value is to be associated
     * @param value the value to associate if the key is absent
     * @param remappingFunction the function to combine the existing and
     * given values if the key is present
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key or value or
     * remappingFunction is null
     */
    @SuppressWarnings("unchecked") public V merge
        (K key, V value, BiFun<? super V, ? super V, ? extends V> remappingFunction) {
        if (key == null || value == null || remappingFunction == null)
            throw new NullPointerException();
        return (V)internalMerge(key, value, remappingFunction);
    }
2787
2788 /**
2789 * Removes the key (and its corresponding value) from this map.
2790 * This method does nothing if the key is not in the map.
2791 *
2792 * @param key the key that needs to be removed
2793 * @return the previous value associated with {@code key}, or
2794 * {@code null} if there was no mapping for {@code key}
2795 * @throws NullPointerException if the specified key is null
2796 */
2797 @SuppressWarnings("unchecked") public V remove(Object key) {
2798 if (key == null)
2799 throw new NullPointerException();
2800 return (V)internalReplace(key, null, null);
2801 }
2802
2803 /**
2804 * {@inheritDoc}
2805 *
2806 * @throws NullPointerException if the specified key is null
2807 */
2808 public boolean remove(Object key, Object value) {
2809 if (key == null)
2810 throw new NullPointerException();
2811 if (value == null)
2812 return false;
2813 return internalReplace(key, null, value) != null;
2814 }
2815
2816 /**
2817 * {@inheritDoc}
2818 *
2819 * @throws NullPointerException if any of the arguments are null
2820 */
2821 public boolean replace(K key, V oldValue, V newValue) {
2822 if (key == null || oldValue == null || newValue == null)
2823 throw new NullPointerException();
2824 return internalReplace(key, newValue, oldValue) != null;
2825 }
2826
2827 /**
2828 * {@inheritDoc}
2829 *
2830 * @return the previous value associated with the specified key,
2831 * or {@code null} if there was no mapping for the key
2832 * @throws NullPointerException if the specified key or value is null
2833 */
2834 @SuppressWarnings("unchecked") public V replace(K key, V value) {
2835 if (key == null || value == null)
2836 throw new NullPointerException();
2837 return (V)internalReplace(key, value, null);
2838 }
2839
2840 /**
2841 * Removes all of the mappings from this map.
2842 */
2843 public void clear() {
2844 internalClear();
2845 }
2846
2847 /**
2848 * Returns a {@link Set} view of the keys contained in this map.
2849 * The set is backed by the map, so changes to the map are
2850 * reflected in the set, and vice-versa. The set supports element
2851 * removal, which removes the corresponding mapping from this map,
2852 * via the {@code Iterator.remove}, {@code Set.remove},
2853 * {@code removeAll}, {@code retainAll}, and {@code clear}
2854 * operations. It does not support the {@code add} or
2855 * {@code addAll} operations.
2856 *
2857 * <p>The view's {@code iterator} is a "weakly consistent" iterator
2858 * that will never throw {@link ConcurrentModificationException},
2859 * and guarantees to traverse elements as they existed upon
2860 * construction of the iterator, and may (but is not guaranteed to)
2861 * reflect any modifications subsequent to construction.
2862 */
2863 public Set<K> keySet() {
2864 KeySet<K,V> ks = keySet;
2865 return (ks != null) ? ks : (keySet = new KeySet<K,V>(this));
2866 }
2867
    /**
     * Returns a {@link Collection} view of the values contained in this map.
     * The collection is backed by the map, so changes to the map are
     * reflected in the collection, and vice-versa. The collection
     * supports element removal, which removes the corresponding
     * mapping from this map, via the {@code Iterator.remove},
     * {@code Collection.remove}, {@code removeAll},
     * {@code retainAll}, and {@code clear} operations. It does not
     * support the {@code add} or {@code addAll} operations.
     *
     * <p>The view's {@code iterator} is a "weakly consistent" iterator
     * that will never throw {@link ConcurrentModificationException},
     * and guarantees to traverse elements as they existed upon
     * construction of the iterator, and may (but is not guaranteed to)
     * reflect any modifications subsequent to construction.
     */
    public Collection<V> values() {
        // Lazily created; racy initialization is benign (views are stateless
        // apart from the map reference).
        Values<K,V> vs = values;
        return (vs != null) ? vs : (values = new Values<K,V>(this));
    }
2888
    /**
     * Returns a {@link Set} view of the mappings contained in this map.
     * The set is backed by the map, so changes to the map are
     * reflected in the set, and vice-versa. The set supports element
     * removal, which removes the corresponding mapping from the map,
     * via the {@code Iterator.remove}, {@code Set.remove},
     * {@code removeAll}, {@code retainAll}, and {@code clear}
     * operations. It does not support the {@code add} or
     * {@code addAll} operations.
     *
     * <p>The view's {@code iterator} is a "weakly consistent" iterator
     * that will never throw {@link ConcurrentModificationException},
     * and guarantees to traverse elements as they existed upon
     * construction of the iterator, and may (but is not guaranteed to)
     * reflect any modifications subsequent to construction.
     */
    public Set<Map.Entry<K,V>> entrySet() {
        // Lazily created; racy initialization is benign (see keySet()).
        EntrySet<K,V> es = entrySet;
        return (es != null) ? es : (entrySet = new EntrySet<K,V>(this));
    }
2909
    /**
     * Returns an enumeration of the keys in this table.
     * (Legacy {@link java.util.Hashtable} compatibility method.)
     *
     * @return an enumeration of the keys in this table
     * @see #keySet()
     */
    public Enumeration<K> keys() {
        // KeyIterator implements Enumeration as well as Spliterator.
        return new KeyIterator<K,V>(this);
    }
2919
    /**
     * Returns an enumeration of the values in this table.
     * (Legacy {@link java.util.Hashtable} compatibility method.)
     *
     * @return an enumeration of the values in this table
     * @see #values()
     */
    public Enumeration<V> elements() {
        // ValueIterator implements Enumeration as well as Spliterator.
        return new ValueIterator<K,V>(this);
    }
2929
    /**
     * Returns a partitionable iterator of the keys in this map.
     *
     * @return a partitionable iterator of the keys in this map
     */
    public Spliterator<K> keySpliterator() {
        // Same object also serves as Iterator/Enumeration; split() hands
        // off part of the traversal for parallel use.
        return new KeyIterator<K,V>(this);
    }
2938
    /**
     * Returns a partitionable iterator of the values in this map.
     *
     * @return a partitionable iterator of the values in this map
     */
    public Spliterator<V> valueSpliterator() {
        // See keySpliterator(): the iterator doubles as a Spliterator.
        return new ValueIterator<K,V>(this);
    }
2947
    /**
     * Returns a partitionable iterator of the entries in this map.
     *
     * @return a partitionable iterator of the entries in this map
     */
    public Spliterator<Map.Entry<K,V>> entrySpliterator() {
        // See keySpliterator(): the iterator doubles as a Spliterator.
        return new EntryIterator<K,V>(this);
    }
2956
    /**
     * Returns the hash code value for this {@link Map}, i.e.,
     * the sum of, for each key-value pair in the map,
     * {@code key.hashCode() ^ value.hashCode()}.
     *
     * @return the hash code value for this map
     */
    public int hashCode() {
        int h = 0;
        // Weakly-consistent traversal: advance() yields the next non-null
        // value and leaves the matching key in it.nextKey.
        Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
        Object v;
        while ((v = it.advance()) != null) {
            h += it.nextKey.hashCode() ^ v.hashCode();
        }
        return h;
    }
2973
2974 /**
2975 * Returns a string representation of this map. The string
2976 * representation consists of a list of key-value mappings (in no
2977 * particular order) enclosed in braces ("{@code {}}"). Adjacent
2978 * mappings are separated by the characters {@code ", "} (comma
2979 * and space). Each key-value mapping is rendered as the key
2980 * followed by an equals sign ("{@code =}") followed by the
2981 * associated value.
2982 *
2983 * @return a string representation of this map
2984 */
2985 public String toString() {
2986 Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
2987 StringBuilder sb = new StringBuilder();
2988 sb.append('{');
2989 Object v;
2990 if ((v = it.advance()) != null) {
2991 for (;;) {
2992 Object k = it.nextKey;
2993 sb.append(k == this ? "(this Map)" : k);
2994 sb.append('=');
2995 sb.append(v == this ? "(this Map)" : v);
2996 if ((v = it.advance()) == null)
2997 break;
2998 sb.append(',').append(' ');
2999 }
3000 }
3001 return sb.append('}').toString();
3002 }
3003
    /**
     * Compares the specified object with this map for equality.
     * Returns {@code true} if the given object is a map with the same
     * mappings as this map. This operation may return misleading
     * results if either map is concurrently modified during execution
     * of this method.
     *
     * @param o object to be compared for equality with this map
     * @return {@code true} if the specified object is equal to this map
     */
    public boolean equals(Object o) {
        if (o != this) {
            if (!(o instanceof Map))
                return false;
            Map<?,?> m = (Map<?,?>) o;
            // Containment is checked in both directions rather than
            // comparing sizes, since sizes can change concurrently.
            Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
            Object val;
            while ((val = it.advance()) != null) {
                Object v = m.get(it.nextKey);
                if (v == null || (v != val && !v.equals(val)))
                    return false;
            }
            for (Map.Entry<?,?> e : m.entrySet()) {
                Object mk, mv, v;
                // A null key or value in the other map can never match:
                // this map contains no nulls.
                if ((mk = e.getKey()) == null ||
                    (mv = e.getValue()) == null ||
                    (v = internalGet(mk)) == null ||
                    (mv != v && !mv.equals(v)))
                    return false;
            }
        }
        return true;
    }
3037
3038 /* ----------------Iterators -------------- */
3039
    /**
     * Key view over a traversal. A single object serves as iterator
     * (via Traverser), legacy Enumeration, and partitionable Spliterator.
     */
    @SuppressWarnings("serial") static final class KeyIterator<K,V> extends Traverser<K,V,Object>
        implements Spliterator<K>, Enumeration<K> {
        KeyIterator(ConcurrentHashMap<K, V> map) { super(map); }
        KeyIterator(Traverser<K,V,Object> it) {
            super(it);
        }
        // Hands off a portion of the remaining traversal to a new iterator
        // (see the Traverser(Traverser) constructor). Splitting is illegal
        // once an element has been fetched but not yet consumed.
        public KeyIterator<K,V> split() {
            if (last != null || (next != null && nextVal == null))
                throw new IllegalStateException();
            return new KeyIterator<K,V>(this);
        }
        @SuppressWarnings("unchecked") public final K next() {
            // nextVal == null means the current element was consumed (or
            // traversal has not started); advance() fetches the next one.
            if (nextVal == null && advance() == null)
                throw new NoSuchElementException();
            Object k = nextKey;
            nextVal = null;  // mark element consumed
            return (K) k;
        }

        public final K nextElement() { return next(); }
    }
3061
    /**
     * Value view over a traversal; see KeyIterator for the shared
     * iterator/Enumeration/Spliterator design.
     */
    @SuppressWarnings("serial") static final class ValueIterator<K,V> extends Traverser<K,V,Object>
        implements Spliterator<V>, Enumeration<V> {
        ValueIterator(ConcurrentHashMap<K, V> map) { super(map); }
        ValueIterator(Traverser<K,V,Object> it) {
            super(it);
        }
        // Splitting is illegal once an element has been fetched but not
        // yet consumed (last/nextVal state check).
        public ValueIterator<K,V> split() {
            if (last != null || (next != null && nextVal == null))
                throw new IllegalStateException();
            return new ValueIterator<K,V>(this);
        }

        @SuppressWarnings("unchecked") public final V next() {
            Object v;
            // Use the pending value if present, else fetch the next one.
            if ((v = nextVal) == null && (v = advance()) == null)
                throw new NoSuchElementException();
            nextVal = null;  // mark element consumed
            return (V) v;
        }

        public final V nextElement() { return next(); }
    }
3084
    /**
     * Entry view over a traversal; see KeyIterator for the shared
     * iterator/Spliterator design. Returned entries are MapEntry
     * snapshots that write through to the map on setValue.
     */
    @SuppressWarnings("serial") static final class EntryIterator<K,V> extends Traverser<K,V,Object>
        implements Spliterator<Map.Entry<K,V>> {
        EntryIterator(ConcurrentHashMap<K, V> map) { super(map); }
        EntryIterator(Traverser<K,V,Object> it) {
            super(it);
        }
        // Splitting is illegal once an element has been fetched but not
        // yet consumed (last/nextVal state check).
        public EntryIterator<K,V> split() {
            if (last != null || (next != null && nextVal == null))
                throw new IllegalStateException();
            return new EntryIterator<K,V>(this);
        }

        @SuppressWarnings("unchecked") public final Map.Entry<K,V> next() {
            Object v;
            if ((v = nextVal) == null && (v = advance()) == null)
                throw new NoSuchElementException();
            Object k = nextKey;
            nextVal = null;  // mark element consumed
            // Export a snapshot entry; it retains the map for write-through.
            return new MapEntry<K,V>((K)k, (V)v, map);
        }
    }
3106
    /**
     * Exported Entry for iterators. Holds a snapshot of the value taken
     * when the entry was created; {@code setValue} writes through to the
     * backing map (see its comment for the return-value caveat).
     */
    static final class MapEntry<K,V> implements Map.Entry<K, V> {
        final K key; // non-null
        V val;       // non-null; snapshot, may lag concurrent map updates
        final ConcurrentHashMap<K, V> map;
        MapEntry(K key, V val, ConcurrentHashMap<K, V> map) {
            this.key = key;
            this.val = val;
            this.map = map;
        }
        public final K getKey()       { return key; }
        public final V getValue()     { return val; }
        // Standard Map.Entry hash/equals contract, computed against the
        // snapshot value rather than the live map.
        public final int hashCode()   { return key.hashCode() ^ val.hashCode(); }
        public final String toString(){ return key + "=" + val; }

        public final boolean equals(Object o) {
            Object k, v; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (v = e.getValue()) != null &&
                    (k == key || k.equals(key)) &&
                    (v == val || v.equals(val)));
        }

        /**
         * Sets our entry's value and writes through to the map. The
         * value to return is somewhat arbitrary here. Since we do not
         * necessarily track asynchronous changes, the most recent
         * "previous" value could be different from what we return (or
         * could even have been removed in which case the put will
         * re-establish). We do not and cannot guarantee more.
         */
        public final V setValue(V value) {
            if (value == null) throw new NullPointerException();
            V v = val;
            val = value;          // update local snapshot
            map.put(key, value);  // write through to the backing map
            return v;
        }
    }
3149
3150 /* ----------------Views -------------- */
3151
    /**
     * Base class for views. Implements the Collection operations that can
     * be expressed via the abstract iterator()/contains()/remove() that
     * concrete subclasses supply, delegating size queries to the map.
     */
    static abstract class CHMView<K, V> {
        final ConcurrentHashMap<K, V> map;  // backing map
        CHMView(ConcurrentHashMap<K, V> map)  { this.map = map; }
        public final int size()                 { return map.size(); }
        public final boolean isEmpty()          { return map.isEmpty(); }
        public final void clear()               { map.clear(); }

        // implementations below rely on concrete classes supplying these
        abstract public Iterator<?> iterator();
        abstract public boolean contains(Object o);
        abstract public boolean remove(Object o);

        private static final String oomeMsg = "Required array size too large";

        public final Object[] toArray() {
            // mappingCount() is only an estimate; the loop below regrows
            // the array if concurrent insertions outpace it.
            long sz = map.mappingCount();
            if (sz > (long)(MAX_ARRAY_SIZE))
                throw new OutOfMemoryError(oomeMsg);
            int n = (int)sz;
            Object[] r = new Object[n];
            int i = 0;
            Iterator<?> it = iterator();
            while (it.hasNext()) {
                if (i == n) {
                    // Grow by ~50% + 1, saturating at MAX_ARRAY_SIZE; the
                    // guard arithmetic avoids int overflow near the cap.
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(oomeMsg);
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE;
                    else
                        n += (n >>> 1) + 1;
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = it.next();
            }
            // Trim if the estimate over-shot (concurrent removals).
            return (i == n) ? r : Arrays.copyOf(r, i);
        }

        @SuppressWarnings("unchecked") public final <T> T[] toArray(T[] a) {
            long sz = map.mappingCount();
            if (sz > (long)(MAX_ARRAY_SIZE))
                throw new OutOfMemoryError(oomeMsg);
            int m = (int)sz;
            // Reuse the caller's array when it is large enough, per the
            // Collection.toArray(T[]) contract.
            T[] r = (a.length >= m) ? a :
                (T[])java.lang.reflect.Array
                .newInstance(a.getClass().getComponentType(), m);
            int n = r.length;
            int i = 0;
            Iterator<?> it = iterator();
            while (it.hasNext()) {
                if (i == n) {
                    // Same saturating ~50% growth as toArray() above.
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(oomeMsg);
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE;
                    else
                        n += (n >>> 1) + 1;
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = (T)it.next();
            }
            if (a == r && i < n) {
                r[i] = null; // null-terminate
                return r;
            }
            return (i == n) ? r : Arrays.copyOf(r, i);
        }

        // Sum of element hash codes, per the Set/Collection convention.
        public final int hashCode() {
            int h = 0;
            for (Iterator<?> it = iterator(); it.hasNext();)
                h += it.next().hashCode();
            return h;
        }

        public final String toString() {
            StringBuilder sb = new StringBuilder();
            sb.append('[');
            Iterator<?> it = iterator();
            if (it.hasNext()) {
                for (;;) {
                    Object e = it.next();
                    sb.append(e == this ? "(this Collection)" : e);
                    if (!it.hasNext())
                        break;
                    sb.append(',').append(' ');
                }
            }
            return sb.append(']').toString();
        }

        public final boolean containsAll(Collection<?> c) {
            if (c != this) {
                for (Iterator<?> it = c.iterator(); it.hasNext();) {
                    Object e = it.next();
                    // null elements can never be contained: the map holds
                    // no null keys or values.
                    if (e == null || !contains(e))
                        return false;
                }
            }
            return true;
        }

        public final boolean removeAll(Collection<?> c) {
            boolean modified = false;
            for (Iterator<?> it = iterator(); it.hasNext();) {
                if (c.contains(it.next())) {
                    it.remove();  // writes through to the backing map
                    modified = true;
                }
            }
            return modified;
        }

        public final boolean retainAll(Collection<?> c) {
            boolean modified = false;
            for (Iterator<?> it = iterator(); it.hasNext();) {
                if (!c.contains(it.next())) {
                    it.remove();  // writes through to the backing map
                    modified = true;
                }
            }
            return modified;
        }

    }
3279
3280 static final class KeySet<K,V> extends CHMView<K,V> implements Set<K> {
3281 KeySet(ConcurrentHashMap<K, V> map) {
3282 super(map);
3283 }
3284 public final boolean contains(Object o) { return map.containsKey(o); }
3285 public final boolean remove(Object o) { return map.remove(o) != null; }
3286 public final Iterator<K> iterator() {
3287 return new KeyIterator<K,V>(map);
3288 }
3289 public final boolean add(K e) {
3290 throw new UnsupportedOperationException();
3291 }
3292 public final boolean addAll(Collection<? extends K> c) {
3293 throw new UnsupportedOperationException();
3294 }
3295 public boolean equals(Object o) {
3296 Set<?> c;
3297 return ((o instanceof Set) &&
3298 ((c = (Set<?>)o) == this ||
3299 (containsAll(c) && c.containsAll(this))));
3300 }
3301 }
3302
3303
3304 static final class Values<K,V> extends CHMView<K,V>
3305 implements Collection<V> {
3306 Values(ConcurrentHashMap<K, V> map) { super(map); }
3307 public final boolean contains(Object o) { return map.containsValue(o); }
3308 public final boolean remove(Object o) {
3309 if (o != null) {
3310 Iterator<V> it = new ValueIterator<K,V>(map);
3311 while (it.hasNext()) {
3312 if (o.equals(it.next())) {
3313 it.remove();
3314 return true;
3315 }
3316 }
3317 }
3318 return false;
3319 }
3320 public final Iterator<V> iterator() {
3321 return new ValueIterator<K,V>(map);
3322 }
3323 public final boolean add(V e) {
3324 throw new UnsupportedOperationException();
3325 }
3326 public final boolean addAll(Collection<? extends V> c) {
3327 throw new UnsupportedOperationException();
3328 }
3329
3330 }
3331
    /**
     * Entry-set view: membership compares an entry's value against the
     * map's current mapping for its key; add/addAll are unsupported.
     */
    static final class EntrySet<K,V> extends CHMView<K,V>
        implements Set<Map.Entry<K,V>> {
        EntrySet(ConcurrentHashMap<K, V> map) { super(map); }
        // Contained iff the entry's key currently maps to an equal value.
        public final boolean contains(Object o) {
            Object k, v, r; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (r = map.get(k)) != null &&
                    (v = e.getValue()) != null &&
                    (v == r || v.equals(r)));
        }
        // Removes only if key still maps to the entry's value (map.remove(k,v)).
        public final boolean remove(Object o) {
            Object k, v; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (v = e.getValue()) != null &&
                    map.remove(k, v));
        }
        public final Iterator<Map.Entry<K,V>> iterator() {
            return new EntryIterator<K,V>(map);
        }
        public final boolean add(Entry<K,V> e) {
            throw new UnsupportedOperationException();
        }
        public final boolean addAll(Collection<? extends Entry<K,V>> c) {
            throw new UnsupportedOperationException();
        }
        // Set equality: same elements in both directions, per Set contract.
        public boolean equals(Object o) {
            Set<?> c;
            return ((o instanceof Set) &&
                    ((c = (Set<?>)o) == this ||
                     (containsAll(c) && c.containsAll(this))));
        }
    }
3366
3367 /* ---------------- Serialization Support -------------- */
3368
    /**
     * Stripped-down version of helper class used in previous version,
     * declared for the sake of serialization compatibility.
     * Instances are written by writeObject solely so that streams remain
     * readable by older releases; do not change the serial form.
     */
    static class Segment<K,V> implements Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        final float loadFactor;  // retained only for the serial form
        Segment(float lf) { this.loadFactor = lf; }
    }
3378
    /**
     * Saves the state of the {@code ConcurrentHashMap} instance to a
     * stream (i.e., serializes it).
     * @param s the stream
     * @serialData
     * the key (Object) and value (Object)
     * for each key-value mapping, followed by a null pair.
     * The key-value mappings are emitted in no particular order.
     */
    @SuppressWarnings("unchecked") private void writeObject(java.io.ObjectOutputStream s)
        throws java.io.IOException {
        if (segments == null) { // for serialization compatibility
            // Recreate the dummy segment array expected by deserializers
            // of the previous format (see Segment above).
            segments = (Segment<K,V>[])
                new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
            for (int i = 0; i < segments.length; ++i)
                segments[i] = new Segment<K,V>(LOAD_FACTOR);
        }
        s.defaultWriteObject();
        // Emit all (key, value) pairs via a weakly-consistent traversal.
        Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
        Object v;
        while ((v = it.advance()) != null) {
            s.writeObject(it.nextKey);
            s.writeObject(v);
        }
        s.writeObject(null);  // null pair marks end of mappings
        s.writeObject(null);
        segments = null; // throw away
    }
3407
    /**
     * Reconstitutes the instance from a stream (that is, deserializes it).
     * Reads (key, value) pairs until the null-pair terminator written by
     * writeObject, chaining them into a list of Nodes, then sizes and
     * populates the table in a single pass.
     * @param s the stream
     */
    @SuppressWarnings("unchecked") private void readObject(java.io.ObjectInputStream s)
        throws java.io.IOException, ClassNotFoundException {
        s.defaultReadObject();
        this.segments = null; // unneeded
        // initialize transient final field
        UNSAFE.putObjectVolatile(this, counterOffset, new LongAdder());

        // Create all nodes, then place in table once size is known
        long size = 0L;
        Node p = null;
        for (;;) {
            K k = (K) s.readObject();
            V v = (V) s.readObject();
            if (k != null && v != null) {
                int h = spread(k.hashCode());
                p = new Node(h, k, v, p);  // prepend to pending chain
                ++size;
            }
            else
                break;  // null pair == end-of-mappings marker (see writeObject)
        }
        if (p != null) {
            boolean init = false;
            int n;
            // Choose a capacity comfortably above `size` (about 1.5x,
            // rounded up to a power of two), capped at MAXIMUM_CAPACITY.
            if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
                n = MAXIMUM_CAPACITY;
            else {
                int sz = (int)size;
                n = tableSizeFor(sz + (sz >>> 1) + 1);
            }
            int sc = sizeCtl;
            boolean collide = false;
            // CAS sizeCtl to -1 to claim the right to initialize the table.
            if (n > sc &&
                UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
                try {
                    if (table == null) {
                        init = true;
                        Node[] tab = new Node[n];
                        int mask = n - 1;
                        while (p != null) {
                            int j = p.hash & mask;
                            Node next = p.next;
                            // Push p onto the head of bin j.
                            Node q = p.next = tabAt(tab, j);
                            setTabAt(tab, j, p);
                            // Remember equal-hash collisions for the
                            // TreeBin conversion pass below.
                            if (!collide && q != null && q.hash == p.hash)
                                collide = true;
                            p = next;
                        }
                        table = tab;
                        counter.add(size);
                        sc = n - (n >>> 2);  // sc = 0.75 * n, stored in finally
                    }
                } finally {
                    sizeCtl = sc;  // release the initialization claim
                }
                if (collide) { // rescan and convert to TreeBins
                    Node[] tab = table;
                    for (int i = 0; i < tab.length; ++i) {
                        int c = 0;
                        for (Node e = tabAt(tab, i); e != null; e = e.next) {
                            // Only Comparable keys can populate a TreeBin.
                            if (++c > TREE_THRESHOLD &&
                                (e.key instanceof Comparable)) {
                                replaceWithTreeBin(tab, i, e.key);
                                break;
                            }
                        }
                    }
                }
            }
            if (!init) { // Can only happen if unsafely published.
                // Fall back to ordinary puts of the pending chain.
                while (p != null) {
                    internalPut(p.key, p.val);
                    p = p.next;
                }
            }
        }
    }
3489
3490
3491 // -------------------------------------------------------
3492
    // Sams: single-abstract-method interfaces used as transitional
    // stand-ins for the JDK8 functional interfaces (see class javadoc);
    // deliberately not annotated @FunctionalInterface for pre-JDK8 compile.
    /** Interface describing a void action of one argument */
    public interface Action<A> { void apply(A a); }
    /** Interface describing a void action of two arguments */
    public interface BiAction<A,B> { void apply(A a, B b); }
    /** Interface describing a function of one argument */
    public interface Fun<A,T> { T apply(A a); }
    /** Interface describing a function of two arguments */
    public interface BiFun<A,B,T> { T apply(A a, B b); }
    /** Interface describing a function of no arguments */
    public interface Generator<T> { T apply(); }
    /** Interface describing a function mapping its argument to a double */
    public interface ObjectToDouble<A> { double apply(A a); }
    /** Interface describing a function mapping its argument to a long */
    public interface ObjectToLong<A> { long apply(A a); }
    /** Interface describing a function mapping its argument to an int */
    public interface ObjectToInt<A> {int apply(A a); }
    /** Interface describing a function mapping two arguments to a double */
    public interface ObjectByObjectToDouble<A,B> { double apply(A a, B b); }
    /** Interface describing a function mapping two arguments to a long */
    public interface ObjectByObjectToLong<A,B> { long apply(A a, B b); }
    /** Interface describing a function mapping two arguments to an int */
    public interface ObjectByObjectToInt<A,B> {int apply(A a, B b); }
    /** Interface describing a function mapping a double to a double */
    public interface DoubleToDouble { double apply(double a); }
    /** Interface describing a function mapping a long to a long */
    public interface LongToLong { long apply(long a); }
    /** Interface describing a function mapping an int to an int */
    public interface IntToInt { int apply(int a); }
    /** Interface describing a function mapping two doubles to a double */
    public interface DoubleByDoubleToDouble { double apply(double a, double b); }
    /** Interface describing a function mapping two longs to a long */
    public interface LongByLongToLong { long apply(long a, long b); }
    /** Interface describing a function mapping two ints to an int */
    public interface IntByIntToInt { int apply(int a, int b); }
3528
3529
3530 // -------------------------------------------------------
3531
    /**
     * Returns an extended {@link Parallel} view of this map using the
     * given executor for bulk parallel operations.
     *
     * @param executor the executor
     * @return a parallel view
     */
    public Parallel parallel(ForkJoinPool executor) {
        // A fresh view per call; Parallel holds only the pool reference.
        return new Parallel(executor);
    }
3542
3543 /**
3544 * An extended view of a ConcurrentHashMap supporting bulk
3545 * parallel operations. These operations are designed to be
3546 * safely, and often sensibly, applied even with maps that are
3547 * being concurrently updated by other threads; for example, when
3548 * computing a snapshot summary of the values in a shared
3549 * registry. There are three kinds of operation, each with four
3550 * forms, accepting functions with Keys, Values, Entries, and
3551 * (Key, Value) arguments and/or return values. Because the
3552 * elements of a ConcurrentHashMap are not ordered in any
3553 * particular way, and may be processed in different orders in
3554 * different parallel executions, the correctness of supplied
3555 * functions should not depend on any ordering, or on any other
3556 * objects or values that may transiently change while computation
3557 * is in progress; and except for forEach actions, should ideally
3558 * be side-effect-free.
3559 *
3560 * <ul>
3561 * <li> forEach: Perform a given action on each element.
3562 * A variant form applies a given transformation on each element
3563 * before performing the action.</li>
3564 *
3565 * <li> search: Return the first available non-null result of
3566 * applying a given function on each element; skipping further
3567 * search when a result is found.</li>
3568 *
3569 * <li> reduce: Accumulate each element. The supplied reduction
3570 * function cannot rely on ordering (more formally, it should be
3571 * both associative and commutative). There are five variants:
3572 *
3573 * <ul>
3574 *
3575 * <li> Plain reductions. (There is not a form of this method for
3576 * (key, value) function arguments since there is no corresponding
3577 * return type.)</li>
3578 *
3579 * <li> Mapped reductions that accumulate the results of a given
3580 * function applied to each element.</li>
3581 *
3582 * <li> Reductions to scalar doubles, longs, and ints, using a
3583 * given basis value.</li>
3584 *
     * </ul>
3587 * </ul>
3588 *
3589 * <p>The concurrency properties of the bulk operations follow
3590 * from those of ConcurrentHashMap: Any non-null result returned
3591 * from {@code get(key)} and related access methods bears a
3592 * happens-before relation with the associated insertion or
3593 * update. The result of any bulk operation reflects the
3594 * composition of these per-element relations (but is not
3595 * necessarily atomic with respect to the map as a whole unless it
3596 * is somehow known to be quiescent). Conversely, because keys
3597 * and values in the map are never null, null serves as a reliable
3598 * atomic indicator of the current lack of any result. To
3599 * maintain this property, null serves as an implicit basis for
3600 * all non-scalar reduction operations. For the double, long, and
3601 * int versions, the basis should be one that, when combined with
3602 * any other value, returns that other value (more formally, it
3603 * should be the identity element for the reduction). Most common
3604 * reductions have these properties; for example, computing a sum
3605 * with basis 0 or a minimum with basis MAX_VALUE.
3606 *
3607 * <p>Search and transformation functions provided as arguments
3608 * should similarly return null to indicate the lack of any result
3609 * (in which case it is not used). In the case of mapped
3610 * reductions, this also enables transformations to serve as
3611 * filters, returning null (or, in the case of primitive
3612 * specializations, the identity basis) if the element should not
3613 * be combined. You can create compound transformations and
3614 * filterings by composing them yourself under this "null means
3615 * there is nothing there now" rule before using them in search or
3616 * reduce operations.
3617 *
3618 * <p>Methods accepting and/or returning Entry arguments maintain
3619 * key-value associations. They may be useful for example when
3620 * finding the key for the greatest value. Note that "plain" Entry
3621 * arguments can be supplied using {@code new
3622 * AbstractMap.SimpleEntry(k,v)}.
3623 *
3624 * <p> Bulk operations may complete abruptly, throwing an
3625 * exception encountered in the application of a supplied
3626 * function. Bear in mind when handling such exceptions that other
3627 * concurrently executing functions could also have thrown
3628 * exceptions, or would have done so if the first exception had
3629 * not occurred.
3630 *
3631 * <p>Parallel speedups compared to sequential processing are
3632 * common but not guaranteed. Operations involving brief
3633 * functions on small maps may execute more slowly than sequential
3634 * loops if the underlying work to parallelize the computation is
3635 * more expensive than the computation itself. Similarly,
3636 * parallelization may not lead to much actual parallelism if all
3637 * processors are busy performing unrelated tasks.
3638 *
3639 * <p> All arguments to all task methods must be non-null.
3640 *
3641 * <p><em>jsr166e note: During transition, this class
3642 * uses nested functional interfaces with different names but the
     * same forms as those expected for JDK8.</em>
3644 */
3645 public class Parallel {
3646 final ForkJoinPool fjp;
3647
        /**
         * Returns an extended view of this map using the given
         * executor for bulk parallel operations.
         *
         * @param executor the executor
         */
        public Parallel(ForkJoinPool executor) {
            this.fjp = executor;  // pool used by all bulk methods below
        }
3657
        /**
         * Performs the given action for each (key, value).
         *
         * @param action the action
         */
        public void forEach(BiAction<K,V> action) {
            // invoke() runs the bulk task in pool fjp and awaits completion.
            fjp.invoke(ForkJoinTasks.forEach
                       (ConcurrentHashMap.this, action));
        }
3667
        /**
         * Performs the given action for each non-null transformation
         * of each (key, value).
         *
         * @param transformer a function returning the transformation
         * for an element, or null if there is no transformation (in
         * which case the action is not applied)
         * @param action the action
         */
        public <U> void forEach(BiFun<? super K, ? super V, ? extends U> transformer,
                                Action<U> action) {
            // Delegates to the corresponding ForkJoinTasks bulk task.
            fjp.invoke(ForkJoinTasks.forEach
                       (ConcurrentHashMap.this, transformer, action));
        }
3682
        /**
         * Returns a non-null result from applying the given search
         * function on each (key, value), or null if none. Upon
         * success, further element processing is suppressed and the
         * results of any other parallel invocations of the search
         * function are ignored.
         *
         * @param searchFunction a function returning a non-null
         * result on success, else null
         * @return a non-null result from applying the given search
         * function on each (key, value), or null if none
         */
        public <U> U search(BiFun<? super K, ? super V, ? extends U> searchFunction) {
            // invoke() blocks until the search task completes.
            return fjp.invoke(ForkJoinTasks.search
                              (ConcurrentHashMap.this, searchFunction));
        }
3699
        /**
         * Returns the result of accumulating the given transformation
         * of all (key, value) pairs using the given reducer to
         * combine values, or null if none.
         *
         * @param transformer a function returning the transformation
         * for an element, or null if there is no transformation (in
         * which case it is not combined)
         * @param reducer a commutative associative combining function
         * @return the result of accumulating the given transformation
         * of all (key, value) pairs
         */
        public <U> U reduce(BiFun<? super K, ? super V, ? extends U> transformer,
                            BiFun<? super U, ? super U, ? extends U> reducer) {
            // Null is the implicit basis; see the class javadoc.
            return fjp.invoke(ForkJoinTasks.reduce
                              (ConcurrentHashMap.this, transformer, reducer));
        }
3717
        /**
         * Returns the result of accumulating the given transformation
         * of all (key, value) pairs using the given reducer to
         * combine values, and the given basis as an identity value.
         *
         * @param transformer a function returning the transformation
         * for an element
         * @param basis the identity (initial default value) for the reduction
         * @param reducer a commutative associative combining function
         * @return the result of accumulating the given transformation
         * of all (key, value) pairs
         */
        public double reduceToDouble(ObjectByObjectToDouble<? super K, ? super V> transformer,
                                     double basis,
                                     DoubleByDoubleToDouble reducer) {
            // Primitive-double specialization; basis must be the reducer's identity.
            return fjp.invoke(ForkJoinTasks.reduceToDouble
                              (ConcurrentHashMap.this, transformer, basis, reducer));
        }
3736
        /**
         * Returns the result of accumulating the given transformation
         * of all (key, value) pairs using the given reducer to
         * combine values, and the given basis as an identity value.
         *
         * @param transformer a function returning the transformation
         * for an element
         * @param basis the identity (initial default value) for the reduction
         * @param reducer a commutative associative combining function
         * @return the result of accumulating the given transformation
         * of all (key, value) pairs
         */
        public long reduceToLong(ObjectByObjectToLong<? super K, ? super V> transformer,
                                 long basis,
                                 LongByLongToLong reducer) {
            // Primitive-long specialization; basis must be the reducer's identity.
            return fjp.invoke(ForkJoinTasks.reduceToLong
                              (ConcurrentHashMap.this, transformer, basis, reducer));
        }
3755
        /**
         * Returns the result of accumulating the given transformation
         * of all (key, value) pairs using the given reducer to
         * combine values, and the given basis as an identity value.
         *
         * @param transformer a function returning the transformation
         * for an element
         * @param basis the identity (initial default value) for the reduction
         * @param reducer a commutative associative combining function
         * @return the result of accumulating the given transformation
         * of all (key, value) pairs
         */
        public int reduceToInt(ObjectByObjectToInt<? super K, ? super V> transformer,
                               int basis,
                               IntByIntToInt reducer) {
            // Primitive-int specialization; basis must be the reducer's identity.
            return fjp.invoke(ForkJoinTasks.reduceToInt
                              (ConcurrentHashMap.this, transformer, basis, reducer));
        }
3774
        /**
         * Performs the given action for each key.
         *
         * @param action the action
         */
        public void forEachKey(Action<K> action) {
            // Delegates to the key-only variant of the bulk forEach task.
            fjp.invoke(ForkJoinTasks.forEachKey
                       (ConcurrentHashMap.this, action));
        }
3784
3785 /**
3786 * Performs the given action for each non-null transformation
3787 * of each key.
3788 *
3789 * @param transformer a function returning the transformation
3790 * for an element, or null if there is no transformation (in
3791 * which case the action is not applied)
3792 * @param action the action
3793 */
3794 public <U> void forEachKey(Fun<? super K, ? extends U> transformer,
3795 Action<U> action) {
3796 fjp.invoke(ForkJoinTasks.forEachKey
3797 (ConcurrentHashMap.this, transformer, action));
3798 }
3799
3800 /**
3801 * Returns a non-null result from applying the given search
3802 * function on each key, or null if none. Upon success,
3803 * further element processing is suppressed and the results of
3804 * any other parallel invocations of the search function are
3805 * ignored.
3806 *
3807 * @param searchFunction a function returning a non-null
3808 * result on success, else null
3809 * @return a non-null result from applying the given search
3810 * function on each key, or null if none
3811 */
3812 public <U> U searchKeys(Fun<? super K, ? extends U> searchFunction) {
3813 return fjp.invoke(ForkJoinTasks.searchKeys
3814 (ConcurrentHashMap.this, searchFunction));
3815 }
3816
3817 /**
3818 * Returns the result of accumulating all keys using the given
3819 * reducer to combine values, or null if none.
3820 *
3821 * @param reducer a commutative associative combining function
3822 * @return the result of accumulating all keys using the given
3823 * reducer to combine values, or null if none
3824 */
3825 public K reduceKeys(BiFun<? super K, ? super K, ? extends K> reducer) {
3826 return fjp.invoke(ForkJoinTasks.reduceKeys
3827 (ConcurrentHashMap.this, reducer));
3828 }
3829
3830 /**
3831 * Returns the result of accumulating the given transformation
3832 * of all keys using the given reducer to combine values, or
3833 * null if none.
3834 *
3835 * @param transformer a function returning the transformation
3836 * for an element, or null if there is no transformation (in
3837 * which case it is not combined)
3838 * @param reducer a commutative associative combining function
3839 * @return the result of accumulating the given transformation
3840 * of all keys
3841 */
3842 public <U> U reduceKeys(Fun<? super K, ? extends U> transformer,
3843 BiFun<? super U, ? super U, ? extends U> reducer) {
3844 return fjp.invoke(ForkJoinTasks.reduceKeys
3845 (ConcurrentHashMap.this, transformer, reducer));
3846 }
3847
3848 /**
3849 * Returns the result of accumulating the given transformation
3850 * of all keys using the given reducer to combine values, and
3851 * the given basis as an identity value.
3852 *
3853 * @param transformer a function returning the transformation
3854 * for an element
3855 * @param basis the identity (initial default value) for the reduction
3856 * @param reducer a commutative associative combining function
3857 * @return the result of accumulating the given transformation
3858 * of all keys
3859 */
3860 public double reduceKeysToDouble(ObjectToDouble<? super K> transformer,
3861 double basis,
3862 DoubleByDoubleToDouble reducer) {
3863 return fjp.invoke(ForkJoinTasks.reduceKeysToDouble
3864 (ConcurrentHashMap.this, transformer, basis, reducer));
3865 }
3866
3867 /**
3868 * Returns the result of accumulating the given transformation
3869 * of all keys using the given reducer to combine values, and
3870 * the given basis as an identity value.
3871 *
3872 * @param transformer a function returning the transformation
3873 * for an element
3874 * @param basis the identity (initial default value) for the reduction
3875 * @param reducer a commutative associative combining function
3876 * @return the result of accumulating the given transformation
3877 * of all keys
3878 */
3879 public long reduceKeysToLong(ObjectToLong<? super K> transformer,
3880 long basis,
3881 LongByLongToLong reducer) {
3882 return fjp.invoke(ForkJoinTasks.reduceKeysToLong
3883 (ConcurrentHashMap.this, transformer, basis, reducer));
3884 }
3885
3886 /**
3887 * Returns the result of accumulating the given transformation
3888 * of all keys using the given reducer to combine values, and
3889 * the given basis as an identity value.
3890 *
3891 * @param transformer a function returning the transformation
3892 * for an element
3893 * @param basis the identity (initial default value) for the reduction
3894 * @param reducer a commutative associative combining function
3895 * @return the result of accumulating the given transformation
3896 * of all keys
3897 */
3898 public int reduceKeysToInt(ObjectToInt<? super K> transformer,
3899 int basis,
3900 IntByIntToInt reducer) {
3901 return fjp.invoke(ForkJoinTasks.reduceKeysToInt
3902 (ConcurrentHashMap.this, transformer, basis, reducer));
3903 }
3904
3905 /**
3906 * Performs the given action for each value.
3907 *
3908 * @param action the action
3909 */
3910 public void forEachValue(Action<V> action) {
3911 fjp.invoke(ForkJoinTasks.forEachValue
3912 (ConcurrentHashMap.this, action));
3913 }
3914
3915 /**
3916 * Performs the given action for each non-null transformation
3917 * of each value.
3918 *
3919 * @param transformer a function returning the transformation
3920 * for an element, or null if there is no transformation (in
3921 * which case the action is not applied)
3922 */
3923 public <U> void forEachValue(Fun<? super V, ? extends U> transformer,
3924 Action<U> action) {
3925 fjp.invoke(ForkJoinTasks.forEachValue
3926 (ConcurrentHashMap.this, transformer, action));
3927 }
3928
3929 /**
3930 * Returns a non-null result from applying the given search
3931 * function on each value, or null if none. Upon success,
3932 * further element processing is suppressed and the results of
3933 * any other parallel invocations of the search function are
3934 * ignored.
3935 *
3936 * @param searchFunction a function returning a non-null
3937 * result on success, else null
3938 * @return a non-null result from applying the given search
3939 * function on each value, or null if none
3940 */
3941 public <U> U searchValues(Fun<? super V, ? extends U> searchFunction) {
3942 return fjp.invoke(ForkJoinTasks.searchValues
3943 (ConcurrentHashMap.this, searchFunction));
3944 }
3945
3946 /**
3947 * Returns the result of accumulating all values using the
3948 * given reducer to combine values, or null if none.
3949 *
3950 * @param reducer a commutative associative combining function
3951 * @return the result of accumulating all values
3952 */
3953 public V reduceValues(BiFun<? super V, ? super V, ? extends V> reducer) {
3954 return fjp.invoke(ForkJoinTasks.reduceValues
3955 (ConcurrentHashMap.this, reducer));
3956 }
3957
3958 /**
3959 * Returns the result of accumulating the given transformation
3960 * of all values using the given reducer to combine values, or
3961 * null if none.
3962 *
3963 * @param transformer a function returning the transformation
3964 * for an element, or null if there is no transformation (in
3965 * which case it is not combined)
3966 * @param reducer a commutative associative combining function
3967 * @return the result of accumulating the given transformation
3968 * of all values
3969 */
3970 public <U> U reduceValues(Fun<? super V, ? extends U> transformer,
3971 BiFun<? super U, ? super U, ? extends U> reducer) {
3972 return fjp.invoke(ForkJoinTasks.reduceValues
3973 (ConcurrentHashMap.this, transformer, reducer));
3974 }
3975
3976 /**
3977 * Returns the result of accumulating the given transformation
3978 * of all values using the given reducer to combine values,
3979 * and the given basis as an identity value.
3980 *
3981 * @param transformer a function returning the transformation
3982 * for an element
3983 * @param basis the identity (initial default value) for the reduction
3984 * @param reducer a commutative associative combining function
3985 * @return the result of accumulating the given transformation
3986 * of all values
3987 */
3988 public double reduceValuesToDouble(ObjectToDouble<? super V> transformer,
3989 double basis,
3990 DoubleByDoubleToDouble reducer) {
3991 return fjp.invoke(ForkJoinTasks.reduceValuesToDouble
3992 (ConcurrentHashMap.this, transformer, basis, reducer));
3993 }
3994
3995 /**
3996 * Returns the result of accumulating the given transformation
3997 * of all values using the given reducer to combine values,
3998 * and the given basis as an identity value.
3999 *
4000 * @param transformer a function returning the transformation
4001 * for an element
4002 * @param basis the identity (initial default value) for the reduction
4003 * @param reducer a commutative associative combining function
4004 * @return the result of accumulating the given transformation
4005 * of all values
4006 */
4007 public long reduceValuesToLong(ObjectToLong<? super V> transformer,
4008 long basis,
4009 LongByLongToLong reducer) {
4010 return fjp.invoke(ForkJoinTasks.reduceValuesToLong
4011 (ConcurrentHashMap.this, transformer, basis, reducer));
4012 }
4013
4014 /**
4015 * Returns the result of accumulating the given transformation
4016 * of all values using the given reducer to combine values,
4017 * and the given basis as an identity value.
4018 *
4019 * @param transformer a function returning the transformation
4020 * for an element
4021 * @param basis the identity (initial default value) for the reduction
4022 * @param reducer a commutative associative combining function
4023 * @return the result of accumulating the given transformation
4024 * of all values
4025 */
4026 public int reduceValuesToInt(ObjectToInt<? super V> transformer,
4027 int basis,
4028 IntByIntToInt reducer) {
4029 return fjp.invoke(ForkJoinTasks.reduceValuesToInt
4030 (ConcurrentHashMap.this, transformer, basis, reducer));
4031 }
4032
4033 /**
4034 * Performs the given action for each entry.
4035 *
4036 * @param action the action
4037 */
4038 public void forEachEntry(Action<Map.Entry<K,V>> action) {
4039 fjp.invoke(ForkJoinTasks.forEachEntry
4040 (ConcurrentHashMap.this, action));
4041 }
4042
4043 /**
4044 * Performs the given action for each non-null transformation
4045 * of each entry.
4046 *
4047 * @param transformer a function returning the transformation
4048 * for an element, or null if there is no transformation (in
4049 * which case the action is not applied)
4050 * @param action the action
4051 */
4052 public <U> void forEachEntry(Fun<Map.Entry<K,V>, ? extends U> transformer,
4053 Action<U> action) {
4054 fjp.invoke(ForkJoinTasks.forEachEntry
4055 (ConcurrentHashMap.this, transformer, action));
4056 }
4057
4058 /**
4059 * Returns a non-null result from applying the given search
4060 * function on each entry, or null if none. Upon success,
4061 * further element processing is suppressed and the results of
4062 * any other parallel invocations of the search function are
4063 * ignored.
4064 *
4065 * @param searchFunction a function returning a non-null
4066 * result on success, else null
4067 * @return a non-null result from applying the given search
4068 * function on each entry, or null if none
4069 */
4070 public <U> U searchEntries(Fun<Map.Entry<K,V>, ? extends U> searchFunction) {
4071 return fjp.invoke(ForkJoinTasks.searchEntries
4072 (ConcurrentHashMap.this, searchFunction));
4073 }
4074
4075 /**
4076 * Returns the result of accumulating all entries using the
4077 * given reducer to combine values, or null if none.
4078 *
4079 * @param reducer a commutative associative combining function
4080 * @return the result of accumulating all entries
4081 */
4082 public Map.Entry<K,V> reduceEntries(BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4083 return fjp.invoke(ForkJoinTasks.reduceEntries
4084 (ConcurrentHashMap.this, reducer));
4085 }
4086
4087 /**
4088 * Returns the result of accumulating the given transformation
4089 * of all entries using the given reducer to combine values,
4090 * or null if none.
4091 *
4092 * @param transformer a function returning the transformation
4093 * for an element, or null if there is no transformation (in
4094 * which case it is not combined).
4095 * @param reducer a commutative associative combining function
4096 * @return the result of accumulating the given transformation
4097 * of all entries
4098 */
4099 public <U> U reduceEntries(Fun<Map.Entry<K,V>, ? extends U> transformer,
4100 BiFun<? super U, ? super U, ? extends U> reducer) {
4101 return fjp.invoke(ForkJoinTasks.reduceEntries
4102 (ConcurrentHashMap.this, transformer, reducer));
4103 }
4104
4105 /**
4106 * Returns the result of accumulating the given transformation
4107 * of all entries using the given reducer to combine values,
4108 * and the given basis as an identity value.
4109 *
4110 * @param transformer a function returning the transformation
4111 * for an element
4112 * @param basis the identity (initial default value) for the reduction
4113 * @param reducer a commutative associative combining function
4114 * @return the result of accumulating the given transformation
4115 * of all entries
4116 */
4117 public double reduceEntriesToDouble(ObjectToDouble<Map.Entry<K,V>> transformer,
4118 double basis,
4119 DoubleByDoubleToDouble reducer) {
4120 return fjp.invoke(ForkJoinTasks.reduceEntriesToDouble
4121 (ConcurrentHashMap.this, transformer, basis, reducer));
4122 }
4123
4124 /**
4125 * Returns the result of accumulating the given transformation
4126 * of all entries using the given reducer to combine values,
4127 * and the given basis as an identity value.
4128 *
4129 * @param transformer a function returning the transformation
4130 * for an element
4131 * @param basis the identity (initial default value) for the reduction
4132 * @param reducer a commutative associative combining function
4133 * @return the result of accumulating the given transformation
4134 * of all entries
4135 */
4136 public long reduceEntriesToLong(ObjectToLong<Map.Entry<K,V>> transformer,
4137 long basis,
4138 LongByLongToLong reducer) {
4139 return fjp.invoke(ForkJoinTasks.reduceEntriesToLong
4140 (ConcurrentHashMap.this, transformer, basis, reducer));
4141 }
4142
4143 /**
4144 * Returns the result of accumulating the given transformation
4145 * of all entries using the given reducer to combine values,
4146 * and the given basis as an identity value.
4147 *
4148 * @param transformer a function returning the transformation
4149 * for an element
4150 * @param basis the identity (initial default value) for the reduction
4151 * @param reducer a commutative associative combining function
4152 * @return the result of accumulating the given transformation
4153 * of all entries
4154 */
4155 public int reduceEntriesToInt(ObjectToInt<Map.Entry<K,V>> transformer,
4156 int basis,
4157 IntByIntToInt reducer) {
4158 return fjp.invoke(ForkJoinTasks.reduceEntriesToInt
4159 (ConcurrentHashMap.this, transformer, basis, reducer));
4160 }
4161 }
4162
4163 // ---------------------------------------------------------------------
4164
4165 /**
4166 * Predefined tasks for performing bulk parallel operations on
4167 * ConcurrentHashMaps. These tasks follow the forms and rules used
4168 * in class {@link Parallel}. Each method has the same name, but
4169 * returns a task rather than invoking it. These methods may be
4170 * useful in custom applications such as submitting a task without
4171 * waiting for completion, or combining with other tasks.
4172 */
4173 public static class ForkJoinTasks {
4174 private ForkJoinTasks() {}
4175
4176 /**
4177 * Returns a task that when invoked, performs the given
4178 * action for each (key, value)
4179 *
4180 * @param map the map
4181 * @param action the action
4182 * @return the task
4183 */
4184 public static <K,V> ForkJoinTask<Void> forEach
4185 (ConcurrentHashMap<K,V> map,
4186 BiAction<K,V> action) {
4187 if (action == null) throw new NullPointerException();
4188 return new ForEachMappingTask<K,V>(map, null, -1, action);
4189 }
4190
4191 /**
4192 * Returns a task that when invoked, performs the given
4193 * action for each non-null transformation of each (key, value)
4194 *
4195 * @param map the map
4196 * @param transformer a function returning the transformation
4197 * for an element, or null if there is no transformation (in
4198 * which case the action is not applied)
4199 * @param action the action
4200 * @return the task
4201 */
4202 public static <K,V,U> ForkJoinTask<Void> forEach
4203 (ConcurrentHashMap<K,V> map,
4204 BiFun<? super K, ? super V, ? extends U> transformer,
4205 Action<U> action) {
4206 if (transformer == null || action == null)
4207 throw new NullPointerException();
4208 return new ForEachTransformedMappingTask<K,V,U>
4209 (map, null, -1, transformer, action);
4210 }
4211
4212 /**
4213 * Returns a task that when invoked, returns a non-null result
4214 * from applying the given search function on each (key,
4215 * value), or null if none. Upon success, further element
4216 * processing is suppressed and the results of any other
4217 * parallel invocations of the search function are ignored.
4218 *
4219 * @param map the map
4220 * @param searchFunction a function returning a non-null
4221 * result on success, else null
4222 * @return the task
4223 */
4224 public static <K,V,U> ForkJoinTask<U> search
4225 (ConcurrentHashMap<K,V> map,
4226 BiFun<? super K, ? super V, ? extends U> searchFunction) {
4227 if (searchFunction == null) throw new NullPointerException();
4228 return new SearchMappingsTask<K,V,U>
4229 (map, null, -1, searchFunction,
4230 new AtomicReference<U>());
4231 }
4232
4233 /**
4234 * Returns a task that when invoked, returns the result of
4235 * accumulating the given transformation of all (key, value) pairs
4236 * using the given reducer to combine values, or null if none.
4237 *
4238 * @param map the map
4239 * @param transformer a function returning the transformation
4240 * for an element, or null if there is no transformation (in
4241 * which case it is not combined).
4242 * @param reducer a commutative associative combining function
4243 * @return the task
4244 */
4245 public static <K,V,U> ForkJoinTask<U> reduce
4246 (ConcurrentHashMap<K,V> map,
4247 BiFun<? super K, ? super V, ? extends U> transformer,
4248 BiFun<? super U, ? super U, ? extends U> reducer) {
4249 if (transformer == null || reducer == null)
4250 throw new NullPointerException();
4251 return new MapReduceMappingsTask<K,V,U>
4252 (map, null, -1, null, transformer, reducer);
4253 }
4254
4255 /**
4256 * Returns a task that when invoked, returns the result of
4257 * accumulating the given transformation of all (key, value) pairs
4258 * using the given reducer to combine values, and the given
4259 * basis as an identity value.
4260 *
4261 * @param map the map
4262 * @param transformer a function returning the transformation
4263 * for an element
4264 * @param basis the identity (initial default value) for the reduction
4265 * @param reducer a commutative associative combining function
4266 * @return the task
4267 */
4268 public static <K,V> ForkJoinTask<Double> reduceToDouble
4269 (ConcurrentHashMap<K,V> map,
4270 ObjectByObjectToDouble<? super K, ? super V> transformer,
4271 double basis,
4272 DoubleByDoubleToDouble reducer) {
4273 if (transformer == null || reducer == null)
4274 throw new NullPointerException();
4275 return new MapReduceMappingsToDoubleTask<K,V>
4276 (map, null, -1, null, transformer, basis, reducer);
4277 }
4278
4279 /**
4280 * Returns a task that when invoked, returns the result of
4281 * accumulating the given transformation of all (key, value) pairs
4282 * using the given reducer to combine values, and the given
4283 * basis as an identity value.
4284 *
4285 * @param map the map
4286 * @param transformer a function returning the transformation
4287 * for an element
4288 * @param basis the identity (initial default value) for the reduction
4289 * @param reducer a commutative associative combining function
4290 * @return the task
4291 */
4292 public static <K,V> ForkJoinTask<Long> reduceToLong
4293 (ConcurrentHashMap<K,V> map,
4294 ObjectByObjectToLong<? super K, ? super V> transformer,
4295 long basis,
4296 LongByLongToLong reducer) {
4297 if (transformer == null || reducer == null)
4298 throw new NullPointerException();
4299 return new MapReduceMappingsToLongTask<K,V>
4300 (map, null, -1, null, transformer, basis, reducer);
4301 }
4302
4303 /**
4304 * Returns a task that when invoked, returns the result of
4305 * accumulating the given transformation of all (key, value) pairs
4306 * using the given reducer to combine values, and the given
4307 * basis as an identity value.
4308 *
4309 * @param transformer a function returning the transformation
4310 * for an element
4311 * @param basis the identity (initial default value) for the reduction
4312 * @param reducer a commutative associative combining function
4313 * @return the task
4314 */
4315 public static <K,V> ForkJoinTask<Integer> reduceToInt
4316 (ConcurrentHashMap<K,V> map,
4317 ObjectByObjectToInt<? super K, ? super V> transformer,
4318 int basis,
4319 IntByIntToInt reducer) {
4320 if (transformer == null || reducer == null)
4321 throw new NullPointerException();
4322 return new MapReduceMappingsToIntTask<K,V>
4323 (map, null, -1, null, transformer, basis, reducer);
4324 }
4325
4326 /**
4327 * Returns a task that when invoked, performs the given action
4328 * for each key.
4329 *
4330 * @param map the map
4331 * @param action the action
4332 * @return the task
4333 */
4334 public static <K,V> ForkJoinTask<Void> forEachKey
4335 (ConcurrentHashMap<K,V> map,
4336 Action<K> action) {
4337 if (action == null) throw new NullPointerException();
4338 return new ForEachKeyTask<K,V>(map, null, -1, action);
4339 }
4340
4341 /**
4342 * Returns a task that when invoked, performs the given action
4343 * for each non-null transformation of each key.
4344 *
4345 * @param map the map
4346 * @param transformer a function returning the transformation
4347 * for an element, or null if there is no transformation (in
4348 * which case the action is not applied)
4349 * @param action the action
4350 * @return the task
4351 */
4352 public static <K,V,U> ForkJoinTask<Void> forEachKey
4353 (ConcurrentHashMap<K,V> map,
4354 Fun<? super K, ? extends U> transformer,
4355 Action<U> action) {
4356 if (transformer == null || action == null)
4357 throw new NullPointerException();
4358 return new ForEachTransformedKeyTask<K,V,U>
4359 (map, null, -1, transformer, action);
4360 }
4361
4362 /**
4363 * Returns a task that when invoked, returns a non-null result
4364 * from applying the given search function on each key, or
4365 * null if none. Upon success, further element processing is
4366 * suppressed and the results of any other parallel
4367 * invocations of the search function are ignored.
4368 *
4369 * @param map the map
4370 * @param searchFunction a function returning a non-null
4371 * result on success, else null
4372 * @return the task
4373 */
4374 public static <K,V,U> ForkJoinTask<U> searchKeys
4375 (ConcurrentHashMap<K,V> map,
4376 Fun<? super K, ? extends U> searchFunction) {
4377 if (searchFunction == null) throw new NullPointerException();
4378 return new SearchKeysTask<K,V,U>
4379 (map, null, -1, searchFunction,
4380 new AtomicReference<U>());
4381 }
4382
4383 /**
4384 * Returns a task that when invoked, returns the result of
4385 * accumulating all keys using the given reducer to combine
4386 * values, or null if none.
4387 *
4388 * @param map the map
4389 * @param reducer a commutative associative combining function
4390 * @return the task
4391 */
4392 public static <K,V> ForkJoinTask<K> reduceKeys
4393 (ConcurrentHashMap<K,V> map,
4394 BiFun<? super K, ? super K, ? extends K> reducer) {
4395 if (reducer == null) throw new NullPointerException();
4396 return new ReduceKeysTask<K,V>
4397 (map, null, -1, null, reducer);
4398 }
4399
4400 /**
4401 * Returns a task that when invoked, returns the result of
4402 * accumulating the given transformation of all keys using the given
4403 * reducer to combine values, or null if none.
4404 *
4405 * @param map the map
4406 * @param transformer a function returning the transformation
4407 * for an element, or null if there is no transformation (in
4408 * which case it is not combined).
4409 * @param reducer a commutative associative combining function
4410 * @return the task
4411 */
4412 public static <K,V,U> ForkJoinTask<U> reduceKeys
4413 (ConcurrentHashMap<K,V> map,
4414 Fun<? super K, ? extends U> transformer,
4415 BiFun<? super U, ? super U, ? extends U> reducer) {
4416 if (transformer == null || reducer == null)
4417 throw new NullPointerException();
4418 return new MapReduceKeysTask<K,V,U>
4419 (map, null, -1, null, transformer, reducer);
4420 }
4421
4422 /**
4423 * Returns a task that when invoked, returns the result of
4424 * accumulating the given transformation of all keys using the given
4425 * reducer to combine values, and the given basis as an
4426 * identity value.
4427 *
4428 * @param map the map
4429 * @param transformer a function returning the transformation
4430 * for an element
4431 * @param basis the identity (initial default value) for the reduction
4432 * @param reducer a commutative associative combining function
4433 * @return the task
4434 */
4435 public static <K,V> ForkJoinTask<Double> reduceKeysToDouble
4436 (ConcurrentHashMap<K,V> map,
4437 ObjectToDouble<? super K> transformer,
4438 double basis,
4439 DoubleByDoubleToDouble reducer) {
4440 if (transformer == null || reducer == null)
4441 throw new NullPointerException();
4442 return new MapReduceKeysToDoubleTask<K,V>
4443 (map, null, -1, null, transformer, basis, reducer);
4444 }
4445
4446 /**
4447 * Returns a task that when invoked, returns the result of
4448 * accumulating the given transformation of all keys using the given
4449 * reducer to combine values, and the given basis as an
4450 * identity value.
4451 *
4452 * @param map the map
4453 * @param transformer a function returning the transformation
4454 * for an element
4455 * @param basis the identity (initial default value) for the reduction
4456 * @param reducer a commutative associative combining function
4457 * @return the task
4458 */
4459 public static <K,V> ForkJoinTask<Long> reduceKeysToLong
4460 (ConcurrentHashMap<K,V> map,
4461 ObjectToLong<? super K> transformer,
4462 long basis,
4463 LongByLongToLong reducer) {
4464 if (transformer == null || reducer == null)
4465 throw new NullPointerException();
4466 return new MapReduceKeysToLongTask<K,V>
4467 (map, null, -1, null, transformer, basis, reducer);
4468 }
4469
4470 /**
4471 * Returns a task that when invoked, returns the result of
4472 * accumulating the given transformation of all keys using the given
4473 * reducer to combine values, and the given basis as an
4474 * identity value.
4475 *
4476 * @param map the map
4477 * @param transformer a function returning the transformation
4478 * for an element
4479 * @param basis the identity (initial default value) for the reduction
4480 * @param reducer a commutative associative combining function
4481 * @return the task
4482 */
4483 public static <K,V> ForkJoinTask<Integer> reduceKeysToInt
4484 (ConcurrentHashMap<K,V> map,
4485 ObjectToInt<? super K> transformer,
4486 int basis,
4487 IntByIntToInt reducer) {
4488 if (transformer == null || reducer == null)
4489 throw new NullPointerException();
4490 return new MapReduceKeysToIntTask<K,V>
4491 (map, null, -1, null, transformer, basis, reducer);
4492 }
4493
4494 /**
4495 * Returns a task that when invoked, performs the given action
4496 * for each value.
4497 *
4498 * @param map the map
4499 * @param action the action
4500 */
4501 public static <K,V> ForkJoinTask<Void> forEachValue
4502 (ConcurrentHashMap<K,V> map,
4503 Action<V> action) {
4504 if (action == null) throw new NullPointerException();
4505 return new ForEachValueTask<K,V>(map, null, -1, action);
4506 }
4507
4508 /**
4509 * Returns a task that when invoked, performs the given action
4510 * for each non-null transformation of each value.
4511 *
4512 * @param map the map
4513 * @param transformer a function returning the transformation
4514 * for an element, or null if there is no transformation (in
4515 * which case the action is not applied)
4516 * @param action the action
4517 */
4518 public static <K,V,U> ForkJoinTask<Void> forEachValue
4519 (ConcurrentHashMap<K,V> map,
4520 Fun<? super V, ? extends U> transformer,
4521 Action<U> action) {
4522 if (transformer == null || action == null)
4523 throw new NullPointerException();
4524 return new ForEachTransformedValueTask<K,V,U>
4525 (map, null, -1, transformer, action);
4526 }
4527
4528 /**
4529 * Returns a task that when invoked, returns a non-null result
4530 * from applying the given search function on each value, or
4531 * null if none. Upon success, further element processing is
4532 * suppressed and the results of any other parallel
4533 * invocations of the search function are ignored.
4534 *
4535 * @param map the map
4536 * @param searchFunction a function returning a non-null
4537 * result on success, else null
4538 * @return the task
4539 */
4540 public static <K,V,U> ForkJoinTask<U> searchValues
4541 (ConcurrentHashMap<K,V> map,
4542 Fun<? super V, ? extends U> searchFunction) {
4543 if (searchFunction == null) throw new NullPointerException();
4544 return new SearchValuesTask<K,V,U>
4545 (map, null, -1, searchFunction,
4546 new AtomicReference<U>());
4547 }
4548
4549 /**
4550 * Returns a task that when invoked, returns the result of
4551 * accumulating all values using the given reducer to combine
4552 * values, or null if none.
4553 *
4554 * @param map the map
4555 * @param reducer a commutative associative combining function
4556 * @return the task
4557 */
4558 public static <K,V> ForkJoinTask<V> reduceValues
4559 (ConcurrentHashMap<K,V> map,
4560 BiFun<? super V, ? super V, ? extends V> reducer) {
4561 if (reducer == null) throw new NullPointerException();
4562 return new ReduceValuesTask<K,V>
4563 (map, null, -1, null, reducer);
4564 }
4565
4566 /**
4567 * Returns a task that when invoked, returns the result of
4568 * accumulating the given transformation of all values using the
4569 * given reducer to combine values, or null if none.
4570 *
4571 * @param map the map
4572 * @param transformer a function returning the transformation
4573 * for an element, or null if there is no transformation (in
4574 * which case it is not combined).
4575 * @param reducer a commutative associative combining function
4576 * @return the task
4577 */
4578 public static <K,V,U> ForkJoinTask<U> reduceValues
4579 (ConcurrentHashMap<K,V> map,
4580 Fun<? super V, ? extends U> transformer,
4581 BiFun<? super U, ? super U, ? extends U> reducer) {
4582 if (transformer == null || reducer == null)
4583 throw new NullPointerException();
4584 return new MapReduceValuesTask<K,V,U>
4585 (map, null, -1, null, transformer, reducer);
4586 }
4587
4588 /**
4589 * Returns a task that when invoked, returns the result of
4590 * accumulating the given transformation of all values using the
4591 * given reducer to combine values, and the given basis as an
4592 * identity value.
4593 *
4594 * @param map the map
4595 * @param transformer a function returning the transformation
4596 * for an element
4597 * @param basis the identity (initial default value) for the reduction
4598 * @param reducer a commutative associative combining function
4599 * @return the task
4600 */
4601 public static <K,V> ForkJoinTask<Double> reduceValuesToDouble
4602 (ConcurrentHashMap<K,V> map,
4603 ObjectToDouble<? super V> transformer,
4604 double basis,
4605 DoubleByDoubleToDouble reducer) {
4606 if (transformer == null || reducer == null)
4607 throw new NullPointerException();
4608 return new MapReduceValuesToDoubleTask<K,V>
4609 (map, null, -1, null, transformer, basis, reducer);
4610 }
4611
4612 /**
4613 * Returns a task that when invoked, returns the result of
4614 * accumulating the given transformation of all values using the
4615 * given reducer to combine values, and the given basis as an
4616 * identity value.
4617 *
4618 * @param map the map
4619 * @param transformer a function returning the transformation
4620 * for an element
4621 * @param basis the identity (initial default value) for the reduction
4622 * @param reducer a commutative associative combining function
4623 * @return the task
4624 */
4625 public static <K,V> ForkJoinTask<Long> reduceValuesToLong
4626 (ConcurrentHashMap<K,V> map,
4627 ObjectToLong<? super V> transformer,
4628 long basis,
4629 LongByLongToLong reducer) {
4630 if (transformer == null || reducer == null)
4631 throw new NullPointerException();
4632 return new MapReduceValuesToLongTask<K,V>
4633 (map, null, -1, null, transformer, basis, reducer);
4634 }
4635
4636 /**
4637 * Returns a task that when invoked, returns the result of
4638 * accumulating the given transformation of all values using the
4639 * given reducer to combine values, and the given basis as an
4640 * identity value.
4641 *
4642 * @param map the map
4643 * @param transformer a function returning the transformation
4644 * for an element
4645 * @param basis the identity (initial default value) for the reduction
4646 * @param reducer a commutative associative combining function
4647 * @return the task
4648 */
4649 public static <K,V> ForkJoinTask<Integer> reduceValuesToInt
4650 (ConcurrentHashMap<K,V> map,
4651 ObjectToInt<? super V> transformer,
4652 int basis,
4653 IntByIntToInt reducer) {
4654 if (transformer == null || reducer == null)
4655 throw new NullPointerException();
4656 return new MapReduceValuesToIntTask<K,V>
4657 (map, null, -1, null, transformer, basis, reducer);
4658 }
4659
4660 /**
4661 * Returns a task that when invoked, perform the given action
4662 * for each entry.
4663 *
4664 * @param map the map
4665 * @param action the action
4666 */
4667 public static <K,V> ForkJoinTask<Void> forEachEntry
4668 (ConcurrentHashMap<K,V> map,
4669 Action<Map.Entry<K,V>> action) {
4670 if (action == null) throw new NullPointerException();
4671 return new ForEachEntryTask<K,V>(map, null, -1, action);
4672 }
4673
4674 /**
4675 * Returns a task that when invoked, perform the given action
4676 * for each non-null transformation of each entry.
4677 *
4678 * @param map the map
4679 * @param transformer a function returning the transformation
4680 * for an element, or null if there is no transformation (in
4681 * which case the action is not applied)
4682 * @param action the action
4683 */
4684 public static <K,V,U> ForkJoinTask<Void> forEachEntry
4685 (ConcurrentHashMap<K,V> map,
4686 Fun<Map.Entry<K,V>, ? extends U> transformer,
4687 Action<U> action) {
4688 if (transformer == null || action == null)
4689 throw new NullPointerException();
4690 return new ForEachTransformedEntryTask<K,V,U>
4691 (map, null, -1, transformer, action);
4692 }
4693
4694 /**
4695 * Returns a task that when invoked, returns a non-null result
4696 * from applying the given search function on each entry, or
4697 * null if none. Upon success, further element processing is
4698 * suppressed and the results of any other parallel
4699 * invocations of the search function are ignored.
4700 *
4701 * @param map the map
4702 * @param searchFunction a function returning a non-null
4703 * result on success, else null
4704 * @return the task
4705 */
4706 public static <K,V,U> ForkJoinTask<U> searchEntries
4707 (ConcurrentHashMap<K,V> map,
4708 Fun<Map.Entry<K,V>, ? extends U> searchFunction) {
4709 if (searchFunction == null) throw new NullPointerException();
4710 return new SearchEntriesTask<K,V,U>
4711 (map, null, -1, searchFunction,
4712 new AtomicReference<U>());
4713 }
4714
4715 /**
4716 * Returns a task that when invoked, returns the result of
4717 * accumulating all entries using the given reducer to combine
4718 * values, or null if none.
4719 *
4720 * @param map the map
4721 * @param reducer a commutative associative combining function
4722 * @return the task
4723 */
4724 public static <K,V> ForkJoinTask<Map.Entry<K,V>> reduceEntries
4725 (ConcurrentHashMap<K,V> map,
4726 BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4727 if (reducer == null) throw new NullPointerException();
4728 return new ReduceEntriesTask<K,V>
4729 (map, null, -1, null, reducer);
4730 }
4731
4732 /**
4733 * Returns a task that when invoked, returns the result of
4734 * accumulating the given transformation of all entries using the
4735 * given reducer to combine values, or null if none.
4736 *
4737 * @param map the map
4738 * @param transformer a function returning the transformation
4739 * for an element, or null if there is no transformation (in
4740 * which case it is not combined).
4741 * @param reducer a commutative associative combining function
4742 * @return the task
4743 */
4744 public static <K,V,U> ForkJoinTask<U> reduceEntries
4745 (ConcurrentHashMap<K,V> map,
4746 Fun<Map.Entry<K,V>, ? extends U> transformer,
4747 BiFun<? super U, ? super U, ? extends U> reducer) {
4748 if (transformer == null || reducer == null)
4749 throw new NullPointerException();
4750 return new MapReduceEntriesTask<K,V,U>
4751 (map, null, -1, null, transformer, reducer);
4752 }
4753
4754 /**
4755 * Returns a task that when invoked, returns the result of
4756 * accumulating the given transformation of all entries using the
4757 * given reducer to combine values, and the given basis as an
4758 * identity value.
4759 *
4760 * @param map the map
4761 * @param transformer a function returning the transformation
4762 * for an element
4763 * @param basis the identity (initial default value) for the reduction
4764 * @param reducer a commutative associative combining function
4765 * @return the task
4766 */
4767 public static <K,V> ForkJoinTask<Double> reduceEntriesToDouble
4768 (ConcurrentHashMap<K,V> map,
4769 ObjectToDouble<Map.Entry<K,V>> transformer,
4770 double basis,
4771 DoubleByDoubleToDouble reducer) {
4772 if (transformer == null || reducer == null)
4773 throw new NullPointerException();
4774 return new MapReduceEntriesToDoubleTask<K,V>
4775 (map, null, -1, null, transformer, basis, reducer);
4776 }
4777
4778 /**
4779 * Returns a task that when invoked, returns the result of
4780 * accumulating the given transformation of all entries using the
4781 * given reducer to combine values, and the given basis as an
4782 * identity value.
4783 *
4784 * @param map the map
4785 * @param transformer a function returning the transformation
4786 * for an element
4787 * @param basis the identity (initial default value) for the reduction
4788 * @param reducer a commutative associative combining function
4789 * @return the task
4790 */
4791 public static <K,V> ForkJoinTask<Long> reduceEntriesToLong
4792 (ConcurrentHashMap<K,V> map,
4793 ObjectToLong<Map.Entry<K,V>> transformer,
4794 long basis,
4795 LongByLongToLong reducer) {
4796 if (transformer == null || reducer == null)
4797 throw new NullPointerException();
4798 return new MapReduceEntriesToLongTask<K,V>
4799 (map, null, -1, null, transformer, basis, reducer);
4800 }
4801
4802 /**
4803 * Returns a task that when invoked, returns the result of
4804 * accumulating the given transformation of all entries using the
4805 * given reducer to combine values, and the given basis as an
4806 * identity value.
4807 *
4808 * @param map the map
4809 * @param transformer a function returning the transformation
4810 * for an element
4811 * @param basis the identity (initial default value) for the reduction
4812 * @param reducer a commutative associative combining function
4813 * @return the task
4814 */
4815 public static <K,V> ForkJoinTask<Integer> reduceEntriesToInt
4816 (ConcurrentHashMap<K,V> map,
4817 ObjectToInt<Map.Entry<K,V>> transformer,
4818 int basis,
4819 IntByIntToInt reducer) {
4820 if (transformer == null || reducer == null)
4821 throw new NullPointerException();
4822 return new MapReduceEntriesToIntTask<K,V>
4823 (map, null, -1, null, transformer, basis, reducer);
4824 }
4825 }
4826
4827 // -------------------------------------------------------
4828
    /**
     * Base for FJ tasks for bulk operations. This adds a variant of
     * CountedCompleters and some split and merge bookkeeping to
     * iterator functionality. The forEach and reduce methods are
     * similar to those illustrated in CountedCompleter documentation,
     * except that bottom-up reduction completions perform them within
     * their compute methods. The search methods are like forEach
     * except they continually poll for success and exit early. Also,
     * exceptions are handled in a simpler manner, by just trying to
     * complete root task exceptionally.
     */
    @SuppressWarnings("serial") static abstract class BulkTask<K,V,R> extends Traverser<K,V,R> {
        final BulkTask<K,V,?> parent;  // completion target
        int batch;                     // split control; -1 for unknown
        int pending;                   // completion control

        /**
         * Creates a task; when given a non-null parent, also splits
         * the parent's traversal range in half: the parent keeps the
         * lower half and this task takes the upper half.
         */
        BulkTask(ConcurrentHashMap<K,V> map, BulkTask<K,V,?> parent,
                 int batch) {
            super(map);
            this.parent = parent;
            this.batch = batch;
            if (parent != null && map != null) { // split parent
                Node[] t;
                // Lazily bind table and bounds if parent traversal is
                // not yet initialized.
                if ((t = parent.tab) == null &&
                    (t = parent.tab = map.table) != null)
                    parent.baseLimit = parent.baseSize = t.length;
                this.tab = t;
                this.baseSize = parent.baseSize;
                int hi = this.baseLimit = parent.baseLimit;
                // Midpoint split: parent now ends where this task begins.
                parent.baseLimit = this.index = this.baseIndex =
                    (hi + parent.baseIndex + 1) >>> 1;
            }
        }

        // FJ methods

        /**
         * Propagates completion. Note that all reduce actions
         * bypass this method to combine while completing.
         */
        final void tryComplete() {
            BulkTask<K,V,?> a = this, s = a;
            // Walk up the parent chain: a zero pending count means this
            // subtree is done, so continue upward; otherwise decrement
            // the pending count and stop.
            for (int c;;) {
                if ((c = a.pending) == 0) {
                    if ((a = (s = a).parent) == null) {
                        s.quietlyComplete();
                        break;
                    }
                }
                else if (U.compareAndSwapInt(a, PENDING, c, c - 1))
                    break;
            }
        }

        /**
         * Forces root task to complete.
         * @param ex if null, complete normally, else exceptionally
         * @return false to simplify use
         */
        final boolean tryCompleteComputation(Throwable ex) {
            // Climb to the root, then complete it (normally or with ex).
            for (BulkTask<K,V,?> a = this;;) {
                BulkTask<K,V,?> p = a.parent;
                if (p == null) {
                    if (ex != null)
                        a.completeExceptionally(ex);
                    else
                        a.quietlyComplete();
                    return false;
                }
                a = p;
            }
        }

        /**
         * Version of tryCompleteComputation for function screening checks
         */
        final boolean abortOnNullFunction() {
            return tryCompleteComputation(new Error("Unexpected null function"));
        }

        // utilities

        /** CompareAndSet pending count */
        final boolean casPending(int cmp, int val) {
            return U.compareAndSwapInt(this, PENDING, cmp, val);
        }

        /**
         * Returns approx exp2 of the number of times (minus one) to
         * split task by two before executing leaf action. This value
         * is faster to compute and more convenient to use as a guide
         * to splitting than is the depth, since it is used while
         * dividing by two anyway.
         */
        final int batch() {
            ConcurrentHashMap<K, V> m; int b; Node[] t;
            if ((b = batch) < 0 && (m = map) != null) { // force initialization
                if ((t = tab) == null && (t = tab = m.table) != null)
                    baseLimit = baseSize = t.length;
                if (t != null) {
                    long n = m.counter.sum();
                    // Cap the split count at 8x pool parallelism ("slack").
                    int sp = getPool().getParallelism() << 3; // slack of 8
                    b = batch = (n <= 0L) ? 0 : (n < (long)sp) ? (int)n : sp;
                }
            }
            return b;
        }

        /**
         * Returns exportable snapshot entry.
         */
        static <K,V> AbstractMap.SimpleEntry<K,V> entryFor(K k, V v) {
            return new AbstractMap.SimpleEntry<K,V>(k, v);
        }

        // Unsafe mechanics
        private static final sun.misc.Unsafe U;
        private static final long PENDING;
        static {
            try {
                U = sun.misc.Unsafe.getUnsafe();
                PENDING = U.objectFieldOffset
                    (BulkTask.class.getDeclaredField("pending"));
            } catch (Exception e) {
                throw new Error(e);
            }
        }
    }
4957
4958 /*
4959 * Task classes. Coded in a regular but ugly format/style to
4960 * simplify checks that each variant differs in the right way from
4961 * others.
4962 */
4963
    /** FJ task applying the given action to each key in the map. */
    @SuppressWarnings("serial") static final class ForEachKeyTask<K,V>
        extends BulkTask<K,V,Void> {
        final Action<K> action;
        ForEachKeyTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             Action<K> action) {
            super(m, p, b);
            this.action = action;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final Action<K> action = this.action;
            if (action == null)
                return abortOnNullFunction();
            try {
                int b = batch(), c;
                // Fork off half of the remaining range while split budget lasts.
                while (b > 1 && baseIndex != baseLimit) {
                    do {} while (!casPending(c = pending, c+1)); // count child
                    new ForEachKeyTask<K,V>(map, this, b >>>= 1, action).fork();
                }
                // Apply the action over the locally retained portion.
                while (advance() != null)
                    action.apply((K)nextKey);
                tryComplete();
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            return false;
        }
    }
4992
    /** FJ task applying the given action to each value in the map. */
    @SuppressWarnings("serial") static final class ForEachValueTask<K,V>
        extends BulkTask<K,V,Void> {
        final Action<V> action;
        ForEachValueTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             Action<V> action) {
            super(m, p, b);
            this.action = action;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final Action<V> action = this.action;
            if (action == null)
                return abortOnNullFunction();
            try {
                int b = batch(), c;
                // Fork off half of the remaining range while split budget lasts.
                while (b > 1 && baseIndex != baseLimit) {
                    do {} while (!casPending(c = pending, c+1)); // count child
                    new ForEachValueTask<K,V>(map, this, b >>>= 1, action).fork();
                }
                Object v;
                // Apply the action over the locally retained portion.
                while ((v = advance()) != null)
                    action.apply((V)v);
                tryComplete();
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            return false;
        }
    }
5022
    /** FJ task applying the given action to each entry in the map. */
    @SuppressWarnings("serial") static final class ForEachEntryTask<K,V>
        extends BulkTask<K,V,Void> {
        final Action<Entry<K,V>> action;
        ForEachEntryTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             Action<Entry<K,V>> action) {
            super(m, p, b);
            this.action = action;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final Action<Entry<K,V>> action = this.action;
            if (action == null)
                return abortOnNullFunction();
            try {
                int b = batch(), c;
                // Fork off half of the remaining range while split budget lasts.
                while (b > 1 && baseIndex != baseLimit) {
                    do {} while (!casPending(c = pending, c+1)); // count child
                    new ForEachEntryTask<K,V>(map, this, b >>>= 1, action).fork();
                }
                Object v;
                // Apply the action to snapshot entries of the retained portion.
                while ((v = advance()) != null)
                    action.apply(entryFor((K)nextKey, (V)v));
                tryComplete();
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            return false;
        }
    }
5052
    /** FJ task applying the given (key, value) action to each mapping. */
    @SuppressWarnings("serial") static final class ForEachMappingTask<K,V>
        extends BulkTask<K,V,Void> {
        final BiAction<K,V> action;
        ForEachMappingTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             BiAction<K,V> action) {
            super(m, p, b);
            this.action = action;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final BiAction<K,V> action = this.action;
            if (action == null)
                return abortOnNullFunction();
            try {
                int b = batch(), c;
                // Fork off half of the remaining range while split budget lasts.
                while (b > 1 && baseIndex != baseLimit) {
                    do {} while (!casPending(c = pending, c+1)); // count child
                    new ForEachMappingTask<K,V>(map, this, b >>>= 1,
                                                action).fork();
                }
                Object v;
                // Apply the action over the locally retained portion.
                while ((v = advance()) != null)
                    action.apply((K)nextKey, (V)v);
                tryComplete();
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            return false;
        }
    }
5083
    /**
     * FJ task applying the given action to each non-null
     * transformation of each key.
     */
    @SuppressWarnings("serial") static final class ForEachTransformedKeyTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Fun<? super K, ? extends U> transformer;
        final Action<U> action;
        ForEachTransformedKeyTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             Fun<? super K, ? extends U> transformer,
             Action<U> action) {
            super(m, p, b);
            this.transformer = transformer;
            this.action = action;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final Fun<? super K, ? extends U> transformer =
                this.transformer;
            final Action<U> action = this.action;
            if (transformer == null || action == null)
                return abortOnNullFunction();
            try {
                int b = batch(), c;
                // Fork off half of the remaining range while split budget lasts.
                while (b > 1 && baseIndex != baseLimit) {
                    do {} while (!casPending(c = pending, c+1)); // count child
                    new ForEachTransformedKeyTask<K,V,U>
                        (map, this, b >>>= 1, transformer, action).fork();
                }
                U u;
                // Null transformations are skipped (action not applied).
                while (advance() != null) {
                    if ((u = transformer.apply((K)nextKey)) != null)
                        action.apply(u);
                }
                tryComplete();
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            return false;
        }
    }
5122
    /**
     * FJ task applying the given action to each non-null
     * transformation of each value.
     */
    @SuppressWarnings("serial") static final class ForEachTransformedValueTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Fun<? super V, ? extends U> transformer;
        final Action<U> action;
        ForEachTransformedValueTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             Fun<? super V, ? extends U> transformer,
             Action<U> action) {
            super(m, p, b);
            this.transformer = transformer;
            this.action = action;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final Fun<? super V, ? extends U> transformer =
                this.transformer;
            final Action<U> action = this.action;
            if (transformer == null || action == null)
                return abortOnNullFunction();
            try {
                int b = batch(), c;
                // Fork off half of the remaining range while split budget lasts.
                while (b > 1 && baseIndex != baseLimit) {
                    do {} while (!casPending(c = pending, c+1)); // count child
                    new ForEachTransformedValueTask<K,V,U>
                        (map, this, b >>>= 1, transformer, action).fork();
                }
                Object v; U u;
                // Null transformations are skipped (action not applied).
                while ((v = advance()) != null) {
                    if ((u = transformer.apply((V)v)) != null)
                        action.apply(u);
                }
                tryComplete();
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            return false;
        }
    }
5161
    /**
     * FJ task applying the given action to each non-null
     * transformation of each entry.
     */
    @SuppressWarnings("serial") static final class ForEachTransformedEntryTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Fun<Map.Entry<K,V>, ? extends U> transformer;
        final Action<U> action;
        ForEachTransformedEntryTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             Fun<Map.Entry<K,V>, ? extends U> transformer,
             Action<U> action) {
            super(m, p, b);
            this.transformer = transformer;
            this.action = action;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final Fun<Map.Entry<K,V>, ? extends U> transformer =
                this.transformer;
            final Action<U> action = this.action;
            if (transformer == null || action == null)
                return abortOnNullFunction();
            try {
                int b = batch(), c;
                // Fork off half of the remaining range while split budget lasts.
                while (b > 1 && baseIndex != baseLimit) {
                    do {} while (!casPending(c = pending, c+1)); // count child
                    new ForEachTransformedEntryTask<K,V,U>
                        (map, this, b >>>= 1, transformer, action).fork();
                }
                Object v; U u;
                // Null transformations are skipped (action not applied).
                while ((v = advance()) != null) {
                    if ((u = transformer.apply(entryFor((K)nextKey, (V)v))) != null)
                        action.apply(u);
                }
                tryComplete();
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            return false;
        }
    }
5200
    /**
     * FJ task applying the given action to each non-null
     * transformation of each (key, value) mapping.
     */
    @SuppressWarnings("serial") static final class ForEachTransformedMappingTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final BiFun<? super K, ? super V, ? extends U> transformer;
        final Action<U> action;
        ForEachTransformedMappingTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             BiFun<? super K, ? super V, ? extends U> transformer,
             Action<U> action) {
            super(m, p, b);
            this.transformer = transformer;
            this.action = action;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final BiFun<? super K, ? super V, ? extends U> transformer =
                this.transformer;
            final Action<U> action = this.action;
            if (transformer == null || action == null)
                return abortOnNullFunction();
            try {
                int b = batch(), c;
                // Fork off half of the remaining range while split budget lasts.
                while (b > 1 && baseIndex != baseLimit) {
                    do {} while (!casPending(c = pending, c+1)); // count child
                    new ForEachTransformedMappingTask<K,V,U>
                        (map, this, b >>>= 1, transformer, action).fork();
                }
                Object v; U u;
                // Null transformations are skipped (action not applied).
                while ((v = advance()) != null) {
                    if ((u = transformer.apply((K)nextKey, (V)v)) != null)
                        action.apply(u);
                }
                tryComplete();
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            return false;
        }
    }
5239
    /**
     * FJ task returning the first non-null result of applying the
     * search function to each key; subtasks poll the shared result
     * reference and exit early once any of them succeeds.
     */
    @SuppressWarnings("serial") static final class SearchKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<? super K, ? extends U> searchFunction;
        final AtomicReference<U> result;  // shared across all subtasks
        SearchKeysTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             Fun<? super K, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(m, p, b);
            this.searchFunction = searchFunction; this.result = result;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            AtomicReference<U> result = this.result;
            final Fun<? super K, ? extends U> searchFunction =
                this.searchFunction;
            if (searchFunction == null || result == null)
                return abortOnNullFunction();
            try {
                int b = batch(), c;
                // Stop splitting as soon as some task has found a result.
                while (b > 1 && baseIndex != baseLimit && result.get() == null) {
                    do {} while (!casPending(c = pending, c+1)); // count child
                    new SearchKeysTask<K,V,U>(map, this, b >>>= 1,
                                              searchFunction, result).fork();
                }
                U u;
                while (result.get() == null && advance() != null) {
                    if ((u = searchFunction.apply((K)nextKey)) != null) {
                        // First successful CAS wins; force root completion.
                        if (result.compareAndSet(null, u))
                            tryCompleteComputation(null);
                        break;
                    }
                }
                tryComplete();
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            return false;
        }
        public final U getRawResult() { return result.get(); }
    }
5280
    /**
     * FJ task returning the first non-null result of applying the
     * search function to each value; subtasks poll the shared result
     * reference and exit early once any of them succeeds.
     */
    @SuppressWarnings("serial") static final class SearchValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;  // shared across all subtasks
        SearchValuesTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             Fun<? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(m, p, b);
            this.searchFunction = searchFunction; this.result = result;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            AtomicReference<U> result = this.result;
            final Fun<? super V, ? extends U> searchFunction =
                this.searchFunction;
            if (searchFunction == null || result == null)
                return abortOnNullFunction();
            try {
                int b = batch(), c;
                // Stop splitting as soon as some task has found a result.
                while (b > 1 && baseIndex != baseLimit && result.get() == null) {
                    do {} while (!casPending(c = pending, c+1)); // count child
                    new SearchValuesTask<K,V,U>(map, this, b >>>= 1,
                                                searchFunction, result).fork();
                }
                Object v; U u;
                while (result.get() == null && (v = advance()) != null) {
                    if ((u = searchFunction.apply((V)v)) != null) {
                        // First successful CAS wins; force root completion.
                        if (result.compareAndSet(null, u))
                            tryCompleteComputation(null);
                        break;
                    }
                }
                tryComplete();
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            return false;
        }
        public final U getRawResult() { return result.get(); }
    }
5321
    /**
     * FJ task returning the first non-null result of applying the
     * search function to each entry; subtasks poll the shared result
     * reference and exit early once any of them succeeds.
     */
    @SuppressWarnings("serial") static final class SearchEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<Entry<K,V>, ? extends U> searchFunction;
        final AtomicReference<U> result;  // shared across all subtasks
        SearchEntriesTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             Fun<Entry<K,V>, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(m, p, b);
            this.searchFunction = searchFunction; this.result = result;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            AtomicReference<U> result = this.result;
            final Fun<Entry<K,V>, ? extends U> searchFunction =
                this.searchFunction;
            if (searchFunction == null || result == null)
                return abortOnNullFunction();
            try {
                int b = batch(), c;
                // Stop splitting as soon as some task has found a result.
                while (b > 1 && baseIndex != baseLimit && result.get() == null) {
                    do {} while (!casPending(c = pending, c+1)); // count child
                    new SearchEntriesTask<K,V,U>(map, this, b >>>= 1,
                                                 searchFunction, result).fork();
                }
                Object v; U u;
                while (result.get() == null && (v = advance()) != null) {
                    if ((u = searchFunction.apply(entryFor((K)nextKey, (V)v))) != null) {
                        // First successful CAS wins; force root completion.
                        if (result.compareAndSet(null, u))
                            tryCompleteComputation(null);
                        break;
                    }
                }
                tryComplete();
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            return false;
        }
        public final U getRawResult() { return result.get(); }
    }
5362
    /**
     * FJ task returning the first non-null result of applying the
     * search function to each (key, value) mapping; subtasks poll the
     * shared result reference and exit early once any of them succeeds.
     */
    @SuppressWarnings("serial") static final class SearchMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFun<? super K, ? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;  // shared across all subtasks
        SearchMappingsTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             BiFun<? super K, ? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(m, p, b);
            this.searchFunction = searchFunction; this.result = result;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            AtomicReference<U> result = this.result;
            final BiFun<? super K, ? super V, ? extends U> searchFunction =
                this.searchFunction;
            if (searchFunction == null || result == null)
                return abortOnNullFunction();
            try {
                int b = batch(), c;
                // Stop splitting as soon as some task has found a result.
                while (b > 1 && baseIndex != baseLimit && result.get() == null) {
                    do {} while (!casPending(c = pending, c+1)); // count child
                    new SearchMappingsTask<K,V,U>(map, this, b >>>= 1,
                                                  searchFunction, result).fork();
                }
                Object v; U u;
                while (result.get() == null && (v = advance()) != null) {
                    if ((u = searchFunction.apply((K)nextKey, (V)v)) != null) {
                        // First successful CAS wins; force root completion.
                        if (result.compareAndSet(null, u))
                            tryCompleteComputation(null);
                        break;
                    }
                }
                tryComplete();
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            return false;
        }
        public final U getRawResult() { return result.get(); }
    }
5403
    /**
     * FJ task reducing all keys with the given reducer; null means
     * "no element yet". Completed right-hand subtasks are linked via
     * the rights/nextRight chain and merged bottom-up within exec.
     */
    @SuppressWarnings("serial") static final class ReduceKeysTask<K,V>
        extends BulkTask<K,V,K> {
        final BiFun<? super K, ? super K, ? extends K> reducer;
        K result;                              // accumulated reduction
        ReduceKeysTask<K,V> rights, nextRight; // chain of forked right-subtasks
        ReduceKeysTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             ReduceKeysTask<K,V> nextRight,
             BiFun<? super K, ? super K, ? extends K> reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final BiFun<? super K, ? super K, ? extends K> reducer =
                this.reducer;
            if (reducer == null)
                return abortOnNullFunction();
            try {
                // Fork off right halves while split budget lasts.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1)); // count child
                    (rights = new ReduceKeysTask<K,V>
                     (map, this, b >>>= 1, rights, reducer)).fork();
                }
                // Reduce the locally retained portion.
                K r = null;
                while (advance() != null) {
                    K u = (K)nextKey;
                    r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Bottom-up: when a task's pending count hits zero, fold
                // in its finished right-subtasks and move to the parent;
                // otherwise just decrement the parent's pending count.
                for (ReduceKeysTask<K,V> t = this, s;;) {
                    int c; BulkTask<K,V,?> par; K tr, sr;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            if ((sr = s.result) != null)
                                t.result = ((tr = t.result) == null) ? sr : reducer.apply(tr, sr);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof ReduceKeysTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (ReduceKeysTask<K,V>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            return false;
        }
        public final K getRawResult() { return result; }
    }
5457
    /**
     * FJ task reducing all values with the given reducer; null means
     * "no element yet". Completed right-hand subtasks are linked via
     * the rights/nextRight chain and merged bottom-up within exec.
     */
    @SuppressWarnings("serial") static final class ReduceValuesTask<K,V>
        extends BulkTask<K,V,V> {
        final BiFun<? super V, ? super V, ? extends V> reducer;
        V result;                                // accumulated reduction
        ReduceValuesTask<K,V> rights, nextRight; // chain of forked right-subtasks
        ReduceValuesTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             ReduceValuesTask<K,V> nextRight,
             BiFun<? super V, ? super V, ? extends V> reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final BiFun<? super V, ? super V, ? extends V> reducer =
                this.reducer;
            if (reducer == null)
                return abortOnNullFunction();
            try {
                // Fork off right halves while split budget lasts.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1)); // count child
                    (rights = new ReduceValuesTask<K,V>
                     (map, this, b >>>= 1, rights, reducer)).fork();
                }
                // Reduce the locally retained portion.
                V r = null;
                Object v;
                while ((v = advance()) != null) {
                    V u = (V)v;
                    r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Bottom-up merge of finished right-subtasks, then parent.
                for (ReduceValuesTask<K,V> t = this, s;;) {
                    int c; BulkTask<K,V,?> par; V tr, sr;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            if ((sr = s.result) != null)
                                t.result = ((tr = t.result) == null) ? sr : reducer.apply(tr, sr);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof ReduceValuesTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (ReduceValuesTask<K,V>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            return false;
        }
        public final V getRawResult() { return result; }
    }
5512
5513 @SuppressWarnings("serial") static final class ReduceEntriesTask<K,V>
5514 extends BulkTask<K,V,Map.Entry<K,V>> {
5515 final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
5516 Map.Entry<K,V> result;
5517 ReduceEntriesTask<K,V> rights, nextRight;
5518 ReduceEntriesTask
5519 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
5520 ReduceEntriesTask<K,V> nextRight,
5521 BiFun<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
5522 super(m, p, b); this.nextRight = nextRight;
5523 this.reducer = reducer;
5524 }
5525 @SuppressWarnings("unchecked") public final boolean exec() {
5526 final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer =
5527 this.reducer;
5528 if (reducer == null)
5529 return abortOnNullFunction();
5530 try {
5531 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
5532 do {} while (!casPending(c = pending, c+1));
5533 (rights = new ReduceEntriesTask<K,V>
5534 (map, this, b >>>= 1, rights, reducer)).fork();
5535 }
5536 Map.Entry<K,V> r = null;
5537 Object v;
5538 while ((v = advance()) != null) {
5539 Map.Entry<K,V> u = entryFor((K)nextKey, (V)v);
5540 r = (r == null) ? u : reducer.apply(r, u);
5541 }
5542 result = r;
5543 for (ReduceEntriesTask<K,V> t = this, s;;) {
5544 int c; BulkTask<K,V,?> par; Map.Entry<K,V> tr, sr;
5545 if ((c = t.pending) == 0) {
5546 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
5547 if ((sr = s.result) != null)
5548 t.result = ((tr = t.result) == null) ? sr : reducer.apply(tr, sr);
5549 }
5550 if ((par = t.parent) == null ||
5551 !(par instanceof ReduceEntriesTask)) {
5552 t.quietlyComplete();
5553 break;
5554 }
5555 t = (ReduceEntriesTask<K,V>)par;
5556 }
5557 else if (t.casPending(c, c - 1))
5558 break;
5559 }
5560 } catch (Throwable ex) {
5561 return tryCompleteComputation(ex);
5562 }
5563 return false;
5564 }
5565 public final Map.Entry<K,V> getRawResult() { return result; }
5566 }
5567
    /**
     * Task that applies {@code transformer} to each key and reduces the
     * non-null transformed results with {@code reducer}, forking
     * right-hand subtasks over split ranges while the batch size allows.
     */
    @SuppressWarnings("serial") static final class MapReduceKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<? super K, ? extends U> transformer;
        final BiFun<? super U, ? super U, ? extends U> reducer;
        U result;                               // reduction of this task's range; null if empty
        MapReduceKeysTask<K,V,U> rights, nextRight; // chain of forked right-hand subtasks
        MapReduceKeysTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             MapReduceKeysTask<K,V,U> nextRight,
             Fun<? super K, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final Fun<? super K, ? extends U> transformer =
                this.transformer;
            final BiFun<? super U, ? super U, ? extends U> reducer =
                this.reducer;
            if (transformer == null || reducer == null)
                return abortOnNullFunction();
            try {
                // Fork the right half of the remaining range while divisible;
                // each fork increments pending to track outstanding subtasks.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new MapReduceKeysTask<K,V,U>
                     (map, this, b >>>= 1, rights, transformer, reducer)).fork();
                }
                // Transform and reduce keys in this task's own (left) range;
                // null transformer results are skipped.
                U r = null, u;
                while (advance() != null) {
                    if ((u = transformer.apply((K)nextKey)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Completion protocol: at pending == 0, fold in right
                // subtasks' results and climb to the parent; otherwise
                // decrement the ancestor's pending count and stop.
                for (MapReduceKeysTask<K,V,U> t = this, s;;) {
                    int c; BulkTask<K,V,?> par; U tr, sr;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            if ((sr = s.result) != null)
                                t.result = ((tr = t.result) == null) ? sr : reducer.apply(tr, sr);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof MapReduceKeysTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (MapReduceKeysTask<K,V,U>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            return false;
        }
        public final U getRawResult() { return result; }
    }
5626
    /**
     * Task that applies {@code transformer} to each value and reduces the
     * non-null transformed results with {@code reducer}, forking
     * right-hand subtasks over split ranges while the batch size allows.
     */
    @SuppressWarnings("serial") static final class MapReduceValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<? super V, ? extends U> transformer;
        final BiFun<? super U, ? super U, ? extends U> reducer;
        U result;                                 // reduction of this task's range; null if empty
        MapReduceValuesTask<K,V,U> rights, nextRight; // chain of forked right-hand subtasks
        MapReduceValuesTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             MapReduceValuesTask<K,V,U> nextRight,
             Fun<? super V, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final Fun<? super V, ? extends U> transformer =
                this.transformer;
            final BiFun<? super U, ? super U, ? extends U> reducer =
                this.reducer;
            if (transformer == null || reducer == null)
                return abortOnNullFunction();
            try {
                // Fork the right half of the remaining range while divisible;
                // each fork increments pending to track outstanding subtasks.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new MapReduceValuesTask<K,V,U>
                     (map, this, b >>>= 1, rights, transformer, reducer)).fork();
                }
                // Transform and reduce values in this task's own (left)
                // range; null transformer results are skipped.
                U r = null, u;
                Object v;
                while ((v = advance()) != null) {
                    if ((u = transformer.apply((V)v)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Completion protocol: at pending == 0, fold in right
                // subtasks' results and climb to the parent; otherwise
                // decrement the ancestor's pending count and stop.
                for (MapReduceValuesTask<K,V,U> t = this, s;;) {
                    int c; BulkTask<K,V,?> par; U tr, sr;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            if ((sr = s.result) != null)
                                t.result = ((tr = t.result) == null) ? sr : reducer.apply(tr, sr);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof MapReduceValuesTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (MapReduceValuesTask<K,V,U>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            return false;
        }
        public final U getRawResult() { return result; }
    }
5686
    /**
     * Task that applies {@code transformer} to each entry and reduces the
     * non-null transformed results with {@code reducer}, forking
     * right-hand subtasks over split ranges while the batch size allows.
     */
    @SuppressWarnings("serial") static final class MapReduceEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Fun<Map.Entry<K,V>, ? extends U> transformer;
        final BiFun<? super U, ? super U, ? extends U> reducer;
        U result;                                  // reduction of this task's range; null if empty
        MapReduceEntriesTask<K,V,U> rights, nextRight; // chain of forked right-hand subtasks
        MapReduceEntriesTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             MapReduceEntriesTask<K,V,U> nextRight,
             Fun<Map.Entry<K,V>, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final Fun<Map.Entry<K,V>, ? extends U> transformer =
                this.transformer;
            final BiFun<? super U, ? super U, ? extends U> reducer =
                this.reducer;
            if (transformer == null || reducer == null)
                return abortOnNullFunction();
            try {
                // Fork the right half of the remaining range while divisible;
                // each fork increments pending to track outstanding subtasks.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new MapReduceEntriesTask<K,V,U>
                     (map, this, b >>>= 1, rights, transformer, reducer)).fork();
                }
                // Transform and reduce entries in this task's own (left)
                // range; null transformer results are skipped.
                U r = null, u;
                Object v;
                while ((v = advance()) != null) {
                    if ((u = transformer.apply(entryFor((K)nextKey, (V)v))) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Completion protocol: at pending == 0, fold in right
                // subtasks' results and climb to the parent; otherwise
                // decrement the ancestor's pending count and stop.
                for (MapReduceEntriesTask<K,V,U> t = this, s;;) {
                    int c; BulkTask<K,V,?> par; U tr, sr;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            if ((sr = s.result) != null)
                                t.result = ((tr = t.result) == null) ? sr : reducer.apply(tr, sr);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof MapReduceEntriesTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (MapReduceEntriesTask<K,V,U>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            return false;
        }
        public final U getRawResult() { return result; }
    }
5746
    /**
     * Task that applies {@code transformer} to each (key, value) pair and
     * reduces the non-null transformed results with {@code reducer},
     * forking right-hand subtasks over split ranges while the batch size
     * allows.
     */
    @SuppressWarnings("serial") static final class MapReduceMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFun<? super K, ? super V, ? extends U> transformer;
        final BiFun<? super U, ? super U, ? extends U> reducer;
        U result;                                   // reduction of this task's range; null if empty
        MapReduceMappingsTask<K,V,U> rights, nextRight; // chain of forked right-hand subtasks
        MapReduceMappingsTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             MapReduceMappingsTask<K,V,U> nextRight,
             BiFun<? super K, ? super V, ? extends U> transformer,
             BiFun<? super U, ? super U, ? extends U> reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final BiFun<? super K, ? super V, ? extends U> transformer =
                this.transformer;
            final BiFun<? super U, ? super U, ? extends U> reducer =
                this.reducer;
            if (transformer == null || reducer == null)
                return abortOnNullFunction();
            try {
                // Fork the right half of the remaining range while divisible;
                // each fork increments pending to track outstanding subtasks.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new MapReduceMappingsTask<K,V,U>
                     (map, this, b >>>= 1, rights, transformer, reducer)).fork();
                }
                // Transform and reduce mappings in this task's own (left)
                // range; null transformer results are skipped.
                U r = null, u;
                Object v;
                while ((v = advance()) != null) {
                    if ((u = transformer.apply((K)nextKey, (V)v)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Completion protocol: at pending == 0, fold in right
                // subtasks' results and climb to the parent; otherwise
                // decrement the ancestor's pending count and stop.
                for (MapReduceMappingsTask<K,V,U> t = this, s;;) {
                    int c; BulkTask<K,V,?> par; U tr, sr;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            if ((sr = s.result) != null)
                                t.result = ((tr = t.result) == null) ? sr : reducer.apply(tr, sr);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof MapReduceMappingsTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (MapReduceMappingsTask<K,V,U>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            return false;
        }
        public final U getRawResult() { return result; }
    }
5806
    /**
     * Task mapping each key to a double via {@code transformer} and
     * reducing with {@code reducer}, starting from the seed value
     * {@code basis}; forks right-hand subtasks over split ranges while
     * the batch size allows.
     */
    @SuppressWarnings("serial") static final class MapReduceKeysToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ObjectToDouble<? super K> transformer;
        final DoubleByDoubleToDouble reducer;
        final double basis;                       // seed (initial) value of the reduction
        double result;                            // reduction of this task's range
        MapReduceKeysToDoubleTask<K,V> rights, nextRight; // chain of forked right-hand subtasks
        MapReduceKeysToDoubleTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             MapReduceKeysToDoubleTask<K,V> nextRight,
             ObjectToDouble<? super K> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final ObjectToDouble<? super K> transformer =
                this.transformer;
            final DoubleByDoubleToDouble reducer = this.reducer;
            if (transformer == null || reducer == null)
                return abortOnNullFunction();
            try {
                final double id = this.basis;
                // Fork the right half of the remaining range while divisible;
                // each fork increments pending to track outstanding subtasks.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new MapReduceKeysToDoubleTask<K,V>
                     (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
                }
                // Reduce keys of this task's own (left) range, seeded by id.
                double r = id;
                while (advance() != null)
                    r = reducer.apply(r, transformer.apply((K)nextKey));
                result = r;
                // Completion protocol: at pending == 0, fold in right
                // subtasks' results and climb to the parent; otherwise
                // decrement the ancestor's pending count and stop.
                for (MapReduceKeysToDoubleTask<K,V> t = this, s;;) {
                    int c; BulkTask<K,V,?> par;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            t.result = reducer.apply(t.result, s.result);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof MapReduceKeysToDoubleTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (MapReduceKeysToDoubleTask<K,V>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            return false;
        }
        public final Double getRawResult() { return result; }
    }
5864
    /**
     * Task mapping each value to a double via {@code transformer} and
     * reducing with {@code reducer}, starting from the seed value
     * {@code basis}; forks right-hand subtasks over split ranges while
     * the batch size allows.
     */
    @SuppressWarnings("serial") static final class MapReduceValuesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ObjectToDouble<? super V> transformer;
        final DoubleByDoubleToDouble reducer;
        final double basis;                         // seed (initial) value of the reduction
        double result;                              // reduction of this task's range
        MapReduceValuesToDoubleTask<K,V> rights, nextRight; // chain of forked right-hand subtasks
        MapReduceValuesToDoubleTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             MapReduceValuesToDoubleTask<K,V> nextRight,
             ObjectToDouble<? super V> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final ObjectToDouble<? super V> transformer =
                this.transformer;
            final DoubleByDoubleToDouble reducer = this.reducer;
            if (transformer == null || reducer == null)
                return abortOnNullFunction();
            try {
                final double id = this.basis;
                // Fork the right half of the remaining range while divisible;
                // each fork increments pending to track outstanding subtasks.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new MapReduceValuesToDoubleTask<K,V>
                     (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
                }
                // Reduce values of this task's own (left) range, seeded by id.
                double r = id;
                Object v;
                while ((v = advance()) != null)
                    r = reducer.apply(r, transformer.apply((V)v));
                result = r;
                // Completion protocol: at pending == 0, fold in right
                // subtasks' results and climb to the parent; otherwise
                // decrement the ancestor's pending count and stop.
                for (MapReduceValuesToDoubleTask<K,V> t = this, s;;) {
                    int c; BulkTask<K,V,?> par;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            t.result = reducer.apply(t.result, s.result);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof MapReduceValuesToDoubleTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (MapReduceValuesToDoubleTask<K,V>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            return false;
        }
        public final Double getRawResult() { return result; }
    }
5923
    /**
     * Task mapping each entry to a double via {@code transformer} and
     * reducing with {@code reducer}, starting from the seed value
     * {@code basis}; forks right-hand subtasks over split ranges while
     * the batch size allows.
     */
    @SuppressWarnings("serial") static final class MapReduceEntriesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ObjectToDouble<Map.Entry<K,V>> transformer;
        final DoubleByDoubleToDouble reducer;
        final double basis;                          // seed (initial) value of the reduction
        double result;                               // reduction of this task's range
        MapReduceEntriesToDoubleTask<K,V> rights, nextRight; // chain of forked right-hand subtasks
        MapReduceEntriesToDoubleTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             MapReduceEntriesToDoubleTask<K,V> nextRight,
             ObjectToDouble<Map.Entry<K,V>> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final ObjectToDouble<Map.Entry<K,V>> transformer =
                this.transformer;
            final DoubleByDoubleToDouble reducer = this.reducer;
            if (transformer == null || reducer == null)
                return abortOnNullFunction();
            try {
                final double id = this.basis;
                // Fork the right half of the remaining range while divisible;
                // each fork increments pending to track outstanding subtasks.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new MapReduceEntriesToDoubleTask<K,V>
                     (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
                }
                // Reduce entries of this task's own (left) range, seeded by id.
                double r = id;
                Object v;
                while ((v = advance()) != null)
                    r = reducer.apply(r, transformer.apply(entryFor((K)nextKey, (V)v)));
                result = r;
                // Completion protocol: at pending == 0, fold in right
                // subtasks' results and climb to the parent; otherwise
                // decrement the ancestor's pending count and stop.
                for (MapReduceEntriesToDoubleTask<K,V> t = this, s;;) {
                    int c; BulkTask<K,V,?> par;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            t.result = reducer.apply(t.result, s.result);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof MapReduceEntriesToDoubleTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (MapReduceEntriesToDoubleTask<K,V>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            return false;
        }
        public final Double getRawResult() { return result; }
    }
5982
    /**
     * Task mapping each (key, value) pair to a double via
     * {@code transformer} and reducing with {@code reducer}, starting
     * from the seed value {@code basis}; forks right-hand subtasks over
     * split ranges while the batch size allows.
     */
    @SuppressWarnings("serial") static final class MapReduceMappingsToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ObjectByObjectToDouble<? super K, ? super V> transformer;
        final DoubleByDoubleToDouble reducer;
        final double basis;                           // seed (initial) value of the reduction
        double result;                                // reduction of this task's range
        MapReduceMappingsToDoubleTask<K,V> rights, nextRight; // chain of forked right-hand subtasks
        MapReduceMappingsToDoubleTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             MapReduceMappingsToDoubleTask<K,V> nextRight,
             ObjectByObjectToDouble<? super K, ? super V> transformer,
             double basis,
             DoubleByDoubleToDouble reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final ObjectByObjectToDouble<? super K, ? super V> transformer =
                this.transformer;
            final DoubleByDoubleToDouble reducer = this.reducer;
            if (transformer == null || reducer == null)
                return abortOnNullFunction();
            try {
                final double id = this.basis;
                // Fork the right half of the remaining range while divisible;
                // each fork increments pending to track outstanding subtasks.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new MapReduceMappingsToDoubleTask<K,V>
                     (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
                }
                // Reduce mappings of this task's own (left) range, seeded by id.
                double r = id;
                Object v;
                while ((v = advance()) != null)
                    r = reducer.apply(r, transformer.apply((K)nextKey, (V)v));
                result = r;
                // Completion protocol: at pending == 0, fold in right
                // subtasks' results and climb to the parent; otherwise
                // decrement the ancestor's pending count and stop.
                for (MapReduceMappingsToDoubleTask<K,V> t = this, s;;) {
                    int c; BulkTask<K,V,?> par;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            t.result = reducer.apply(t.result, s.result);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof MapReduceMappingsToDoubleTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (MapReduceMappingsToDoubleTask<K,V>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            return false;
        }
        public final Double getRawResult() { return result; }
    }
6041
    /**
     * Task mapping each key to a long via {@code transformer} and
     * reducing with {@code reducer}, starting from the seed value
     * {@code basis}; forks right-hand subtasks over split ranges while
     * the batch size allows.
     */
    @SuppressWarnings("serial") static final class MapReduceKeysToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ObjectToLong<? super K> transformer;
        final LongByLongToLong reducer;
        final long basis;                         // seed (initial) value of the reduction
        long result;                              // reduction of this task's range
        MapReduceKeysToLongTask<K,V> rights, nextRight; // chain of forked right-hand subtasks
        MapReduceKeysToLongTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             MapReduceKeysToLongTask<K,V> nextRight,
             ObjectToLong<? super K> transformer,
             long basis,
             LongByLongToLong reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final ObjectToLong<? super K> transformer =
                this.transformer;
            final LongByLongToLong reducer = this.reducer;
            if (transformer == null || reducer == null)
                return abortOnNullFunction();
            try {
                final long id = this.basis;
                // Fork the right half of the remaining range while divisible;
                // each fork increments pending to track outstanding subtasks.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new MapReduceKeysToLongTask<K,V>
                     (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
                }
                // Reduce keys of this task's own (left) range, seeded by id.
                long r = id;
                while (advance() != null)
                    r = reducer.apply(r, transformer.apply((K)nextKey));
                result = r;
                // Completion protocol: at pending == 0, fold in right
                // subtasks' results and climb to the parent; otherwise
                // decrement the ancestor's pending count and stop.
                for (MapReduceKeysToLongTask<K,V> t = this, s;;) {
                    int c; BulkTask<K,V,?> par;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            t.result = reducer.apply(t.result, s.result);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof MapReduceKeysToLongTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (MapReduceKeysToLongTask<K,V>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            return false;
        }
        public final Long getRawResult() { return result; }
    }
6099
    /**
     * Task mapping each value to a long via {@code transformer} and
     * reducing with {@code reducer}, starting from the seed value
     * {@code basis}; forks right-hand subtasks over split ranges while
     * the batch size allows.
     */
    @SuppressWarnings("serial") static final class MapReduceValuesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ObjectToLong<? super V> transformer;
        final LongByLongToLong reducer;
        final long basis;                           // seed (initial) value of the reduction
        long result;                                // reduction of this task's range
        MapReduceValuesToLongTask<K,V> rights, nextRight; // chain of forked right-hand subtasks
        MapReduceValuesToLongTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             MapReduceValuesToLongTask<K,V> nextRight,
             ObjectToLong<? super V> transformer,
             long basis,
             LongByLongToLong reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final ObjectToLong<? super V> transformer =
                this.transformer;
            final LongByLongToLong reducer = this.reducer;
            if (transformer == null || reducer == null)
                return abortOnNullFunction();
            try {
                final long id = this.basis;
                // Fork the right half of the remaining range while divisible;
                // each fork increments pending to track outstanding subtasks.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new MapReduceValuesToLongTask<K,V>
                     (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
                }
                // Reduce values of this task's own (left) range, seeded by id.
                long r = id;
                Object v;
                while ((v = advance()) != null)
                    r = reducer.apply(r, transformer.apply((V)v));
                result = r;
                // Completion protocol: at pending == 0, fold in right
                // subtasks' results and climb to the parent; otherwise
                // decrement the ancestor's pending count and stop.
                for (MapReduceValuesToLongTask<K,V> t = this, s;;) {
                    int c; BulkTask<K,V,?> par;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            t.result = reducer.apply(t.result, s.result);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof MapReduceValuesToLongTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (MapReduceValuesToLongTask<K,V>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            return false;
        }
        public final Long getRawResult() { return result; }
    }
6158
    /**
     * Task mapping each entry to a long via {@code transformer} and
     * reducing with {@code reducer}, starting from the seed value
     * {@code basis}; forks right-hand subtasks over split ranges while
     * the batch size allows.
     */
    @SuppressWarnings("serial") static final class MapReduceEntriesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ObjectToLong<Map.Entry<K,V>> transformer;
        final LongByLongToLong reducer;
        final long basis;                            // seed (initial) value of the reduction
        long result;                                 // reduction of this task's range
        MapReduceEntriesToLongTask<K,V> rights, nextRight; // chain of forked right-hand subtasks
        MapReduceEntriesToLongTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             MapReduceEntriesToLongTask<K,V> nextRight,
             ObjectToLong<Map.Entry<K,V>> transformer,
             long basis,
             LongByLongToLong reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final ObjectToLong<Map.Entry<K,V>> transformer =
                this.transformer;
            final LongByLongToLong reducer = this.reducer;
            if (transformer == null || reducer == null)
                return abortOnNullFunction();
            try {
                final long id = this.basis;
                // Fork the right half of the remaining range while divisible;
                // each fork increments pending to track outstanding subtasks.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new MapReduceEntriesToLongTask<K,V>
                     (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
                }
                // Reduce entries of this task's own (left) range, seeded by id.
                long r = id;
                Object v;
                while ((v = advance()) != null)
                    r = reducer.apply(r, transformer.apply(entryFor((K)nextKey, (V)v)));
                result = r;
                // Completion protocol: at pending == 0, fold in right
                // subtasks' results and climb to the parent; otherwise
                // decrement the ancestor's pending count and stop.
                for (MapReduceEntriesToLongTask<K,V> t = this, s;;) {
                    int c; BulkTask<K,V,?> par;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            t.result = reducer.apply(t.result, s.result);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof MapReduceEntriesToLongTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (MapReduceEntriesToLongTask<K,V>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            return false;
        }
        public final Long getRawResult() { return result; }
    }
6217
    /**
     * Task mapping each (key, value) pair to a long via
     * {@code transformer} and reducing with {@code reducer}, starting
     * from the seed value {@code basis}; forks right-hand subtasks over
     * split ranges while the batch size allows.
     */
    @SuppressWarnings("serial") static final class MapReduceMappingsToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ObjectByObjectToLong<? super K, ? super V> transformer;
        final LongByLongToLong reducer;
        final long basis;                             // seed (initial) value of the reduction
        long result;                                  // reduction of this task's range
        MapReduceMappingsToLongTask<K,V> rights, nextRight; // chain of forked right-hand subtasks
        MapReduceMappingsToLongTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             MapReduceMappingsToLongTask<K,V> nextRight,
             ObjectByObjectToLong<? super K, ? super V> transformer,
             long basis,
             LongByLongToLong reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final ObjectByObjectToLong<? super K, ? super V> transformer =
                this.transformer;
            final LongByLongToLong reducer = this.reducer;
            if (transformer == null || reducer == null)
                return abortOnNullFunction();
            try {
                final long id = this.basis;
                // Fork the right half of the remaining range while divisible;
                // each fork increments pending to track outstanding subtasks.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new MapReduceMappingsToLongTask<K,V>
                     (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
                }
                // Reduce mappings of this task's own (left) range, seeded by id.
                long r = id;
                Object v;
                while ((v = advance()) != null)
                    r = reducer.apply(r, transformer.apply((K)nextKey, (V)v));
                result = r;
                // Completion protocol: at pending == 0, fold in right
                // subtasks' results and climb to the parent; otherwise
                // decrement the ancestor's pending count and stop.
                for (MapReduceMappingsToLongTask<K,V> t = this, s;;) {
                    int c; BulkTask<K,V,?> par;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            t.result = reducer.apply(t.result, s.result);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof MapReduceMappingsToLongTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (MapReduceMappingsToLongTask<K,V>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            return false;
        }
        public final Long getRawResult() { return result; }
    }
6276
    /**
     * Task mapping each key to an int via {@code transformer} and
     * reducing with {@code reducer}, starting from the seed value
     * {@code basis}; forks right-hand subtasks over split ranges while
     * the batch size allows.
     */
    @SuppressWarnings("serial") static final class MapReduceKeysToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ObjectToInt<? super K> transformer;
        final IntByIntToInt reducer;
        final int basis;                          // seed (initial) value of the reduction
        int result;                               // reduction of this task's range
        MapReduceKeysToIntTask<K,V> rights, nextRight; // chain of forked right-hand subtasks
        MapReduceKeysToIntTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             MapReduceKeysToIntTask<K,V> nextRight,
             ObjectToInt<? super K> transformer,
             int basis,
             IntByIntToInt reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final ObjectToInt<? super K> transformer =
                this.transformer;
            final IntByIntToInt reducer = this.reducer;
            if (transformer == null || reducer == null)
                return abortOnNullFunction();
            try {
                final int id = this.basis;
                // Fork the right half of the remaining range while divisible;
                // each fork increments pending to track outstanding subtasks.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new MapReduceKeysToIntTask<K,V>
                     (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
                }
                // Reduce keys of this task's own (left) range, seeded by id.
                int r = id;
                while (advance() != null)
                    r = reducer.apply(r, transformer.apply((K)nextKey));
                result = r;
                // Completion protocol: at pending == 0, fold in right
                // subtasks' results and climb to the parent; otherwise
                // decrement the ancestor's pending count and stop.
                for (MapReduceKeysToIntTask<K,V> t = this, s;;) {
                    int c; BulkTask<K,V,?> par;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            t.result = reducer.apply(t.result, s.result);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof MapReduceKeysToIntTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (MapReduceKeysToIntTask<K,V>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            return false;
        }
        public final Integer getRawResult() { return result; }
    }
6334
    /**
     * Task mapping each value to an int via {@code transformer} and
     * reducing with {@code reducer}, starting from the seed value
     * {@code basis}; forks right-hand subtasks over split ranges while
     * the batch size allows.
     */
    @SuppressWarnings("serial") static final class MapReduceValuesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ObjectToInt<? super V> transformer;
        final IntByIntToInt reducer;
        final int basis;                            // seed (initial) value of the reduction
        int result;                                 // reduction of this task's range
        MapReduceValuesToIntTask<K,V> rights, nextRight; // chain of forked right-hand subtasks
        MapReduceValuesToIntTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             MapReduceValuesToIntTask<K,V> nextRight,
             ObjectToInt<? super V> transformer,
             int basis,
             IntByIntToInt reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final ObjectToInt<? super V> transformer =
                this.transformer;
            final IntByIntToInt reducer = this.reducer;
            if (transformer == null || reducer == null)
                return abortOnNullFunction();
            try {
                final int id = this.basis;
                // Fork the right half of the remaining range while divisible;
                // each fork increments pending to track outstanding subtasks.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new MapReduceValuesToIntTask<K,V>
                     (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
                }
                // Reduce values of this task's own (left) range, seeded by id.
                int r = id;
                Object v;
                while ((v = advance()) != null)
                    r = reducer.apply(r, transformer.apply((V)v));
                result = r;
                // Completion protocol: at pending == 0, fold in right
                // subtasks' results and climb to the parent; otherwise
                // decrement the ancestor's pending count and stop.
                for (MapReduceValuesToIntTask<K,V> t = this, s;;) {
                    int c; BulkTask<K,V,?> par;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            t.result = reducer.apply(t.result, s.result);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof MapReduceValuesToIntTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (MapReduceValuesToIntTask<K,V>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            return false;
        }
        public final Integer getRawResult() { return result; }
    }
6393
    /**
     * Task mapping each entry to an int via {@code transformer} and
     * reducing with {@code reducer}, starting from the seed value
     * {@code basis}; forks right-hand subtasks over split ranges while
     * the batch size allows.
     */
    @SuppressWarnings("serial") static final class MapReduceEntriesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ObjectToInt<Map.Entry<K,V>> transformer;
        final IntByIntToInt reducer;
        final int basis;                             // seed (initial) value of the reduction
        int result;                                  // reduction of this task's range
        MapReduceEntriesToIntTask<K,V> rights, nextRight; // chain of forked right-hand subtasks
        MapReduceEntriesToIntTask
            (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
             MapReduceEntriesToIntTask<K,V> nextRight,
             ObjectToInt<Map.Entry<K,V>> transformer,
             int basis,
             IntByIntToInt reducer) {
            super(m, p, b); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        @SuppressWarnings("unchecked") public final boolean exec() {
            final ObjectToInt<Map.Entry<K,V>> transformer =
                this.transformer;
            final IntByIntToInt reducer = this.reducer;
            if (transformer == null || reducer == null)
                return abortOnNullFunction();
            try {
                final int id = this.basis;
                // Fork the right half of the remaining range while divisible;
                // each fork increments pending to track outstanding subtasks.
                for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
                    do {} while (!casPending(c = pending, c+1));
                    (rights = new MapReduceEntriesToIntTask<K,V>
                     (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
                }
                // Reduce entries of this task's own (left) range, seeded by id.
                int r = id;
                Object v;
                while ((v = advance()) != null)
                    r = reducer.apply(r, transformer.apply(entryFor((K)nextKey, (V)v)));
                result = r;
                // Completion protocol: at pending == 0, fold in right
                // subtasks' results and climb to the parent; otherwise
                // decrement the ancestor's pending count and stop.
                for (MapReduceEntriesToIntTask<K,V> t = this, s;;) {
                    int c; BulkTask<K,V,?> par;
                    if ((c = t.pending) == 0) {
                        for (s = t.rights; s != null; s = t.rights = s.nextRight) {
                            t.result = reducer.apply(t.result, s.result);
                        }
                        if ((par = t.parent) == null ||
                            !(par instanceof MapReduceEntriesToIntTask)) {
                            t.quietlyComplete();
                            break;
                        }
                        t = (MapReduceEntriesToIntTask<K,V>)par;
                    }
                    else if (t.casPending(c, c - 1))
                        break;
                }
            } catch (Throwable ex) {
                return tryCompleteComputation(ex);
            }
            return false;
        }
        public final Integer getRawResult() { return result; }
    }
6452
6453 @SuppressWarnings("serial") static final class MapReduceMappingsToIntTask<K,V>
6454 extends BulkTask<K,V,Integer> {
6455 final ObjectByObjectToInt<? super K, ? super V> transformer;
6456 final IntByIntToInt reducer;
6457 final int basis;
6458 int result;
6459 MapReduceMappingsToIntTask<K,V> rights, nextRight;
6460 MapReduceMappingsToIntTask
6461 (ConcurrentHashMap<K,V> m, BulkTask<K,V,?> p, int b,
6462 MapReduceMappingsToIntTask<K,V> rights,
6463 ObjectByObjectToInt<? super K, ? super V> transformer,
6464 int basis,
6465 IntByIntToInt reducer) {
6466 super(m, p, b); this.nextRight = nextRight;
6467 this.transformer = transformer;
6468 this.basis = basis; this.reducer = reducer;
6469 }
6470 @SuppressWarnings("unchecked") public final boolean exec() {
6471 final ObjectByObjectToInt<? super K, ? super V> transformer =
6472 this.transformer;
6473 final IntByIntToInt reducer = this.reducer;
6474 if (transformer == null || reducer == null)
6475 return abortOnNullFunction();
6476 try {
6477 final int id = this.basis;
6478 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6479 do {} while (!casPending(c = pending, c+1));
6480 (rights = new MapReduceMappingsToIntTask<K,V>
6481 (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
6482 }
6483 int r = id;
6484 Object v;
6485 while ((v = advance()) != null)
6486 r = reducer.apply(r, transformer.apply((K)nextKey, (V)v));
6487 result = r;
6488 for (MapReduceMappingsToIntTask<K,V> t = this, s;;) {
6489 int c; BulkTask<K,V,?> par;
6490 if ((c = t.pending) == 0) {
6491 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6492 t.result = reducer.apply(t.result, s.result);
6493 }
6494 if ((par = t.parent) == null ||
6495 !(par instanceof MapReduceMappingsToIntTask)) {
6496 t.quietlyComplete();
6497 break;
6498 }
6499 t = (MapReduceMappingsToIntTask<K,V>)par;
6500 }
6501 else if (t.casPending(c, c - 1))
6502 break;
6503 }
6504 } catch (Throwable ex) {
6505 return tryCompleteComputation(ex);
6506 }
6507 return false;
6508 }
6509 public final Integer getRawResult() { return result; }
6510 }
6511
6512
    // Unsafe mechanics
    private static final sun.misc.Unsafe UNSAFE;     // raw memory access; initialized in static block below
    private static final long counterOffset;         // field offset of "counter" for CAS/volatile access
    private static final long sizeCtlOffset;         // field offset of "sizeCtl" for CAS/volatile access
    private static final long ABASE;                 // base offset of Node[] element storage
    private static final int ASHIFT;                 // log2 of Node[] element scale (index -> byte offset shift)
6519
6520 static {
6521 int ss;
6522 try {
6523 UNSAFE = sun.misc.Unsafe.getUnsafe();
6524 Class<?> k = ConcurrentHashMap.class;
6525 counterOffset = UNSAFE.objectFieldOffset
6526 (k.getDeclaredField("counter"));
6527 sizeCtlOffset = UNSAFE.objectFieldOffset
6528 (k.getDeclaredField("sizeCtl"));
6529 Class<?> sc = Node[].class;
6530 ABASE = UNSAFE.arrayBaseOffset(sc);
6531 ss = UNSAFE.arrayIndexScale(sc);
6532 } catch (Exception e) {
6533 throw new Error(e);
6534 }
6535 if ((ss & (ss-1)) != 0)
6536 throw new Error("data type scale not a power of two");
6537 ASHIFT = 31 - Integer.numberOfLeadingZeros(ss);
6538 }
6539 }