root/jsr166/jsr166/src/jsr166e/ConcurrentHashMapV8.java
Revision: 1.62
Committed: Fri Sep 21 18:41:30 2012 UTC by dl
Branch: MAIN
Changes since 1.61: +17 -0 lines
Log Message:
Add getValueOrDefault

File Contents

1 /*
2 * Written by Doug Lea with assistance from members of JCP JSR-166
3 * Expert Group and released to the public domain, as explained at
4 * http://creativecommons.org/publicdomain/zero/1.0/
5 */
6
7 package jsr166e;
8 import jsr166e.LongAdder;
9 import jsr166e.ForkJoinPool;
10 import jsr166e.ForkJoinTask;
11
12 import java.util.Comparator;
13 import java.util.Arrays;
14 import java.util.Map;
15 import java.util.Set;
16 import java.util.Collection;
17 import java.util.AbstractMap;
18 import java.util.AbstractSet;
19 import java.util.AbstractCollection;
20 import java.util.Hashtable;
21 import java.util.HashMap;
22 import java.util.Iterator;
23 import java.util.Enumeration;
24 import java.util.ConcurrentModificationException;
25 import java.util.NoSuchElementException;
26 import java.util.concurrent.ConcurrentMap;
27 import java.util.concurrent.ThreadLocalRandom;
28 import java.util.concurrent.locks.LockSupport;
29 import java.util.concurrent.locks.AbstractQueuedSynchronizer;
30 import java.util.concurrent.atomic.AtomicReference;
31
32 import java.io.Serializable;
33
34 /**
35 * A hash table supporting full concurrency of retrievals and
36 * high expected concurrency for updates. This class obeys the
37 * same functional specification as {@link java.util.Hashtable}, and
38 * includes versions of methods corresponding to each method of
39 * {@code Hashtable}. However, even though all operations are
40 * thread-safe, retrieval operations do <em>not</em> entail locking,
41 * and there is <em>not</em> any support for locking the entire table
42 * in a way that prevents all access. This class is fully
43 * interoperable with {@code Hashtable} in programs that rely on its
44 * thread safety but not on its synchronization details.
45 *
46 * <p> Retrieval operations (including {@code get}) generally do not
47 * block, so may overlap with update operations (including {@code put}
48 * and {@code remove}). Retrievals reflect the results of the most
49 * recently <em>completed</em> update operations holding upon their
50 * onset. (More formally, an update operation for a given key bears a
51 * <em>happens-before</em> relation with any (non-null) retrieval for
52 * that key reporting the updated value.) For aggregate operations
53 * such as {@code putAll} and {@code clear}, concurrent retrievals may
54 * reflect insertion or removal of only some entries. Similarly,
55 * Iterators and Enumerations return elements reflecting the state of
56 * the hash table at some point at or since the creation of the
57 * iterator/enumeration. They do <em>not</em> throw {@link
58 * ConcurrentModificationException}. However, iterators are designed
59 * to be used by only one thread at a time. Bear in mind that the
60 * results of aggregate status methods including {@code size}, {@code
61 * isEmpty}, and {@code containsValue} are typically useful only when
62 * a map is not undergoing concurrent updates in other threads.
63 * Otherwise the results of these methods reflect transient states
64 * that may be adequate for monitoring or estimation purposes, but not
65 * for program control.
66 *
67 * <p> The table is dynamically expanded when there are too many
68 * collisions (i.e., keys that have distinct hash codes but fall into
69 * the same slot modulo the table size), with the expected average
70 * effect of maintaining roughly two bins per mapping (corresponding
71 * to a 0.75 load factor threshold for resizing). There may be much
72 * variance around this average as mappings are added and removed, but
73 * overall, this maintains a commonly accepted time/space tradeoff for
74 * hash tables. However, resizing this or any other kind of hash
75 * table may be a relatively slow operation. When possible, it is a
76 * good idea to provide a size estimate as an optional {@code
77 * initialCapacity} constructor argument. An additional optional
78 * {@code loadFactor} constructor argument provides a further means of
79 * customizing initial table capacity by specifying the table density
80 * to be used in calculating the amount of space to allocate for the
81 * given number of elements. Also, for compatibility with previous
82 * versions of this class, constructors may optionally specify an
83 * expected {@code concurrencyLevel} as an additional hint for
84 * internal sizing. Note that using many keys with exactly the same
85 * {@code hashCode()} is a sure way to slow down performance of any
86 * hash table.
87 *
88 * <p>This class and its views and iterators implement all of the
89 * <em>optional</em> methods of the {@link Map} and {@link Iterator}
90 * interfaces.
91 *
92 * <p> Like {@link Hashtable} but unlike {@link HashMap}, this class
93 * does <em>not</em> allow {@code null} to be used as a key or value.
94 *
95 * <p>This class is a member of the
96 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
97 * Java Collections Framework</a>.
98 *
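* <p>A brief usage sketch (variable names here are illustrative only;
* every call shown is part of the standard {@code ConcurrentMap}
* contract described above):
*
* <pre>
* {@code ConcurrentHashMapV8<String, Long> scores =
*   new ConcurrentHashMapV8<String, Long>(64); // optional size estimate
* scores.putIfAbsent("alice", 0L);    // adds only if absent
* Long prev = scores.put("bob", 10L); // null if no prior mapping
* scores.replace("bob", 10L, 11L);    // succeeds only if current value is 10
* Long v = scores.get("alice");       // retrieval never blocks
* }</pre>
*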
99 * <p><em>jsr166e note: This class is a candidate replacement for
100 * java.util.concurrent.ConcurrentHashMap. During transition, this
101 * class declares and uses nested functional interfaces with different
102 * names but the same forms as those expected for JDK8.</em>
103 *
104 * @since 1.5
105 * @author Doug Lea
106 * @param <K> the type of keys maintained by this map
107 * @param <V> the type of mapped values
108 */
109 public class ConcurrentHashMapV8<K, V>
110 implements ConcurrentMap<K, V>, Serializable {
111 private static final long serialVersionUID = 7249069246763182397L;
112
113 /**
114 * A partitionable iterator. A Spliterator can be traversed
115 * directly, but can also be partitioned (before traversal) by
116 * creating another Spliterator that covers a non-overlapping
117 * portion of the elements, and so may be amenable to parallel
118 * execution.
119 *
120 * <p> This interface exports a subset of expected JDK8
121 * functionality.
122 *
123 * <p>Sample usage: Here is one (of the several) ways to compute
124 * the sum of the values held in a map using the ForkJoin
125 * framework. As illustrated here, Spliterators are well suited to
126 * designs in which a task repeatedly splits off half its work
127 * into forked subtasks until small enough to process directly,
128 * and then joins these subtasks. Variants of this style can also
129 * be used in completion-based designs.
130 *
131 * <pre>
132 * {@code ConcurrentHashMapV8<String, Long> m = ...
133 * // split as if there were 8 * parallelism tasks, for load balance
134 * int n = m.size();
135 * int p = aForkJoinPool.getParallelism() * 8;
136 * int split = (n < p)? n : p;
137 * long sum = aForkJoinPool.invoke(new SumValues(m.valueSpliterator(), split, null));
138 * // ...
139 * static class SumValues extends RecursiveTask<Long> {
140 * final Spliterator<Long> s;
141 * final int split; // split while > 1
142 * final SumValues nextJoin; // records forked subtasks to join
143 * SumValues(Spliterator<Long> s, int split, SumValues nextJoin) {
144 * this.s = s; this.split = split; this.nextJoin = nextJoin;
145 * }
146 * public Long compute() {
147 * long sum = 0;
148 * SumValues subtasks = null; // fork subtasks
149 * for (int h = split >>> 1; h > 0; h >>>= 1)
150 * (subtasks = new SumValues(s.split(), h, subtasks)).fork();
151 * while (s.hasNext()) // directly process remaining elements
152 * sum += s.next();
153 * for (SumValues t = subtasks; t != null; t = t.nextJoin)
154 * sum += t.join(); // collect subtask results
155 * return sum;
156 * }
157 * }
158 * }</pre>
159 */
160 public static interface Spliterator<T> extends Iterator<T> {
161 /**
162 * Returns a Spliterator covering approximately half of the
163 * elements, guaranteed not to overlap with those subsequently
164 * returned by this Spliterator. After invoking this method,
165 * the current Spliterator will <em>not</em> produce any of
166 * the elements of the returned Spliterator, but the two
167 * Spliterators together will produce all of the elements that
168 * would have been produced by this Spliterator had this
169 * method not been called. The exact number of elements
170 * produced by the returned Spliterator is not guaranteed, and
171 * may be zero (i.e., with {@code hasNext()} reporting {@code
172 * false}) if this Spliterator cannot be further split.
173 *
174 * @return a Spliterator covering approximately half of the
175 * elements
176 * @throws IllegalStateException if this Spliterator has
177 * already commenced traversing elements
178 */
179 Spliterator<T> split();
180 }
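/*
 * Minimal Spliterator usage sketch (illustrative only; "m" is a
 * ConcurrentHashMapV8<String, Long> as in the javadoc example above):
 * split once before traversal and consume both halves sequentially.
 *
 *   Spliterator<Long> s1 = m.valueSpliterator();
 *   Spliterator<Long> s2 = s1.split();  // must precede any s1.next()
 *   long sum = 0L;
 *   while (s1.hasNext()) sum += s1.next();
 *   while (s2.hasNext()) sum += s2.next();
 */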
181
182 /*
183 * Overview:
184 *
185 * The primary design goal of this hash table is to maintain
186 * concurrent readability (typically method get(), but also
187 * iterators and related methods) while minimizing update
188 * contention. Secondary goals are to keep space consumption about
189 * the same or better than java.util.HashMap, and to support high
190 * initial insertion rates on an empty table by many threads.
191 *
192 * Each key-value mapping is held in a Node. Because Node fields
193 * can contain special values, they are defined using plain Object
194 * types. Similarly in turn, all internal methods that use them
195 * work off Object types. And similarly, so do the internal
196 * methods of auxiliary iterator and view classes. All public
197 * generic typed methods relay in/out of these internal methods,
198 * supplying null-checks and casts as needed. This also allows
199 * many of the public methods to be factored into a smaller number
200 * of internal methods (although sadly not so for the five
201 * variants of put-related operations). The validation-based
202 * approach explained below leads to a lot of code sprawl because
203 * retry-control precludes factoring into smaller methods.
204 *
205 * The table is lazily initialized to a power-of-two size upon the
206 * first insertion. Each bin in the table normally contains a
207 * list of Nodes (most often, the list has only zero or one Node).
208 * Table accesses require volatile/atomic reads, writes, and
209 * CASes. Because there is no other way to arrange this without
210 * adding further indirections, we use intrinsics
211 * (sun.misc.Unsafe) operations. The lists of nodes within bins
212 * are always accurately traversable under volatile reads, so long
213 * as lookups check hash code and non-nullness of value before
214 * checking key equality.
215 *
216 * We use the top two bits of Node hash fields for control
217 * purposes -- they are available anyway because of addressing
218 * constraints. As explained further below, these top bits are
219 * used as follows:
220 * 00 - Normal
221 * 01 - Locked
222 * 11 - Locked and may have a thread waiting for lock
223 * 10 - Node is a forwarding node
224 *
225 * The lower 30 bits of each Node's hash field contain a
226 * transformation of the key's hash code, except for forwarding
227 * nodes, for which the lower bits are zero (and so always have
228 * hash field == MOVED).
229 *
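* In terms of the constants and methods defined below, this encoding
* is used roughly as follows (illustrative pairings, not a separate
* API):
*
*   h & HASH_BITS            // usable hash bits of a normal node
*   casHash(h, h | LOCKED)   // try-lock a bin via its first node
*   casHash(h, h | WAITING)  // announce intent to block on that lock
*   hash == MOVED            // bin now forwards to the next table
*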
230 * Insertion (via put or its variants) of the first node in an
231 * empty bin is performed by just CASing it to the bin. This is
232 * by far the most common case for put operations under most
233 * key/hash distributions. Other update operations (insert,
234 * delete, and replace) require locks. We do not want to waste
235 * the space required to associate a distinct lock object with
236 * each bin, so instead use the first node of a bin list itself as
237 * a lock. Blocking support for these locks relies on the builtin
238 * "synchronized" monitors. However, we also need a tryLock
239 * construction, so we overlay these by using bits of the Node
240 * hash field for lock control (see above), and so normally use
241 * builtin monitors only for blocking and signalling using
242 * wait/notifyAll constructions. See Node.tryAwaitLock.
243 *
244 * Using the first node of a list as a lock does not by itself
245 * suffice though: When a node is locked, any update must first
246 * validate that it is still the first node after locking it, and
247 * retry if not. Because new nodes are always appended to lists,
248 * once a node is first in a bin, it remains first until deleted
249 * or the bin becomes invalidated (upon resizing). However,
250 * operations that only conditionally update may inspect nodes
251 * until the point of update. This is a converse of sorts to the
252 * lazy locking technique described by Herlihy & Shavit.
253 *
254 * The main disadvantage of per-bin locks is that other update
255 * operations on other nodes in a bin list protected by the same
256 * lock can stall, for example when user equals() or mapping
257 * functions take a long time. However, statistically, under
258 * random hash codes, this is not a common problem. Ideally, the
259 * frequency of nodes in bins follows a Poisson distribution
260 * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
261 * parameter of about 0.5 on average, given the resizing threshold
262 * of 0.75, although with a large variance because of resizing
263 * granularity. Ignoring variance, the expected occurrences of
264 * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
265 * first values are:
266 *
267 * 0: 0.60653066
268 * 1: 0.30326533
269 * 2: 0.07581633
270 * 3: 0.01263606
271 * 4: 0.00157952
272 * 5: 0.00015795
273 * 6: 0.00001316
274 * 7: 0.00000094
275 * 8: 0.00000006
276 * more: less than 1 in ten million
277 *
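* The listed values can be reproduced with a few lines of Java
* (illustrative only, not part of this class):
*
*   double lambda = 0.5, fact = 1.0;
*   for (int k = 0; k <= 8; ++k) {
*     if (k > 0) fact *= k;
*     System.out.printf("%d: %.8f%n", k,
*                       Math.exp(-lambda) * Math.pow(lambda, k) / fact);
*   }
*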
278 * Lock contention probability for two threads accessing distinct
279 * elements is roughly 1 / (8 * #elements) under random hashes.
280 *
281 * Actual hash code distributions encountered in practice
282 * sometimes deviate significantly from uniform randomness. This
283 * includes the case when N > (1<<30), so some keys MUST collide.
284 * Similarly for dumb or hostile usages in which multiple keys are
285 * designed to have identical hash codes. Also, although we guard
286 * against the worst effects of this (see method spread), sets of
287 * hashes may differ only in bits that do not impact their bin
288 * index for a given power-of-two mask. So we use a secondary
289 * strategy that applies when the number of nodes in a bin exceeds
290 * a threshold, and at least one of the keys implements
291 * Comparable. These TreeBins use a balanced tree to hold nodes
292 * (a specialized form of red-black trees), bounding search time
293 * to O(log N). Each search step in a TreeBin is around twice as
294 * slow as in a regular list, but given that N cannot exceed
295 * (1<<64) (before running out of addresses) this bounds search
296 * steps, lock hold times, etc, to reasonable constants (roughly
297 * 100 nodes inspected per operation worst case) so long as keys
298 * are Comparable (which is very common -- String, Long, etc).
299 * TreeBin nodes (TreeNodes) also maintain the same "next"
300 * traversal pointers as regular nodes, so can be traversed in
301 * iterators in the same way.
302 *
303 * The table is resized when occupancy exceeds a percentage
304 * threshold (nominally, 0.75, but see below). Only a single
305 * thread performs the resize (using field "sizeCtl", to arrange
306 * exclusion), but the table otherwise remains usable for reads
307 * and updates. Resizing proceeds by transferring bins, one by
308 * one, from the table to the next table. Because we are using
309 * power-of-two expansion, the elements from each bin must either
310 * stay at same index, or move with a power of two offset. We
311 * eliminate unnecessary node creation by catching cases where old
312 * nodes can be reused because their next fields won't change. On
313 * average, only about one-sixth of them need cloning when a table
314 * doubles. The nodes they replace will be garbage collectable as
315 * soon as they are no longer referenced by any reader thread that
316 * may be in the midst of concurrently traversing table. Upon
317 * transfer, the old table bin contains only a special forwarding
318 * node (with hash field "MOVED") that contains the next table as
319 * its key. On encountering a forwarding node, access and update
320 * operations restart, using the new table.
321 *
322 * Each bin transfer requires its bin lock. However, unlike other
323 * cases, a transfer can skip a bin if it fails to acquire its
324 * lock, and revisit it later (unless it is a TreeBin). Method
325 * rebuild maintains a buffer of TRANSFER_BUFFER_SIZE bins that
326 * have been skipped because of failure to acquire a lock, and
327 * blocks only if none are available (i.e., only very rarely).
328 * The transfer operation must also ensure that all accessible
329 * bins in both the old and new table are usable by any traversal.
330 * When there are no lock acquisition failures, this is arranged
331 * simply by proceeding from the last bin (table.length - 1) up
332 * towards the first. Upon seeing a forwarding node, traversals
333 * (see class Iter) arrange to move to the new table
334 * without revisiting nodes. However, when any node is skipped
335 * during a transfer, all earlier table bins may have become
336 * visible, so are initialized with a reverse-forwarding node back
337 * to the old table until the new ones are established. (This
338 * sometimes requires transiently locking a forwarding node, which
339 * is possible under the above encoding.) These more expensive
340 * mechanics trigger only when necessary.
341 *
342 * The traversal scheme also applies to partial traversals of
343 * ranges of bins (via an alternate Traverser constructor)
344 * to support partitioned aggregate operations. Also, read-only
345 * operations give up if ever forwarded to a null table, which
346 * provides support for shutdown-style clearing (not currently
347 * implemented).
348 *
349 * Lazy table initialization minimizes footprint until first use,
350 * and also avoids resizings when the first operation is from a
351 * putAll, constructor with map argument, or deserialization.
352 * These cases attempt to override the initial capacity settings,
353 * but harmlessly fail to take effect in cases of races.
354 *
355 * The element count is maintained using a LongAdder, which avoids
356 * contention on updates but can encounter cache thrashing if read
357 * too frequently during concurrent access. To avoid reading so
358 * often, resizing is attempted either when a bin lock is
359 * contended, or upon adding to a bin already holding two or more
360 * nodes (checked before adding in the xIfAbsent methods, after
361 * adding in others). Under uniform hash distributions, the
362 * probability of this occurring at threshold is around 13%,
363 * meaning that only about 1 in 8 puts check threshold (and after
364 * resizing, many fewer do so). But this approximation has high
365 * variance for small table sizes, so we check on any collision
366 * for sizes <= 64. The bulk putAll operation further reduces
367 * contention by only committing count updates upon these size
368 * checks.
369 *
370 * Maintaining API and serialization compatibility with previous
371 * versions of this class introduces several oddities. Mainly: We
372 * leave untouched but unused constructor arguments referring to
373 * concurrencyLevel. We accept a loadFactor constructor argument,
374 * but apply it only to initial table capacity (which is the only
375 * time that we can guarantee to honor it.) We also declare an
376 * unused "Segment" class that is instantiated in minimal form
377 * only when serializing.
378 */
379
380 /* ---------------- Constants -------------- */
381
382 /**
383 * The largest possible table capacity. This value must be
384 * exactly 1<<30 to stay within Java array allocation and indexing
385 * bounds for power of two table sizes, and is further required
386 * because the top two bits of 32bit hash fields are used for
387 * control purposes.
388 */
389 private static final int MAXIMUM_CAPACITY = 1 << 30;
390
391 /**
392 * The default initial table capacity. Must be a power of 2
393 * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
394 */
395 private static final int DEFAULT_CAPACITY = 16;
396
397 /**
398 * The largest possible (non-power of two) array size.
399 * Needed by toArray and related methods.
400 */
401 static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
402
403 /**
404 * The default concurrency level for this table. Unused but
405 * defined for compatibility with previous versions of this class.
406 */
407 private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
408
409 /**
410 * The load factor for this table. Overrides of this value in
411 * constructors affect only the initial table capacity. The
412 * actual floating point value isn't normally used -- it is
413 * simpler to use expressions such as {@code n - (n >>> 2)} for
414 * the associated resizing threshold.
415 */
416 private static final float LOAD_FACTOR = 0.75f;
417
418 /**
419 * The buffer size for skipped bins during transfers. The
420 * value is arbitrary but should be large enough to avoid
421 * most locking stalls during resizes.
422 */
423 private static final int TRANSFER_BUFFER_SIZE = 32;
424
425 /**
426 * The bin count threshold for using a tree rather than list for a
427 * bin. The value reflects the approximate break-even point for
428 * using tree-based operations.
429 */
430 private static final int TREE_THRESHOLD = 8;
431
432 /*
433 * Encodings for special uses of Node hash fields. See above for
434 * explanation.
435 */
436 static final int MOVED = 0x80000000; // hash field for forwarding nodes
437 static final int LOCKED = 0x40000000; // set/tested only as a bit
438 static final int WAITING = 0xc0000000; // both bits set/tested together
439 static final int HASH_BITS = 0x3fffffff; // usable bits of normal node hash
440
441 /* ---------------- Fields -------------- */
442
443 /**
444 * The array of bins. Lazily initialized upon first insertion.
445 * Size is always a power of two. Accessed directly by iterators.
446 */
447 transient volatile Node[] table;
448
449 /**
450 * The counter maintaining number of elements.
451 */
452 private transient final LongAdder counter;
453
454 /**
455 * Table initialization and resizing control. When negative, the
456 * table is being initialized or resized. Otherwise, when table is
457 * null, holds the initial table size to use upon creation, or 0
458 * for default. After initialization, holds the next element count
459 * value upon which to resize the table.
460 */
461 private transient volatile int sizeCtl;
462
463 // views
464 private transient KeySet<K,V> keySet;
465 private transient Values<K,V> values;
466 private transient EntrySet<K,V> entrySet;
467
468 /** For serialization compatibility. Null unless serialized; see below */
469 private Segment<K,V>[] segments;
470
471 /* ---------------- Table element access -------------- */
472
473 /*
474 * Volatile access methods are used for table elements as well as
475 * elements of in-progress next table while resizing. Uses are
476 * null checked by callers, and implicitly bounds-checked, relying
477 * on the invariants that tab arrays have non-zero size, and all
478 * indices are masked with (tab.length - 1) which is never
479 * negative and always less than length. Note that, to be correct
480 * wrt arbitrary concurrency errors by users, bounds checks must
481 * operate on local variables, which accounts for some odd-looking
482 * inline assignments below.
483 */
484
485 static final Node tabAt(Node[] tab, int i) { // used by Iter
486 return (Node)UNSAFE.getObjectVolatile(tab, ((long)i<<ASHIFT)+ABASE);
487 }
488
489 private static final boolean casTabAt(Node[] tab, int i, Node c, Node v) {
490 return UNSAFE.compareAndSwapObject(tab, ((long)i<<ASHIFT)+ABASE, c, v);
491 }
492
493 private static final void setTabAt(Node[] tab, int i, Node v) {
494 UNSAFE.putObjectVolatile(tab, ((long)i<<ASHIFT)+ABASE, v);
495 }
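/*
 * For intuition only: these three accessors provide the same volatile
 * read, CAS, and volatile write semantics that an
 * AtomicReferenceArray<Node> would give via get, compareAndSet, and
 * set, but without the extra object indirection such an array adds.
 */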
496
497 /* ---------------- Nodes -------------- */
498
499 /**
500 * Key-value entry. Note that this is never exported out as a
501 * user-visible Map.Entry (see MapEntry below). Nodes with a hash
502 * field of MOVED are special, and do not contain user keys or
503 * values. Otherwise, keys are never null, and null val fields
504 * indicate that a node is in the process of being deleted or
505 * created. For purposes of read-only access, a key may be read
506 * before a val, but can only be used after checking val to be
507 * non-null.
508 */
509 static class Node {
510 volatile int hash;
511 final Object key;
512 volatile Object val;
513 volatile Node next;
514
515 Node(int hash, Object key, Object val, Node next) {
516 this.hash = hash;
517 this.key = key;
518 this.val = val;
519 this.next = next;
520 }
521
522 /** CompareAndSet the hash field */
523 final boolean casHash(int cmp, int val) {
524 return UNSAFE.compareAndSwapInt(this, hashOffset, cmp, val);
525 }
526
527 /** The number of spins before blocking for a lock */
528 static final int MAX_SPINS =
529 Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1;
530
531 /**
532 * Spins a while if LOCKED bit set and this node is the first
533 * of its bin, and then sets WAITING bits on hash field and
534 * blocks (once) if they are still set. It is OK for this
535 * method to return even if lock is not available upon exit,
536 * which enables these simple single-wait mechanics.
537 *
538 * The corresponding signalling operation is performed within
539 * callers: Upon detecting that WAITING has been set when
540 * unlocking lock (via a failed CAS from non-waiting LOCKED
541 * state), unlockers acquire the sync lock and perform a
542 * notifyAll.
543 *
544 * The initial sanity check on tab and bounds is not currently
545 * necessary in the only usages of this method, but enables
546 * use in other future contexts.
547 */
548 final void tryAwaitLock(Node[] tab, int i) {
549 if (tab != null && i >= 0 && i < tab.length) { // sanity check
550 int r = ThreadLocalRandom.current().nextInt(); // randomize spins
551 int spins = MAX_SPINS, h;
552 while (tabAt(tab, i) == this && ((h = hash) & LOCKED) != 0) {
553 if (spins >= 0) {
554 r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift
555 if (r >= 0 && --spins == 0)
556 Thread.yield(); // yield before block
557 }
558 else if (casHash(h, h | WAITING)) {
559 synchronized (this) {
560 if (tabAt(tab, i) == this &&
561 (hash & WAITING) == WAITING) {
562 try {
563 wait();
564 } catch (InterruptedException ie) {
565 Thread.currentThread().interrupt();
566 }
567 }
568 else
569 notifyAll(); // possibly won race vs signaller
570 }
571 break;
572 }
573 }
574 }
575 }
576
577 // Unsafe mechanics for casHash
578 private static final sun.misc.Unsafe UNSAFE;
579 private static final long hashOffset;
580
581 static {
582 try {
583 UNSAFE = getUnsafe();
584 Class<?> k = Node.class;
585 hashOffset = UNSAFE.objectFieldOffset
586 (k.getDeclaredField("hash"));
587 } catch (Exception e) {
588 throw new Error(e);
589 }
590 }
591 }
592
593 /* ---------------- TreeBins -------------- */
594
595 /**
596 * Nodes for use in TreeBins
597 */
598 static final class TreeNode extends Node {
599 TreeNode parent; // red-black tree links
600 TreeNode left;
601 TreeNode right;
602 TreeNode prev; // needed to unlink next upon deletion
603 boolean red;
604
605 TreeNode(int hash, Object key, Object val, Node next, TreeNode parent) {
606 super(hash, key, val, next);
607 this.parent = parent;
608 }
609 }
610
611 /**
612 * A specialized form of red-black tree for use in bins
613 * whose size exceeds a threshold.
614 *
615 * TreeBins use a special form of comparison for search and
616 * related operations (which is the main reason we cannot use
617 * existing collections such as TreeMaps). TreeBins contain
618 * Comparable elements, but may contain others, as well as
619 * elements that are Comparable but not necessarily Comparable<T>
620 * for the same T, so we cannot invoke compareTo among them. To
621 * handle this, the tree is ordered primarily by hash value, then
622 * by getClass().getName() order, and then by Comparator order
623 * among elements of the same class. On lookup at a node, if
624 * elements are not comparable or compare as 0, both left and
625 * right children may need to be searched in the case of tied hash
626 * values. (This corresponds to the full list search that would be
627 * necessary if all elements were non-Comparable and had tied
628 * hashes.) The red-black balancing code is updated from
629 * pre-jdk-collections
630 * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
631 * based in turn on Cormen, Leiserson, and Rivest "Introduction to
632 * Algorithms" (CLR).
633 *
634 * TreeBins also maintain a separate locking discipline than
635 * regular bins. Because they are forwarded via special MOVED
636 * nodes at bin heads (which can never change once established),
637 * we cannot use those nodes as locks. Instead, TreeBin
638 * extends AbstractQueuedSynchronizer to support a simple form of
639 * read-write lock. For update operations and table validation,
640 * the exclusive form of lock behaves in the same way as bin-head
641 * locks. However, lookups use shared read-lock mechanics to allow
642 * multiple readers in the absence of writers. Additionally,
643 * these lookups do not ever block: While the lock is not
644 * available, they proceed along the slow traversal path (via
645 * next-pointers) until the lock becomes available or the list is
646 * exhausted, whichever comes first. (These cases are not fast,
647 * but maximize aggregate expected throughput.) The AQS mechanics
648 * for doing this are straightforward. The lock state is held as
649 * AQS getState(). Read counts are negative; the write count (1)
650 * is positive. There are no signalling preferences among readers
651 * and writers. Since we don't need to export full Lock API, we
652 * just override the minimal AQS methods and use them directly.
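*
* <p>In this encoding the possible lock states are:
*   state == 0   unlocked
*   state == 1   write-locked (exclusive)
*   state  < 0   read-locked by (-state) concurrent readers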
653 */
654 static final class TreeBin extends AbstractQueuedSynchronizer {
655 private static final long serialVersionUID = 2249069246763182397L;
656 transient TreeNode root; // root of tree
657 transient TreeNode first; // head of next-pointer list
658
659 /* AQS overrides */
660 public final boolean isHeldExclusively() { return getState() > 0; }
661 public final boolean tryAcquire(int ignore) {
662 if (compareAndSetState(0, 1)) {
663 setExclusiveOwnerThread(Thread.currentThread());
664 return true;
665 }
666 return false;
667 }
668 public final boolean tryRelease(int ignore) {
669 setExclusiveOwnerThread(null);
670 setState(0);
671 return true;
672 }
673 public final int tryAcquireShared(int ignore) {
674 for (int c;;) {
675 if ((c = getState()) > 0)
676 return -1;
677 if (compareAndSetState(c, c - 1))
678 return 1;
679 }
680 }
681 public final boolean tryReleaseShared(int ignore) {
682 int c;
683 do {} while (!compareAndSetState(c = getState(), c + 1));
684 return c == -1;
685 }
686
687 /** From CLR */
688 private void rotateLeft(TreeNode p) {
689 if (p != null) {
690 TreeNode r = p.right, pp, rl;
691 if ((rl = p.right = r.left) != null)
692 rl.parent = p;
693 if ((pp = r.parent = p.parent) == null)
694 root = r;
695 else if (pp.left == p)
696 pp.left = r;
697 else
698 pp.right = r;
699 r.left = p;
700 p.parent = r;
701 }
702 }
703
704 /** From CLR */
705 private void rotateRight(TreeNode p) {
706 if (p != null) {
707 TreeNode l = p.left, pp, lr;
708 if ((lr = p.left = l.right) != null)
709 lr.parent = p;
710 if ((pp = l.parent = p.parent) == null)
711 root = l;
712 else if (pp.right == p)
713 pp.right = l;
714 else
715 pp.left = l;
716 l.right = p;
717 p.parent = l;
718 }
719 }
720
721 /**
722 * Returns the TreeNode (or null if not found) for the given key
723 * starting at given root.
724 */
725 @SuppressWarnings("unchecked") final TreeNode getTreeNode
726 (int h, Object k, TreeNode p) {
727 Class<?> c = k.getClass();
728 while (p != null) {
729 int dir, ph; Object pk; Class<?> pc;
730 if ((ph = p.hash) == h) {
731 if ((pk = p.key) == k || k.equals(pk))
732 return p;
733 if (c != (pc = pk.getClass()) ||
734 !(k instanceof Comparable) ||
735 (dir = ((Comparable)k).compareTo((Comparable)pk)) == 0) {
736 dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName());
737 TreeNode r = null, s = null, pl, pr;
738 if (dir >= 0) {
739 if ((pl = p.left) != null && h <= pl.hash)
740 s = pl;
741 }
742 else if ((pr = p.right) != null && h >= pr.hash)
743 s = pr;
744 if (s != null && (r = getTreeNode(h, k, s)) != null)
745 return r;
746 }
747 }
748 else
749 dir = (h < ph) ? -1 : 1;
750 p = (dir > 0) ? p.right : p.left;
751 }
752 return null;
753 }
754
755 /**
756 * Wrapper for getTreeNode used by CHM.get. Tries to obtain
757 * read-lock to call getTreeNode, but during failure to get
758 * lock, searches along next links.
759 */
760 final Object getValue(int h, Object k) {
761 Node r = null;
762 int c = getState(); // Must read lock state first
763 for (Node e = first; e != null; e = e.next) {
764 if (c <= 0 && compareAndSetState(c, c - 1)) {
765 try {
766 r = getTreeNode(h, k, root);
767 } finally {
768 releaseShared(0);
769 }
770 break;
771 }
772 else if ((e.hash & HASH_BITS) == h && k.equals(e.key)) {
773 r = e;
774 break;
775 }
776 else
777 c = getState();
778 }
779 return r == null ? null : r.val;
780 }
781
782 /**
783 * Finds or adds a node.
784 * @return null if added
785 */
786 @SuppressWarnings("unchecked") final TreeNode putTreeNode
787 (int h, Object k, Object v) {
788 Class<?> c = k.getClass();
789 TreeNode pp = root, p = null;
790 int dir = 0;
791 while (pp != null) { // find existing node or leaf to insert at
792 int ph; Object pk; Class<?> pc;
793 p = pp;
794 if ((ph = p.hash) == h) {
795 if ((pk = p.key) == k || k.equals(pk))
796 return p;
797 if (c != (pc = pk.getClass()) ||
798 !(k instanceof Comparable) ||
799 (dir = ((Comparable)k).compareTo((Comparable)pk)) == 0) {
800 dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName());
801 TreeNode r = null, s = null, pl, pr;
802 if (dir >= 0) {
803 if ((pl = p.left) != null && h <= pl.hash)
804 s = pl;
805 }
806 else if ((pr = p.right) != null && h >= pr.hash)
807 s = pr;
808 if (s != null && (r = getTreeNode(h, k, s)) != null)
809 return r;
810 }
811 }
812 else
813 dir = (h < ph) ? -1 : 1;
814 pp = (dir > 0) ? p.right : p.left;
815 }
816
817 TreeNode f = first;
818 TreeNode x = first = new TreeNode(h, k, v, f, p);
819 if (p == null)
820 root = x;
821 else { // attach and rebalance; adapted from CLR
822 TreeNode xp, xpp;
823 if (f != null)
824 f.prev = x;
825 if (dir <= 0)
826 p.left = x;
827 else
828 p.right = x;
829 x.red = true;
830 while (x != null && (xp = x.parent) != null && xp.red &&
831 (xpp = xp.parent) != null) {
832 TreeNode xppl = xpp.left;
833 if (xp == xppl) {
834 TreeNode y = xpp.right;
835 if (y != null && y.red) {
836 y.red = false;
837 xp.red = false;
838 xpp.red = true;
839 x = xpp;
840 }
841 else {
842 if (x == xp.right) {
843 rotateLeft(x = xp);
844 xpp = (xp = x.parent) == null ? null : xp.parent;
845 }
846 if (xp != null) {
847 xp.red = false;
848 if (xpp != null) {
849 xpp.red = true;
850 rotateRight(xpp);
851 }
852 }
853 }
854 }
855 else {
856 TreeNode y = xppl;
857 if (y != null && y.red) {
858 y.red = false;
859 xp.red = false;
860 xpp.red = true;
861 x = xpp;
862 }
863 else {
864 if (x == xp.left) {
865 rotateRight(x = xp);
866 xpp = (xp = x.parent) == null ? null : xp.parent;
867 }
868 if (xp != null) {
869 xp.red = false;
870 if (xpp != null) {
871 xpp.red = true;
872 rotateLeft(xpp);
873 }
874 }
875 }
876 }
877 }
878 TreeNode r = root;
879 if (r != null && r.red)
880 r.red = false;
881 }
882 return null;
883 }
884
885 /**
886 * Removes the given node, that must be present before this
887 * call. This is messier than typical red-black deletion code
888 * because we cannot swap the contents of an interior node
889 * with a leaf successor that is pinned by "next" pointers
890 * that are accessible independently of lock. So instead we
891 * swap the tree linkages.
892 */
893 final void deleteTreeNode(TreeNode p) {
894 TreeNode next = (TreeNode)p.next; // unlink traversal pointers
895 TreeNode pred = p.prev;
896 if (pred == null)
897 first = next;
898 else
899 pred.next = next;
900 if (next != null)
901 next.prev = pred;
902 TreeNode replacement;
903 TreeNode pl = p.left;
904 TreeNode pr = p.right;
905 if (pl != null && pr != null) {
906 TreeNode s = pr, sl;
907 while ((sl = s.left) != null) // find successor
908 s = sl;
909 boolean c = s.red; s.red = p.red; p.red = c; // swap colors
910 TreeNode sr = s.right;
911 TreeNode pp = p.parent;
912 if (s == pr) { // p was s's direct parent
913 p.parent = s;
914 s.right = p;
915 }
916 else {
917 TreeNode sp = s.parent;
918 if ((p.parent = sp) != null) {
919 if (s == sp.left)
920 sp.left = p;
921 else
922 sp.right = p;
923 }
924 if ((s.right = pr) != null)
925 pr.parent = s;
926 }
927 p.left = null;
928 if ((p.right = sr) != null)
929 sr.parent = p;
930 if ((s.left = pl) != null)
931 pl.parent = s;
932 if ((s.parent = pp) == null)
933 root = s;
934 else if (p == pp.left)
935 pp.left = s;
936 else
937 pp.right = s;
938 replacement = sr;
939 }
940 else
941 replacement = (pl != null) ? pl : pr;
942 TreeNode pp = p.parent;
943 if (replacement == null) {
944 if (pp == null) {
945 root = null;
946 return;
947 }
948 replacement = p;
949 }
950 else {
951 replacement.parent = pp;
952 if (pp == null)
953 root = replacement;
954 else if (p == pp.left)
955 pp.left = replacement;
956 else
957 pp.right = replacement;
958 p.left = p.right = p.parent = null;
959 }
960 if (!p.red) { // rebalance, from CLR
961 TreeNode x = replacement;
962 while (x != null) {
963 TreeNode xp, xpl;
964 if (x.red || (xp = x.parent) == null) {
965 x.red = false;
966 break;
967 }
968 if (x == (xpl = xp.left)) {
969 TreeNode sib = xp.right;
970 if (sib != null && sib.red) {
971 sib.red = false;
972 xp.red = true;
973 rotateLeft(xp);
974 sib = (xp = x.parent) == null ? null : xp.right;
975 }
976 if (sib == null)
977 x = xp;
978 else {
979 TreeNode sl = sib.left, sr = sib.right;
980 if ((sr == null || !sr.red) &&
981 (sl == null || !sl.red)) {
982 sib.red = true;
983 x = xp;
984 }
985 else {
986 if (sr == null || !sr.red) {
987 if (sl != null)
988 sl.red = false;
989 sib.red = true;
990 rotateRight(sib);
991 sib = (xp = x.parent) == null ? null : xp.right;
992 }
993 if (sib != null) {
994 sib.red = (xp == null) ? false : xp.red;
995 if ((sr = sib.right) != null)
996 sr.red = false;
997 }
998 if (xp != null) {
999 xp.red = false;
1000 rotateLeft(xp);
1001 }
1002 x = root;
1003 }
1004 }
1005 }
1006 else { // symmetric
1007 TreeNode sib = xpl;
1008 if (sib != null && sib.red) {
1009 sib.red = false;
1010 xp.red = true;
1011 rotateRight(xp);
1012 sib = (xp = x.parent) == null ? null : xp.left;
1013 }
1014 if (sib == null)
1015 x = xp;
1016 else {
1017 TreeNode sl = sib.left, sr = sib.right;
1018 if ((sl == null || !sl.red) &&
1019 (sr == null || !sr.red)) {
1020 sib.red = true;
1021 x = xp;
1022 }
1023 else {
1024 if (sl == null || !sl.red) {
1025 if (sr != null)
1026 sr.red = false;
1027 sib.red = true;
1028 rotateLeft(sib);
1029 sib = (xp = x.parent) == null ? null : xp.left;
1030 }
1031 if (sib != null) {
1032 sib.red = (xp == null) ? false : xp.red;
1033 if ((sl = sib.left) != null)
1034 sl.red = false;
1035 }
1036 if (xp != null) {
1037 xp.red = false;
1038 rotateRight(xp);
1039 }
1040 x = root;
1041 }
1042 }
1043 }
1044 }
1045 }
1046 if (p == replacement && (pp = p.parent) != null) {
1047 if (p == pp.left) // detach pointers
1048 pp.left = null;
1049 else if (p == pp.right)
1050 pp.right = null;
1051 p.parent = null;
1052 }
1053 }
1054 }
1055
1056 /* ---------------- Collision reduction methods -------------- */
1057
1058 /**
1059 * Spreads higher bits to lower, and also forces top 2 bits to 0.
1060 * Because the table uses power-of-two masking, sets of hashes
1061 * that vary only in bits above the current mask will always
1062 * collide. (Among known examples are sets of Float keys holding
1063 * consecutive whole numbers in small tables.) To counter this,
1064 * we apply a transform that spreads the impact of higher bits
1065 * downward. There is a tradeoff between speed, utility, and
1066 * quality of bit-spreading. Because many common sets of hashes
1067 * are already reasonably distributed across bits (so don't benefit
1068 * from spreading), and because we use trees to handle large sets
1069 * of collisions in bins, we don't need excessively high quality.
1070 */
1071 private static final int spread(int h) {
1072 h ^= (h >>> 18) ^ (h >>> 12);
1073 return (h ^ (h >>> 10)) & HASH_BITS;
1074 }
1075
1076 /**
1077 * Replaces a list bin with a tree bin. Call only when locked.
1078 * Fails to replace if the given key is non-comparable or table
1079 * is, or needs, resizing.
1080 */
1081 private final void replaceWithTreeBin(Node[] tab, int index, Object key) {
1082 if ((key instanceof Comparable) &&
1083 (tab.length >= MAXIMUM_CAPACITY || counter.sum() < (long)sizeCtl)) {
1084 TreeBin t = new TreeBin();
1085 for (Node e = tabAt(tab, index); e != null; e = e.next)
1086 t.putTreeNode(e.hash & HASH_BITS, e.key, e.val);
1087 setTabAt(tab, index, new Node(MOVED, t, null, null));
1088 }
1089 }
1090
1091 /* ---------------- Internal access and update methods -------------- */
1092
1093 /** Implementation for get and containsKey */
1094 private final Object internalGet(Object k) {
1095 int h = spread(k.hashCode());
1096 retry: for (Node[] tab = table; tab != null;) {
1097 Node e, p; Object ek, ev; int eh; // locals to read fields once
1098 for (e = tabAt(tab, (tab.length - 1) & h); e != null; e = e.next) {
1099 if ((eh = e.hash) == MOVED) {
1100 if ((ek = e.key) instanceof TreeBin) // search TreeBin
1101 return ((TreeBin)ek).getValue(h, k);
1102 else { // restart with new table
1103 tab = (Node[])ek;
1104 continue retry;
1105 }
1106 }
1107 else if ((eh & HASH_BITS) == h && (ev = e.val) != null &&
1108 ((ek = e.key) == k || k.equals(ek)))
1109 return ev;
1110 }
1111 break;
1112 }
1113 return null;
1114 }
1115
1116 /**
1117 * Implementation for the four public remove/replace methods:
1118 * Replaces node value with v, conditional upon match of cv if
1119 * non-null. If resulting value is null, delete.
1120 */
1121 private final Object internalReplace(Object k, Object v, Object cv) {
1122 int h = spread(k.hashCode());
1123 Object oldVal = null;
1124 for (Node[] tab = table;;) {
1125 Node f; int i, fh; Object fk;
1126 if (tab == null ||
1127 (f = tabAt(tab, i = (tab.length - 1) & h)) == null)
1128 break;
1129 else if ((fh = f.hash) == MOVED) {
1130 if ((fk = f.key) instanceof TreeBin) {
1131 TreeBin t = (TreeBin)fk;
1132 boolean validated = false;
1133 boolean deleted = false;
1134 t.acquire(0);
1135 try {
1136 if (tabAt(tab, i) == f) {
1137 validated = true;
1138 TreeNode p = t.getTreeNode(h, k, t.root);
1139 if (p != null) {
1140 Object pv = p.val;
1141 if (cv == null || cv == pv || cv.equals(pv)) {
1142 oldVal = pv;
1143 if ((p.val = v) == null) {
1144 deleted = true;
1145 t.deleteTreeNode(p);
1146 }
1147 }
1148 }
1149 }
1150 } finally {
1151 t.release(0);
1152 }
1153 if (validated) {
1154 if (deleted)
1155 counter.add(-1L);
1156 break;
1157 }
1158 }
1159 else
1160 tab = (Node[])fk;
1161 }
1162 else if ((fh & HASH_BITS) != h && f.next == null) // precheck
1163 break; // rules out possible existence
1164 else if ((fh & LOCKED) != 0) {
1165 checkForResize(); // try resizing if can't get lock
1166 f.tryAwaitLock(tab, i);
1167 }
1168 else if (f.casHash(fh, fh | LOCKED)) {
1169 boolean validated = false;
1170 boolean deleted = false;
1171 try {
1172 if (tabAt(tab, i) == f) {
1173 validated = true;
1174 for (Node e = f, pred = null;;) {
1175 Object ek, ev;
1176 if ((e.hash & HASH_BITS) == h &&
1177 ((ev = e.val) != null) &&
1178 ((ek = e.key) == k || k.equals(ek))) {
1179 if (cv == null || cv == ev || cv.equals(ev)) {
1180 oldVal = ev;
1181 if ((e.val = v) == null) {
1182 deleted = true;
1183 Node en = e.next;
1184 if (pred != null)
1185 pred.next = en;
1186 else
1187 setTabAt(tab, i, en);
1188 }
1189 }
1190 break;
1191 }
1192 pred = e;
1193 if ((e = e.next) == null)
1194 break;
1195 }
1196 }
1197 } finally {
1198 if (!f.casHash(fh | LOCKED, fh)) {
1199 f.hash = fh;
1200 synchronized (f) { f.notifyAll(); };
1201 }
1202 }
1203 if (validated) {
1204 if (deleted)
1205 counter.add(-1L);
1206 break;
1207 }
1208 }
1209 }
1210 return oldVal;
1211 }
1212
1213 /*
1214 * Internal versions of the six insertion methods, each a
1215 * little more complicated than the last. All have
1216 * the same basic structure as the first (internalPut):
1217 * 1. If table uninitialized, create
1218 * 2. If bin empty, try to CAS new node
1219 * 3. If bin stale, use new table
1220 * 4. if bin converted to TreeBin, validate and relay to TreeBin methods
1221 * 5. Lock and validate; if valid, scan and add or update
1222 *
1223 * The others interweave other checks and/or alternative actions:
1224 * * Plain put checks for and performs resize after insertion.
1225 * * putIfAbsent prescans for mapping without lock (and fails to add
1226 * if present), which also makes pre-emptive resize checks worthwhile.
1227 * * computeIfAbsent extends form used in putIfAbsent with additional
1228 * mechanics to deal with calls, potential exceptions, and null
1229 * returns from the function call.
1230 * * compute uses the same function-call mechanics, but without
1231 * the prescans
1232 * * merge acts as putIfAbsent in the absent case, but invokes the
1233 * update function if present
1234 * * putAll attempts to pre-allocate enough table space
1235 * and more lazily performs count updates and checks.
1236 *
1237 * Someday when details settle down a bit more, it might be worth
1238 * some factoring to reduce sprawl.
1239 */
1240
1241 /** Implementation for put */
1242 private final Object internalPut(Object k, Object v) {
1243 int h = spread(k.hashCode());
1244 int count = 0;
1245 for (Node[] tab = table;;) {
1246 int i; Node f; int fh; Object fk;
1247 if (tab == null)
1248 tab = initTable();
1249 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1250 if (casTabAt(tab, i, null, new Node(h, k, v, null)))
1251 break; // no lock when adding to empty bin
1252 }
1253 else if ((fh = f.hash) == MOVED) {
1254 if ((fk = f.key) instanceof TreeBin) {
1255 TreeBin t = (TreeBin)fk;
1256 Object oldVal = null;
1257 t.acquire(0);
1258 try {
1259 if (tabAt(tab, i) == f) {
1260 count = 2;
1261 TreeNode p = t.putTreeNode(h, k, v);
1262 if (p != null) {
1263 oldVal = p.val;
1264 p.val = v;
1265 }
1266 }
1267 } finally {
1268 t.release(0);
1269 }
1270 if (count != 0) {
1271 if (oldVal != null)
1272 return oldVal;
1273 break;
1274 }
1275 }
1276 else
1277 tab = (Node[])fk;
1278 }
1279 else if ((fh & LOCKED) != 0) {
1280 checkForResize();
1281 f.tryAwaitLock(tab, i);
1282 }
1283 else if (f.casHash(fh, fh | LOCKED)) {
1284 Object oldVal = null;
1285 try { // needed in case equals() throws
1286 if (tabAt(tab, i) == f) {
1287 count = 1;
1288 for (Node e = f;; ++count) {
1289 Object ek, ev;
1290 if ((e.hash & HASH_BITS) == h &&
1291 (ev = e.val) != null &&
1292 ((ek = e.key) == k || k.equals(ek))) {
1293 oldVal = ev;
1294 e.val = v;
1295 break;
1296 }
1297 Node last = e;
1298 if ((e = e.next) == null) {
1299 last.next = new Node(h, k, v, null);
1300 if (count >= TREE_THRESHOLD)
1301 replaceWithTreeBin(tab, i, k);
1302 break;
1303 }
1304 }
1305 }
1306 } finally { // unlock and signal if needed
1307 if (!f.casHash(fh | LOCKED, fh)) {
1308 f.hash = fh;
1309 synchronized (f) { f.notifyAll(); };
1310 }
1311 }
1312 if (count != 0) {
1313 if (oldVal != null)
1314 return oldVal;
1315 if (tab.length <= 64)
1316 count = 2;
1317 break;
1318 }
1319 }
1320 }
1321 counter.add(1L);
1322 if (count > 1)
1323 checkForResize();
1324 return null;
1325 }
1326
1327 /** Implementation for putIfAbsent */
1328 private final Object internalPutIfAbsent(Object k, Object v) {
1329 int h = spread(k.hashCode());
1330 int count = 0;
1331 for (Node[] tab = table;;) {
1332 int i; Node f; int fh; Object fk, fv;
1333 if (tab == null)
1334 tab = initTable();
1335 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1336 if (casTabAt(tab, i, null, new Node(h, k, v, null)))
1337 break;
1338 }
1339 else if ((fh = f.hash) == MOVED) {
1340 if ((fk = f.key) instanceof TreeBin) {
1341 TreeBin t = (TreeBin)fk;
1342 Object oldVal = null;
1343 t.acquire(0);
1344 try {
1345 if (tabAt(tab, i) == f) {
1346 count = 2;
1347 TreeNode p = t.putTreeNode(h, k, v);
1348 if (p != null)
1349 oldVal = p.val;
1350 }
1351 } finally {
1352 t.release(0);
1353 }
1354 if (count != 0) {
1355 if (oldVal != null)
1356 return oldVal;
1357 break;
1358 }
1359 }
1360 else
1361 tab = (Node[])fk;
1362 }
1363 else if ((fh & HASH_BITS) == h && (fv = f.val) != null &&
1364 ((fk = f.key) == k || k.equals(fk)))
1365 return fv;
1366 else {
1367 Node g = f.next;
1368 if (g != null) { // at least 2 nodes -- search and maybe resize
1369 for (Node e = g;;) {
1370 Object ek, ev;
1371 if ((e.hash & HASH_BITS) == h && (ev = e.val) != null &&
1372 ((ek = e.key) == k || k.equals(ek)))
1373 return ev;
1374 if ((e = e.next) == null) {
1375 checkForResize();
1376 break;
1377 }
1378 }
1379 }
1380 if (((fh = f.hash) & LOCKED) != 0) {
1381 checkForResize();
1382 f.tryAwaitLock(tab, i);
1383 }
1384 else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) {
1385 Object oldVal = null;
1386 try {
1387 if (tabAt(tab, i) == f) {
1388 count = 1;
1389 for (Node e = f;; ++count) {
1390 Object ek, ev;
1391 if ((e.hash & HASH_BITS) == h &&
1392 (ev = e.val) != null &&
1393 ((ek = e.key) == k || k.equals(ek))) {
1394 oldVal = ev;
1395 break;
1396 }
1397 Node last = e;
1398 if ((e = e.next) == null) {
1399 last.next = new Node(h, k, v, null);
1400 if (count >= TREE_THRESHOLD)
1401 replaceWithTreeBin(tab, i, k);
1402 break;
1403 }
1404 }
1405 }
1406 } finally {
1407 if (!f.casHash(fh | LOCKED, fh)) {
1408 f.hash = fh;
1409 synchronized (f) { f.notifyAll(); };
1410 }
1411 }
1412 if (count != 0) {
1413 if (oldVal != null)
1414 return oldVal;
1415 if (tab.length <= 64)
1416 count = 2;
1417 break;
1418 }
1419 }
1420 }
1421 }
1422 counter.add(1L);
1423 if (count > 1)
1424 checkForResize();
1425 return null;
1426 }
1427
1428 /** Implementation for computeIfAbsent */
1429 private final Object internalComputeIfAbsent(K k,
1430 Fun<? super K, ?> mf) {
1431 int h = spread(k.hashCode());
1432 Object val = null;
1433 int count = 0;
1434 for (Node[] tab = table;;) {
1435 Node f; int i, fh; Object fk, fv;
1436 if (tab == null)
1437 tab = initTable();
1438 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1439 Node node = new Node(fh = h | LOCKED, k, null, null);
1440 if (casTabAt(tab, i, null, node)) {
1441 count = 1;
1442 try {
1443 if ((val = mf.apply(k)) != null)
1444 node.val = val;
1445 } finally {
1446 if (val == null)
1447 setTabAt(tab, i, null);
1448 if (!node.casHash(fh, h)) {
1449 node.hash = h;
1450 synchronized (node) { node.notifyAll(); };
1451 }
1452 }
1453 }
1454 if (count != 0)
1455 break;
1456 }
1457 else if ((fh = f.hash) == MOVED) {
1458 if ((fk = f.key) instanceof TreeBin) {
1459 TreeBin t = (TreeBin)fk;
1460 boolean added = false;
1461 t.acquire(0);
1462 try {
1463 if (tabAt(tab, i) == f) {
1464 count = 1;
1465 TreeNode p = t.getTreeNode(h, k, t.root);
1466 if (p != null)
1467 val = p.val;
1468 else if ((val = mf.apply(k)) != null) {
1469 added = true;
1470 count = 2;
1471 t.putTreeNode(h, k, val);
1472 }
1473 }
1474 } finally {
1475 t.release(0);
1476 }
1477 if (count != 0) {
1478 if (!added)
1479 return val;
1480 break;
1481 }
1482 }
1483 else
1484 tab = (Node[])fk;
1485 }
1486 else if ((fh & HASH_BITS) == h && (fv = f.val) != null &&
1487 ((fk = f.key) == k || k.equals(fk)))
1488 return fv;
1489 else {
1490 Node g = f.next;
1491 if (g != null) {
1492 for (Node e = g;;) {
1493 Object ek, ev;
1494 if ((e.hash & HASH_BITS) == h && (ev = e.val) != null &&
1495 ((ek = e.key) == k || k.equals(ek)))
1496 return ev;
1497 if ((e = e.next) == null) {
1498 checkForResize();
1499 break;
1500 }
1501 }
1502 }
1503 if (((fh = f.hash) & LOCKED) != 0) {
1504 checkForResize();
1505 f.tryAwaitLock(tab, i);
1506 }
1507 else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) {
1508 boolean added = false;
1509 try {
1510 if (tabAt(tab, i) == f) {
1511 count = 1;
1512 for (Node e = f;; ++count) {
1513 Object ek, ev;
1514 if ((e.hash & HASH_BITS) == h &&
1515 (ev = e.val) != null &&
1516 ((ek = e.key) == k || k.equals(ek))) {
1517 val = ev;
1518 break;
1519 }
1520 Node last = e;
1521 if ((e = e.next) == null) {
1522 if ((val = mf.apply(k)) != null) {
1523 added = true;
1524 last.next = new Node(h, k, val, null);
1525 if (count >= TREE_THRESHOLD)
1526 replaceWithTreeBin(tab, i, k);
1527 }
1528 break;
1529 }
1530 }
1531 }
1532 } finally {
1533 if (!f.casHash(fh | LOCKED, fh)) {
1534 f.hash = fh;
1535 synchronized (f) { f.notifyAll(); };
1536 }
1537 }
1538 if (count != 0) {
1539 if (!added)
1540 return val;
1541 if (tab.length <= 64)
1542 count = 2;
1543 break;
1544 }
1545 }
1546 }
1547 }
1548 if (val != null) {
1549 counter.add(1L);
1550 if (count > 1)
1551 checkForResize();
1552 }
1553 return val;
1554 }
1555
1556 /** Implementation for compute */
1557 @SuppressWarnings("unchecked") private final Object internalCompute
1558 (K k, boolean onlyIfPresent, BiFun<? super K, ? super V, ? extends V> mf) {
1559 int h = spread(k.hashCode());
1560 Object val = null;
1561 int delta = 0;
1562 int count = 0;
1563 for (Node[] tab = table;;) {
1564 Node f; int i, fh; Object fk;
1565 if (tab == null)
1566 tab = initTable();
1567 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1568 if (onlyIfPresent)
1569 break;
1570 Node node = new Node(fh = h | LOCKED, k, null, null);
1571 if (casTabAt(tab, i, null, node)) {
1572 try {
1573 count = 1;
1574 if ((val = mf.apply(k, null)) != null) {
1575 node.val = val;
1576 delta = 1;
1577 }
1578 } finally {
1579 if (delta == 0)
1580 setTabAt(tab, i, null);
1581 if (!node.casHash(fh, h)) {
1582 node.hash = h;
1583 synchronized (node) { node.notifyAll(); };
1584 }
1585 }
1586 }
1587 if (count != 0)
1588 break;
1589 }
1590 else if ((fh = f.hash) == MOVED) {
1591 if ((fk = f.key) instanceof TreeBin) {
1592 TreeBin t = (TreeBin)fk;
1593 t.acquire(0);
1594 try {
1595 if (tabAt(tab, i) == f) {
1596 count = 1;
1597 TreeNode p = t.getTreeNode(h, k, t.root);
1598 Object pv = (p == null) ? null : p.val;
1599 if ((val = mf.apply(k, (V)pv)) != null) {
1600 if (p != null)
1601 p.val = val;
1602 else {
1603 count = 2;
1604 delta = 1;
1605 t.putTreeNode(h, k, val);
1606 }
1607 }
1608 else if (p != null) {
1609 delta = -1;
1610 t.deleteTreeNode(p);
1611 }
1612 }
1613 } finally {
1614 t.release(0);
1615 }
1616 if (count != 0)
1617 break;
1618 }
1619 else
1620 tab = (Node[])fk;
1621 }
1622 else if ((fh & LOCKED) != 0) {
1623 checkForResize();
1624 f.tryAwaitLock(tab, i);
1625 }
1626 else if (f.casHash(fh, fh | LOCKED)) {
1627 try {
1628 if (tabAt(tab, i) == f) {
1629 count = 1;
1630 for (Node e = f, pred = null;; ++count) {
1631 Object ek, ev;
1632 if ((e.hash & HASH_BITS) == h &&
1633 (ev = e.val) != null &&
1634 ((ek = e.key) == k || k.equals(ek))) {
1635 val = mf.apply(k, (V)ev);
1636 if (val != null)
1637 e.val = val;
1638 else {
1639 delta = -1;
1640 Node en = e.next;
1641 if (pred != null)
1642 pred.next = en;
1643 else
1644 setTabAt(tab, i, en);
1645 }
1646 break;
1647 }
1648 pred = e;
1649 if ((e = e.next) == null) {
1650 if (!onlyIfPresent && (val = mf.apply(k, null)) != null) {
1651 pred.next = new Node(h, k, val, null);
1652 delta = 1;
1653 if (count >= TREE_THRESHOLD)
1654 replaceWithTreeBin(tab, i, k);
1655 }
1656 break;
1657 }
1658 }
1659 }
1660 } finally {
1661 if (!f.casHash(fh | LOCKED, fh)) {
1662 f.hash = fh;
1663 synchronized (f) { f.notifyAll(); };
1664 }
1665 }
1666 if (count != 0) {
1667 if (tab.length <= 64)
1668 count = 2;
1669 break;
1670 }
1671 }
1672 }
1673 if (delta != 0) {
1674 counter.add((long)delta);
1675 if (count > 1)
1676 checkForResize();
1677 }
1678 return val;
1679 }
1680
1681 /** Implementation for merge */
1682 @SuppressWarnings("unchecked") private final Object internalMerge
1683 (K k, V v, BiFun<? super V, ? super V, ? extends V> mf) {
1684 int h = spread(k.hashCode());
1685 Object val = null;
1686 int delta = 0;
1687 int count = 0;
1688 for (Node[] tab = table;;) {
1689 int i; Node f; int fh; Object fk, fv;
1690 if (tab == null)
1691 tab = initTable();
1692 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1693 if (casTabAt(tab, i, null, new Node(h, k, v, null))) {
1694 delta = 1;
1695 val = v;
1696 break;
1697 }
1698 }
1699 else if ((fh = f.hash) == MOVED) {
1700 if ((fk = f.key) instanceof TreeBin) {
1701 TreeBin t = (TreeBin)fk;
1702 t.acquire(0);
1703 try {
1704 if (tabAt(tab, i) == f) {
1705 count = 1;
1706 TreeNode p = t.getTreeNode(h, k, t.root);
1707 val = (p == null) ? v : mf.apply((V)p.val, v);
1708 if (val != null) {
1709 if (p != null)
1710 p.val = val;
1711 else {
1712 count = 2;
1713 delta = 1;
1714 t.putTreeNode(h, k, val);
1715 }
1716 }
1717 else if (p != null) {
1718 delta = -1;
1719 t.deleteTreeNode(p);
1720 }
1721 }
1722 } finally {
1723 t.release(0);
1724 }
1725 if (count != 0)
1726 break;
1727 }
1728 else
1729 tab = (Node[])fk;
1730 }
1731 else if ((fh & LOCKED) != 0) {
1732 checkForResize();
1733 f.tryAwaitLock(tab, i);
1734 }
1735 else if (f.casHash(fh, fh | LOCKED)) {
1736 try {
1737 if (tabAt(tab, i) == f) {
1738 count = 1;
1739 for (Node e = f, pred = null;; ++count) {
1740 Object ek, ev;
1741 if ((e.hash & HASH_BITS) == h &&
1742 (ev = e.val) != null &&
1743 ((ek = e.key) == k || k.equals(ek))) {
1744                                 val = mf.apply((V)ev, v); // (existing, new): match the merge contract and the tree-bin path
1745 if (val != null)
1746 e.val = val;
1747 else {
1748 delta = -1;
1749 Node en = e.next;
1750 if (pred != null)
1751 pred.next = en;
1752 else
1753 setTabAt(tab, i, en);
1754 }
1755 break;
1756 }
1757 pred = e;
1758 if ((e = e.next) == null) {
1759 val = v;
1760 pred.next = new Node(h, k, val, null);
1761 delta = 1;
1762 if (count >= TREE_THRESHOLD)
1763 replaceWithTreeBin(tab, i, k);
1764 break;
1765 }
1766 }
1767 }
1768 } finally {
1769 if (!f.casHash(fh | LOCKED, fh)) {
1770 f.hash = fh;
1771 synchronized (f) { f.notifyAll(); };
1772 }
1773 }
1774 if (count != 0) {
1775 if (tab.length <= 64)
1776 count = 2;
1777 break;
1778 }
1779 }
1780 }
1781 if (delta != 0) {
1782 counter.add((long)delta);
1783 if (count > 1)
1784 checkForResize();
1785 }
1786 return val;
1787 }
1788
1789 /** Implementation for putAll */
1790 private final void internalPutAll(Map<?, ?> m) {
1791 tryPresize(m.size());
1792 long delta = 0L; // number of uncommitted additions
1793 boolean npe = false; // to throw exception on exit for nulls
1794 try { // to clean up counts on other exceptions
1795 for (Map.Entry<?, ?> entry : m.entrySet()) {
1796 Object k, v;
1797 if (entry == null || (k = entry.getKey()) == null ||
1798 (v = entry.getValue()) == null) {
1799 npe = true;
1800 break;
1801 }
1802 int h = spread(k.hashCode());
1803 for (Node[] tab = table;;) {
1804 int i; Node f; int fh; Object fk;
1805 if (tab == null)
1806 tab = initTable();
1807 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null){
1808 if (casTabAt(tab, i, null, new Node(h, k, v, null))) {
1809 ++delta;
1810 break;
1811 }
1812 }
1813 else if ((fh = f.hash) == MOVED) {
1814 if ((fk = f.key) instanceof TreeBin) {
1815 TreeBin t = (TreeBin)fk;
1816 boolean validated = false;
1817 t.acquire(0);
1818 try {
1819 if (tabAt(tab, i) == f) {
1820 validated = true;
1821 TreeNode p = t.getTreeNode(h, k, t.root);
1822 if (p != null)
1823 p.val = v;
1824 else {
1825 t.putTreeNode(h, k, v);
1826 ++delta;
1827 }
1828 }
1829 } finally {
1830 t.release(0);
1831 }
1832 if (validated)
1833 break;
1834 }
1835 else
1836 tab = (Node[])fk;
1837 }
1838 else if ((fh & LOCKED) != 0) {
1839 counter.add(delta);
1840 delta = 0L;
1841 checkForResize();
1842 f.tryAwaitLock(tab, i);
1843 }
1844 else if (f.casHash(fh, fh | LOCKED)) {
1845 int count = 0;
1846 try {
1847 if (tabAt(tab, i) == f) {
1848 count = 1;
1849 for (Node e = f;; ++count) {
1850 Object ek, ev;
1851 if ((e.hash & HASH_BITS) == h &&
1852 (ev = e.val) != null &&
1853 ((ek = e.key) == k || k.equals(ek))) {
1854 e.val = v;
1855 break;
1856 }
1857 Node last = e;
1858 if ((e = e.next) == null) {
1859 ++delta;
1860 last.next = new Node(h, k, v, null);
1861 if (count >= TREE_THRESHOLD)
1862 replaceWithTreeBin(tab, i, k);
1863 break;
1864 }
1865 }
1866 }
1867 } finally {
1868 if (!f.casHash(fh | LOCKED, fh)) {
1869 f.hash = fh;
1870 synchronized (f) { f.notifyAll(); };
1871 }
1872 }
1873 if (count != 0) {
1874 if (count > 1) {
1875 counter.add(delta);
1876 delta = 0L;
1877 checkForResize();
1878 }
1879 break;
1880 }
1881 }
1882 }
1883 }
1884 } finally {
1885 if (delta != 0)
1886 counter.add(delta);
1887 }
1888 if (npe)
1889 throw new NullPointerException();
1890 }
1891
1892 /* ---------------- Table Initialization and Resizing -------------- */
1893
1894 /**
1895 * Returns a power of two table size for the given desired capacity.
1896      * See Hacker's Delight, sec 3.2
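     * For example, tableSizeFor(16) returns 16 and tableSizeFor(17) returns
     * 32; requested capacities at or above MAXIMUM_CAPACITY are clamped to
     * MAXIMUM_CAPACITY. (Illustrative values, derived from the bit-smearing
     * steps below.)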
1897 */
1898 private static final int tableSizeFor(int c) {
1899 int n = c - 1;
1900 n |= n >>> 1;
1901 n |= n >>> 2;
1902 n |= n >>> 4;
1903 n |= n >>> 8;
1904 n |= n >>> 16;
1905 return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
1906 }
1907
1908 /**
1909 * Initializes table, using the size recorded in sizeCtl.
1910 */
1911 private final Node[] initTable() {
1912 Node[] tab; int sc;
1913 while ((tab = table) == null) {
1914 if ((sc = sizeCtl) < 0)
1915 Thread.yield(); // lost initialization race; just spin
1916 else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
1917 try {
1918 if ((tab = table) == null) {
1919 int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
1920 tab = table = new Node[n];
1921 sc = n - (n >>> 2);
1922 }
1923 } finally {
1924 sizeCtl = sc;
1925 }
1926 break;
1927 }
1928 }
1929 return tab;
1930 }
1931
1932 /**
1933 * If table is too small and not already resizing, creates next
1934 * table and transfers bins. Rechecks occupancy after a transfer
1935 * to see if another resize is already needed because resizings
1936 * are lagging additions.
1937 */
1938 private final void checkForResize() {
1939 Node[] tab; int n, sc;
1940 while ((tab = table) != null &&
1941 (n = tab.length) < MAXIMUM_CAPACITY &&
1942 (sc = sizeCtl) >= 0 && counter.sum() >= (long)sc &&
1943 UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
1944 try {
1945 if (tab == table) {
1946 table = rebuild(tab);
1947 sc = (n << 1) - (n >>> 1);
1948 }
1949 } finally {
1950 sizeCtl = sc;
1951 }
1952 }
1953 }
1954
1955 /**
1956 * Tries to presize table to accommodate the given number of elements.
1957 *
1958 * @param size number of elements (doesn't need to be perfectly accurate)
1959 */
1960 private final void tryPresize(int size) {
1961 int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
1962 tableSizeFor(size + (size >>> 1) + 1);
1963 int sc;
1964 while ((sc = sizeCtl) >= 0) {
1965 Node[] tab = table; int n;
1966 if (tab == null || (n = tab.length) == 0) {
1967 n = (sc > c) ? sc : c;
1968 if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
1969 try {
1970 if (table == tab) {
1971 table = new Node[n];
1972 sc = n - (n >>> 2);
1973 }
1974 } finally {
1975 sizeCtl = sc;
1976 }
1977 }
1978 }
1979 else if (c <= sc || n >= MAXIMUM_CAPACITY)
1980 break;
1981 else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
1982 try {
1983 if (table == tab) {
1984 table = rebuild(tab);
1985 sc = (n << 1) - (n >>> 1);
1986 }
1987 } finally {
1988 sizeCtl = sc;
1989 }
1990 }
1991 }
1992 }
1993
1994     /**
1995 * Moves and/or copies the nodes in each bin to new table. See
1996 * above for explanation.
1997 *
1998 * @return the new table
1999 */
2000 private static final Node[] rebuild(Node[] tab) {
2001 int n = tab.length;
2002 Node[] nextTab = new Node[n << 1];
2003 Node fwd = new Node(MOVED, nextTab, null, null);
2004 int[] buffer = null; // holds bins to revisit; null until needed
2005 Node rev = null; // reverse forwarder; null until needed
2006 int nbuffered = 0; // the number of bins in buffer list
2007 int bufferIndex = 0; // buffer index of current buffered bin
2008 int bin = n - 1; // current non-buffered bin or -1 if none
2009
2010 for (int i = bin;;) { // start upwards sweep
2011 int fh; Node f;
2012 if ((f = tabAt(tab, i)) == null) {
2013 if (bin >= 0) { // Unbuffered; no lock needed (or available)
2014 if (!casTabAt(tab, i, f, fwd))
2015 continue;
2016 }
2017 else { // transiently use a locked forwarding node
2018 Node g = new Node(MOVED|LOCKED, nextTab, null, null);
2019 if (!casTabAt(tab, i, f, g))
2020 continue;
2021 setTabAt(nextTab, i, null);
2022 setTabAt(nextTab, i + n, null);
2023 setTabAt(tab, i, fwd);
2024 if (!g.casHash(MOVED|LOCKED, MOVED)) {
2025 g.hash = MOVED;
2026 synchronized (g) { g.notifyAll(); }
2027 }
2028 }
2029 }
2030 else if ((fh = f.hash) == MOVED) {
2031 Object fk = f.key;
2032 if (fk instanceof TreeBin) {
2033 TreeBin t = (TreeBin)fk;
2034 boolean validated = false;
2035 t.acquire(0);
2036 try {
2037 if (tabAt(tab, i) == f) {
2038 validated = true;
2039 splitTreeBin(nextTab, i, t);
2040 setTabAt(tab, i, fwd);
2041 }
2042 } finally {
2043 t.release(0);
2044 }
2045 if (!validated)
2046 continue;
2047 }
2048 }
2049 else if ((fh & LOCKED) == 0 && f.casHash(fh, fh|LOCKED)) {
2050 boolean validated = false;
2051 try { // split to lo and hi lists; copying as needed
2052 if (tabAt(tab, i) == f) {
2053 validated = true;
2054 splitBin(nextTab, i, f);
2055 setTabAt(tab, i, fwd);
2056 }
2057 } finally {
2058 if (!f.casHash(fh | LOCKED, fh)) {
2059 f.hash = fh;
2060 synchronized (f) { f.notifyAll(); };
2061 }
2062 }
2063 if (!validated)
2064 continue;
2065 }
2066 else {
2067 if (buffer == null) // initialize buffer for revisits
2068 buffer = new int[TRANSFER_BUFFER_SIZE];
2069 if (bin < 0 && bufferIndex > 0) {
2070 int j = buffer[--bufferIndex];
2071 buffer[bufferIndex] = i;
2072 i = j; // swap with another bin
2073 continue;
2074 }
2075 if (bin < 0 || nbuffered >= TRANSFER_BUFFER_SIZE) {
2076 f.tryAwaitLock(tab, i);
2077 continue; // no other options -- block
2078 }
2079 if (rev == null) // initialize reverse-forwarder
2080 rev = new Node(MOVED, tab, null, null);
2081 if (tabAt(tab, i) != f || (f.hash & LOCKED) == 0)
2082 continue; // recheck before adding to list
2083 buffer[nbuffered++] = i;
2084 setTabAt(nextTab, i, rev); // install place-holders
2085 setTabAt(nextTab, i + n, rev);
2086 }
2087
2088 if (bin > 0)
2089 i = --bin;
2090 else if (buffer != null && nbuffered > 0) {
2091 bin = -1;
2092 i = buffer[bufferIndex = --nbuffered];
2093 }
2094 else
2095 return nextTab;
2096 }
2097 }
2098
2099 /**
2100 * Splits a normal bin with list headed by e into lo and hi parts;
2101 * installs in given table.
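     * For example (illustrative): when a 16-bin table is doubled to 32 bins,
     * bit == 16, so a node with (hash & 16) == 0 stays at index i in the new
     * table, while a node with (hash & 16) != 0 moves to index i + 16.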
2102 */
2103 private static void splitBin(Node[] nextTab, int i, Node e) {
2104 int bit = nextTab.length >>> 1; // bit to split on
2105 int runBit = e.hash & bit;
2106 Node lastRun = e, lo = null, hi = null;
2107 for (Node p = e.next; p != null; p = p.next) {
2108 int b = p.hash & bit;
2109 if (b != runBit) {
2110 runBit = b;
2111 lastRun = p;
2112 }
2113 }
2114 if (runBit == 0)
2115 lo = lastRun;
2116 else
2117 hi = lastRun;
2118 for (Node p = e; p != lastRun; p = p.next) {
2119 int ph = p.hash & HASH_BITS;
2120 Object pk = p.key, pv = p.val;
2121 if ((ph & bit) == 0)
2122 lo = new Node(ph, pk, pv, lo);
2123 else
2124 hi = new Node(ph, pk, pv, hi);
2125 }
2126 setTabAt(nextTab, i, lo);
2127 setTabAt(nextTab, i + bit, hi);
2128 }
2129
2130 /**
2131 * Splits a tree bin into lo and hi parts; installs in given table.
2132 */
2133 private static void splitTreeBin(Node[] nextTab, int i, TreeBin t) {
2134 int bit = nextTab.length >>> 1;
2135 TreeBin lt = new TreeBin();
2136 TreeBin ht = new TreeBin();
2137 int lc = 0, hc = 0;
2138 for (Node e = t.first; e != null; e = e.next) {
2139 int h = e.hash & HASH_BITS;
2140 Object k = e.key, v = e.val;
2141 if ((h & bit) == 0) {
2142 ++lc;
2143 lt.putTreeNode(h, k, v);
2144 }
2145 else {
2146 ++hc;
2147 ht.putTreeNode(h, k, v);
2148 }
2149 }
2150 Node ln, hn; // throw away trees if too small
2151 if (lc <= (TREE_THRESHOLD >>> 1)) {
2152 ln = null;
2153 for (Node p = lt.first; p != null; p = p.next)
2154 ln = new Node(p.hash, p.key, p.val, ln);
2155 }
2156 else
2157 ln = new Node(MOVED, lt, null, null);
2158 setTabAt(nextTab, i, ln);
2159 if (hc <= (TREE_THRESHOLD >>> 1)) {
2160 hn = null;
2161 for (Node p = ht.first; p != null; p = p.next)
2162 hn = new Node(p.hash, p.key, p.val, hn);
2163 }
2164 else
2165 hn = new Node(MOVED, ht, null, null);
2166 setTabAt(nextTab, i + bit, hn);
2167 }
2168
2169 /**
2170 * Implementation for clear. Steps through each bin, removing all
2171 * nodes.
2172 */
2173 private final void internalClear() {
2174 long delta = 0L; // negative number of deletions
2175 int i = 0;
2176 Node[] tab = table;
2177 while (tab != null && i < tab.length) {
2178 int fh; Object fk;
2179 Node f = tabAt(tab, i);
2180 if (f == null)
2181 ++i;
2182 else if ((fh = f.hash) == MOVED) {
2183 if ((fk = f.key) instanceof TreeBin) {
2184 TreeBin t = (TreeBin)fk;
2185 t.acquire(0);
2186 try {
2187 if (tabAt(tab, i) == f) {
2188 for (Node p = t.first; p != null; p = p.next) {
2189 if (p.val != null) { // (currently always true)
2190 p.val = null;
2191 --delta;
2192 }
2193 }
2194 t.first = null;
2195 t.root = null;
2196 ++i;
2197 }
2198 } finally {
2199 t.release(0);
2200 }
2201 }
2202 else
2203 tab = (Node[])fk;
2204 }
2205 else if ((fh & LOCKED) != 0) {
2206 counter.add(delta); // opportunistically update count
2207 delta = 0L;
2208 f.tryAwaitLock(tab, i);
2209 }
2210 else if (f.casHash(fh, fh | LOCKED)) {
2211 try {
2212 if (tabAt(tab, i) == f) {
2213 for (Node e = f; e != null; e = e.next) {
2214 if (e.val != null) { // (currently always true)
2215 e.val = null;
2216 --delta;
2217 }
2218 }
2219 setTabAt(tab, i, null);
2220 ++i;
2221 }
2222 } finally {
2223 if (!f.casHash(fh | LOCKED, fh)) {
2224 f.hash = fh;
2225 synchronized (f) { f.notifyAll(); };
2226 }
2227 }
2228 }
2229 }
2230 if (delta != 0)
2231 counter.add(delta);
2232 }
2233
2234 /* ----------------Table Traversal -------------- */
2235
2236 /**
2237 * Encapsulates traversal for methods such as containsValue; also
2238 * serves as a base class for other iterators and bulk tasks.
2239 *
2240 * At each step, the iterator snapshots the key ("nextKey") and
2241 * value ("nextVal") of a valid node (i.e., one that, at point of
2242 * snapshot, has a non-null user value). Because val fields can
2243 * change (including to null, indicating deletion), field nextVal
2244 * might not be accurate at point of use, but still maintains the
2245 * weak consistency property of holding a value that was once
2246 * valid.
2247 *
2248 * Internal traversals directly access these fields, as in:
2249 * {@code while (it.advance() != null) { process(it.nextKey); }}
2250 *
2251 * Exported iterators must track whether the iterator has advanced
2252 * (in hasNext vs next) (by setting/checking/nulling field
2253 * nextVal), and then extract key, value, or key-value pairs as
2254 * return values of next().
2255 *
2256 * The iterator visits once each still-valid node that was
2257 * reachable upon iterator construction. It might miss some that
2258 * were added to a bin after the bin was visited, which is OK wrt
2259 * consistency guarantees. Maintaining this property in the face
2260 * of possible ongoing resizes requires a fair amount of
2261 * bookkeeping state that is difficult to optimize away amidst
2262 * volatile accesses. Even so, traversal maintains reasonable
2263 * throughput.
2264 *
2265 * Normally, iteration proceeds bin-by-bin traversing lists.
2266 * However, if the table has been resized, then all future steps
2267 * must traverse both the bin at the current index as well as at
2268      * must traverse both the bin at the current index and the one at
2269 * paranoically cope with potential sharing by users of iterators
2270 * across threads, iteration terminates if a bounds checks fails
2271      * across threads, iteration terminates if a bounds check fails
2272 *
2273 * This class extends ForkJoinTask to streamline parallel
2274 * iteration in bulk operations (see BulkTask). This adds only an
2275      * int of space overhead, which is close enough to negligible that it
2276      * is not worth worrying about in cases where it is not needed. Because
2277 * ForkJoinTask is Serializable, but iterators need not be, we
2278 * need to add warning suppressions.
2279 */
2280 @SuppressWarnings("serial") static class Traverser<K,V,R> extends ForkJoinTask<R> {
2281 final ConcurrentHashMapV8<K, V> map;
2282 Node next; // the next entry to use
2283 Node last; // the last entry used
2284 Object nextKey; // cached key field of next
2285 Object nextVal; // cached val field of next
2286 Node[] tab; // current table; updated if resized
2287 int index; // index of bin to use next
2288 int baseIndex; // current index of initial table
2289 int baseLimit; // index bound for initial table
2290 final int baseSize; // initial table size
2291
2292 /** Creates iterator for all entries in the table. */
2293 Traverser(ConcurrentHashMapV8<K, V> map) {
2294 this.tab = (this.map = map).table;
2295 baseLimit = baseSize = (tab == null) ? 0 : tab.length;
2296 }
2297
2298 /** Creates iterator for split() methods */
2299 Traverser(Traverser<K,V,?> it) {
2300 this.map = it.map;
2301 this.tab = it.tab;
2302 this.baseSize = it.baseSize;
2303 it.baseLimit = this.index = this.baseIndex =
2304 ((this.baseLimit = it.baseLimit) + it.baseIndex + 1) >>> 1;
2305 }
2306
2307 /**
2308 * Advances next; returns nextVal or null if terminated.
2309 * See above for explanation.
2310 */
2311 final Object advance() {
2312 Node e = last = next;
2313 Object ev = null;
2314 outer: do {
2315 if (e != null) // advance past used/skipped node
2316 e = e.next;
2317 while (e == null) { // get to next non-null bin
2318 Node[] t; int b, i, n; Object ek; // checks must use locals
2319 if ((b = baseIndex) >= baseLimit || (i = index) < 0 ||
2320 (t = tab) == null || i >= (n = t.length))
2321 break outer;
2322 else if ((e = tabAt(t, i)) != null && e.hash == MOVED) {
2323 if ((ek = e.key) instanceof TreeBin)
2324 e = ((TreeBin)ek).first;
2325 else {
2326 tab = (Node[])ek;
2327 continue; // restarts due to null val
2328 }
2329 } // visit upper slots if present
2330 index = (i += baseSize) < n ? i : (baseIndex = b + 1);
2331 }
2332 nextKey = e.key;
2333 } while ((ev = e.val) == null); // skip deleted or special nodes
2334 next = e;
2335 return nextVal = ev;
2336 }
2337
2338 public final void remove() {
2339 if (nextVal == null && last == null)
2340 advance();
2341 Node e = last;
2342 if (e == null)
2343 throw new IllegalStateException();
2344 last = null;
2345 map.remove(e.key);
2346 }
2347
2348 public final boolean hasNext() {
2349 return nextVal != null || advance() != null;
2350 }
2351
2352 public final boolean hasMoreElements() { return hasNext(); }
2353 public final void setRawResult(Object x) { }
2354 public R getRawResult() { return null; }
2355 public boolean exec() { return true; }
2356 }
2357
2358 /* ---------------- Public operations -------------- */
2359
2360 /**
2361 * Creates a new, empty map with the default initial table size (16).
2362 */
2363 public ConcurrentHashMapV8() {
2364 this.counter = new LongAdder();
2365 }
2366
2367 /**
2368 * Creates a new, empty map with an initial table size
2369 * accommodating the specified number of elements without the need
2370 * to dynamically resize.
2371 *
2372 * @param initialCapacity The implementation performs internal
2373 * sizing to accommodate this many elements.
2374 * @throws IllegalArgumentException if the initial capacity of
2375 * elements is negative
2376 */
2377 public ConcurrentHashMapV8(int initialCapacity) {
2378 if (initialCapacity < 0)
2379 throw new IllegalArgumentException();
2380 int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
2381 MAXIMUM_CAPACITY :
2382 tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
2383 this.counter = new LongAdder();
2384 this.sizeCtl = cap;
2385 }
2386
2387 /**
2388 * Creates a new map with the same mappings as the given map.
2389 *
2390 * @param m the map
2391 */
2392 public ConcurrentHashMapV8(Map<? extends K, ? extends V> m) {
2393 this.counter = new LongAdder();
2394 this.sizeCtl = DEFAULT_CAPACITY;
2395 internalPutAll(m);
2396 }
2397
2398 /**
2399 * Creates a new, empty map with an initial table size based on
2400 * the given number of elements ({@code initialCapacity}) and
2401 * initial table density ({@code loadFactor}).
2402 *
2403 * @param initialCapacity the initial capacity. The implementation
2404 * performs internal sizing to accommodate this many elements,
2405 * given the specified load factor.
2406 * @param loadFactor the load factor (table density) for
2407 * establishing the initial table size
2408 * @throws IllegalArgumentException if the initial capacity of
2409 * elements is negative or the load factor is nonpositive
2410 *
2411 * @since 1.6
2412 */
2413 public ConcurrentHashMapV8(int initialCapacity, float loadFactor) {
2414 this(initialCapacity, loadFactor, 1);
2415 }
2416
2417 /**
2418 * Creates a new, empty map with an initial table size based on
2419 * the given number of elements ({@code initialCapacity}), table
2420 * density ({@code loadFactor}), and number of concurrently
2421 * updating threads ({@code concurrencyLevel}).
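     *
     * <p>For example (an illustrative calculation, not an API guarantee):
     * {@code new ConcurrentHashMapV8<K,V>(16, 0.75f, 1)} starts with a 32-bin
     * table, because 1 + 16/0.75 is about 22.3 and the result is rounded up
     * to the next power of two.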
2422 *
2423 * @param initialCapacity the initial capacity. The implementation
2424 * performs internal sizing to accommodate this many elements,
2425 * given the specified load factor.
2426 * @param loadFactor the load factor (table density) for
2427 * establishing the initial table size
2428 * @param concurrencyLevel the estimated number of concurrently
2429 * updating threads. The implementation may use this value as
2430 * a sizing hint.
2431 * @throws IllegalArgumentException if the initial capacity is
2432 * negative or the load factor or concurrencyLevel are
2433 * nonpositive
2434 */
2435 public ConcurrentHashMapV8(int initialCapacity,
2436 float loadFactor, int concurrencyLevel) {
2437 if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
2438 throw new IllegalArgumentException();
2439 if (initialCapacity < concurrencyLevel) // Use at least as many bins
2440 initialCapacity = concurrencyLevel; // as estimated threads
2441 long size = (long)(1.0 + (long)initialCapacity / loadFactor);
2442 int cap = (size >= (long)MAXIMUM_CAPACITY) ?
2443 MAXIMUM_CAPACITY : tableSizeFor((int)size);
2444 this.counter = new LongAdder();
2445 this.sizeCtl = cap;
2446 }
2447
2448 /**
2449 * {@inheritDoc}
2450 */
2451 public boolean isEmpty() {
2452 return counter.sum() <= 0L; // ignore transient negative values
2453 }
2454
2455 /**
2456 * {@inheritDoc}
2457 */
2458 public int size() {
2459 long n = counter.sum();
2460 return ((n < 0L) ? 0 :
2461 (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
2462 (int)n);
2463 }
2464
2465 /**
2466 * Returns the number of mappings. This method should be used
2467 * instead of {@link #size} because a ConcurrentHashMap may
2468 * contain more mappings than can be represented as an int. The
2469 * value returned is a snapshot; the actual count may differ if
2470 * there are ongoing concurrent insertions or removals.
2471 *
2472 * @return the number of mappings
2473 */
2474 public long mappingCount() {
2475 long n = counter.sum();
2476 return (n < 0L) ? 0L : n; // ignore transient negative values
2477 }
2478
2479 /**
2480 * Returns the value to which the specified key is mapped,
2481 * or {@code null} if this map contains no mapping for the key.
2482 *
2483 * <p>More formally, if this map contains a mapping from a key
2484 * {@code k} to a value {@code v} such that {@code key.equals(k)},
2485 * then this method returns {@code v}; otherwise it returns
2486 * {@code null}. (There can be at most one such mapping.)
2487 *
2488 * @throws NullPointerException if the specified key is null
2489 */
2490 @SuppressWarnings("unchecked") public V get(Object key) {
2491 if (key == null)
2492 throw new NullPointerException();
2493 return (V)internalGet(key);
2494 }
2495
2496 /**
2497 * Returns the value to which the specified key is mapped,
2498      * or the given defaultValue if this map contains no mapping for the key.
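     *
     * <p>Illustrative usage (assuming a hypothetical map {@code
     * ConcurrentHashMapV8<String,Integer> counts}):
     * <pre> {@code
     * int c = counts.getValueOrDefault("missing", 0); // 0 if no mapping exists}</pre>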
2499 *
2500 * @param key the key
2501 * @param defaultValue the value to return if this map contains
2502 * no mapping for the given key.
2503 * @return the mapping for the key, if present; else the defaultValue
2504 * @throws NullPointerException if the specified key is null
2505 */
2506 @SuppressWarnings("unchecked") public V getValueOrDefault(Object key, V defaultValue) {
2507 if (key == null)
2508 throw new NullPointerException();
2509 V v = (V) internalGet(key);
2510 return v == null ? defaultValue : v;
2511 }
2512
2513 /**
2514 * Tests if the specified object is a key in this table.
2515 *
2516 * @param key possible key
2517 * @return {@code true} if and only if the specified object
2518 * is a key in this table, as determined by the
2519 * {@code equals} method; {@code false} otherwise
2520 * @throws NullPointerException if the specified key is null
2521 */
2522 public boolean containsKey(Object key) {
2523 if (key == null)
2524 throw new NullPointerException();
2525 return internalGet(key) != null;
2526 }
2527
2528 /**
2529 * Returns {@code true} if this map maps one or more keys to the
2530 * specified value. Note: This method may require a full traversal
2531 * of the map, and is much slower than method {@code containsKey}.
2532 *
2533 * @param value value whose presence in this map is to be tested
2534 * @return {@code true} if this map maps one or more keys to the
2535 * specified value
2536 * @throws NullPointerException if the specified value is null
2537 */
2538 public boolean containsValue(Object value) {
2539 if (value == null)
2540 throw new NullPointerException();
2541 Object v;
2542 Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
2543 while ((v = it.advance()) != null) {
2544 if (v == value || value.equals(v))
2545 return true;
2546 }
2547 return false;
2548 }
2549
2550 /**
2551 * Legacy method testing if some key maps into the specified value
2552 * in this table. This method is identical in functionality to
2553 * {@link #containsValue}, and exists solely to ensure
2554 * full compatibility with class {@link java.util.Hashtable},
2555 * which supported this method prior to introduction of the
2556 * Java Collections framework.
2557 *
2558 * @param value a value to search for
2559 * @return {@code true} if and only if some key maps to the
2560 * {@code value} argument in this table as
2561 * determined by the {@code equals} method;
2562 * {@code false} otherwise
2563 * @throws NullPointerException if the specified value is null
2564 */
2565 public boolean contains(Object value) {
2566 return containsValue(value);
2567 }
2568
2569 /**
2570 * Maps the specified key to the specified value in this table.
2571 * Neither the key nor the value can be null.
2572 *
2573 * <p> The value can be retrieved by calling the {@code get} method
2574 * with a key that is equal to the original key.
2575 *
2576 * @param key key with which the specified value is to be associated
2577 * @param value value to be associated with the specified key
2578 * @return the previous value associated with {@code key}, or
2579 * {@code null} if there was no mapping for {@code key}
2580 * @throws NullPointerException if the specified key or value is null
2581 */
2582 @SuppressWarnings("unchecked") public V put(K key, V value) {
2583 if (key == null || value == null)
2584 throw new NullPointerException();
2585 return (V)internalPut(key, value);
2586 }
2587
2588 /**
2589 * {@inheritDoc}
2590 *
2591 * @return the previous value associated with the specified key,
2592 * or {@code null} if there was no mapping for the key
2593 * @throws NullPointerException if the specified key or value is null
2594 */
2595 @SuppressWarnings("unchecked") public V putIfAbsent(K key, V value) {
2596 if (key == null || value == null)
2597 throw new NullPointerException();
2598 return (V)internalPutIfAbsent(key, value);
2599 }
2600
2601 /**
2602 * Copies all of the mappings from the specified map to this one.
2603 * These mappings replace any mappings that this map had for any of the
2604 * keys currently in the specified map.
2605 *
2606 * @param m mappings to be stored in this map
2607 */
2608 public void putAll(Map<? extends K, ? extends V> m) {
2609 internalPutAll(m);
2610 }
2611
2612 /**
2613 * If the specified key is not already associated with a value,
2614 * computes its value using the given mappingFunction and enters
2615 * it into the map unless null. This is equivalent to
2616 * <pre> {@code
2617 * if (map.containsKey(key))
2618 * return map.get(key);
2619 * value = mappingFunction.apply(key);
2620 * if (value != null)
2621 * map.put(key, value);
2622 * return value;}</pre>
2623 *
2624 * except that the action is performed atomically. If the
2625 * function returns {@code null} no mapping is recorded. If the
2626 * function itself throws an (unchecked) exception, the exception
2627 * is rethrown to its caller, and no mapping is recorded. Some
2628 * attempted update operations on this map by other threads may be
2629 * blocked while computation is in progress, so the computation
2630 * should be short and simple, and must not attempt to update any
2631 * other mappings of this Map. The most appropriate usage is to
2632 * construct a new object serving as an initial mapped value, or
2633 * memoized result, as in:
2634 *
2635 * <pre> {@code
2636 * map.computeIfAbsent(key, new Fun<K, V>() {
2637      *   public V apply(K k) { return new Value(f(k)); }});}</pre>
2638 *
2639 * @param key key with which the specified value is to be associated
2640 * @param mappingFunction the function to compute a value
2641 * @return the current (existing or computed) value associated with
2642 * the specified key, or null if the computed value is null.
2643 * @throws NullPointerException if the specified key or mappingFunction
2644 * is null
2645 * @throws IllegalStateException if the computation detectably
2646 * attempts a recursive update to this map that would
2647 * otherwise never complete
2648 * @throws RuntimeException or Error if the mappingFunction does so,
2649 * in which case the mapping is left unestablished
2650 */
2651 @SuppressWarnings("unchecked") public V computeIfAbsent
2652 (K key, Fun<? super K, ? extends V> mappingFunction) {
2653 if (key == null || mappingFunction == null)
2654 throw new NullPointerException();
2655 return (V)internalComputeIfAbsent(key, mappingFunction);
2656 }
2657
2658 /**
2659 * If the given key is present, computes a new mapping value given a key and
2660 * its current mapped value. This is equivalent to
2661 * <pre> {@code
2662 * if (map.containsKey(key)) {
2663 * value = remappingFunction.apply(key, map.get(key));
2664 * if (value != null)
2665 * map.put(key, value);
2666 * else
2667 * map.remove(key);
2668 * }
2669 * }</pre>
2670 *
2671 * except that the action is performed atomically. If the
2672 * function returns {@code null}, the mapping is removed. If the
2673 * function itself throws an (unchecked) exception, the exception
2674 * is rethrown to its caller, and the current mapping is left
2675 * unchanged. Some attempted update operations on this map by
2676 * other threads may be blocked while computation is in progress,
2677 * so the computation should be short and simple, and must not
2678      * attempt to update any other mappings of this Map.
2680 *
2681 * @param key key with which the specified value is to be associated
2682 * @param remappingFunction the function to compute a value
2683 * @return the new value associated with the specified key, or null if none
2684 * @throws NullPointerException if the specified key or remappingFunction
2685 * is null
2686 * @throws IllegalStateException if the computation detectably
2687 * attempts a recursive update to this map that would
2688 * otherwise never complete
2689 * @throws RuntimeException or Error if the remappingFunction does so,
2690 * in which case the mapping is unchanged
2691 */
2692 @SuppressWarnings("unchecked") public V computeIfPresent
2693 (K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
2694 if (key == null || remappingFunction == null)
2695 throw new NullPointerException();
2696 return (V)internalCompute(key, true, remappingFunction);
2697 }
2698
2699 /**
2700 * Computes a new mapping value given a key and
2701 * its current mapped value (or {@code null} if there is no current
2702 * mapping). This is equivalent to
2703 * <pre> {@code
2704 * value = remappingFunction.apply(key, map.get(key));
2705 * if (value != null)
2706 * map.put(key, value);
2707 * else
2708 * map.remove(key);
2709 * }</pre>
2710 *
2711 * except that the action is performed atomically. If the
2712 * function returns {@code null}, the mapping is removed. If the
2713 * function itself throws an (unchecked) exception, the exception
2714 * is rethrown to its caller, and the current mapping is left
2715 * unchanged. Some attempted update operations on this map by
2716 * other threads may be blocked while computation is in progress,
2717 * so the computation should be short and simple, and must not
2718 * attempt to update any other mappings of this Map. For example,
2719 * to either create or append new messages to a value mapping:
2720 *
2721 * <pre> {@code
2722 * Map<Key, String> map = ...;
2723 * final String msg = ...;
2724 * map.compute(key, new BiFun<Key, String, String>() {
2725 * public String apply(Key k, String v) {
2726      *     return (v == null) ? msg : v + msg; }});}</pre>
2727 *
2728 * @param key key with which the specified value is to be associated
2729 * @param remappingFunction the function to compute a value
2730 * @return the new value associated with the specified key, or null if none
2731 * @throws NullPointerException if the specified key or remappingFunction
2732 * is null
2733 * @throws IllegalStateException if the computation detectably
2734 * attempts a recursive update to this map that would
2735 * otherwise never complete
2736 * @throws RuntimeException or Error if the remappingFunction does so,
2737 * in which case the mapping is unchanged
2738 */
2739 @SuppressWarnings("unchecked") public V compute
2740 (K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
2741 if (key == null || remappingFunction == null)
2742 throw new NullPointerException();
2743 return (V)internalCompute(key, false, remappingFunction);
2744 }
2745
2746 /**
2747 * If the specified key is not already associated
2748 * with a value, associate it with the given value.
2749 * Otherwise, replace the value with the results of
2750 * the given remapping function. This is equivalent to:
2751 * <pre> {@code
2752 * if (!map.containsKey(key))
2753      *   map.put(key, value);
2754 * else {
2755 * newValue = remappingFunction.apply(map.get(key), value);
2756      *   if (newValue != null)
2757      *     map.put(key, newValue);
2758 * else
2759 * map.remove(key);
2760 * }
2761 * }</pre>
2762 * except that the action is performed atomically. If the
2763 * function returns {@code null}, the mapping is removed. If the
2764 * function itself throws an (unchecked) exception, the exception
2765 * is rethrown to its caller, and the current mapping is left
2766 * unchanged. Some attempted update operations on this map by
2767 * other threads may be blocked while computation is in progress,
2768 * so the computation should be short and simple, and must not
2769 * attempt to update any other mappings of this Map.
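     *
     * <p>Illustrative usage (a hypothetical message log keyed by strings):
     * <pre> {@code
     * ConcurrentHashMapV8<String, String> log = ...;
     * log.merge(key, msg, new BiFun<String, String, String>() {
     *   public String apply(String old, String add) { return old + add; }});}</pre>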
2770 */
2771 @SuppressWarnings("unchecked") public V merge
2772 (K key, V value, BiFun<? super V, ? super V, ? extends V> remappingFunction) {
2773 if (key == null || value == null || remappingFunction == null)
2774 throw new NullPointerException();
2775 return (V)internalMerge(key, value, remappingFunction);
2776 }
2777
2778 /**
2779 * Removes the key (and its corresponding value) from this map.
2780 * This method does nothing if the key is not in the map.
2781 *
2782 * @param key the key that needs to be removed
2783 * @return the previous value associated with {@code key}, or
2784 * {@code null} if there was no mapping for {@code key}
2785 * @throws NullPointerException if the specified key is null
2786 */
2787 @SuppressWarnings("unchecked") public V remove(Object key) {
2788 if (key == null)
2789 throw new NullPointerException();
2790 return (V)internalReplace(key, null, null);
2791 }
2792
2793 /**
2794 * {@inheritDoc}
2795 *
2796 * @throws NullPointerException if the specified key is null
2797 */
2798 public boolean remove(Object key, Object value) {
2799 if (key == null)
2800 throw new NullPointerException();
2801 if (value == null)
2802 return false;
2803 return internalReplace(key, null, value) != null;
2804 }
2805
2806 /**
2807 * {@inheritDoc}
2808 *
2809 * @throws NullPointerException if any of the arguments are null
2810 */
2811 public boolean replace(K key, V oldValue, V newValue) {
2812 if (key == null || oldValue == null || newValue == null)
2813 throw new NullPointerException();
2814 return internalReplace(key, newValue, oldValue) != null;
2815 }
2816
2817 /**
2818 * {@inheritDoc}
2819 *
2820 * @return the previous value associated with the specified key,
2821 * or {@code null} if there was no mapping for the key
2822 * @throws NullPointerException if the specified key or value is null
2823 */
2824 @SuppressWarnings("unchecked") public V replace(K key, V value) {
2825 if (key == null || value == null)
2826 throw new NullPointerException();
2827 return (V)internalReplace(key, value, null);
2828 }
2829
2830 /**
2831 * Removes all of the mappings from this map.
2832 */
2833 public void clear() {
2834 internalClear();
2835 }
2836
2837 /**
2838 * Returns a {@link Set} view of the keys contained in this map.
2839 * The set is backed by the map, so changes to the map are
2840 * reflected in the set, and vice-versa. The set supports element
2841 * removal, which removes the corresponding mapping from this map,
2842 * via the {@code Iterator.remove}, {@code Set.remove},
2843 * {@code removeAll}, {@code retainAll}, and {@code clear}
2844 * operations. It does not support the {@code add} or
2845 * {@code addAll} operations.
2846 *
2847 * <p>The view's {@code iterator} is a "weakly consistent" iterator
2848 * that will never throw {@link ConcurrentModificationException},
2849 * and guarantees to traverse elements as they existed upon
2850 * construction of the iterator, and may (but is not guaranteed to)
2851 * reflect any modifications subsequent to construction.
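     *
     * <p>For example (illustrative), {@code map.keySet().remove(k)} removes
     * the mapping for {@code k} from the underlying map, if present.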
2852 */
2853 public Set<K> keySet() {
2854 KeySet<K,V> ks = keySet;
2855 return (ks != null) ? ks : (keySet = new KeySet<K,V>(this));
2856 }
2857
2858 /**
2859 * Returns a {@link Collection} view of the values contained in this map.
2860 * The collection is backed by the map, so changes to the map are
2861 * reflected in the collection, and vice-versa. The collection
2862 * supports element removal, which removes the corresponding
2863 * mapping from this map, via the {@code Iterator.remove},
2864 * {@code Collection.remove}, {@code removeAll},
2865 * {@code retainAll}, and {@code clear} operations. It does not
2866 * support the {@code add} or {@code addAll} operations.
2867 *
2868 * <p>The view's {@code iterator} is a "weakly consistent" iterator
2869 * that will never throw {@link ConcurrentModificationException},
2870 * and guarantees to traverse elements as they existed upon
2871 * construction of the iterator, and may (but is not guaranteed to)
2872 * reflect any modifications subsequent to construction.
2873 */
2874 public Collection<V> values() {
2875 Values<K,V> vs = values;
2876 return (vs != null) ? vs : (values = new Values<K,V>(this));
2877 }
2878
2879 /**
2880 * Returns a {@link Set} view of the mappings contained in this map.
2881 * The set is backed by the map, so changes to the map are
2882 * reflected in the set, and vice-versa. The set supports element
2883 * removal, which removes the corresponding mapping from the map,
2884 * via the {@code Iterator.remove}, {@code Set.remove},
2885 * {@code removeAll}, {@code retainAll}, and {@code clear}
2886 * operations. It does not support the {@code add} or
2887 * {@code addAll} operations.
2888 *
2889 * <p>The view's {@code iterator} is a "weakly consistent" iterator
2890 * that will never throw {@link ConcurrentModificationException},
2891 * and guarantees to traverse elements as they existed upon
2892 * construction of the iterator, and may (but is not guaranteed to)
2893 * reflect any modifications subsequent to construction.
2894 */
2895 public Set<Map.Entry<K,V>> entrySet() {
2896 EntrySet<K,V> es = entrySet;
2897 return (es != null) ? es : (entrySet = new EntrySet<K,V>(this));
2898 }
2899
2900 /**
2901 * Returns an enumeration of the keys in this table.
2902 *
2903 * @return an enumeration of the keys in this table
2904 * @see #keySet()
2905 */
2906 public Enumeration<K> keys() {
2907 return new KeyIterator<K,V>(this);
2908 }
2909
2910 /**
2911 * Returns an enumeration of the values in this table.
2912 *
2913 * @return an enumeration of the values in this table
2914 * @see #values()
2915 */
2916 public Enumeration<V> elements() {
2917 return new ValueIterator<K,V>(this);
2918 }
2919
2920 /**
2921 * Returns a partitionable iterator of the keys in this map.
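     *
     * <p>A non-normative sketch (assuming the iterator has not yet been
     * advanced, as required by {@code split}):
     * <pre> {@code
     * Spliterator<K> s1 = map.keySpliterator();
     * Spliterator<K> s2 = s1.split(); // s1 and s2 now cover disjoint portions of the keys}</pre>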
2922 *
2923 * @return a partitionable iterator of the keys in this map
2924 */
2925 public Spliterator<K> keySpliterator() {
2926 return new KeyIterator<K,V>(this);
2927 }
2928
2929 /**
2930 * Returns a partitionable iterator of the values in this map.
2931 *
2932 * @return a partitionable iterator of the values in this map
2933 */
2934 public Spliterator<V> valueSpliterator() {
2935 return new ValueIterator<K,V>(this);
2936 }
2937
2938 /**
2939 * Returns a partitionable iterator of the entries in this map.
2940 *
2941 * @return a partitionable iterator of the entries in this map
2942 */
2943 public Spliterator<Map.Entry<K,V>> entrySpliterator() {
2944 return new EntryIterator<K,V>(this);
2945 }
2946
2947 /**
2948 * Returns the hash code value for this {@link Map}, i.e.,
2949 * the sum of, for each key-value pair in the map,
2950 * {@code key.hashCode() ^ value.hashCode()}.
2951 *
2952 * @return the hash code value for this map
2953 */
2954 public int hashCode() {
2955 int h = 0;
2956 Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
2957 Object v;
2958 while ((v = it.advance()) != null) {
2959 h += it.nextKey.hashCode() ^ v.hashCode();
2960 }
2961 return h;
2962 }
2963
2964 /**
2965 * Returns a string representation of this map. The string
2966 * representation consists of a list of key-value mappings (in no
2967 * particular order) enclosed in braces ("{@code {}}"). Adjacent
2968 * mappings are separated by the characters {@code ", "} (comma
2969 * and space). Each key-value mapping is rendered as the key
2970 * followed by an equals sign ("{@code =}") followed by the
2971 * associated value.
2972 *
2973 * @return a string representation of this map
2974 */
2975 public String toString() {
2976 Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
2977 StringBuilder sb = new StringBuilder();
2978 sb.append('{');
2979 Object v;
2980 if ((v = it.advance()) != null) {
2981 for (;;) {
2982 Object k = it.nextKey;
2983 sb.append(k == this ? "(this Map)" : k);
2984 sb.append('=');
2985 sb.append(v == this ? "(this Map)" : v);
2986 if ((v = it.advance()) == null)
2987 break;
2988 sb.append(',').append(' ');
2989 }
2990 }
2991 return sb.append('}').toString();
2992 }
2993
2994 /**
2995 * Compares the specified object with this map for equality.
2996 * Returns {@code true} if the given object is a map with the same
2997 * mappings as this map. This operation may return misleading
2998 * results if either map is concurrently modified during execution
2999 * of this method.
3000 *
3001 * @param o object to be compared for equality with this map
3002 * @return {@code true} if the specified object is equal to this map
3003 */
3004 public boolean equals(Object o) {
3005 if (o != this) {
3006 if (!(o instanceof Map))
3007 return false;
3008 Map<?,?> m = (Map<?,?>) o;
3009 Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3010 Object val;
3011 while ((val = it.advance()) != null) {
3012 Object v = m.get(it.nextKey);
3013 if (v == null || (v != val && !v.equals(val)))
3014 return false;
3015 }
3016 for (Map.Entry<?,?> e : m.entrySet()) {
3017 Object mk, mv, v;
3018 if ((mk = e.getKey()) == null ||
3019 (mv = e.getValue()) == null ||
3020 (v = internalGet(mk)) == null ||
3021 (mv != v && !mv.equals(v)))
3022 return false;
3023 }
3024 }
3025 return true;
3026 }
3027
3028 /* ----------------Iterators -------------- */
3029
3030 @SuppressWarnings("serial") static final class KeyIterator<K,V> extends Traverser<K,V,Object>
3031 implements Spliterator<K>, Enumeration<K> {
3032 KeyIterator(ConcurrentHashMapV8<K, V> map) { super(map); }
3033 KeyIterator(Traverser<K,V,Object> it) {
3034 super(it);
3035 }
3036 public KeyIterator<K,V> split() {
3037 if (last != null || (next != null && nextVal == null))
3038 throw new IllegalStateException();
3039 return new KeyIterator<K,V>(this);
3040 }
3041 @SuppressWarnings("unchecked") public final K next() {
3042 if (nextVal == null && advance() == null)
3043 throw new NoSuchElementException();
3044 Object k = nextKey;
3045 nextVal = null;
3046 return (K) k;
3047 }
3048
3049 public final K nextElement() { return next(); }
3050 }
3051
3052 @SuppressWarnings("serial") static final class ValueIterator<K,V> extends Traverser<K,V,Object>
3053 implements Spliterator<V>, Enumeration<V> {
3054 ValueIterator(ConcurrentHashMapV8<K, V> map) { super(map); }
3055 ValueIterator(Traverser<K,V,Object> it) {
3056 super(it);
3057 }
3058 public ValueIterator<K,V> split() {
3059 if (last != null || (next != null && nextVal == null))
3060 throw new IllegalStateException();
3061 return new ValueIterator<K,V>(this);
3062 }
3063
3064 @SuppressWarnings("unchecked") public final V next() {
3065 Object v;
3066 if ((v = nextVal) == null && (v = advance()) == null)
3067 throw new NoSuchElementException();
3068 nextVal = null;
3069 return (V) v;
3070 }
3071
3072 public final V nextElement() { return next(); }
3073 }
3074
3075 @SuppressWarnings("serial") static final class EntryIterator<K,V> extends Traverser<K,V,Object>
3076 implements Spliterator<Map.Entry<K,V>> {
3077 EntryIterator(ConcurrentHashMapV8<K, V> map) { super(map); }
3078 EntryIterator(Traverser<K,V,Object> it) {
3079 super(it);
3080 }
3081 public EntryIterator<K,V> split() {
3082 if (last != null || (next != null && nextVal == null))
3083 throw new IllegalStateException();
3084 return new EntryIterator<K,V>(this);
3085 }
3086
3087 @SuppressWarnings("unchecked") public final Map.Entry<K,V> next() {
3088 Object v;
3089 if ((v = nextVal) == null && (v = advance()) == null)
3090 throw new NoSuchElementException();
3091 Object k = nextKey;
3092 nextVal = null;
3093 return new MapEntry<K,V>((K)k, (V)v, map);
3094 }
3095 }
3096
3097 /**
3098 * Exported Entry for iterators
3099 */
3100 static final class MapEntry<K,V> implements Map.Entry<K, V> {
3101 final K key; // non-null
3102 V val; // non-null
3103 final ConcurrentHashMapV8<K, V> map;
3104 MapEntry(K key, V val, ConcurrentHashMapV8<K, V> map) {
3105 this.key = key;
3106 this.val = val;
3107 this.map = map;
3108 }
3109 public final K getKey() { return key; }
3110 public final V getValue() { return val; }
3111 public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
3112         public final String toString() { return key + "=" + val; }
3113
3114 public final boolean equals(Object o) {
3115 Object k, v; Map.Entry<?,?> e;
3116 return ((o instanceof Map.Entry) &&
3117 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3118 (v = e.getValue()) != null &&
3119 (k == key || k.equals(key)) &&
3120 (v == val || v.equals(val)));
3121 }
3122
3123 /**
3124 * Sets our entry's value and writes through to the map. The
3125 * value to return is somewhat arbitrary here. Since we do not
3126 * necessarily track asynchronous changes, the most recent
3127 * "previous" value could be different from what we return (or
3128 * could even have been removed in which case the put will
3129          * re-establish it). We do not and cannot guarantee more.
3130 */
3131 public final V setValue(V value) {
3132 if (value == null) throw new NullPointerException();
3133 V v = val;
3134 val = value;
3135 map.put(key, value);
3136 return v;
3137 }
3138 }
3139
3140 /* ----------------Views -------------- */
3141
3142 /**
3143 * Base class for views.
3144 */
3145 static abstract class CHMView<K, V> {
3146 final ConcurrentHashMapV8<K, V> map;
3147 CHMView(ConcurrentHashMapV8<K, V> map) { this.map = map; }
3148 public final int size() { return map.size(); }
3149 public final boolean isEmpty() { return map.isEmpty(); }
3150 public final void clear() { map.clear(); }
3151
3152 // implementations below rely on concrete classes supplying these
3153 abstract public Iterator<?> iterator();
3154 abstract public boolean contains(Object o);
3155 abstract public boolean remove(Object o);
3156
3157 private static final String oomeMsg = "Required array size too large";
3158
3159 public final Object[] toArray() {
3160 long sz = map.mappingCount();
3161 if (sz > (long)(MAX_ARRAY_SIZE))
3162 throw new OutOfMemoryError(oomeMsg);
3163 int n = (int)sz;
3164 Object[] r = new Object[n];
3165 int i = 0;
3166 Iterator<?> it = iterator();
3167 while (it.hasNext()) {
3168 if (i == n) {
3169 if (n >= MAX_ARRAY_SIZE)
3170 throw new OutOfMemoryError(oomeMsg);
3171 if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
3172 n = MAX_ARRAY_SIZE;
3173 else
3174 n += (n >>> 1) + 1;
3175 r = Arrays.copyOf(r, n);
3176 }
3177 r[i++] = it.next();
3178 }
3179 return (i == n) ? r : Arrays.copyOf(r, i);
3180 }
3181
3182 @SuppressWarnings("unchecked") public final <T> T[] toArray(T[] a) {
3183 long sz = map.mappingCount();
3184 if (sz > (long)(MAX_ARRAY_SIZE))
3185 throw new OutOfMemoryError(oomeMsg);
3186 int m = (int)sz;
3187 T[] r = (a.length >= m) ? a :
3188 (T[])java.lang.reflect.Array
3189 .newInstance(a.getClass().getComponentType(), m);
3190 int n = r.length;
3191 int i = 0;
3192 Iterator<?> it = iterator();
3193 while (it.hasNext()) {
3194 if (i == n) {
3195 if (n >= MAX_ARRAY_SIZE)
3196 throw new OutOfMemoryError(oomeMsg);
3197 if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
3198 n = MAX_ARRAY_SIZE;
3199 else
3200 n += (n >>> 1) + 1;
3201 r = Arrays.copyOf(r, n);
3202 }
3203 r[i++] = (T)it.next();
3204 }
3205 if (a == r && i < n) {
3206 r[i] = null; // null-terminate
3207 return r;
3208 }
3209 return (i == n) ? r : Arrays.copyOf(r, i);
3210 }
3211
3212 public final int hashCode() {
3213 int h = 0;
3214 for (Iterator<?> it = iterator(); it.hasNext();)
3215 h += it.next().hashCode();
3216 return h;
3217 }
3218
3219 public final String toString() {
3220 StringBuilder sb = new StringBuilder();
3221 sb.append('[');
3222 Iterator<?> it = iterator();
3223 if (it.hasNext()) {
3224 for (;;) {
3225 Object e = it.next();
3226 sb.append(e == this ? "(this Collection)" : e);
3227 if (!it.hasNext())
3228 break;
3229 sb.append(',').append(' ');
3230 }
3231 }
3232 return sb.append(']').toString();
3233 }
3234
3235 public final boolean containsAll(Collection<?> c) {
3236 if (c != this) {
3237 for (Iterator<?> it = c.iterator(); it.hasNext();) {
3238 Object e = it.next();
3239 if (e == null || !contains(e))
3240 return false;
3241 }
3242 }
3243 return true;
3244 }
3245
3246 public final boolean removeAll(Collection<?> c) {
3247 boolean modified = false;
3248 for (Iterator<?> it = iterator(); it.hasNext();) {
3249 if (c.contains(it.next())) {
3250 it.remove();
3251 modified = true;
3252 }
3253 }
3254 return modified;
3255 }
3256
3257 public final boolean retainAll(Collection<?> c) {
3258 boolean modified = false;
3259 for (Iterator<?> it = iterator(); it.hasNext();) {
3260 if (!c.contains(it.next())) {
3261 it.remove();
3262 modified = true;
3263 }
3264 }
3265 return modified;
3266 }
3267
3268 }
3269
3270 static final class KeySet<K,V> extends CHMView<K,V> implements Set<K> {
3271 KeySet(ConcurrentHashMapV8<K, V> map) {
3272 super(map);
3273 }
3274 public final boolean contains(Object o) { return map.containsKey(o); }
3275 public final boolean remove(Object o) { return map.remove(o) != null; }
3276 public final Iterator<K> iterator() {
3277 return new KeyIterator<K,V>(map);
3278 }
3279 public final boolean add(K e) {
3280 throw new UnsupportedOperationException();
3281 }
3282 public final boolean addAll(Collection<? extends K> c) {
3283 throw new UnsupportedOperationException();
3284 }
3285 public boolean equals(Object o) {
3286 Set<?> c;
3287 return ((o instanceof Set) &&
3288 ((c = (Set<?>)o) == this ||
3289 (containsAll(c) && c.containsAll(this))));
3290 }
3291 }
3292
3293
3294 static final class Values<K,V> extends CHMView<K,V>
3295 implements Collection<V> {
3296 Values(ConcurrentHashMapV8<K, V> map) { super(map); }
3297 public final boolean contains(Object o) { return map.containsValue(o); }
3298 public final boolean remove(Object o) {
3299 if (o != null) {
3300 Iterator<V> it = new ValueIterator<K,V>(map);
3301 while (it.hasNext()) {
3302 if (o.equals(it.next())) {
3303 it.remove();
3304 return true;
3305 }
3306 }
3307 }
3308 return false;
3309 }
3310 public final Iterator<V> iterator() {
3311 return new ValueIterator<K,V>(map);
3312 }
3313 public final boolean add(V e) {
3314 throw new UnsupportedOperationException();
3315 }
3316 public final boolean addAll(Collection<? extends V> c) {
3317 throw new UnsupportedOperationException();
3318 }
3319
3320 }
3321
3322 static final class EntrySet<K,V> extends CHMView<K,V>
3323 implements Set<Map.Entry<K,V>> {
3324 EntrySet(ConcurrentHashMapV8<K, V> map) { super(map); }
3325 public final boolean contains(Object o) {
3326 Object k, v, r; Map.Entry<?,?> e;
3327 return ((o instanceof Map.Entry) &&
3328 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3329 (r = map.get(k)) != null &&
3330 (v = e.getValue()) != null &&
3331 (v == r || v.equals(r)));
3332 }
3333 public final boolean remove(Object o) {
3334 Object k, v; Map.Entry<?,?> e;
3335 return ((o instanceof Map.Entry) &&
3336 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3337 (v = e.getValue()) != null &&
3338 map.remove(k, v));
3339 }
3340 public final Iterator<Map.Entry<K,V>> iterator() {
3341 return new EntryIterator<K,V>(map);
3342 }
3343 public final boolean add(Entry<K,V> e) {
3344 throw new UnsupportedOperationException();
3345 }
3346 public final boolean addAll(Collection<? extends Entry<K,V>> c) {
3347 throw new UnsupportedOperationException();
3348 }
3349 public boolean equals(Object o) {
3350 Set<?> c;
3351 return ((o instanceof Set) &&
3352 ((c = (Set<?>)o) == this ||
3353 (containsAll(c) && c.containsAll(this))));
3354 }
3355 }
3356
3357 /* ---------------- Serialization Support -------------- */
3358
3359 /**
3360 * Stripped-down version of helper class used in previous version,
3361 * declared for the sake of serialization compatibility
3362 */
3363 static class Segment<K,V> implements Serializable {
3364 private static final long serialVersionUID = 2249069246763182397L;
3365 final float loadFactor;
3366 Segment(float lf) { this.loadFactor = lf; }
3367 }
3368
3369 /**
3370 * Saves the state of the {@code ConcurrentHashMapV8} instance to a
3371 * stream (i.e., serializes it).
3372 * @param s the stream
3373 * @serialData
3374 * the key (Object) and value (Object)
3375 * for each key-value mapping, followed by a null pair.
3376 * The key-value mappings are emitted in no particular order.
3377 */
3378 @SuppressWarnings("unchecked") private void writeObject(java.io.ObjectOutputStream s)
3379 throws java.io.IOException {
3380 if (segments == null) { // for serialization compatibility
3381 segments = (Segment<K,V>[])
3382 new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
3383 for (int i = 0; i < segments.length; ++i)
3384 segments[i] = new Segment<K,V>(LOAD_FACTOR);
3385 }
3386 s.defaultWriteObject();
3387 Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3388 Object v;
3389 while ((v = it.advance()) != null) {
3390 s.writeObject(it.nextKey);
3391 s.writeObject(v);
3392 }
3393 s.writeObject(null);
3394 s.writeObject(null);
3395 segments = null; // throw away
3396 }
3397
3398 /**
3399 * Reconstitutes the instance from a stream (that is, deserializes it).
3400 * @param s the stream
3401 */
3402 @SuppressWarnings("unchecked") private void readObject(java.io.ObjectInputStream s)
3403 throws java.io.IOException, ClassNotFoundException {
3404 s.defaultReadObject();
3405 this.segments = null; // unneeded
3406 // initialize transient final field
3407 UNSAFE.putObjectVolatile(this, counterOffset, new LongAdder());
3408
3409 // Create all nodes, then place in table once size is known
3410 long size = 0L;
3411 Node p = null;
3412 for (;;) {
3413 K k = (K) s.readObject();
3414 V v = (V) s.readObject();
3415 if (k != null && v != null) {
3416 int h = spread(k.hashCode());
3417 p = new Node(h, k, v, p);
3418 ++size;
3419 }
3420 else
3421 break;
3422 }
3423 if (p != null) {
3424 boolean init = false;
3425 int n;
3426 if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
3427 n = MAXIMUM_CAPACITY;
3428 else {
3429 int sz = (int)size;
3430 n = tableSizeFor(sz + (sz >>> 1) + 1);
3431 }
3432 int sc = sizeCtl;
3433 boolean collide = false;
3434 if (n > sc &&
3435 UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
3436 try {
3437 if (table == null) {
3438 init = true;
3439 Node[] tab = new Node[n];
3440 int mask = n - 1;
3441 while (p != null) {
3442 int j = p.hash & mask;
3443 Node next = p.next;
3444 Node q = p.next = tabAt(tab, j);
3445 setTabAt(tab, j, p);
3446 if (!collide && q != null && q.hash == p.hash)
3447 collide = true;
3448 p = next;
3449 }
3450 table = tab;
3451 counter.add(size);
3452 sc = n - (n >>> 2);
3453 }
3454 } finally {
3455 sizeCtl = sc;
3456 }
3457 if (collide) { // rescan and convert to TreeBins
3458 Node[] tab = table;
3459 for (int i = 0; i < tab.length; ++i) {
3460 int c = 0;
3461 for (Node e = tabAt(tab, i); e != null; e = e.next) {
3462 if (++c > TREE_THRESHOLD &&
3463 (e.key instanceof Comparable)) {
3464 replaceWithTreeBin(tab, i, e.key);
3465 break;
3466 }
3467 }
3468 }
3469 }
3470 }
3471 if (!init) { // Can only happen if unsafely published.
3472 while (p != null) {
3473 internalPut(p.key, p.val);
3474 p = p.next;
3475 }
3476 }
3477 }
3478 }
3479
3480
3481 // -------------------------------------------------------
3482
3483 // Sams (single abstract method interfaces)
3484 /** Interface describing a void action of one argument */
3485 public interface Action<A> { void apply(A a); }
3486 /** Interface describing a void action of two arguments */
3487 public interface BiAction<A,B> { void apply(A a, B b); }
3488 /** Interface describing a function of one argument */
3489 public interface Fun<A,T> { T apply(A a); }
3490 /** Interface describing a function of two arguments */
3491 public interface BiFun<A,B,T> { T apply(A a, B b); }
3492 /** Interface describing a function of no arguments */
3493 public interface Generator<T> { T apply(); }
3494 /** Interface describing a function mapping its argument to a double */
3495 public interface ObjectToDouble<A> { double apply(A a); }
3496 /** Interface describing a function mapping its argument to a long */
3497 public interface ObjectToLong<A> { long apply(A a); }
3498 /** Interface describing a function mapping its argument to an int */
3499 public interface ObjectToInt<A> { int apply(A a); }
3500 /** Interface describing a function mapping two arguments to a double */
3501 public interface ObjectByObjectToDouble<A,B> { double apply(A a, B b); }
3502 /** Interface describing a function mapping two arguments to a long */
3503 public interface ObjectByObjectToLong<A,B> { long apply(A a, B b); }
3504 /** Interface describing a function mapping two arguments to an int */
3505 public interface ObjectByObjectToInt<A,B> { int apply(A a, B b); }
3506 /** Interface describing a function mapping a double to a double */
3507 public interface DoubleToDouble { double apply(double a); }
3508 /** Interface describing a function mapping a long to a long */
3509 public interface LongToLong { long apply(long a); }
3510 /** Interface describing a function mapping an int to an int */
3511 public interface IntToInt { int apply(int a); }
3512 /** Interface describing a function mapping two doubles to a double */
3513 public interface DoubleByDoubleToDouble { double apply(double a, double b); }
3514 /** Interface describing a function mapping two longs to a long */
3515 public interface LongByLongToLong { long apply(long a, long b); }
3516 /** Interface describing a function mapping two ints to an int */
3517 public interface IntByIntToInt { int apply(int a, int b); }
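
// Usage sketch (the type parameters and variable name below are chosen for
// illustration only): pending JDK8 lambdas, these single-abstract-method
// interfaces are implemented with anonymous classes, for example:
//
//   Fun<String,Integer> length = new Fun<String,Integer>() {
//       public Integer apply(String s) { return s.length(); }
//   };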
3518
3519
3520 // -------------------------------------------------------
3521
3522 /**
3523 * Returns an extended {@link Parallel} view of this map using the
3524 * given executor for bulk parallel operations.
3525 *
3526 * @param executor the executor
3527 * @return a parallel view
3528 */
3529 public Parallel parallel(ForkJoinPool executor) {
3530 return new Parallel(executor);
3531 }
3532
3533 /**
3534 * An extended view of a ConcurrentHashMap supporting bulk
3535 * parallel operations. These operations are designed to be
3536 * safely, and often sensibly, applied even with maps that are
3537 * being concurrently updated by other threads; for example, when
3538 * computing a snapshot summary of the values in a shared
3539 * registry. There are three kinds of operation, each with four
3540 * forms, accepting functions with Keys, Values, Entries, and
3541 * (Key, Value) arguments and/or return values. Because the
3542 * elements of a ConcurrentHashMap are not ordered in any
3543 * particular way, and may be processed in different orders in
3544 * different parallel executions, the correctness of supplied
3545 * functions should not depend on any ordering, or on any other
3546 * objects or values that may transiently change while computation
3547 * is in progress; and except for forEach actions, should ideally
3548 * be side-effect-free.
3549 *
3550 * <ul>
3551 * <li> forEach: Perform a given action on each element.
3552 * A variant form applies a given transformation on each element
3553 * before performing the action.</li>
3554 *
3555 * <li> search: Return the first available non-null result of
3556 * applying a given function on each element; skipping further
3557 * search when a result is found.</li>
3558 *
3559 * <li> reduce: Accumulate each element. The supplied reduction
3560 * function cannot rely on ordering (more formally, it should be
3561 * both associative and commutative). There are five variants:
3562 *
3563 * <ul>
3564 *
3565 * <li> Plain reductions. (There is not a form of this method for
3566 * (key, value) function arguments since there is no corresponding
3567 * return type.)</li>
3568 *
3569 * <li> Mapped reductions that accumulate the results of a given
3570 * function applied to each element.</li>
3571 *
3572 * <li> Reductions to scalar doubles, longs, and ints, using a
3573 * given basis value.</li>
3574 *
3575 * </ul>
3576 * </li>
3577 * </ul>
3578 *
3579 * <p>The concurrency properties of the bulk operations follow
3580 * from those of ConcurrentHashMap: Any non-null result returned
3581 * from {@code get(key)} and related access methods bears a
3582 * happens-before relation with the associated insertion or
3583 * update. The result of any bulk operation reflects the
3584 * composition of these per-element relations (but is not
3585 * necessarily atomic with respect to the map as a whole unless it
3586 * is somehow known to be quiescent). Conversely, because keys
3587 * and values in the map are never null, null serves as a reliable
3588 * atomic indicator of the current lack of any result. To
3589 * maintain this property, null serves as an implicit basis for
3590 * all non-scalar reduction operations. For the double, long, and
3591 * int versions, the basis should be one that, when combined with
3592 * any other value, returns that other value (more formally, it
3593 * should be the identity element for the reduction). Most common
3594 * reductions have these properties; for example, computing a sum
3595 * with basis 0 or a minimum with basis MAX_VALUE.
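 *
 * <p>As an illustrative sketch (assuming, hypothetically, a
 * {@code ConcurrentHashMapV8<String,Long> map} and a
 * {@code ForkJoinPool pool}), summing all values with basis 0:
 * <pre> {@code
 * long sum = map.parallel(pool).reduceValuesToLong(
 *     new ObjectToLong<Long>() {
 *         public long apply(Long v) { return v.longValue(); } },
 *     0L,
 *     new LongByLongToLong() {
 *         public long apply(long a, long b) { return a + b; } });
 * }</pre>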
3596 *
3597 * <p>Search and transformation functions provided as arguments
3598 * should similarly return null to indicate the lack of any result
3599 * (in which case it is not used). In the case of mapped
3600 * reductions, this also enables transformations to serve as
3601 * filters, returning null (or, in the case of primitive
3602 * specializations, the identity basis) if the element should not
3603 * be combined. You can create compound transformations and
3604 * filterings by composing them yourself under this "null means
3605 * there is nothing there now" rule before using them in search or
3606 * reduce operations.
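 *
 * <p>For example (again using the hypothetical {@code map} and
 * {@code pool} above), a search function that also filters, returning
 * null for elements that should be skipped:
 * <pre> {@code
 * Long big = map.parallel(pool).searchValues(
 *     new Fun<Long,Long>() {
 *         public Long apply(Long v) {
 *             return v.longValue() > 100L ? v : null; // null: keep searching
 *         }});
 * }</pre>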
3607 *
3608 * <p>Methods accepting and/or returning Entry arguments maintain
3609 * key-value associations. They may be useful for example when
3610 * finding the key for the greatest value. Note that "plain" Entry
3611 * arguments can be supplied using {@code new
3612 * AbstractMap.SimpleEntry(k,v)}.
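 *
 * <p>For instance (still with the hypothetical {@code map} and
 * {@code pool}), an entry holding a greatest value can be obtained by
 * reducing entries:
 * <pre> {@code
 * Map.Entry<String,Long> max = map.parallel(pool).reduceEntries(
 *     new BiFun<Map.Entry<String,Long>, Map.Entry<String,Long>,
 *               Map.Entry<String,Long>>() {
 *         public Map.Entry<String,Long> apply(Map.Entry<String,Long> a,
 *                                             Map.Entry<String,Long> b) {
 *             return a.getValue() >= b.getValue() ? a : b;
 *         }});
 * }</pre>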
3613 *
3614 * <p> Bulk operations may complete abruptly, throwing an
3615 * exception encountered in the application of a supplied
3616 * function. Bear in mind when handling such exceptions that other
3617 * concurrently executing functions could also have thrown
3618 * exceptions, or would have done so if the first exception had
3619 * not occurred.
3620 *
3621 * <p>Parallel speedups compared to sequential processing are
3622 * common but not guaranteed. Operations involving brief
3623 * functions on small maps may execute more slowly than sequential
3624 * loops if the underlying work to parallelize the computation is
3625 * more expensive than the computation itself. Similarly,
3626 * parallelization may not lead to much actual parallelism if all
3627 * processors are busy performing unrelated tasks.
3628 *
3629 * <p> All arguments to all task methods must be non-null.
3630 *
3631 * <p><em>jsr166e note: During transition, this class
3632 * uses nested functional interfaces with different names but the
3633 * same forms as those expected for JDK8.</em>
3634 */
3635 public class Parallel {
3636 final ForkJoinPool fjp;
3637
3638 /**
3639 * Creates an extended view of this map using the given
3640 * executor for bulk parallel operations.
3641 *
3642 * @param executor the executor
3643 */
3644 public Parallel(ForkJoinPool executor) {
3645 this.fjp = executor;
3646 }
3647
3648 /**
3649 * Performs the given action for each (key, value).
3650 *
3651 * @param action the action
3652 */
3653 public void forEach(BiAction<K,V> action) {
3654 fjp.invoke(ForkJoinTasks.forEach
3655 (ConcurrentHashMapV8.this, action));
3656 }
3657
3658 /**
3659 * Performs the given action for each non-null transformation
3660 * of each (key, value).
3661 *
3662 * @param transformer a function returning the transformation
3663 * for an element, or null if there is no transformation (in
3664 * which case the action is not applied).
3665 * @param action the action
3666 */
3667 public <U> void forEach(BiFun<? super K, ? super V, ? extends U> transformer,
3668 Action<U> action) {
3669 fjp.invoke(ForkJoinTasks.forEach
3670 (ConcurrentHashMapV8.this, transformer, action));
3671 }
3672
3673 /**
3674 * Returns a non-null result from applying the given search
3675 * function on each (key, value), or null if none. Upon
3676 * success, further element processing is suppressed and the
3677 * results of any other parallel invocations of the search
3678 * function are ignored.
3679 *
3680 * @param searchFunction a function returning a non-null
3681 * result on success, else null
3682 * @return a non-null result from applying the given search
3683 * function on each (key, value), or null if none
3684 */
3685 public <U> U search(BiFun<? super K, ? super V, ? extends U> searchFunction) {
3686 return fjp.invoke(ForkJoinTasks.search
3687 (ConcurrentHashMapV8.this, searchFunction));
3688 }
3689
3690 /**
3691 * Returns the result of accumulating the given transformation
3692 * of all (key, value) pairs using the given reducer to
3693 * combine values, or null if none.
3694 *
3695 * @param transformer a function returning the transformation
3696 * for an element, or null if there is no transformation (in
3697 * which case it is not combined).
3698 * @param reducer a commutative associative combining function
3699 * @return the result of accumulating the given transformation
3700 * of all (key, value) pairs
3701 */
3702 public <U> U reduce(BiFun<? super K, ? super V, ? extends U> transformer,
3703 BiFun<? super U, ? super U, ? extends U> reducer) {
3704 return fjp.invoke(ForkJoinTasks.reduce
3705 (ConcurrentHashMapV8.this, transformer, reducer));
3706 }
3707
3708 /**
3709 * Returns the result of accumulating the given transformation
3710 * of all (key, value) pairs using the given reducer to
3711 * combine values, and the given basis as an identity value.
3712 *
3713 * @param transformer a function returning the transformation
3714 * for an element
3715 * @param basis the identity (initial default value) for the reduction
3716 * @param reducer a commutative associative combining function
3717 * @return the result of accumulating the given transformation
3718 * of all (key, value) pairs
3719 */
3720 public double reduceToDouble(ObjectByObjectToDouble<? super K, ? super V> transformer,
3721 double basis,
3722 DoubleByDoubleToDouble reducer) {
3723 return fjp.invoke(ForkJoinTasks.reduceToDouble
3724 (ConcurrentHashMapV8.this, transformer, basis, reducer));
3725 }
3726
3727 /**
3728 * Returns the result of accumulating the given transformation
3729 * of all (key, value) pairs using the given reducer to
3730 * combine values, and the given basis as an identity value.
3731 *
3732 * @param transformer a function returning the transformation
3733 * for an element
3734 * @param basis the identity (initial default value) for the reduction
3735 * @param reducer a commutative associative combining function
3736 * @return the result of accumulating the given transformation
3737 * of all (key, value) pairs
3738 */
3739 public long reduceToLong(ObjectByObjectToLong<? super K, ? super V> transformer,
3740 long basis,
3741 LongByLongToLong reducer) {
3742 return fjp.invoke(ForkJoinTasks.reduceToLong
3743 (ConcurrentHashMapV8.this, transformer, basis, reducer));
3744 }
3745
3746 /**
3747 * Returns the result of accumulating the given transformation
3748 * of all (key, value) pairs using the given reducer to
3749 * combine values, and the given basis as an identity value.
3750 *
3751 * @param transformer a function returning the transformation
3752 * for an element
3753 * @param basis the identity (initial default value) for the reduction
3754 * @param reducer a commutative associative combining function
3755 * @return the result of accumulating the given transformation
3756 * of all (key, value) pairs
3757 */
3758 public int reduceToInt(ObjectByObjectToInt<? super K, ? super V> transformer,
3759 int basis,
3760 IntByIntToInt reducer) {
3761 return fjp.invoke(ForkJoinTasks.reduceToInt
3762 (ConcurrentHashMapV8.this, transformer, basis, reducer));
3763 }
3764
3765 /**
3766 * Performs the given action for each key.
3767 *
3768 * @param action the action
3769 */
3770 public void forEachKey(Action<K> action) {
3771 fjp.invoke(ForkJoinTasks.forEachKey
3772 (ConcurrentHashMapV8.this, action));
3773 }
3774
3775 /**
3776 * Performs the given action for each non-null transformation
3777 * of each key.
3778 *
3779 * @param transformer a function returning the transformation
3780 * for an element, or null if there is no transformation (in
3781 * which case the action is not applied).
3782 * @param action the action
3783 */
3784 public <U> void forEachKey(Fun<? super K, ? extends U> transformer,
3785 Action<U> action) {
3786 fjp.invoke(ForkJoinTasks.forEachKey
3787 (ConcurrentHashMapV8.this, transformer, action));
3788 }
3789
3790 /**
3791 * Returns a non-null result from applying the given search
3792 * function on each key, or null if none. Upon success,
3793 * further element processing is suppressed and the results of
3794 * any other parallel invocations of the search function are
3795 * ignored.
3796 *
3797 * @param searchFunction a function returning a non-null
3798 * result on success, else null
3799 * @return a non-null result from applying the given search
3800 * function on each key, or null if none
3801 */
3802 public <U> U searchKeys(Fun<? super K, ? extends U> searchFunction) {
3803 return fjp.invoke(ForkJoinTasks.searchKeys
3804 (ConcurrentHashMapV8.this, searchFunction));
3805 }
3806
3807 /**
3808 * Returns the result of accumulating all keys using the given
3809 * reducer to combine values, or null if none.
3810 *
3811 * @param reducer a commutative associative combining function
3812 * @return the result of accumulating all keys using the given
3813 * reducer to combine values, or null if none
3814 */
3815 public K reduceKeys(BiFun<? super K, ? super K, ? extends K> reducer) {
3816 return fjp.invoke(ForkJoinTasks.reduceKeys
3817 (ConcurrentHashMapV8.this, reducer));
3818 }
3819
3820 /**
3821 * Returns the result of accumulating the given transformation
3822 * of all keys using the given reducer to combine values, or
3823 * null if none.
3824 *
3825 * @param transformer a function returning the transformation
3826 * for an element, or null if there is no transformation (in
3827 * which case it is not combined).
3828 * @param reducer a commutative associative combining function
3829 * @return the result of accumulating the given transformation
3830 * of all keys
3831 */
3832 public <U> U reduceKeys(Fun<? super K, ? extends U> transformer,
3833 BiFun<? super U, ? super U, ? extends U> reducer) {
3834 return fjp.invoke(ForkJoinTasks.reduceKeys
3835 (ConcurrentHashMapV8.this, transformer, reducer));
3836 }
3837
3838 /**
3839 * Returns the result of accumulating the given transformation
3840 * of all keys using the given reducer to combine values, and
3841 * the given basis as an identity value.
3842 *
3843 * @param transformer a function returning the transformation
3844 * for an element
3845 * @param basis the identity (initial default value) for the reduction
3846 * @param reducer a commutative associative combining function
3847 * @return the result of accumulating the given transformation
3848 * of all keys
3849 */
3850 public double reduceKeysToDouble(ObjectToDouble<? super K> transformer,
3851 double basis,
3852 DoubleByDoubleToDouble reducer) {
3853 return fjp.invoke(ForkJoinTasks.reduceKeysToDouble
3854 (ConcurrentHashMapV8.this, transformer, basis, reducer));
3855 }
3856
3857 /**
3858 * Returns the result of accumulating the given transformation
3859 * of all keys using the given reducer to combine values, and
3860 * the given basis as an identity value.
3861 *
3862 * @param transformer a function returning the transformation
3863 * for an element
3864 * @param basis the identity (initial default value) for the reduction
3865 * @param reducer a commutative associative combining function
3866 * @return the result of accumulating the given transformation
3867 * of all keys
3868 */
3869 public long reduceKeysToLong(ObjectToLong<? super K> transformer,
3870 long basis,
3871 LongByLongToLong reducer) {
3872 return fjp.invoke(ForkJoinTasks.reduceKeysToLong
3873 (ConcurrentHashMapV8.this, transformer, basis, reducer));
3874 }
3875
3876 /**
3877 * Returns the result of accumulating the given transformation
3878 * of all keys using the given reducer to combine values, and
3879 * the given basis as an identity value.
3880 *
3881 * @param transformer a function returning the transformation
3882 * for an element
3883 * @param basis the identity (initial default value) for the reduction
3884 * @param reducer a commutative associative combining function
3885 * @return the result of accumulating the given transformation
3886 * of all keys
3887 */
3888 public int reduceKeysToInt(ObjectToInt<? super K> transformer,
3889 int basis,
3890 IntByIntToInt reducer) {
3891 return fjp.invoke(ForkJoinTasks.reduceKeysToInt
3892 (ConcurrentHashMapV8.this, transformer, basis, reducer));
3893 }
3894
3895 /**
3896 * Performs the given action for each value.
3897 *
3898 * @param action the action
3899 */
3900 public void forEachValue(Action<V> action) {
3901 fjp.invoke(ForkJoinTasks.forEachValue
3902 (ConcurrentHashMapV8.this, action));
3903 }
3904
3905 /**
3906 * Performs the given action for each non-null transformation
3907 * of each value.
3908 *
3909 * @param transformer a function returning the transformation
3910 * for an element, or null if there is no transformation (in
3911 * which case the action is not applied).
* @param action the action
3912 */
3913 public <U> void forEachValue(Fun<? super V, ? extends U> transformer,
3914 Action<U> action) {
3915 fjp.invoke(ForkJoinTasks.forEachValue
3916 (ConcurrentHashMapV8.this, transformer, action));
3917 }
3918
3919 /**
3920 * Returns a non-null result from applying the given search
3921 * function on each value, or null if none. Upon success,
3922 * further element processing is suppressed and the results of
3923 * any other parallel invocations of the search function are
3924 * ignored.
3925 *
3926 * @param searchFunction a function returning a non-null
3927 * result on success, else null
3928 * @return a non-null result from applying the given search
3929 * function on each value, or null if none
3930 *
3931 */
3932 public <U> U searchValues(Fun<? super V, ? extends U> searchFunction) {
3933 return fjp.invoke(ForkJoinTasks.searchValues
3934 (ConcurrentHashMapV8.this, searchFunction));
3935 }
3936
3937 /**
3938 * Returns the result of accumulating all values using the
3939 * given reducer to combine values, or null if none.
3940 *
3941 * @param reducer a commutative associative combining function
3942 * @return the result of accumulating all values
3943 */
3944 public V reduceValues(BiFun<? super V, ? super V, ? extends V> reducer) {
3945 return fjp.invoke(ForkJoinTasks.reduceValues
3946 (ConcurrentHashMapV8.this, reducer));
3947 }
3948
3949 /**
3950 * Returns the result of accumulating the given transformation
3951 * of all values using the given reducer to combine values, or
3952 * null if none.
3953 *
3954 * @param transformer a function returning the transformation
3955 * for an element, or null if there is no transformation (in
3956 * which case it is not combined).
3957 * @param reducer a commutative associative combining function
3958 * @return the result of accumulating the given transformation
3959 * of all values
3960 */
3961 public <U> U reduceValues(Fun<? super V, ? extends U> transformer,
3962 BiFun<? super U, ? super U, ? extends U> reducer) {
3963 return fjp.invoke(ForkJoinTasks.reduceValues
3964 (ConcurrentHashMapV8.this, transformer, reducer));
3965 }
3966
3967 /**
3968 * Returns the result of accumulating the given transformation
3969 * of all values using the given reducer to combine values,
3970 * and the given basis as an identity value.
3971 *
3972 * @param transformer a function returning the transformation
3973 * for an element
3974 * @param basis the identity (initial default value) for the reduction
3975 * @param reducer a commutative associative combining function
3976 * @return the result of accumulating the given transformation
3977 * of all values
3978 */
3979 public double reduceValuesToDouble(ObjectToDouble<? super V> transformer,
3980 double basis,
3981 DoubleByDoubleToDouble reducer) {
3982 return fjp.invoke(ForkJoinTasks.reduceValuesToDouble
3983 (ConcurrentHashMapV8.this, transformer, basis, reducer));
3984 }
3985
3986 /**
3987 * Returns the result of accumulating the given transformation
3988 * of all values using the given reducer to combine values,
3989 * and the given basis as an identity value.
3990 *
3991 * @param transformer a function returning the transformation
3992 * for an element
3993 * @param basis the identity (initial default value) for the reduction
3994 * @param reducer a commutative associative combining function
3995 * @return the result of accumulating the given transformation
3996 * of all values
3997 */
3998 public long reduceValuesToLong(ObjectToLong<? super V> transformer,
3999 long basis,
4000 LongByLongToLong reducer) {
4001 return fjp.invoke(ForkJoinTasks.reduceValuesToLong
4002 (ConcurrentHashMapV8.this, transformer, basis, reducer));
4003 }
4004
4005 /**
4006 * Returns the result of accumulating the given transformation
4007 * of all values using the given reducer to combine values,
4008 * and the given basis as an identity value.
4009 *
4010 * @param transformer a function returning the transformation
4011 * for an element
4012 * @param basis the identity (initial default value) for the reduction
4013 * @param reducer a commutative associative combining function
4014 * @return the result of accumulating the given transformation
4015 * of all values
4016 */
4017 public int reduceValuesToInt(ObjectToInt<? super V> transformer,
4018 int basis,
4019 IntByIntToInt reducer) {
4020 return fjp.invoke(ForkJoinTasks.reduceValuesToInt
4021 (ConcurrentHashMapV8.this, transformer, basis, reducer));
4022 }
4023
4024 /**
4025 * Performs the given action for each entry.
4026 *
4027 * @param action the action
4028 */
4029 public void forEachEntry(Action<Map.Entry<K,V>> action) {
4030 fjp.invoke(ForkJoinTasks.forEachEntry
4031 (ConcurrentHashMapV8.this, action));
4032 }
4033
4034 /**
4035 * Performs the given action for each non-null transformation
4036 * of each entry.
4037 *
4038 * @param transformer a function returning the transformation
4039 * for an element, or null if there is no transformation (in
4040 * which case the action is not applied).
4041 * @param action the action
4042 */
4043 public <U> void forEachEntry(Fun<Map.Entry<K,V>, ? extends U> transformer,
4044 Action<U> action) {
4045 fjp.invoke(ForkJoinTasks.forEachEntry
4046 (ConcurrentHashMapV8.this, transformer, action));
4047 }
4048
4049 /**
4050 * Returns a non-null result from applying the given search
4051 * function on each entry, or null if none. Upon success,
4052 * further element processing is suppressed and the results of
4053 * any other parallel invocations of the search function are
4054 * ignored.
4055 *
4056 * @param searchFunction a function returning a non-null
4057 * result on success, else null
4058 * @return a non-null result from applying the given search
4059 * function on each entry, or null if none
4060 */
4061 public <U> U searchEntries(Fun<Map.Entry<K,V>, ? extends U> searchFunction) {
4062 return fjp.invoke(ForkJoinTasks.searchEntries
4063 (ConcurrentHashMapV8.this, searchFunction));
4064 }
4065
4066 /**
4067 * Returns the result of accumulating all entries using the
4068 * given reducer to combine values, or null if none.
4069 *
4070 * @param reducer a commutative associative combining function
4071 * @return the result of accumulating all entries
4072 */
4073 public Map.Entry<K,V> reduceEntries(BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4074 return fjp.invoke(ForkJoinTasks.reduceEntries
4075 (ConcurrentHashMapV8.this, reducer));
4076 }
4077
4078 /**
4079 * Returns the result of accumulating the given transformation
4080 * of all entries using the given reducer to combine values,
4081 * or null if none.
4082 *
4083 * @param transformer a function returning the transformation
4084 * for an element, or null if there is no transformation (in
4085 * which case it is not combined).
4086 * @param reducer a commutative associative combining function
4087 * @return the result of accumulating the given transformation
4088 * of all entries
4089 */
4090 public <U> U reduceEntries(Fun<Map.Entry<K,V>, ? extends U> transformer,
4091 BiFun<? super U, ? super U, ? extends U> reducer) {
4092 return fjp.invoke(ForkJoinTasks.reduceEntries
4093 (ConcurrentHashMapV8.this, transformer, reducer));
4094 }
4095
4096 /**
4097 * Returns the result of accumulating the given transformation
4098 * of all entries using the given reducer to combine values,
4099 * and the given basis as an identity value.
4100 *
4101 * @param transformer a function returning the transformation
4102 * for an element
4103 * @param basis the identity (initial default value) for the reduction
4104 * @param reducer a commutative associative combining function
4105 * @return the result of accumulating the given transformation
4106 * of all entries
4107 */
4108 public double reduceEntriesToDouble(ObjectToDouble<Map.Entry<K,V>> transformer,
4109 double basis,
4110 DoubleByDoubleToDouble reducer) {
4111 return fjp.invoke(ForkJoinTasks.reduceEntriesToDouble
4112 (ConcurrentHashMapV8.this, transformer, basis, reducer));
4113 }
4114
4115 /**
4116 * Returns the result of accumulating the given transformation
4117 * of all entries using the given reducer to combine values,
4118 * and the given basis as an identity value.
4119 *
4120 * @param transformer a function returning the transformation
4121 * for an element
4122 * @param basis the identity (initial default value) for the reduction
4123 * @param reducer a commutative associative combining function
4124 * @return the result of accumulating the given transformation
4125 * of all entries
4126 */
4127 public long reduceEntriesToLong(ObjectToLong<Map.Entry<K,V>> transformer,
4128 long basis,
4129 LongByLongToLong reducer) {
4130 return fjp.invoke(ForkJoinTasks.reduceEntriesToLong
4131 (ConcurrentHashMapV8.this, transformer, basis, reducer));
4132 }
4133
4134 /**
4135 * Returns the result of accumulating the given transformation
4136 * of all entries using the given reducer to combine values,
4137 * and the given basis as an identity value.
4138 *
4139 * @param transformer a function returning the transformation
4140 * for an element
4141 * @param basis the identity (initial default value) for the reduction
4142 * @param reducer a commutative associative combining function
4143 * @return the result of accumulating the given transformation
4144 * of all entries
4145 */
4146 public int reduceEntriesToInt(ObjectToInt<Map.Entry<K,V>> transformer,
4147 int basis,
4148 IntByIntToInt reducer) {
4149 return fjp.invoke(ForkJoinTasks.reduceEntriesToInt
4150 (ConcurrentHashMapV8.this, transformer, basis, reducer));
4151 }
4152 }
4153
4154 // ---------------------------------------------------------------------
4155
4156 /**
4157 * Predefined tasks for performing bulk parallel operations on
4158 * ConcurrentHashMaps. These tasks follow the forms and rules used
4159 * in class {@link Parallel}. Each method has the same name, but
4160 * returns a task rather than invoking it. These methods may be
4161 * useful in custom applications such as submitting a task without
4162 * waiting for completion, or combining with other tasks.
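 *
 * <p>An illustrative sketch (assuming a hypothetical
 * {@code ConcurrentHashMapV8<String,Long> map} and a
 * {@code ForkJoinPool pool}) of submitting a search without waiting,
 * then joining for its result later:
 * <pre> {@code
 * ForkJoinTask<String> task = pool.submit(
 *     ForkJoinTasks.searchKeys(map,
 *         new Fun<String,String>() {
 *             public String apply(String k) {
 *                 return k.startsWith("x") ? k : null;
 *             }}));
 * // ... other work ...
 * String found = task.join(); // null if no key matched
 * }</pre>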
4163 */
4164 public static class ForkJoinTasks {
4165 private ForkJoinTasks() {}
4166
4167 /**
4168 * Returns a task that when invoked, performs the given
4169 * action for each (key, value)
4170 *
4171 * @param map the map
4172 * @param action the action
4173 * @return the task
4174 */
4175 public static <K,V> ForkJoinTask<Void> forEach
4176 (ConcurrentHashMapV8<K,V> map,
4177 BiAction<K,V> action) {
4178 if (action == null) throw new NullPointerException();
4179 return new ForEachMappingTask<K,V>(map, action);
4180 }
4181
4182 /**
4183 * Returns a task that when invoked, performs the given
4184 * action for each non-null transformation of each (key, value)
4185 *
4186 * @param map the map
4187 * @param transformer a function returning the transformation
4188 * for an element, or null if there is no transformation (in
4189 * which case the action is not applied).
4190 * @param action the action
4191 * @return the task
4192 */
4193 public static <K,V,U> ForkJoinTask<Void> forEach
4194 (ConcurrentHashMapV8<K,V> map,
4195 BiFun<? super K, ? super V, ? extends U> transformer,
4196 Action<U> action) {
4197 if (transformer == null || action == null)
4198 throw new NullPointerException();
4199 return new ForEachTransformedMappingTask<K,V,U>
4200 (map, transformer, action);
4201 }
4202
4203 /**
4204 * Returns a task that when invoked, returns a non-null result
4205 * from applying the given search function on each (key,
4206 * value), or null if none. Upon success, further element
4207 * processing is suppressed and the results of any other
4208 * parallel invocations of the search function are ignored.
4209 *
4210 * @param map the map
4211 * @param searchFunction a function returning a non-null
4212 * result on success, else null
4213 * @return the task
4214 */
4215 public static <K,V,U> ForkJoinTask<U> search
4216 (ConcurrentHashMapV8<K,V> map,
4217 BiFun<? super K, ? super V, ? extends U> searchFunction) {
4218 if (searchFunction == null) throw new NullPointerException();
4219 return new SearchMappingsTask<K,V,U>
4220 (map, searchFunction,
4221 new AtomicReference<U>());
4222 }
4223
4224 /**
4225 * Returns a task that when invoked, returns the result of
4226 * accumulating the given transformation of all (key, value) pairs
4227 * using the given reducer to combine values, or null if none.
4228 *
4229 * @param map the map
4230 * @param transformer a function returning the transformation
4231 * for an element, or null if there is no transformation (in
4232 * which case it is not combined).
4233 * @param reducer a commutative associative combining function
4234 * @return the task
4235 */
4236 public static <K,V,U> ForkJoinTask<U> reduce
4237 (ConcurrentHashMapV8<K,V> map,
4238 BiFun<? super K, ? super V, ? extends U> transformer,
4239 BiFun<? super U, ? super U, ? extends U> reducer) {
4240 if (transformer == null || reducer == null)
4241 throw new NullPointerException();
4242 return new MapReduceMappingsTask<K,V,U>
4243 (map, transformer, reducer);
4244 }
4245
4246 /**
4247 * Returns a task that when invoked, returns the result of
4248 * accumulating the given transformation of all (key, value) pairs
4249 * using the given reducer to combine values, and the given
4250 * basis as an identity value.
4251 *
4252 * @param map the map
4253 * @param transformer a function returning the transformation
4254 * for an element
4255 * @param basis the identity (initial default value) for the reduction
4256 * @param reducer a commutative associative combining function
4257 * @return the task
4258 */
4259 public static <K,V> ForkJoinTask<Double> reduceToDouble
4260 (ConcurrentHashMapV8<K,V> map,
4261 ObjectByObjectToDouble<? super K, ? super V> transformer,
4262 double basis,
4263 DoubleByDoubleToDouble reducer) {
4264 if (transformer == null || reducer == null)
4265 throw new NullPointerException();
4266 return new MapReduceMappingsToDoubleTask<K,V>
4267 (map, transformer, basis, reducer);
4268 }
4269
4270 /**
4271 * Returns a task that when invoked, returns the result of
4272 * accumulating the given transformation of all (key, value) pairs
4273 * using the given reducer to combine values, and the given
4274 * basis as an identity value.
4275 *
4276 * @param map the map
4277 * @param transformer a function returning the transformation
4278 * for an element
4279 * @param basis the identity (initial default value) for the reduction
4280 * @param reducer a commutative associative combining function
4281 * @return the task
4282 */
4283 public static <K,V> ForkJoinTask<Long> reduceToLong
4284 (ConcurrentHashMapV8<K,V> map,
4285 ObjectByObjectToLong<? super K, ? super V> transformer,
4286 long basis,
4287 LongByLongToLong reducer) {
4288 if (transformer == null || reducer == null)
4289 throw new NullPointerException();
4290 return new MapReduceMappingsToLongTask<K,V>
4291 (map, transformer, basis, reducer);
4292 }
4293
4294 /**
4295 * Returns a task that when invoked, returns the result of
4296 * accumulating the given transformation of all (key, value) pairs
4297 * using the given reducer to combine values, and the given
4298 * basis as an identity value.
4299 *
* @param map the map
4300 * @param transformer a function returning the transformation
4301 * for an element
4302 * @param basis the identity (initial default value) for the reduction
4303 * @param reducer a commutative associative combining function
4304 * @return the task
4305 */
4306 public static <K,V> ForkJoinTask<Integer> reduceToInt
4307 (ConcurrentHashMapV8<K,V> map,
4308 ObjectByObjectToInt<? super K, ? super V> transformer,
4309 int basis,
4310 IntByIntToInt reducer) {
4311 if (transformer == null || reducer == null)
4312 throw new NullPointerException();
4313 return new MapReduceMappingsToIntTask<K,V>
4314 (map, transformer, basis, reducer);
4315 }
4316
4317 /**
4318 * Returns a task that when invoked, performs the given action
4319 * for each key.
4320 *
4321 * @param map the map
4322 * @param action the action
4323 * @return the task
4324 */
4325 public static <K,V> ForkJoinTask<Void> forEachKey
4326 (ConcurrentHashMapV8<K,V> map,
4327 Action<K> action) {
4328 if (action == null) throw new NullPointerException();
4329 return new ForEachKeyTask<K,V>(map, action);
4330 }
4331
4332 /**
4333 * Returns a task that when invoked, performs the given action
4334 * for each non-null transformation of each key.
4335 *
4336 * @param map the map
4337 * @param transformer a function returning the transformation
4338 * for an element, or null if there is no transformation (in
4339 * which case the action is not applied).
4340 * @param action the action
4341 * @return the task
4342 */
4343 public static <K,V,U> ForkJoinTask<Void> forEachKey
4344 (ConcurrentHashMapV8<K,V> map,
4345 Fun<? super K, ? extends U> transformer,
4346 Action<U> action) {
4347 if (transformer == null || action == null)
4348 throw new NullPointerException();
4349 return new ForEachTransformedKeyTask<K,V,U>
4350 (map, transformer, action);
4351 }
4352
4353 /**
4354 * Returns a task that when invoked, returns a non-null result
4355 * from applying the given search function on each key, or
4356 * null if none. Upon success, further element processing is
4357 * suppressed and the results of any other parallel
4358 * invocations of the search function are ignored.
4359 *
4360 * @param map the map
4361 * @param searchFunction a function returning a non-null
4362 * result on success, else null
4363 * @return the task
4364 */
4365 public static <K,V,U> ForkJoinTask<U> searchKeys
4366 (ConcurrentHashMapV8<K,V> map,
4367 Fun<? super K, ? extends U> searchFunction) {
4368 if (searchFunction == null) throw new NullPointerException();
4369 return new SearchKeysTask<K,V,U>
4370 (map, searchFunction,
4371 new AtomicReference<U>());
4372 }
4373
4374 /**
4375 * Returns a task that when invoked, returns the result of
4376 * accumulating all keys using the given reducer to combine
4377 * values, or null if none.
4378 *
4379 * @param map the map
4380 * @param reducer a commutative associative combining function
4381 * @return the task
4382 */
4383 public static <K,V> ForkJoinTask<K> reduceKeys
4384 (ConcurrentHashMapV8<K,V> map,
4385 BiFun<? super K, ? super K, ? extends K> reducer) {
4386 if (reducer == null) throw new NullPointerException();
4387 return new ReduceKeysTask<K,V>
4388 (map, reducer);
4389 }
4390
4391 /**
4392 * Returns a task that when invoked, returns the result of
4393 * accumulating the given transformation of all keys using the given
4394 * reducer to combine values, or null if none.
4395 *
4396 * @param map the map
4397 * @param transformer a function returning the transformation
4398 * for an element, or null if there is no transformation (in
4399 * which case it is not combined).
4400 * @param reducer a commutative associative combining function
4401 * @return the task
4402 */
4403 public static <K,V,U> ForkJoinTask<U> reduceKeys
4404 (ConcurrentHashMapV8<K,V> map,
4405 Fun<? super K, ? extends U> transformer,
4406 BiFun<? super U, ? super U, ? extends U> reducer) {
4407 if (transformer == null || reducer == null)
4408 throw new NullPointerException();
4409 return new MapReduceKeysTask<K,V,U>
4410 (map, transformer, reducer);
4411 }
4412
4413 /**
4414 * Returns a task that when invoked, returns the result of
4415 * accumulating the given transformation of all keys using the given
4416 * reducer to combine values, and the given basis as an
4417 * identity value.
4418 *
4419 * @param map the map
4420 * @param transformer a function returning the transformation
4421 * for an element
4422 * @param basis the identity (initial default value) for the reduction
4423 * @param reducer a commutative associative combining function
4424 * @return the task
4425 */
4426 public static <K,V> ForkJoinTask<Double> reduceKeysToDouble
4427 (ConcurrentHashMapV8<K,V> map,
4428 ObjectToDouble<? super K> transformer,
4429 double basis,
4430 DoubleByDoubleToDouble reducer) {
4431 if (transformer == null || reducer == null)
4432 throw new NullPointerException();
4433 return new MapReduceKeysToDoubleTask<K,V>
4434 (map, transformer, basis, reducer);
4435 }
4436
4437 /**
4438 * Returns a task that when invoked, returns the result of
4439 * accumulating the given transformation of all keys using the given
4440 * reducer to combine values, and the given basis as an
4441 * identity value.
4442 *
4443 * @param map the map
4444 * @param transformer a function returning the transformation
4445 * for an element
4446 * @param basis the identity (initial default value) for the reduction
4447 * @param reducer a commutative associative combining function
4448 * @return the task
4449 */
4450 public static <K,V> ForkJoinTask<Long> reduceKeysToLong
4451 (ConcurrentHashMapV8<K,V> map,
4452 ObjectToLong<? super K> transformer,
4453 long basis,
4454 LongByLongToLong reducer) {
4455 if (transformer == null || reducer == null)
4456 throw new NullPointerException();
4457 return new MapReduceKeysToLongTask<K,V>
4458 (map, transformer, basis, reducer);
4459 }
4460
4461 /**
4462 * Returns a task that when invoked, returns the result of
4463 * accumulating the given transformation of all keys using the given
4464 * reducer to combine values, and the given basis as an
4465 * identity value.
4466 *
4467 * @param map the map
4468 * @param transformer a function returning the transformation
4469 * for an element
4470 * @param basis the identity (initial default value) for the reduction
4471 * @param reducer a commutative associative combining function
4472 * @return the task
4473 */
4474 public static <K,V> ForkJoinTask<Integer> reduceKeysToInt
4475 (ConcurrentHashMapV8<K,V> map,
4476 ObjectToInt<? super K> transformer,
4477 int basis,
4478 IntByIntToInt reducer) {
4479 if (transformer == null || reducer == null)
4480 throw new NullPointerException();
4481 return new MapReduceKeysToIntTask<K,V>
4482 (map, transformer, basis, reducer);
4483 }
4484
4485 /**
4486 * Returns a task that when invoked, performs the given action
4487 * for each value.
4488 *
4489 * @param map the map
4490 * @param action the action
* @return the task
4491 */
4492 public static <K,V> ForkJoinTask<Void> forEachValue
4493 (ConcurrentHashMapV8<K,V> map,
4494 Action<V> action) {
4495 if (action == null) throw new NullPointerException();
4496 return new ForEachValueTask<K,V>(map, action);
4497 }
4498
4499 /**
4500 * Returns a task that when invoked, performs the given action
4501 * for each non-null transformation of each value.
4502 *
4503 * @param map the map
4504 * @param transformer a function returning the transformation
4505 * for an element, or null if there is no transformation (in
4506 * which case the action is not applied).
4507 * @param action the action
* @return the task
4508 */
4509 public static <K,V,U> ForkJoinTask<Void> forEachValue
4510 (ConcurrentHashMapV8<K,V> map,
4511 Fun<? super V, ? extends U> transformer,
4512 Action<U> action) {
4513 if (transformer == null || action == null)
4514 throw new NullPointerException();
4515 return new ForEachTransformedValueTask<K,V,U>
4516 (map, transformer, action);
4517 }
4518
4519 /**
4520 * Returns a task that when invoked, returns a non-null result
4521 * from applying the given search function on each value, or
4522 * null if none. Upon success, further element processing is
4523 * suppressed and the results of any other parallel
4524 * invocations of the search function are ignored.
4525 *
4526 * @param map the map
4527 * @param searchFunction a function returning a non-null
4528 * result on success, else null
4529 * @return the task
4530 *
4531 */
4532 public static <K,V,U> ForkJoinTask<U> searchValues
4533 (ConcurrentHashMapV8<K,V> map,
4534 Fun<? super V, ? extends U> searchFunction) {
4535 if (searchFunction == null) throw new NullPointerException();
4536 return new SearchValuesTask<K,V,U>
4537 (map, searchFunction,
4538 new AtomicReference<U>());
4539 }
4540
4541 /**
4542 * Returns a task that when invoked, returns the result of
4543 * accumulating all values using the given reducer to combine
4544 * values, or null if none.
4545 *
4546 * @param map the map
4547 * @param reducer a commutative associative combining function
4548 * @return the task
4549 */
4550 public static <K,V> ForkJoinTask<V> reduceValues
4551 (ConcurrentHashMapV8<K,V> map,
4552 BiFun<? super V, ? super V, ? extends V> reducer) {
4553 if (reducer == null) throw new NullPointerException();
4554 return new ReduceValuesTask<K,V>
4555 (map, reducer);
4556 }
4557
4558 /**
4559 * Returns a task that when invoked, returns the result of
4560 * accumulating the given transformation of all values using the
4561 * given reducer to combine values, or null if none.
4562 *
4563 * @param map the map
4564 * @param transformer a function returning the transformation
4565 * for an element, or null if there is no transformation (in
4566 * which case it is not combined).
4567 * @param reducer a commutative associative combining function
4568 * @return the task
4569 */
4570 public static <K,V,U> ForkJoinTask<U> reduceValues
4571 (ConcurrentHashMapV8<K,V> map,
4572 Fun<? super V, ? extends U> transformer,
4573 BiFun<? super U, ? super U, ? extends U> reducer) {
4574 if (transformer == null || reducer == null)
4575 throw new NullPointerException();
4576 return new MapReduceValuesTask<K,V,U>
4577 (map, transformer, reducer);
4578 }
4579
4580 /**
4581 * Returns a task that when invoked, returns the result of
4582 * accumulating the given transformation of all values using the
4583 * given reducer to combine values, and the given basis as an
4584 * identity value.
4585 *
4586 * @param map the map
4587 * @param transformer a function returning the transformation
4588 * for an element
4589 * @param basis the identity (initial default value) for the reduction
4590 * @param reducer a commutative associative combining function
4591 * @return the task
4592 */
4593 public static <K,V> ForkJoinTask<Double> reduceValuesToDouble
4594 (ConcurrentHashMapV8<K,V> map,
4595 ObjectToDouble<? super V> transformer,
4596 double basis,
4597 DoubleByDoubleToDouble reducer) {
4598 if (transformer == null || reducer == null)
4599 throw new NullPointerException();
4600 return new MapReduceValuesToDoubleTask<K,V>
4601 (map, transformer, basis, reducer);
4602 }
4603
4604 /**
4605 * Returns a task that when invoked, returns the result of
4606 * accumulating the given transformation of all values using the
4607 * given reducer to combine values, and the given basis as an
4608 * identity value.
4609 *
4610 * @param map the map
4611 * @param transformer a function returning the transformation
4612 * for an element
4613 * @param basis the identity (initial default value) for the reduction
4614 * @param reducer a commutative associative combining function
4615 * @return the task
4616 */
4617 public static <K,V> ForkJoinTask<Long> reduceValuesToLong
4618 (ConcurrentHashMapV8<K,V> map,
4619 ObjectToLong<? super V> transformer,
4620 long basis,
4621 LongByLongToLong reducer) {
4622 if (transformer == null || reducer == null)
4623 throw new NullPointerException();
4624 return new MapReduceValuesToLongTask<K,V>
4625 (map, transformer, basis, reducer);
4626 }
4627
4628 /**
4629 * Returns a task that when invoked, returns the result of
4630 * accumulating the given transformation of all values using the
4631 * given reducer to combine values, and the given basis as an
4632 * identity value.
4633 *
4634 * @param map the map
4635 * @param transformer a function returning the transformation
4636 * for an element
4637 * @param basis the identity (initial default value) for the reduction
4638 * @param reducer a commutative associative combining function
4639 * @return the task
4640 */
4641 public static <K,V> ForkJoinTask<Integer> reduceValuesToInt
4642 (ConcurrentHashMapV8<K,V> map,
4643 ObjectToInt<? super V> transformer,
4644 int basis,
4645 IntByIntToInt reducer) {
4646 if (transformer == null || reducer == null)
4647 throw new NullPointerException();
4648 return new MapReduceValuesToIntTask<K,V>
4649 (map, transformer, basis, reducer);
4650 }
4651
4652 /**
4653 * Returns a task that when invoked, performs the given action
4654 * for each entry.
4655 *
4656 * @param map the map
4657 * @param action the action
* @return the task
4658 */
4659 public static <K,V> ForkJoinTask<Void> forEachEntry
4660 (ConcurrentHashMapV8<K,V> map,
4661 Action<Map.Entry<K,V>> action) {
4662 if (action == null) throw new NullPointerException();
4663 return new ForEachEntryTask<K,V>(map, action);
4664 }
4665
4666 /**
4667 * Returns a task that when invoked, performs the given action
4668 * for each non-null transformation of each entry.
4669 *
4670 * @param map the map
4671 * @param transformer a function returning the transformation
4672 * for an element, or null if there is no transformation (in
4673 * which case the action is not applied).
4674 * @param action the action
* @return the task
4675 */
4676 public static <K,V,U> ForkJoinTask<Void> forEachEntry
4677 (ConcurrentHashMapV8<K,V> map,
4678 Fun<Map.Entry<K,V>, ? extends U> transformer,
4679 Action<U> action) {
4680 if (transformer == null || action == null)
4681 throw new NullPointerException();
4682 return new ForEachTransformedEntryTask<K,V,U>
4683 (map, transformer, action);
4684 }
4685
4686 /**
4687 * Returns a task that when invoked, returns a non-null result
4688 * from applying the given search function on each entry, or
4689 * null if none. Upon success, further element processing is
4690 * suppressed and the results of any other parallel
4691 * invocations of the search function are ignored.
4692 *
4693 * @param map the map
4694 * @param searchFunction a function returning a non-null
4695 * result on success, else null
4696 * @return the task
4697 *
4698 */
4699 public static <K,V,U> ForkJoinTask<U> searchEntries
4700 (ConcurrentHashMapV8<K,V> map,
4701 Fun<Map.Entry<K,V>, ? extends U> searchFunction) {
4702 if (searchFunction == null) throw new NullPointerException();
4703 return new SearchEntriesTask<K,V,U>
4704 (map, searchFunction,
4705 new AtomicReference<U>());
4706 }
4707
4708 /**
4709 * Returns a task that when invoked, returns the result of
4710 * accumulating all entries using the given reducer to combine
4711 * values, or null if none.
4712 *
4713 * @param map the map
4714 * @param reducer a commutative associative combining function
4715 * @return the task
4716 */
4717 public static <K,V> ForkJoinTask<Map.Entry<K,V>> reduceEntries
4718 (ConcurrentHashMapV8<K,V> map,
4719 BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4720 if (reducer == null) throw new NullPointerException();
4721 return new ReduceEntriesTask<K,V>
4722 (map, reducer);
4723 }
4724
4725 /**
4726 * Returns a task that when invoked, returns the result of
4727 * accumulating the given transformation of all entries using the
4728 * given reducer to combine values, or null if none.
4729 *
4730 * @param map the map
4731 * @param transformer a function returning the transformation
4732 * for an element, or null if there is no transformation (in
4733 * which case it is not combined).
4734 * @param reducer a commutative associative combining function
4735 * @return the task
4736 */
4737 public static <K,V,U> ForkJoinTask<U> reduceEntries
4738 (ConcurrentHashMapV8<K,V> map,
4739 Fun<Map.Entry<K,V>, ? extends U> transformer,
4740 BiFun<? super U, ? super U, ? extends U> reducer) {
4741 if (transformer == null || reducer == null)
4742 throw new NullPointerException();
4743 return new MapReduceEntriesTask<K,V,U>
4744 (map, transformer, reducer);
4745 }
4746
4747 /**
4748 * Returns a task that when invoked, returns the result of
4749 * accumulating the given transformation of all entries using the
4750 * given reducer to combine values, and the given basis as an
4751 * identity value.
4752 *
4753 * @param map the map
4754 * @param transformer a function returning the transformation
4755 * for an element
4756 * @param basis the identity (initial default value) for the reduction
4757 * @param reducer a commutative associative combining function
4758 * @return the task
4759 */
4760 public static <K,V> ForkJoinTask<Double> reduceEntriesToDouble
4761 (ConcurrentHashMapV8<K,V> map,
4762 ObjectToDouble<Map.Entry<K,V>> transformer,
4763 double basis,
4764 DoubleByDoubleToDouble reducer) {
4765 if (transformer == null || reducer == null)
4766 throw new NullPointerException();
4767 return new MapReduceEntriesToDoubleTask<K,V>
4768 (map, transformer, basis, reducer);
4769 }
4770
4771 /**
4772 * Returns a task that when invoked, returns the result of
4773 * accumulating the given transformation of all entries using the
4774 * given reducer to combine values, and the given basis as an
4775 * identity value.
4776 *
4777 * @param map the map
4778 * @param transformer a function returning the transformation
4779 * for an element
4780 * @param basis the identity (initial default value) for the reduction
4781 * @param reducer a commutative associative combining function
4782 * @return the task
4783 */
4784 public static <K,V> ForkJoinTask<Long> reduceEntriesToLong
4785 (ConcurrentHashMapV8<K,V> map,
4786 ObjectToLong<Map.Entry<K,V>> transformer,
4787 long basis,
4788 LongByLongToLong reducer) {
4789 if (transformer == null || reducer == null)
4790 throw new NullPointerException();
4791 return new MapReduceEntriesToLongTask<K,V>
4792 (map, transformer, basis, reducer);
4793 }
4794
4795 /**
4796 * Returns a task that when invoked, returns the result of
4797 * accumulating the given transformation of all entries using the
4798 * given reducer to combine values, and the given basis as an
4799 * identity value.
4800 *
4801 * @param map the map
4802 * @param transformer a function returning the transformation
4803 * for an element
4804 * @param basis the identity (initial default value) for the reduction
4805 * @param reducer a commutative associative combining function
4806 * @return the task
4807 */
4808 public static <K,V> ForkJoinTask<Integer> reduceEntriesToInt
4809 (ConcurrentHashMapV8<K,V> map,
4810 ObjectToInt<Map.Entry<K,V>> transformer,
4811 int basis,
4812 IntByIntToInt reducer) {
4813 if (transformer == null || reducer == null)
4814 throw new NullPointerException();
4815 return new MapReduceEntriesToIntTask<K,V>
4816 (map, transformer, basis, reducer);
4817 }
4818 }
4819
4820 // -------------------------------------------------------
4821
4822 /**
4823 * Base for FJ tasks for bulk operations. This adds a variant of
4824 * CountedCompleters and some split and merge bookkeeping to
4825 * iterator functionality. The forEach and reduce methods are
4826 * similar to those illustrated in CountedCompleter documentation,
4827 * except that bottom-up reduction completions perform them within
4828 * their compute methods. The search methods are like forEach
4829 * except they continually poll for success and exit early. Also,
4830 * exceptions are handled in a simpler manner, by just trying to
4831 * complete root task exceptionally.
4832 */
4833 @SuppressWarnings("serial") static abstract class BulkTask<K,V,R> extends Traverser<K,V,R> {
4834 final BulkTask<K,V,?> parent; // completion target
4835 int batch; // split control
4836 int pending; // completion control
4837
4838 /** Constructor for root tasks */
4839 BulkTask(ConcurrentHashMapV8<K,V> map) {
4840 super(map);
4841 this.parent = null;
4842 this.batch = -1; // force call to batch() on execution
4843 }
4844
4845 /** Constructor for subtasks */
4846 BulkTask(BulkTask<K,V,?> parent, int batch) {
4847 super(parent);
4848 this.parent = parent;
4849 this.batch = batch;
4850 }
4851
4852 // FJ methods
4853
4854 /**
4855 * Propagates completion. Note that all reduce actions
4856 * bypass this method to combine while completing.
4857 */
4858 final void tryComplete() {
4859 BulkTask<K,V,?> a = this, s = a;
4860 for (int c;;) {
4861 if ((c = a.pending) == 0) {
4862 if ((a = (s = a).parent) == null) {
4863 s.quietlyComplete();
4864 break;
4865 }
4866 }
4867 else if (U.compareAndSwapInt(a, PENDING, c, c - 1))
4868 break;
4869 }
4870 }
4871
4872 /**
4873 * Forces root task to complete.
4874 * @param ex if null, complete normally, else exceptionally
4875 * @return false to simplify use
4876 */
4877 final boolean tryCompleteComputation(Throwable ex) {
4878 for (BulkTask<K,V,?> a = this;;) {
4879 BulkTask<K,V,?> p = a.parent;
4880 if (p == null) {
4881 if (ex != null)
4882 a.completeExceptionally(ex);
4883 else
4884 a.quietlyComplete();
4885 return false;
4886 }
4887 a = p;
4888 }
4889 }
4890
4891 /**
4892 * Version of tryCompleteComputation for function screening checks
4893 */
4894 final boolean abortOnNullFunction() {
4895 return tryCompleteComputation(new Error("Unexpected null function"));
4896 }
4897
4898 // utilities
4899
4900 /** CompareAndSet pending count */
4901 final boolean casPending(int cmp, int val) {
4902 return U.compareAndSwapInt(this, PENDING, cmp, val);
4903 }
4904
4905 /**
4906 * Returns approx exp2 of the number of times (minus one) to
4907 * split task by two before executing leaf action. This value
4908 * is faster to compute and more convenient to use as a guide
4909 * to splitting than is the depth, since it is used while
4910 * dividing by two anyway.
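 * For example, with a pool parallelism of 4, sp = 4 << 3 = 32, so a
 * map holding 1000 elements yields a batch of 32, while one holding
 * 10 elements yields a batch of 10.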
4911 */
4912 final int batch() {
4913 int b = batch;
4914 if (b < 0) {
4915 long n = map.counter.sum();
4916 int sp = getPool().getParallelism() << 3; // slack of 8
4917 b = batch = (n <= 0L) ? 0 : (n < (long)sp) ? (int)n : sp;
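                // e.g., with parallelism 4, sp = 4 << 3 = 32: an empty map
                // gives batch 0, a 10-element map gives 10, and any larger
                // map is capped at 32.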
4918 }
4919 return b;
4920 }
4921
4922 /**
4923 * Returns an exportable snapshot entry.
4924 */
4925 static <K,V> AbstractMap.SimpleEntry<K,V> entryFor(K k, V v) {
4926 return new AbstractMap.SimpleEntry<K,V>(k, v);
4927 }
4928
4929 // Unsafe mechanics
4930 private static final sun.misc.Unsafe U;
4931 private static final long PENDING;
4932 static {
4933 try {
4934 U = getUnsafe();
4935 PENDING = U.objectFieldOffset
4936 (BulkTask.class.getDeclaredField("pending"));
4937 } catch (Exception e) {
4938 throw new Error(e);
4939 }
4940 }
4941 }
4942
4943 /*
4944 * Task classes. Coded in a regular but ugly format/style to
4945 * simplify checks that each variant differs in the right way from
4946 * others.
4947 */
4948
4949 @SuppressWarnings("serial") static final class ForEachKeyTask<K,V>
4950 extends BulkTask<K,V,Void> {
4951 final Action<K> action;
4952 ForEachKeyTask
4953 (ConcurrentHashMapV8<K,V> m,
4954 Action<K> action) {
4955 super(m);
4956 this.action = action;
4957 }
4958 ForEachKeyTask
4959 (BulkTask<K,V,?> p, int b,
4960 Action<K> action) {
4961 super(p, b);
4962 this.action = action;
4963 }
4964 @SuppressWarnings("unchecked") public final boolean exec() {
4965 final Action<K> action = this.action;
4966 if (action == null)
4967 return abortOnNullFunction();
4968 try {
4969 int b = batch(), c;
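                    // Split phase: bump this task's pending count, then fork a
                    // subtask (whose constructor takes a portion of the remaining
                    // traversal range) with a halved batch value, until the batch
                    // reaches 1 or this task's range is exhausted.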
4970 while (b > 1 && baseIndex != baseLimit) {
4971 do {} while (!casPending(c = pending, c+1));
4972 new ForEachKeyTask<K,V>(this, b >>>= 1, action).fork();
4973 }
4974 while (advance() != null)
4975 action.apply((K)nextKey);
4976 tryComplete();
4977 } catch (Throwable ex) {
4978 return tryCompleteComputation(ex);
4979 }
4980 return false;
4981 }
4982 }
4983
4984 @SuppressWarnings("serial") static final class ForEachValueTask<K,V>
4985 extends BulkTask<K,V,Void> {
4986 final Action<V> action;
4987 ForEachValueTask
4988 (ConcurrentHashMapV8<K,V> m,
4989 Action<V> action) {
4990 super(m);
4991 this.action = action;
4992 }
4993 ForEachValueTask
4994 (BulkTask<K,V,?> p, int b,
4995 Action<V> action) {
4996 super(p, b);
4997 this.action = action;
4998 }
4999 @SuppressWarnings("unchecked") public final boolean exec() {
5000 final Action<V> action = this.action;
5001 if (action == null)
5002 return abortOnNullFunction();
5003 try {
5004 int b = batch(), c;
5005 while (b > 1 && baseIndex != baseLimit) {
5006 do {} while (!casPending(c = pending, c+1));
5007 new ForEachValueTask<K,V>(this, b >>>= 1, action).fork();
5008 }
5009 Object v;
5010 while ((v = advance()) != null)
5011 action.apply((V)v);
5012 tryComplete();
5013 } catch (Throwable ex) {
5014 return tryCompleteComputation(ex);
5015 }
5016 return false;
5017 }
5018 }
5019
5020 @SuppressWarnings("serial") static final class ForEachEntryTask<K,V>
5021 extends BulkTask<K,V,Void> {
5022 final Action<Entry<K,V>> action;
5023 ForEachEntryTask
5024 (ConcurrentHashMapV8<K,V> m,
5025 Action<Entry<K,V>> action) {
5026 super(m);
5027 this.action = action;
5028 }
5029 ForEachEntryTask
5030 (BulkTask<K,V,?> p, int b,
5031 Action<Entry<K,V>> action) {
5032 super(p, b);
5033 this.action = action;
5034 }
5035 @SuppressWarnings("unchecked") public final boolean exec() {
5036 final Action<Entry<K,V>> action = this.action;
5037 if (action == null)
5038 return abortOnNullFunction();
5039 try {
5040 int b = batch(), c;
5041 while (b > 1 && baseIndex != baseLimit) {
5042 do {} while (!casPending(c = pending, c+1));
5043 new ForEachEntryTask<K,V>(this, b >>>= 1, action).fork();
5044 }
5045 Object v;
5046 while ((v = advance()) != null)
5047 action.apply(entryFor((K)nextKey, (V)v));
5048 tryComplete();
5049 } catch (Throwable ex) {
5050 return tryCompleteComputation(ex);
5051 }
5052 return false;
5053 }
5054 }
5055
5056 @SuppressWarnings("serial") static final class ForEachMappingTask<K,V>
5057 extends BulkTask<K,V,Void> {
5058 final BiAction<K,V> action;
5059 ForEachMappingTask
5060 (ConcurrentHashMapV8<K,V> m,
5061 BiAction<K,V> action) {
5062 super(m);
5063 this.action = action;
5064 }
5065 ForEachMappingTask
5066 (BulkTask<K,V,?> p, int b,
5067 BiAction<K,V> action) {
5068 super(p, b);
5069 this.action = action;
5070 }
5071
5072 @SuppressWarnings("unchecked") public final boolean exec() {
5073 final BiAction<K,V> action = this.action;
5074 if (action == null)
5075 return abortOnNullFunction();
5076 try {
5077 int b = batch(), c;
5078 while (b > 1 && baseIndex != baseLimit) {
5079 do {} while (!casPending(c = pending, c+1));
5080 new ForEachMappingTask<K,V>(this, b >>>= 1,
5081 action).fork();
5082 }
5083 Object v;
5084 while ((v = advance()) != null)
5085 action.apply((K)nextKey, (V)v);
5086 tryComplete();
5087 } catch (Throwable ex) {
5088 return tryCompleteComputation(ex);
5089 }
5090 return false;
5091 }
5092 }
5093
5094 @SuppressWarnings("serial") static final class ForEachTransformedKeyTask<K,V,U>
5095 extends BulkTask<K,V,Void> {
5096 final Fun<? super K, ? extends U> transformer;
5097 final Action<U> action;
5098 ForEachTransformedKeyTask
5099 (ConcurrentHashMapV8<K,V> m,
5100 Fun<? super K, ? extends U> transformer,
5101 Action<U> action) {
5102 super(m);
5103 this.transformer = transformer;
5104 this.action = action;
5105
5106 }
5107 ForEachTransformedKeyTask
5108 (BulkTask<K,V,?> p, int b,
5109 Fun<? super K, ? extends U> transformer,
5110 Action<U> action) {
5111 super(p, b);
5112 this.transformer = transformer;
5113 this.action = action;
5114 }
5115 @SuppressWarnings("unchecked") public final boolean exec() {
5116 final Fun<? super K, ? extends U> transformer =
5117 this.transformer;
5118 final Action<U> action = this.action;
5119 if (transformer == null || action == null)
5120 return abortOnNullFunction();
5121 try {
5122 int b = batch(), c;
5123 while (b > 1 && baseIndex != baseLimit) {
5124 do {} while (!casPending(c = pending, c+1));
5125 new ForEachTransformedKeyTask<K,V,U>
5126 (this, b >>>= 1, transformer, action).fork();
5127 }
5128 U u;
5129 while (advance() != null) {
5130 if ((u = transformer.apply((K)nextKey)) != null)
5131 action.apply(u);
5132 }
5133 tryComplete();
5134 } catch (Throwable ex) {
5135 return tryCompleteComputation(ex);
5136 }
5137 return false;
5138 }
5139 }
5140
5141 @SuppressWarnings("serial") static final class ForEachTransformedValueTask<K,V,U>
5142 extends BulkTask<K,V,Void> {
5143 final Fun<? super V, ? extends U> transformer;
5144 final Action<U> action;
5145 ForEachTransformedValueTask
5146 (ConcurrentHashMapV8<K,V> m,
5147 Fun<? super V, ? extends U> transformer,
5148 Action<U> action) {
5149 super(m);
5150 this.transformer = transformer;
5151 this.action = action;
5152
5153 }
5154 ForEachTransformedValueTask
5155 (BulkTask<K,V,?> p, int b,
5156 Fun<? super V, ? extends U> transformer,
5157 Action<U> action) {
5158 super(p, b);
5159 this.transformer = transformer;
5160 this.action = action;
5161 }
5162 @SuppressWarnings("unchecked") public final boolean exec() {
5163 final Fun<? super V, ? extends U> transformer =
5164 this.transformer;
5165 final Action<U> action = this.action;
5166 if (transformer == null || action == null)
5167 return abortOnNullFunction();
5168 try {
5169 int b = batch(), c;
5170 while (b > 1 && baseIndex != baseLimit) {
5171 do {} while (!casPending(c = pending, c+1));
5172 new ForEachTransformedValueTask<K,V,U>
5173 (this, b >>>= 1, transformer, action).fork();
5174 }
5175 Object v; U u;
5176 while ((v = advance()) != null) {
5177 if ((u = transformer.apply((V)v)) != null)
5178 action.apply(u);
5179 }
5180 tryComplete();
5181 } catch (Throwable ex) {
5182 return tryCompleteComputation(ex);
5183 }
5184 return false;
5185 }
5186 }
5187
5188 @SuppressWarnings("serial") static final class ForEachTransformedEntryTask<K,V,U>
5189 extends BulkTask<K,V,Void> {
5190 final Fun<Map.Entry<K,V>, ? extends U> transformer;
5191 final Action<U> action;
5192 ForEachTransformedEntryTask
5193 (ConcurrentHashMapV8<K,V> m,
5194 Fun<Map.Entry<K,V>, ? extends U> transformer,
5195 Action<U> action) {
5196 super(m);
5197 this.transformer = transformer;
5198 this.action = action;
5199
5200 }
5201 ForEachTransformedEntryTask
5202 (BulkTask<K,V,?> p, int b,
5203 Fun<Map.Entry<K,V>, ? extends U> transformer,
5204 Action<U> action) {
5205 super(p, b);
5206 this.transformer = transformer;
5207 this.action = action;
5208 }
5209 @SuppressWarnings("unchecked") public final boolean exec() {
5210 final Fun<Map.Entry<K,V>, ? extends U> transformer =
5211 this.transformer;
5212 final Action<U> action = this.action;
5213 if (transformer == null || action == null)
5214 return abortOnNullFunction();
5215 try {
5216 int b = batch(), c;
5217 while (b > 1 && baseIndex != baseLimit) {
5218 do {} while (!casPending(c = pending, c+1));
5219 new ForEachTransformedEntryTask<K,V,U>
5220 (this, b >>>= 1, transformer, action).fork();
5221 }
5222 Object v; U u;
5223 while ((v = advance()) != null) {
5224 if ((u = transformer.apply(entryFor((K)nextKey, (V)v))) != null)
5225 action.apply(u);
5226 }
5227 tryComplete();
5228 } catch (Throwable ex) {
5229 return tryCompleteComputation(ex);
5230 }
5231 return false;
5232 }
5233 }
5234
5235 @SuppressWarnings("serial") static final class ForEachTransformedMappingTask<K,V,U>
5236 extends BulkTask<K,V,Void> {
5237 final BiFun<? super K, ? super V, ? extends U> transformer;
5238 final Action<U> action;
5239 ForEachTransformedMappingTask
5240 (ConcurrentHashMapV8<K,V> m,
5241 BiFun<? super K, ? super V, ? extends U> transformer,
5242 Action<U> action) {
5243 super(m);
5244 this.transformer = transformer;
5245 this.action = action;
5246
5247 }
5248 ForEachTransformedMappingTask
5249 (BulkTask<K,V,?> p, int b,
5250 BiFun<? super K, ? super V, ? extends U> transformer,
5251 Action<U> action) {
5252 super(p, b);
5253 this.transformer = transformer;
5254 this.action = action;
5255 }
5256 @SuppressWarnings("unchecked") public final boolean exec() {
5257 final BiFun<? super K, ? super V, ? extends U> transformer =
5258 this.transformer;
5259 final Action<U> action = this.action;
5260 if (transformer == null || action == null)
5261 return abortOnNullFunction();
5262 try {
5263 int b = batch(), c;
5264 while (b > 1 && baseIndex != baseLimit) {
5265 do {} while (!casPending(c = pending, c+1));
5266 new ForEachTransformedMappingTask<K,V,U>
5267 (this, b >>>= 1, transformer, action).fork();
5268 }
5269 Object v; U u;
5270 while ((v = advance()) != null) {
5271 if ((u = transformer.apply((K)nextKey, (V)v)) != null)
5272 action.apply(u);
5273 }
5274 tryComplete();
5275 } catch (Throwable ex) {
5276 return tryCompleteComputation(ex);
5277 }
5278 return false;
5279 }
5280 }
5281
5282 @SuppressWarnings("serial") static final class SearchKeysTask<K,V,U>
5283 extends BulkTask<K,V,U> {
5284 final Fun<? super K, ? extends U> searchFunction;
5285 final AtomicReference<U> result;
5286 SearchKeysTask
5287 (ConcurrentHashMapV8<K,V> m,
5288 Fun<? super K, ? extends U> searchFunction,
5289 AtomicReference<U> result) {
5290 super(m);
5291 this.searchFunction = searchFunction; this.result = result;
5292 }
5293 SearchKeysTask
5294 (BulkTask<K,V,?> p, int b,
5295 Fun<? super K, ? extends U> searchFunction,
5296 AtomicReference<U> result) {
5297 super(p, b);
5298 this.searchFunction = searchFunction; this.result = result;
5299 }
5300 @SuppressWarnings("unchecked") public final boolean exec() {
5301 AtomicReference<U> result = this.result;
5302 final Fun<? super K, ? extends U> searchFunction =
5303 this.searchFunction;
5304 if (searchFunction == null || result == null)
5305 return abortOnNullFunction();
5306 try {
5307 int b = batch(), c;
5308 while (b > 1 && baseIndex != baseLimit && result.get() == null) {
5309 do {} while (!casPending(c = pending, c+1));
5310 new SearchKeysTask<K,V,U>(this, b >>>= 1,
5311 searchFunction, result).fork();
5312 }
5313 U u;
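                    // Scan phase: stop as soon as any task in the computation
                    // publishes a non-null result; the winning CAS also forces
                    // the root task to complete so the overall search returns
                    // promptly, while other tasks observe the non-null result
                    // and stop scanning.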
5314 while (result.get() == null && advance() != null) {
5315 if ((u = searchFunction.apply((K)nextKey)) != null) {
5316 if (result.compareAndSet(null, u))
5317 tryCompleteComputation(null);
5318 break;
5319 }
5320 }
5321 tryComplete();
5322 } catch (Throwable ex) {
5323 return tryCompleteComputation(ex);
5324 }
5325 return false;
5326 }
5327 public final U getRawResult() { return result.get(); }
5328 }
5329
5330 @SuppressWarnings("serial") static final class SearchValuesTask<K,V,U>
5331 extends BulkTask<K,V,U> {
5332 final Fun<? super V, ? extends U> searchFunction;
5333 final AtomicReference<U> result;
5334 SearchValuesTask
5335 (ConcurrentHashMapV8<K,V> m,
5336 Fun<? super V, ? extends U> searchFunction,
5337 AtomicReference<U> result) {
5338 super(m);
5339 this.searchFunction = searchFunction; this.result = result;
5340 }
5341 SearchValuesTask
5342 (BulkTask<K,V,?> p, int b,
5343 Fun<? super V, ? extends U> searchFunction,
5344 AtomicReference<U> result) {
5345 super(p, b);
5346 this.searchFunction = searchFunction; this.result = result;
5347 }
5348 @SuppressWarnings("unchecked") public final boolean exec() {
5349 AtomicReference<U> result = this.result;
5350 final Fun<? super V, ? extends U> searchFunction =
5351 this.searchFunction;
5352 if (searchFunction == null || result == null)
5353 return abortOnNullFunction();
5354 try {
5355 int b = batch(), c;
5356 while (b > 1 && baseIndex != baseLimit && result.get() == null) {
5357 do {} while (!casPending(c = pending, c+1));
5358 new SearchValuesTask<K,V,U>(this, b >>>= 1,
5359 searchFunction, result).fork();
5360 }
5361 Object v; U u;
5362 while (result.get() == null && (v = advance()) != null) {
5363 if ((u = searchFunction.apply((V)v)) != null) {
5364 if (result.compareAndSet(null, u))
5365 tryCompleteComputation(null);
5366 break;
5367 }
5368 }
5369 tryComplete();
5370 } catch (Throwable ex) {
5371 return tryCompleteComputation(ex);
5372 }
5373 return false;
5374 }
5375 public final U getRawResult() { return result.get(); }
5376 }
5377
5378 @SuppressWarnings("serial") static final class SearchEntriesTask<K,V,U>
5379 extends BulkTask<K,V,U> {
5380 final Fun<Entry<K,V>, ? extends U> searchFunction;
5381 final AtomicReference<U> result;
5382 SearchEntriesTask
5383 (ConcurrentHashMapV8<K,V> m,
5384 Fun<Entry<K,V>, ? extends U> searchFunction,
5385 AtomicReference<U> result) {
5386 super(m);
5387 this.searchFunction = searchFunction; this.result = result;
5388 }
5389 SearchEntriesTask
5390 (BulkTask<K,V,?> p, int b,
5391 Fun<Entry<K,V>, ? extends U> searchFunction,
5392 AtomicReference<U> result) {
5393 super(p, b);
5394 this.searchFunction = searchFunction; this.result = result;
5395 }
5396 @SuppressWarnings("unchecked") public final boolean exec() {
5397 AtomicReference<U> result = this.result;
5398 final Fun<Entry<K,V>, ? extends U> searchFunction =
5399 this.searchFunction;
5400 if (searchFunction == null || result == null)
5401 return abortOnNullFunction();
5402 try {
5403 int b = batch(), c;
5404 while (b > 1 && baseIndex != baseLimit && result.get() == null) {
5405 do {} while (!casPending(c = pending, c+1));
5406 new SearchEntriesTask<K,V,U>(this, b >>>= 1,
5407 searchFunction, result).fork();
5408 }
5409 Object v; U u;
5410 while (result.get() == null && (v = advance()) != null) {
5411 if ((u = searchFunction.apply(entryFor((K)nextKey, (V)v))) != null) {
5412 if (result.compareAndSet(null, u))
5413 tryCompleteComputation(null);
5414 break;
5415 }
5416 }
5417 tryComplete();
5418 } catch (Throwable ex) {
5419 return tryCompleteComputation(ex);
5420 }
5421 return false;
5422 }
5423 public final U getRawResult() { return result.get(); }
5424 }
5425
5426 @SuppressWarnings("serial") static final class SearchMappingsTask<K,V,U>
5427 extends BulkTask<K,V,U> {
5428 final BiFun<? super K, ? super V, ? extends U> searchFunction;
5429 final AtomicReference<U> result;
5430 SearchMappingsTask
5431 (ConcurrentHashMapV8<K,V> m,
5432 BiFun<? super K, ? super V, ? extends U> searchFunction,
5433 AtomicReference<U> result) {
5434 super(m);
5435 this.searchFunction = searchFunction; this.result = result;
5436 }
5437 SearchMappingsTask
5438 (BulkTask<K,V,?> p, int b,
5439 BiFun<? super K, ? super V, ? extends U> searchFunction,
5440 AtomicReference<U> result) {
5441 super(p, b);
5442 this.searchFunction = searchFunction; this.result = result;
5443 }
5444 @SuppressWarnings("unchecked") public final boolean exec() {
5445 AtomicReference<U> result = this.result;
5446 final BiFun<? super K, ? super V, ? extends U> searchFunction =
5447 this.searchFunction;
5448 if (searchFunction == null || result == null)
5449 return abortOnNullFunction();
5450 try {
5451 int b = batch(), c;
5452 while (b > 1 && baseIndex != baseLimit && result.get() == null) {
5453 do {} while (!casPending(c = pending, c+1));
5454 new SearchMappingsTask<K,V,U>(this, b >>>= 1,
5455 searchFunction, result).fork();
5456 }
5457 Object v; U u;
5458 while (result.get() == null && (v = advance()) != null) {
5459 if ((u = searchFunction.apply((K)nextKey, (V)v)) != null) {
5460 if (result.compareAndSet(null, u))
5461 tryCompleteComputation(null);
5462 break;
5463 }
5464 }
5465 tryComplete();
5466 } catch (Throwable ex) {
5467 return tryCompleteComputation(ex);
5468 }
5469 return false;
5470 }
5471 public final U getRawResult() { return result.get(); }
5472 }
5473
5474 @SuppressWarnings("serial") static final class ReduceKeysTask<K,V>
5475 extends BulkTask<K,V,K> {
5476 final BiFun<? super K, ? super K, ? extends K> reducer;
5477 K result;
5478 ReduceKeysTask<K,V> rights, nextRight;
5479 ReduceKeysTask
5480 (ConcurrentHashMapV8<K,V> m,
5481 BiFun<? super K, ? super K, ? extends K> reducer) {
5482 super(m);
5483 this.reducer = reducer;
5484 }
5485 ReduceKeysTask
5486 (BulkTask<K,V,?> p, int b,
5487 ReduceKeysTask<K,V> nextRight,
5488 BiFun<? super K, ? super K, ? extends K> reducer) {
5489 super(p, b); this.nextRight = nextRight;
5490 this.reducer = reducer;
5491 }
5492
5493 @SuppressWarnings("unchecked") public final boolean exec() {
5494 final BiFun<? super K, ? super K, ? extends K> reducer =
5495 this.reducer;
5496 if (reducer == null)
5497 return abortOnNullFunction();
5498 try {
5499 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
5500 do {} while (!casPending(c = pending, c+1));
5501 (rights = new ReduceKeysTask<K,V>
5502 (this, b >>>= 1, rights, reducer)).fork();
5503 }
5504 K r = null;
5505 while (advance() != null) {
5506 K u = (K)nextKey;
5507 r = (r == null) ? u : reducer.apply(r, u);
5508 }
5509 result = r;
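                    // Combine while completing: when a task's pending count is
                    // zero, fold the results of its completed right-hand subtasks
                    // into its own, then repeat for ancestors of the same type,
                    // quietly completing at the top; a nonzero count is simply
                    // decremented, leaving the rest of the merge to the last
                    // subtask to finish.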
5510 for (ReduceKeysTask<K,V> t = this, s;;) {
5511 int c; BulkTask<K,V,?> par; K tr, sr;
5512 if ((c = t.pending) == 0) {
5513 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
5514 if ((sr = s.result) != null)
5515 t.result = (tr = t.result) == null? sr : reducer.apply(tr, sr);
5516 }
5517 if ((par = t.parent) == null ||
5518 !(par instanceof ReduceKeysTask)) {
5519 t.quietlyComplete();
5520 break;
5521 }
5522 t = (ReduceKeysTask<K,V>)par;
5523 }
5524 else if (t.casPending(c, c - 1))
5525 break;
5526 }
5527 } catch (Throwable ex) {
5528 return tryCompleteComputation(ex);
5529 }
5530 return false;
5531 }
5532 public final K getRawResult() { return result; }
5533 }
5534
5535 @SuppressWarnings("serial") static final class ReduceValuesTask<K,V>
5536 extends BulkTask<K,V,V> {
5537 final BiFun<? super V, ? super V, ? extends V> reducer;
5538 V result;
5539 ReduceValuesTask<K,V> rights, nextRight;
5540 ReduceValuesTask
5541 (ConcurrentHashMapV8<K,V> m,
5542 BiFun<? super V, ? super V, ? extends V> reducer) {
5543 super(m);
5544 this.reducer = reducer;
5545 }
5546 ReduceValuesTask
5547 (BulkTask<K,V,?> p, int b,
5548 ReduceValuesTask<K,V> nextRight,
5549 BiFun<? super V, ? super V, ? extends V> reducer) {
5550 super(p, b); this.nextRight = nextRight;
5551 this.reducer = reducer;
5552 }
5553
5554 @SuppressWarnings("unchecked") public final boolean exec() {
5555 final BiFun<? super V, ? super V, ? extends V> reducer =
5556 this.reducer;
5557 if (reducer == null)
5558 return abortOnNullFunction();
5559 try {
5560 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
5561 do {} while (!casPending(c = pending, c+1));
5562 (rights = new ReduceValuesTask<K,V>
5563 (this, b >>>= 1, rights, reducer)).fork();
5564 }
5565 V r = null;
5566 Object v;
5567 while ((v = advance()) != null) {
5568 V u = (V)v;
5569 r = (r == null) ? u : reducer.apply(r, u);
5570 }
5571 result = r;
5572 for (ReduceValuesTask<K,V> t = this, s;;) {
5573 int c; BulkTask<K,V,?> par; V tr, sr;
5574 if ((c = t.pending) == 0) {
5575 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
5576 if ((sr = s.result) != null)
5577 t.result = (tr = t.result) == null? sr : reducer.apply(tr, sr);
5578 }
5579 if ((par = t.parent) == null ||
5580 !(par instanceof ReduceValuesTask)) {
5581 t.quietlyComplete();
5582 break;
5583 }
5584 t = (ReduceValuesTask<K,V>)par;
5585 }
5586 else if (t.casPending(c, c - 1))
5587 break;
5588 }
5589 } catch (Throwable ex) {
5590 return tryCompleteComputation(ex);
5591 }
5592 return false;
5593 }
5594 public final V getRawResult() { return result; }
5595 }
5596
5597 @SuppressWarnings("serial") static final class ReduceEntriesTask<K,V>
5598 extends BulkTask<K,V,Map.Entry<K,V>> {
5599 final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
5600 Map.Entry<K,V> result;
5601 ReduceEntriesTask<K,V> rights, nextRight;
5602 ReduceEntriesTask
5603 (ConcurrentHashMapV8<K,V> m,
5604 BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
5605 super(m);
5606 this.reducer = reducer;
5607 }
5608 ReduceEntriesTask
5609 (BulkTask<K,V,?> p, int b,
5610 ReduceEntriesTask<K,V> nextRight,
5611 BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
5612 super(p, b); this.nextRight = nextRight;
5613 this.reducer = reducer;
5614 }
5615
5616 @SuppressWarnings("unchecked") public final boolean exec() {
5617 final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer =
5618 this.reducer;
5619 if (reducer == null)
5620 return abortOnNullFunction();
5621 try {
5622 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
5623 do {} while (!casPending(c = pending, c+1));
5624 (rights = new ReduceEntriesTask<K,V>
5625 (this, b >>>= 1, rights, reducer)).fork();
5626 }
5627 Map.Entry<K,V> r = null;
5628 Object v;
5629 while ((v = advance()) != null) {
5630 Map.Entry<K,V> u = entryFor((K)nextKey, (V)v);
5631 r = (r == null) ? u : reducer.apply(r, u);
5632 }
5633 result = r;
5634 for (ReduceEntriesTask<K,V> t = this, s;;) {
5635 int c; BulkTask<K,V,?> par; Map.Entry<K,V> tr, sr;
5636 if ((c = t.pending) == 0) {
5637 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
5638 if ((sr = s.result) != null)
5639 t.result = (tr = t.result) == null? sr : reducer.apply(tr, sr);
5640 }
5641 if ((par = t.parent) == null ||
5642 !(par instanceof ReduceEntriesTask)) {
5643 t.quietlyComplete();
5644 break;
5645 }
5646 t = (ReduceEntriesTask<K,V>)par;
5647 }
5648 else if (t.casPending(c, c - 1))
5649 break;
5650 }
5651 } catch (Throwable ex) {
5652 return tryCompleteComputation(ex);
5653 }
5654 return false;
5655 }
5656 public final Map.Entry<K,V> getRawResult() { return result; }
5657 }
5658
5659 @SuppressWarnings("serial") static final class MapReduceKeysTask<K,V,U>
5660 extends BulkTask<K,V,U> {
5661 final Fun<? super K, ? extends U> transformer;
5662 final BiFun<? super U, ? super U, ? extends U> reducer;
5663 U result;
5664 MapReduceKeysTask<K,V,U> rights, nextRight;
5665 MapReduceKeysTask
5666 (ConcurrentHashMapV8<K,V> m,
5667 Fun<? super K, ? extends U> transformer,
5668 BiFun<? super U, ? super U, ? extends U> reducer) {
5669 super(m);
5670 this.transformer = transformer;
5671 this.reducer = reducer;
5672 }
5673 MapReduceKeysTask
5674 (BulkTask<K,V,?> p, int b,
5675 MapReduceKeysTask<K,V,U> nextRight,
5676 Fun<? super K, ? extends U> transformer,
5677 BiFun<? super U, ? super U, ? extends U> reducer) {
5678 super(p, b); this.nextRight = nextRight;
5679 this.transformer = transformer;
5680 this.reducer = reducer;
5681 }
5682 @SuppressWarnings("unchecked") public final boolean exec() {
5683 final Fun<? super K, ? extends U> transformer =
5684 this.transformer;
5685 final BiFun<? super U, ? super U, ? extends U> reducer =
5686 this.reducer;
5687 if (transformer == null || reducer == null)
5688 return abortOnNullFunction();
5689 try {
5690 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
5691 do {} while (!casPending(c = pending, c+1));
5692 (rights = new MapReduceKeysTask<K,V,U>
5693 (this, b >>>= 1, rights, transformer, reducer)).fork();
5694 }
5695 U r = null, u;
5696 while (advance() != null) {
5697 if ((u = transformer.apply((K)nextKey)) != null)
5698 r = (r == null) ? u : reducer.apply(r, u);
5699 }
5700 result = r;
5701 for (MapReduceKeysTask<K,V,U> t = this, s;;) {
5702 int c; BulkTask<K,V,?> par; U tr, sr;
5703 if ((c = t.pending) == 0) {
5704 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
5705 if ((sr = s.result) != null)
5706 t.result = (tr = t.result) == null? sr : reducer.apply(tr, sr);
5707 }
5708 if ((par = t.parent) == null ||
5709 !(par instanceof MapReduceKeysTask)) {
5710 t.quietlyComplete();
5711 break;
5712 }
5713 t = (MapReduceKeysTask<K,V,U>)par;
5714 }
5715 else if (t.casPending(c, c - 1))
5716 break;
5717 }
5718 } catch (Throwable ex) {
5719 return tryCompleteComputation(ex);
5720 }
5721 return false;
5722 }
5723 public final U getRawResult() { return result; }
5724 }
5725
5726 @SuppressWarnings("serial") static final class MapReduceValuesTask<K,V,U>
5727 extends BulkTask<K,V,U> {
5728 final Fun<? super V, ? extends U> transformer;
5729 final BiFun<? super U, ? super U, ? extends U> reducer;
5730 U result;
5731 MapReduceValuesTask<K,V,U> rights, nextRight;
5732 MapReduceValuesTask
5733 (ConcurrentHashMapV8<K,V> m,
5734 Fun<? super V, ? extends U> transformer,
5735 BiFun<? super U, ? super U, ? extends U> reducer) {
5736 super(m);
5737 this.transformer = transformer;
5738 this.reducer = reducer;
5739 }
5740 MapReduceValuesTask
5741 (BulkTask<K,V,?> p, int b,
5742 MapReduceValuesTask<K,V,U> nextRight,
5743 Fun<? super V, ? extends U> transformer,
5744 BiFun<? super U, ? super U, ? extends U> reducer) {
5745 super(p, b); this.nextRight = nextRight;
5746 this.transformer = transformer;
5747 this.reducer = reducer;
5748 }
5749 @SuppressWarnings("unchecked") public final boolean exec() {
5750 final Fun<? super V, ? extends U> transformer =
5751 this.transformer;
5752 final BiFun<? super U, ? super U, ? extends U> reducer =
5753 this.reducer;
5754 if (transformer == null || reducer == null)
5755 return abortOnNullFunction();
5756 try {
5757 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
5758 do {} while (!casPending(c = pending, c+1));
5759 (rights = new MapReduceValuesTask<K,V,U>
5760 (this, b >>>= 1, rights, transformer, reducer)).fork();
5761 }
5762 U r = null, u;
5763 Object v;
5764 while ((v = advance()) != null) {
5765 if ((u = transformer.apply((V)v)) != null)
5766 r = (r == null) ? u : reducer.apply(r, u);
5767 }
5768 result = r;
5769 for (MapReduceValuesTask<K,V,U> t = this, s;;) {
5770 int c; BulkTask<K,V,?> par; U tr, sr;
5771 if ((c = t.pending) == 0) {
5772 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
5773 if ((sr = s.result) != null)
5774 t.result = (tr = t.result) == null? sr : reducer.apply(tr, sr);
5775 }
5776 if ((par = t.parent) == null ||
5777 !(par instanceof MapReduceValuesTask)) {
5778 t.quietlyComplete();
5779 break;
5780 }
5781 t = (MapReduceValuesTask<K,V,U>)par;
5782 }
5783 else if (t.casPending(c, c - 1))
5784 break;
5785 }
5786 } catch (Throwable ex) {
5787 return tryCompleteComputation(ex);
5788 }
5789 return false;
5790 }
5791 public final U getRawResult() { return result; }
5792 }
5793
5794 @SuppressWarnings("serial") static final class MapReduceEntriesTask<K,V,U>
5795 extends BulkTask<K,V,U> {
5796 final Fun<Map.Entry<K,V>, ? extends U> transformer;
5797 final BiFun<? super U, ? super U, ? extends U> reducer;
5798 U result;
5799 MapReduceEntriesTask<K,V,U> rights, nextRight;
5800 MapReduceEntriesTask
5801 (ConcurrentHashMapV8<K,V> m,
5802 Fun<Map.Entry<K,V>, ? extends U> transformer,
5803 BiFun<? super U, ? super U, ? extends U> reducer) {
5804 super(m);
5805 this.transformer = transformer;
5806 this.reducer = reducer;
5807 }
5808 MapReduceEntriesTask
5809 (BulkTask<K,V,?> p, int b,
5810 MapReduceEntriesTask<K,V,U> nextRight,
5811 Fun<Map.Entry<K,V>, ? extends U> transformer,
5812 BiFun<? super U, ? super U, ? extends U> reducer) {
5813 super(p, b); this.nextRight = nextRight;
5814 this.transformer = transformer;
5815 this.reducer = reducer;
5816 }
5817 @SuppressWarnings("unchecked") public final boolean exec() {
5818 final Fun<Map.Entry<K,V>, ? extends U> transformer =
5819 this.transformer;
5820 final BiFun<? super U, ? super U, ? extends U> reducer =
5821 this.reducer;
5822 if (transformer == null || reducer == null)
5823 return abortOnNullFunction();
5824 try {
5825 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
5826 do {} while (!casPending(c = pending, c+1));
5827 (rights = new MapReduceEntriesTask<K,V,U>
5828 (this, b >>>= 1, rights, transformer, reducer)).fork();
5829 }
5830 U r = null, u;
5831 Object v;
5832 while ((v = advance()) != null) {
5833 if ((u = transformer.apply(entryFor((K)nextKey, (V)v))) != null)
5834 r = (r == null) ? u : reducer.apply(r, u);
5835 }
5836 result = r;
5837 for (MapReduceEntriesTask<K,V,U> t = this, s;;) {
5838 int c; BulkTask<K,V,?> par; U tr, sr;
5839 if ((c = t.pending) == 0) {
5840 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
5841 if ((sr = s.result) != null)
5842 t.result = (tr = t.result) == null? sr : reducer.apply(tr, sr);
5843 }
5844 if ((par = t.parent) == null ||
5845 !(par instanceof MapReduceEntriesTask)) {
5846 t.quietlyComplete();
5847 break;
5848 }
5849 t = (MapReduceEntriesTask<K,V,U>)par;
5850 }
5851 else if (t.casPending(c, c - 1))
5852 break;
5853 }
5854 } catch (Throwable ex) {
5855 return tryCompleteComputation(ex);
5856 }
5857 return false;
5858 }
5859 public final U getRawResult() { return result; }
5860 }
5861
5862 @SuppressWarnings("serial") static final class MapReduceMappingsTask<K,V,U>
5863 extends BulkTask<K,V,U> {
5864 final BiFun<? super K, ? super V, ? extends U> transformer;
5865 final BiFun<? super U, ? super U, ? extends U> reducer;
5866 U result;
5867 MapReduceMappingsTask<K,V,U> rights, nextRight;
5868 MapReduceMappingsTask
5869 (ConcurrentHashMapV8<K,V> m,
5870 BiFun<? super K, ? super V, ? extends U> transformer,
5871 BiFun<? super U, ? super U, ? extends U> reducer) {
5872 super(m);
5873 this.transformer = transformer;
5874 this.reducer = reducer;
5875 }
5876 MapReduceMappingsTask
5877 (BulkTask<K,V,?> p, int b,
5878 MapReduceMappingsTask<K,V,U> nextRight,
5879 BiFun<? super K, ? super V, ? extends U> transformer,
5880 BiFun<? super U, ? super U, ? extends U> reducer) {
5881 super(p, b); this.nextRight = nextRight;
5882 this.transformer = transformer;
5883 this.reducer = reducer;
5884 }
5885 @SuppressWarnings("unchecked") public final boolean exec() {
5886 final BiFun<? super K, ? super V, ? extends U> transformer =
5887 this.transformer;
5888 final BiFun<? super U, ? super U, ? extends U> reducer =
5889 this.reducer;
5890 if (transformer == null || reducer == null)
5891 return abortOnNullFunction();
5892 try {
5893 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
5894 do {} while (!casPending(c = pending, c+1));
5895 (rights = new MapReduceMappingsTask<K,V,U>
5896 (this, b >>>= 1, rights, transformer, reducer)).fork();
5897 }
5898 U r = null, u;
5899 Object v;
5900 while ((v = advance()) != null) {
5901 if ((u = transformer.apply((K)nextKey, (V)v)) != null)
5902 r = (r == null) ? u : reducer.apply(r, u);
5903 }
5904 result = r;
5905 for (MapReduceMappingsTask<K,V,U> t = this, s;;) {
5906 int c; BulkTask<K,V,?> par; U tr, sr;
5907 if ((c = t.pending) == 0) {
5908 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
5909 if ((sr = s.result) != null)
5910 t.result = (tr = t.result) == null? sr : reducer.apply(tr, sr);
5911 }
5912 if ((par = t.parent) == null ||
5913 !(par instanceof MapReduceMappingsTask)) {
5914 t.quietlyComplete();
5915 break;
5916 }
5917 t = (MapReduceMappingsTask<K,V,U>)par;
5918 }
5919 else if (t.casPending(c, c - 1))
5920 break;
5921 }
5922 } catch (Throwable ex) {
5923 return tryCompleteComputation(ex);
5924 }
5925 return false;
5926 }
5927 public final U getRawResult() { return result; }
5928 }
5929
5930 @SuppressWarnings("serial") static final class MapReduceKeysToDoubleTask<K,V>
5931 extends BulkTask<K,V,Double> {
5932 final ObjectToDouble<? super K> transformer;
5933 final DoubleByDoubleToDouble reducer;
5934 final double basis;
5935 double result;
5936 MapReduceKeysToDoubleTask<K,V> rights, nextRight;
5937 MapReduceKeysToDoubleTask
5938 (ConcurrentHashMapV8<K,V> m,
5939 ObjectToDouble<? super K> transformer,
5940 double basis,
5941 DoubleByDoubleToDouble reducer) {
5942 super(m);
5943 this.transformer = transformer;
5944 this.basis = basis; this.reducer = reducer;
5945 }
5946 MapReduceKeysToDoubleTask
5947 (BulkTask<K,V,?> p, int b,
5948 MapReduceKeysToDoubleTask<K,V> nextRight,
5949 ObjectToDouble<? super K> transformer,
5950 double basis,
5951 DoubleByDoubleToDouble reducer) {
5952 super(p, b); this.nextRight = nextRight;
5953 this.transformer = transformer;
5954 this.basis = basis; this.reducer = reducer;
5955 }
5956 @SuppressWarnings("unchecked") public final boolean exec() {
5957 final ObjectToDouble<? super K> transformer =
5958 this.transformer;
5959 final DoubleByDoubleToDouble reducer = this.reducer;
5960 if (transformer == null || reducer == null)
5961 return abortOnNullFunction();
5962 try {
5963 final double id = this.basis;
5964 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
5965 do {} while (!casPending(c = pending, c+1));
5966 (rights = new MapReduceKeysToDoubleTask<K,V>
5967 (this, b >>>= 1, rights, transformer, id, reducer)).fork();
5968 }
5969 double r = id;
5970 while (advance() != null)
5971 r = reducer.apply(r, transformer.apply((K)nextKey));
5972 result = r;
5973 for (MapReduceKeysToDoubleTask<K,V> t = this, s;;) {
5974 int c; BulkTask<K,V,?> par;
5975 if ((c = t.pending) == 0) {
5976 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
5977 t.result = reducer.apply(t.result, s.result);
5978 }
5979 if ((par = t.parent) == null ||
5980 !(par instanceof MapReduceKeysToDoubleTask)) {
5981 t.quietlyComplete();
5982 break;
5983 }
5984 t = (MapReduceKeysToDoubleTask<K,V>)par;
5985 }
5986 else if (t.casPending(c, c - 1))
5987 break;
5988 }
5989 } catch (Throwable ex) {
5990 return tryCompleteComputation(ex);
5991 }
5992 return false;
5993 }
5994 public final Double getRawResult() { return result; }
5995 }
5996
5997 @SuppressWarnings("serial") static final class MapReduceValuesToDoubleTask<K,V>
5998 extends BulkTask<K,V,Double> {
5999 final ObjectToDouble<? super V> transformer;
6000 final DoubleByDoubleToDouble reducer;
6001 final double basis;
6002 double result;
6003 MapReduceValuesToDoubleTask<K,V> rights, nextRight;
6004 MapReduceValuesToDoubleTask
6005 (ConcurrentHashMapV8<K,V> m,
6006 ObjectToDouble<? super V> transformer,
6007 double basis,
6008 DoubleByDoubleToDouble reducer) {
6009 super(m);
6010 this.transformer = transformer;
6011 this.basis = basis; this.reducer = reducer;
6012 }
6013 MapReduceValuesToDoubleTask
6014 (BulkTask<K,V,?> p, int b,
6015 MapReduceValuesToDoubleTask<K,V> nextRight,
6016 ObjectToDouble<? super V> transformer,
6017 double basis,
6018 DoubleByDoubleToDouble reducer) {
6019 super(p, b); this.nextRight = nextRight;
6020 this.transformer = transformer;
6021 this.basis = basis; this.reducer = reducer;
6022 }
6023 @SuppressWarnings("unchecked") public final boolean exec() {
6024 final ObjectToDouble<? super V> transformer =
6025 this.transformer;
6026 final DoubleByDoubleToDouble reducer = this.reducer;
6027 if (transformer == null || reducer == null)
6028 return abortOnNullFunction();
6029 try {
6030 final double id = this.basis;
6031 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6032 do {} while (!casPending(c = pending, c+1));
6033 (rights = new MapReduceValuesToDoubleTask<K,V>
6034 (this, b >>>= 1, rights, transformer, id, reducer)).fork();
6035 }
6036 double r = id;
6037 Object v;
6038 while ((v = advance()) != null)
6039 r = reducer.apply(r, transformer.apply((V)v));
6040 result = r;
6041 for (MapReduceValuesToDoubleTask<K,V> t = this, s;;) {
6042 int c; BulkTask<K,V,?> par;
6043 if ((c = t.pending) == 0) {
6044 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6045 t.result = reducer.apply(t.result, s.result);
6046 }
6047 if ((par = t.parent) == null ||
6048 !(par instanceof MapReduceValuesToDoubleTask)) {
6049 t.quietlyComplete();
6050 break;
6051 }
6052 t = (MapReduceValuesToDoubleTask<K,V>)par;
6053 }
6054 else if (t.casPending(c, c - 1))
6055 break;
6056 }
6057 } catch (Throwable ex) {
6058 return tryCompleteComputation(ex);
6059 }
6060 return false;
6061 }
6062 public final Double getRawResult() { return result; }
6063 }
6064
6065 @SuppressWarnings("serial") static final class MapReduceEntriesToDoubleTask<K,V>
6066 extends BulkTask<K,V,Double> {
6067 final ObjectToDouble<Map.Entry<K,V>> transformer;
6068 final DoubleByDoubleToDouble reducer;
6069 final double basis;
6070 double result;
6071 MapReduceEntriesToDoubleTask<K,V> rights, nextRight;
6072 MapReduceEntriesToDoubleTask
6073 (ConcurrentHashMapV8<K,V> m,
6074 ObjectToDouble<Map.Entry<K,V>> transformer,
6075 double basis,
6076 DoubleByDoubleToDouble reducer) {
6077 super(m);
6078 this.transformer = transformer;
6079 this.basis = basis; this.reducer = reducer;
6080 }
6081 MapReduceEntriesToDoubleTask
6082 (BulkTask<K,V,?> p, int b,
6083 MapReduceEntriesToDoubleTask<K,V> nextRight,
6084 ObjectToDouble<Map.Entry<K,V>> transformer,
6085 double basis,
6086 DoubleByDoubleToDouble reducer) {
6087 super(p, b); this.nextRight = nextRight;
6088 this.transformer = transformer;
6089 this.basis = basis; this.reducer = reducer;
6090 }
6091 @SuppressWarnings("unchecked") public final boolean exec() {
6092 final ObjectToDouble<Map.Entry<K,V>> transformer =
6093 this.transformer;
6094 final DoubleByDoubleToDouble reducer = this.reducer;
6095 if (transformer == null || reducer == null)
6096 return abortOnNullFunction();
6097 try {
6098 final double id = this.basis;
6099 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6100 do {} while (!casPending(c = pending, c+1));
6101 (rights = new MapReduceEntriesToDoubleTask<K,V>
6102 (this, b >>>= 1, rights, transformer, id, reducer)).fork();
6103 }
6104 double r = id;
6105 Object v;
6106 while ((v = advance()) != null)
6107 r = reducer.apply(r, transformer.apply(entryFor((K)nextKey, (V)v)));
6108 result = r;
6109 for (MapReduceEntriesToDoubleTask<K,V> t = this, s;;) {
6110 int c; BulkTask<K,V,?> par;
6111 if ((c = t.pending) == 0) {
6112 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6113 t.result = reducer.apply(t.result, s.result);
6114 }
6115 if ((par = t.parent) == null ||
6116 !(par instanceof MapReduceEntriesToDoubleTask)) {
6117 t.quietlyComplete();
6118 break;
6119 }
6120 t = (MapReduceEntriesToDoubleTask<K,V>)par;
6121 }
6122 else if (t.casPending(c, c - 1))
6123 break;
6124 }
6125 } catch (Throwable ex) {
6126 return tryCompleteComputation(ex);
6127 }
6128 return false;
6129 }
6130 public final Double getRawResult() { return result; }
6131 }
6132
6133 @SuppressWarnings("serial") static final class MapReduceMappingsToDoubleTask<K,V>
6134 extends BulkTask<K,V,Double> {
6135 final ObjectByObjectToDouble<? super K, ? super V> transformer;
6136 final DoubleByDoubleToDouble reducer;
6137 final double basis;
6138 double result;
6139 MapReduceMappingsToDoubleTask<K,V> rights, nextRight;
6140 MapReduceMappingsToDoubleTask
6141 (ConcurrentHashMapV8<K,V> m,
6142 ObjectByObjectToDouble<? super K, ? super V> transformer,
6143 double basis,
6144 DoubleByDoubleToDouble reducer) {
6145 super(m);
6146 this.transformer = transformer;
6147 this.basis = basis; this.reducer = reducer;
6148 }
6149 MapReduceMappingsToDoubleTask
6150 (BulkTask<K,V,?> p, int b,
6151 MapReduceMappingsToDoubleTask<K,V> nextRight,
6152 ObjectByObjectToDouble<? super K, ? super V> transformer,
6153 double basis,
6154 DoubleByDoubleToDouble reducer) {
6155 super(p, b); this.nextRight = nextRight;
6156 this.transformer = transformer;
6157 this.basis = basis; this.reducer = reducer;
6158 }
6159 @SuppressWarnings("unchecked") public final boolean exec() {
6160 final ObjectByObjectToDouble<? super K, ? super V> transformer =
6161 this.transformer;
6162 final DoubleByDoubleToDouble reducer = this.reducer;
6163 if (transformer == null || reducer == null)
6164 return abortOnNullFunction();
6165 try {
6166 final double id = this.basis;
6167 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6168 do {} while (!casPending(c = pending, c+1));
6169 (rights = new MapReduceMappingsToDoubleTask<K,V>
6170 (this, b >>>= 1, rights, transformer, id, reducer)).fork();
6171 }
6172 double r = id;
6173 Object v;
6174 while ((v = advance()) != null)
6175 r = reducer.apply(r, transformer.apply((K)nextKey, (V)v));
6176 result = r;
6177 for (MapReduceMappingsToDoubleTask<K,V> t = this, s;;) {
6178 int c; BulkTask<K,V,?> par;
6179 if ((c = t.pending) == 0) {
6180 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6181 t.result = reducer.apply(t.result, s.result);
6182 }
6183 if ((par = t.parent) == null ||
6184 !(par instanceof MapReduceMappingsToDoubleTask)) {
6185 t.quietlyComplete();
6186 break;
6187 }
6188 t = (MapReduceMappingsToDoubleTask<K,V>)par;
6189 }
6190 else if (t.casPending(c, c - 1))
6191 break;
6192 }
6193 } catch (Throwable ex) {
6194 return tryCompleteComputation(ex);
6195 }
6196 return false;
6197 }
6198 public final Double getRawResult() { return result; }
6199 }
6200
6201 @SuppressWarnings("serial") static final class MapReduceKeysToLongTask<K,V>
6202 extends BulkTask<K,V,Long> {
6203 final ObjectToLong<? super K> transformer;
6204 final LongByLongToLong reducer;
6205 final long basis;
6206 long result;
6207 MapReduceKeysToLongTask<K,V> rights, nextRight;
6208 MapReduceKeysToLongTask
6209 (ConcurrentHashMapV8<K,V> m,
6210 ObjectToLong<? super K> transformer,
6211 long basis,
6212 LongByLongToLong reducer) {
6213 super(m);
6214 this.transformer = transformer;
6215 this.basis = basis; this.reducer = reducer;
6216 }
6217 MapReduceKeysToLongTask
6218 (BulkTask<K,V,?> p, int b,
6219 MapReduceKeysToLongTask<K,V> nextRight,
6220 ObjectToLong<? super K> transformer,
6221 long basis,
6222 LongByLongToLong reducer) {
6223 super(p, b); this.nextRight = nextRight;
6224 this.transformer = transformer;
6225 this.basis = basis; this.reducer = reducer;
6226 }
6227 @SuppressWarnings("unchecked") public final boolean exec() {
6228 final ObjectToLong<? super K> transformer =
6229 this.transformer;
6230 final LongByLongToLong reducer = this.reducer;
6231 if (transformer == null || reducer == null)
6232 return abortOnNullFunction();
6233 try {
6234 final long id = this.basis;
6235 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6236 do {} while (!casPending(c = pending, c+1));
6237 (rights = new MapReduceKeysToLongTask<K,V>
6238 (this, b >>>= 1, rights, transformer, id, reducer)).fork();
6239 }
6240 long r = id;
6241 while (advance() != null)
6242 r = reducer.apply(r, transformer.apply((K)nextKey));
6243 result = r;
6244 for (MapReduceKeysToLongTask<K,V> t = this, s;;) {
6245 int c; BulkTask<K,V,?> par;
6246 if ((c = t.pending) == 0) {
6247 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6248 t.result = reducer.apply(t.result, s.result);
6249 }
6250 if ((par = t.parent) == null ||
6251 !(par instanceof MapReduceKeysToLongTask)) {
6252 t.quietlyComplete();
6253 break;
6254 }
6255 t = (MapReduceKeysToLongTask<K,V>)par;
6256 }
6257 else if (t.casPending(c, c - 1))
6258 break;
6259 }
6260 } catch (Throwable ex) {
6261 return tryCompleteComputation(ex);
6262 }
6263 return false;
6264 }
6265 public final Long getRawResult() { return result; }
6266 }
6267
6268 @SuppressWarnings("serial") static final class MapReduceValuesToLongTask<K,V>
6269 extends BulkTask<K,V,Long> {
6270 final ObjectToLong<? super V> transformer;
6271 final LongByLongToLong reducer;
6272 final long basis;
6273 long result;
6274 MapReduceValuesToLongTask<K,V> rights, nextRight;
6275 MapReduceValuesToLongTask
6276 (ConcurrentHashMapV8<K,V> m,
6277 ObjectToLong<? super V> transformer,
6278 long basis,
6279 LongByLongToLong reducer) {
6280 super(m);
6281 this.transformer = transformer;
6282 this.basis = basis; this.reducer = reducer;
6283 }
6284 MapReduceValuesToLongTask
6285 (BulkTask<K,V,?> p, int b,
6286 MapReduceValuesToLongTask<K,V> nextRight,
6287 ObjectToLong<? super V> transformer,
6288 long basis,
6289 LongByLongToLong reducer) {
6290 super(p, b); this.nextRight = nextRight;
6291 this.transformer = transformer;
6292 this.basis = basis; this.reducer = reducer;
6293 }
6294 @SuppressWarnings("unchecked") public final boolean exec() {
6295 final ObjectToLong<? super V> transformer =
6296 this.transformer;
6297 final LongByLongToLong reducer = this.reducer;
6298 if (transformer == null || reducer == null)
6299 return abortOnNullFunction();
6300 try {
6301 final long id = this.basis;
6302 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6303 do {} while (!casPending(c = pending, c+1));
6304 (rights = new MapReduceValuesToLongTask<K,V>
6305 (this, b >>>= 1, rights, transformer, id, reducer)).fork();
6306 }
6307 long r = id;
6308 Object v;
6309 while ((v = advance()) != null)
6310 r = reducer.apply(r, transformer.apply((V)v));
6311 result = r;
6312 for (MapReduceValuesToLongTask<K,V> t = this, s;;) {
6313 int c; BulkTask<K,V,?> par;
6314 if ((c = t.pending) == 0) {
6315 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6316 t.result = reducer.apply(t.result, s.result);
6317 }
6318 if ((par = t.parent) == null ||
6319 !(par instanceof MapReduceValuesToLongTask)) {
6320 t.quietlyComplete();
6321 break;
6322 }
6323 t = (MapReduceValuesToLongTask<K,V>)par;
6324 }
6325 else if (t.casPending(c, c - 1))
6326 break;
6327 }
6328 } catch (Throwable ex) {
6329 return tryCompleteComputation(ex);
6330 }
6331 return false;
6332 }
6333 public final Long getRawResult() { return result; }
6334 }
6335
6336 @SuppressWarnings("serial") static final class MapReduceEntriesToLongTask<K,V>
6337 extends BulkTask<K,V,Long> {
6338 final ObjectToLong<Map.Entry<K,V>> transformer;
6339 final LongByLongToLong reducer;
6340 final long basis;
6341 long result;
6342 MapReduceEntriesToLongTask<K,V> rights, nextRight;
6343 MapReduceEntriesToLongTask
6344 (ConcurrentHashMapV8<K,V> m,
6345 ObjectToLong<Map.Entry<K,V>> transformer,
6346 long basis,
6347 LongByLongToLong reducer) {
6348 super(m);
6349 this.transformer = transformer;
6350 this.basis = basis; this.reducer = reducer;
6351 }
6352 MapReduceEntriesToLongTask
6353 (BulkTask<K,V,?> p, int b,
6354 MapReduceEntriesToLongTask<K,V> nextRight,
6355 ObjectToLong<Map.Entry<K,V>> transformer,
6356 long basis,
6357 LongByLongToLong reducer) {
6358 super(p, b); this.nextRight = nextRight;
6359 this.transformer = transformer;
6360 this.basis = basis; this.reducer = reducer;
6361 }
6362 @SuppressWarnings("unchecked") public final boolean exec() {
6363 final ObjectToLong<Map.Entry<K,V>> transformer =
6364 this.transformer;
6365 final LongByLongToLong reducer = this.reducer;
6366 if (transformer == null || reducer == null)
6367 return abortOnNullFunction();
6368 try {
6369 final long id = this.basis;
6370 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6371 do {} while (!casPending(c = pending, c+1));
6372 (rights = new MapReduceEntriesToLongTask<K,V>
6373 (this, b >>>= 1, rights, transformer, id, reducer)).fork();
6374 }
6375 long r = id;
6376 Object v;
6377 while ((v = advance()) != null)
6378 r = reducer.apply(r, transformer.apply(entryFor((K)nextKey, (V)v)));
6379 result = r;
6380 for (MapReduceEntriesToLongTask<K,V> t = this, s;;) {
6381 int c; BulkTask<K,V,?> par;
6382 if ((c = t.pending) == 0) {
6383 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6384 t.result = reducer.apply(t.result, s.result);
6385 }
6386 if ((par = t.parent) == null ||
6387 !(par instanceof MapReduceEntriesToLongTask)) {
6388 t.quietlyComplete();
6389 break;
6390 }
6391 t = (MapReduceEntriesToLongTask<K,V>)par;
6392 }
6393 else if (t.casPending(c, c - 1))
6394 break;
6395 }
6396 } catch (Throwable ex) {
6397 return tryCompleteComputation(ex);
6398 }
6399 return false;
6400 }
6401 public final Long getRawResult() { return result; }
6402 }
6403
6404 @SuppressWarnings("serial") static final class MapReduceMappingsToLongTask<K,V>
6405 extends BulkTask<K,V,Long> {
6406 final ObjectByObjectToLong<? super K, ? super V> transformer;
6407 final LongByLongToLong reducer;
6408 final long basis;
6409 long result;
6410 MapReduceMappingsToLongTask<K,V> rights, nextRight;
6411 MapReduceMappingsToLongTask
6412 (ConcurrentHashMapV8<K,V> m,
6413 ObjectByObjectToLong<? super K, ? super V> transformer,
6414 long basis,
6415 LongByLongToLong reducer) {
6416 super(m);
6417 this.transformer = transformer;
6418 this.basis = basis; this.reducer = reducer;
6419 }
6420 MapReduceMappingsToLongTask
6421 (BulkTask<K,V,?> p, int b,
6422 MapReduceMappingsToLongTask<K,V> nextRight,
6423 ObjectByObjectToLong<? super K, ? super V> transformer,
6424 long basis,
6425 LongByLongToLong reducer) {
6426 super(p, b); this.nextRight = nextRight;
6427 this.transformer = transformer;
6428 this.basis = basis; this.reducer = reducer;
6429 }
6430 @SuppressWarnings("unchecked") public final boolean exec() {
6431 final ObjectByObjectToLong<? super K, ? super V> transformer =
6432 this.transformer;
6433 final LongByLongToLong reducer = this.reducer;
6434 if (transformer == null || reducer == null)
6435 return abortOnNullFunction();
6436 try {
6437 final long id = this.basis;
6438 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6439 do {} while (!casPending(c = pending, c+1));
6440 (rights = new MapReduceMappingsToLongTask<K,V>
6441 (this, b >>>= 1, rights, transformer, id, reducer)).fork();
6442 }
6443 long r = id;
6444 Object v;
6445 while ((v = advance()) != null)
6446 r = reducer.apply(r, transformer.apply((K)nextKey, (V)v));
6447 result = r;
6448 for (MapReduceMappingsToLongTask<K,V> t = this, s;;) {
6449 int c; BulkTask<K,V,?> par;
6450 if ((c = t.pending) == 0) {
6451 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6452 t.result = reducer.apply(t.result, s.result);
6453 }
6454 if ((par = t.parent) == null ||
6455 !(par instanceof MapReduceMappingsToLongTask)) {
6456 t.quietlyComplete();
6457 break;
6458 }
6459 t = (MapReduceMappingsToLongTask<K,V>)par;
6460 }
6461 else if (t.casPending(c, c - 1))
6462 break;
6463 }
6464 } catch (Throwable ex) {
6465 return tryCompleteComputation(ex);
6466 }
6467 return false;
6468 }
6469 public final Long getRawResult() { return result; }
6470 }
6471
6472 @SuppressWarnings("serial") static final class MapReduceKeysToIntTask<K,V>
6473 extends BulkTask<K,V,Integer> {
6474 final ObjectToInt<? super K> transformer;
6475 final IntByIntToInt reducer;
6476 final int basis;
6477 int result;
6478 MapReduceKeysToIntTask<K,V> rights, nextRight;
6479 MapReduceKeysToIntTask
6480 (ConcurrentHashMapV8<K,V> m,
6481 ObjectToInt<? super K> transformer,
6482 int basis,
6483 IntByIntToInt reducer) {
6484 super(m);
6485 this.transformer = transformer;
6486 this.basis = basis; this.reducer = reducer;
6487 }
6488 MapReduceKeysToIntTask
6489 (BulkTask<K,V,?> p, int b,
6490 MapReduceKeysToIntTask<K,V> nextRight,
6491 ObjectToInt<? super K> transformer,
6492 int basis,
6493 IntByIntToInt reducer) {
6494 super(p, b); this.nextRight = nextRight;
6495 this.transformer = transformer;
6496 this.basis = basis; this.reducer = reducer;
6497 }
6498 @SuppressWarnings("unchecked") public final boolean exec() {
6499 final ObjectToInt<? super K> transformer =
6500 this.transformer;
6501 final IntByIntToInt reducer = this.reducer;
6502 if (transformer == null || reducer == null)
6503 return abortOnNullFunction();
6504 try {
6505 final int id = this.basis;
6506 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6507 do {} while (!casPending(c = pending, c+1));
6508 (rights = new MapReduceKeysToIntTask<K,V>
6509 (this, b >>>= 1, rights, transformer, id, reducer)).fork();
6510 }
6511 int r = id;
6512 while (advance() != null)
6513 r = reducer.apply(r, transformer.apply((K)nextKey));
6514 result = r;
6515 for (MapReduceKeysToIntTask<K,V> t = this, s;;) {
6516 int c; BulkTask<K,V,?> par;
6517 if ((c = t.pending) == 0) {
6518 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6519 t.result = reducer.apply(t.result, s.result);
6520 }
6521 if ((par = t.parent) == null ||
6522 !(par instanceof MapReduceKeysToIntTask)) {
6523 t.quietlyComplete();
6524 break;
6525 }
6526 t = (MapReduceKeysToIntTask<K,V>)par;
6527 }
6528 else if (t.casPending(c, c - 1))
6529 break;
6530 }
6531 } catch (Throwable ex) {
6532 return tryCompleteComputation(ex);
6533 }
6534 return false;
6535 }
6536 public final Integer getRawResult() { return result; }
6537 }
6538
6539 @SuppressWarnings("serial") static final class MapReduceValuesToIntTask<K,V>
6540 extends BulkTask<K,V,Integer> {
6541 final ObjectToInt<? super V> transformer;
6542 final IntByIntToInt reducer;
6543 final int basis;
6544 int result;
6545 MapReduceValuesToIntTask<K,V> rights, nextRight;
6546 MapReduceValuesToIntTask
6547 (ConcurrentHashMapV8<K,V> m,
6548 ObjectToInt<? super V> transformer,
6549 int basis,
6550 IntByIntToInt reducer) {
6551 super(m);
6552 this.transformer = transformer;
6553 this.basis = basis; this.reducer = reducer;
6554 }
6555 MapReduceValuesToIntTask
6556 (BulkTask<K,V,?> p, int b,
6557 MapReduceValuesToIntTask<K,V> nextRight,
6558 ObjectToInt<? super V> transformer,
6559 int basis,
6560 IntByIntToInt reducer) {
6561 super(p, b); this.nextRight = nextRight;
6562 this.transformer = transformer;
6563 this.basis = basis; this.reducer = reducer;
6564 }
6565 @SuppressWarnings("unchecked") public final boolean exec() {
6566 final ObjectToInt<? super V> transformer =
6567 this.transformer;
6568 final IntByIntToInt reducer = this.reducer;
6569 if (transformer == null || reducer == null)
6570 return abortOnNullFunction();
6571 try {
6572 final int id = this.basis;
6573 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6574 do {} while (!casPending(c = pending, c+1));
6575 (rights = new MapReduceValuesToIntTask<K,V>
6576 (this, b >>>= 1, rights, transformer, id, reducer)).fork();
6577 }
6578 int r = id;
6579 Object v;
6580 while ((v = advance()) != null)
6581 r = reducer.apply(r, transformer.apply((V)v));
6582 result = r;
6583 for (MapReduceValuesToIntTask<K,V> t = this, s;;) {
6584 int c; BulkTask<K,V,?> par;
6585 if ((c = t.pending) == 0) {
6586 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6587 t.result = reducer.apply(t.result, s.result);
6588 }
6589 if ((par = t.parent) == null ||
6590 !(par instanceof MapReduceValuesToIntTask)) {
6591 t.quietlyComplete();
6592 break;
6593 }
6594 t = (MapReduceValuesToIntTask<K,V>)par;
6595 }
6596 else if (t.casPending(c, c - 1))
6597 break;
6598 }
6599 } catch (Throwable ex) {
6600 return tryCompleteComputation(ex);
6601 }
6602 return false;
6603 }
6604 public final Integer getRawResult() { return result; }
6605 }
6606
6607 @SuppressWarnings("serial") static final class MapReduceEntriesToIntTask<K,V>
6608 extends BulkTask<K,V,Integer> {
6609 final ObjectToInt<Map.Entry<K,V>> transformer;
6610 final IntByIntToInt reducer;
6611 final int basis;
6612 int result;
6613 MapReduceEntriesToIntTask<K,V> rights, nextRight;
6614 MapReduceEntriesToIntTask
6615 (ConcurrentHashMapV8<K,V> m,
6616 ObjectToInt<Map.Entry<K,V>> transformer,
6617 int basis,
6618 IntByIntToInt reducer) {
6619 super(m);
6620 this.transformer = transformer;
6621 this.basis = basis; this.reducer = reducer;
6622 }
6623 MapReduceEntriesToIntTask
6624 (BulkTask<K,V,?> p, int b,
6625 MapReduceEntriesToIntTask<K,V> nextRight,
6626 ObjectToInt<Map.Entry<K,V>> transformer,
6627 int basis,
6628 IntByIntToInt reducer) {
6629 super(p, b); this.nextRight = nextRight;
6630 this.transformer = transformer;
6631 this.basis = basis; this.reducer = reducer;
6632 }
6633 @SuppressWarnings("unchecked") public final boolean exec() {
6634 final ObjectToInt<Map.Entry<K,V>> transformer =
6635 this.transformer;
6636 final IntByIntToInt reducer = this.reducer;
6637 if (transformer == null || reducer == null)
6638 return abortOnNullFunction();
6639 try {
6640 final int id = this.basis;
6641 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6642 do {} while (!casPending(c = pending, c+1));
6643 (rights = new MapReduceEntriesToIntTask<K,V>
6644 (this, b >>>= 1, rights, transformer, id, reducer)).fork();
6645 }
6646 int r = id;
6647 Object v;
6648 while ((v = advance()) != null)
6649 r = reducer.apply(r, transformer.apply(entryFor((K)nextKey, (V)v)));
6650 result = r;
6651 for (MapReduceEntriesToIntTask<K,V> t = this, s;;) {
6652 int c; BulkTask<K,V,?> par;
6653 if ((c = t.pending) == 0) {
6654 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6655 t.result = reducer.apply(t.result, s.result);
6656 }
6657 if ((par = t.parent) == null ||
6658 !(par instanceof MapReduceEntriesToIntTask)) {
6659 t.quietlyComplete();
6660 break;
6661 }
6662 t = (MapReduceEntriesToIntTask<K,V>)par;
6663 }
6664 else if (t.casPending(c, c - 1))
6665 break;
6666 }
6667 } catch (Throwable ex) {
6668 return tryCompleteComputation(ex);
6669 }
6670 return false;
6671 }
6672 public final Integer getRawResult() { return result; }
6673 }
6674
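    // Key/value variant: the transformer receives each (key, value) pair
    // directly; otherwise identical in structure to the two tasks above.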
6675 @SuppressWarnings("serial") static final class MapReduceMappingsToIntTask<K,V>
6676 extends BulkTask<K,V,Integer> {
6677 final ObjectByObjectToInt<? super K, ? super V> transformer;
6678 final IntByIntToInt reducer;
6679 final int basis;
6680 int result;
6681 MapReduceMappingsToIntTask<K,V> rights, nextRight;
6682 MapReduceMappingsToIntTask
6683 (ConcurrentHashMapV8<K,V> m,
6684 ObjectByObjectToInt<? super K, ? super V> transformer,
6685 int basis,
6686 IntByIntToInt reducer) {
6687 super(m);
6688 this.transformer = transformer;
6689 this.basis = basis; this.reducer = reducer;
6690 }
6691 MapReduceMappingsToIntTask
6692 (BulkTask<K,V,?> p, int b,
6693 MapReduceMappingsToIntTask<K,V> nextRight,
6694 ObjectByObjectToInt<? super K, ? super V> transformer,
6695 int basis,
6696 IntByIntToInt reducer) {
6697 super(p, b); this.nextRight = nextRight;
6698 this.transformer = transformer;
6699 this.basis = basis; this.reducer = reducer;
6700 }
6701 @SuppressWarnings("unchecked") public final boolean exec() {
6702 final ObjectByObjectToInt<? super K, ? super V> transformer =
6703 this.transformer;
6704 final IntByIntToInt reducer = this.reducer;
6705 if (transformer == null || reducer == null)
6706 return abortOnNullFunction();
6707 try {
6708 final int id = this.basis;
6709 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6710 do {} while (!casPending(c = pending, c+1));
6711 (rights = new MapReduceMappingsToIntTask<K,V>
6712 (this, b >>>= 1, rights, transformer, id, reducer)).fork();
6713 }
6714 int r = id;
6715 Object v;
6716 while ((v = advance()) != null)
6717 r = reducer.apply(r, transformer.apply((K)nextKey, (V)v));
6718 result = r;
6719 for (MapReduceMappingsToIntTask<K,V> t = this, s;;) {
6720 int c; BulkTask<K,V,?> par;
6721 if ((c = t.pending) == 0) {
6722 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6723 t.result = reducer.apply(t.result, s.result);
6724 }
6725 if ((par = t.parent) == null ||
6726 !(par instanceof MapReduceMappingsToIntTask)) {
6727 t.quietlyComplete();
6728 break;
6729 }
6730 t = (MapReduceMappingsToIntTask<K,V>)par;
6731 }
6732 else if (t.casPending(c, c - 1))
6733 break;
6734 }
6735 } catch (Throwable ex) {
6736 return tryCompleteComputation(ex);
6737 }
6738 return false;
6739 }
6740 public final Integer getRawResult() { return result; }
6741 }
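    // Usage sketch (not part of this file): these tasks are intended to be
    // driven by the map's public bulk operations. Assuming a public method of
    // the form reduceValuesToInt(ObjectToInt, int, IntByIntToInt) is exposed by
    // this class (ObjectToInt and IntByIntToInt are the nested interfaces used
    // by the tasks above), summing value lengths would look roughly like this:
    //
    //   ConcurrentHashMapV8<String,String> map = new ConcurrentHashMapV8<String,String>();
    //   map.put("a", "x");
    //   map.put("b", "yz");
    //   int totalLength = map.reduceValuesToInt(
    //       new ObjectToInt<String>() {          // transformer: value -> int
    //           public int apply(String v) { return v.length(); }
    //       },
    //       0,                                   // basis: identity for addition
    //       new IntByIntToInt() {                // reducer: combine two ints
    //           public int apply(int x, int y) { return x + y; }
    //       });
    //   // totalLength == 3 when no concurrent updates intervene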
6742
6743
6744 // Unsafe mechanics
6745 private static final sun.misc.Unsafe UNSAFE;
6746 private static final long counterOffset;
6747 private static final long sizeCtlOffset;
6748 private static final long ABASE;
6749 private static final int ASHIFT;
6750
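    // The static initializer below caches the Unsafe field offsets for
    // 'counter' and 'sizeCtl' and the base offset/shift needed to address
    // Node[] elements, failing fast if the array element scale is not a
    // power of two.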
6751 static {
6752 int ss;
6753 try {
6754 UNSAFE = getUnsafe();
6755 Class<?> k = ConcurrentHashMapV8.class;
6756 counterOffset = UNSAFE.objectFieldOffset
6757 (k.getDeclaredField("counter"));
6758 sizeCtlOffset = UNSAFE.objectFieldOffset
6759 (k.getDeclaredField("sizeCtl"));
6760 Class<?> sc = Node[].class;
6761 ABASE = UNSAFE.arrayBaseOffset(sc);
6762 ss = UNSAFE.arrayIndexScale(sc);
6763 } catch (Exception e) {
6764 throw new Error(e);
6765 }
6766 if ((ss & (ss-1)) != 0)
6767 throw new Error("data type scale not a power of two");
6768 ASHIFT = 31 - Integer.numberOfLeadingZeros(ss);
6769 }
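    // For reference, a sketch of how ABASE and ASHIFT are typically combined
    // to address slot i of the table (matching the volatile table-access
    // helpers defined earlier in this class):
    //
    //   Node n = (Node) UNSAFE.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);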
6770
6771 /**
6772      * Returns a sun.misc.Unsafe. Suitable for use in a third-party package.
6773      * Replace with a simple call to Unsafe.getUnsafe when integrating
6774      * into a JDK.
6775 *
6776 * @return a sun.misc.Unsafe
6777 */
6778 private static sun.misc.Unsafe getUnsafe() {
6779 try {
6780 return sun.misc.Unsafe.getUnsafe();
6781 } catch (SecurityException se) {
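            // Unsafe.getUnsafe() is typically rejected when this class is not
            // loaded by the bootstrap class loader; fall back to reflective
            // access to the private static "theUnsafe" field.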
6782 try {
6783 return java.security.AccessController.doPrivileged
6784 (new java.security
6785 .PrivilegedExceptionAction<sun.misc.Unsafe>() {
6786 public sun.misc.Unsafe run() throws Exception {
6787 java.lang.reflect.Field f = sun.misc
6788 .Unsafe.class.getDeclaredField("theUnsafe");
6789 f.setAccessible(true);
6790 return (sun.misc.Unsafe) f.get(null);
6791 }});
6792 } catch (java.security.PrivilegedActionException e) {
6793 throw new RuntimeException("Could not initialize intrinsics",
6794 e.getCause());
6795 }
6796 }
6797 }
6798 }