root/jsr166/jsr166/src/jsr166e/ConcurrentHashMapV8.java
Revision: 1.58
Committed: Tue Aug 14 05:55:08 2012 UTC (11 years, 8 months ago) by jsr166
Branch: MAIN
Changes since 1.57: +1 -0 lines
Log Message:
whitespace

File Contents

1 /*
2 * Written by Doug Lea with assistance from members of JCP JSR-166
3 * Expert Group and released to the public domain, as explained at
4 * http://creativecommons.org/publicdomain/zero/1.0/
5 */
6
7 package jsr166e;
8 import jsr166e.LongAdder;
9 import jsr166e.ForkJoinPool;
10 import jsr166e.ForkJoinTask;
11
12 import java.util.Comparator;
13 import java.util.Arrays;
14 import java.util.Map;
15 import java.util.Set;
16 import java.util.Collection;
17 import java.util.AbstractMap;
18 import java.util.AbstractSet;
19 import java.util.AbstractCollection;
20 import java.util.Hashtable;
21 import java.util.HashMap;
22 import java.util.Iterator;
23 import java.util.Enumeration;
24 import java.util.ConcurrentModificationException;
25 import java.util.NoSuchElementException;
26 import java.util.concurrent.ConcurrentMap;
27 import java.util.concurrent.ThreadLocalRandom;
28 import java.util.concurrent.locks.LockSupport;
29 import java.util.concurrent.locks.AbstractQueuedSynchronizer;
30 import java.util.concurrent.atomic.AtomicReference;
31
32 import java.io.Serializable;
33
34 /**
35 * A hash table supporting full concurrency of retrievals and
36 * high expected concurrency for updates. This class obeys the
37 * same functional specification as {@link java.util.Hashtable}, and
38 * includes versions of methods corresponding to each method of
39 * {@code Hashtable}. However, even though all operations are
40 * thread-safe, retrieval operations do <em>not</em> entail locking,
41 * and there is <em>not</em> any support for locking the entire table
42 * in a way that prevents all access. This class is fully
43 * interoperable with {@code Hashtable} in programs that rely on its
44 * thread safety but not on its synchronization details.
45 *
46 * <p> Retrieval operations (including {@code get}) generally do not
47 * block, so may overlap with update operations (including {@code put}
48 * and {@code remove}). Retrievals reflect the results of the most
49 * recently <em>completed</em> update operations holding upon their
50 * onset. For aggregate operations such as {@code putAll} and {@code
51 * clear}, concurrent retrievals may reflect insertion or removal of
52 * only some entries. Similarly, Iterators and Enumerations return
53 * elements reflecting the state of the hash table at some point at or
54 * since the creation of the iterator/enumeration. They do
55 * <em>not</em> throw {@link ConcurrentModificationException}.
56 * However, iterators are designed to be used by only one thread at a
57 * time. Bear in mind that the results of aggregate status methods
58 * including {@code size}, {@code isEmpty}, and {@code containsValue}
59 * are typically useful only when a map is not undergoing concurrent
60 * updates in other threads. Otherwise the results of these methods
61 * reflect transient states that may be adequate for monitoring
62 * or estimation purposes, but not for program control.
63 *
64 * <p> The table is dynamically expanded when there are too many
65 * collisions (i.e., keys that have distinct hash codes but fall into
66 * the same slot modulo the table size), with the expected average
67 * effect of maintaining roughly two bins per mapping (corresponding
68 * to a 0.75 load factor threshold for resizing). There may be much
69 * variance around this average as mappings are added and removed, but
70 * overall, this maintains a commonly accepted time/space tradeoff for
71 * hash tables. However, resizing this or any other kind of hash
72 * table may be a relatively slow operation. When possible, it is a
73 * good idea to provide a size estimate as an optional {@code
74 * initialCapacity} constructor argument. An additional optional
75 * {@code loadFactor} constructor argument provides a further means of
76 * customizing initial table capacity by specifying the table density
77 * to be used in calculating the amount of space to allocate for the
78 * given number of elements. Also, for compatibility with previous
79 * versions of this class, constructors may optionally specify an
80 * expected {@code concurrencyLevel} as an additional hint for
81 * internal sizing. Note that using many keys with exactly the same
82 * {@code hashCode()} is a sure way to slow down performance of any
83 * hash table.
84 *
85 * <p>This class and its views and iterators implement all of the
86 * <em>optional</em> methods of the {@link Map} and {@link Iterator}
87 * interfaces.
88 *
89 * <p> Like {@link Hashtable} but unlike {@link HashMap}, this class
90 * does <em>not</em> allow {@code null} to be used as a key or value.
91 *
92 * <p>This class is a member of the
93 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
94 * Java Collections Framework</a>.
95 *
96 * <p><em>jsr166e note: This class is a candidate replacement for
97 * java.util.concurrent.ConcurrentHashMap. During transition, this
98 * class declares and uses nested functional interfaces with different
99 * names but the same forms as those expected for JDK8.</em>
100 *
101 * @since 1.5
102 * @author Doug Lea
103 * @param <K> the type of keys maintained by this map
104 * @param <V> the type of mapped values
105 */
106 public class ConcurrentHashMapV8<K, V>
107 implements ConcurrentMap<K, V>, Serializable {
108 private static final long serialVersionUID = 7249069246763182397L;
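/*
 * Illustrative usage (not part of the implementation): a minimal
 * sketch of the Hashtable-style API described in the class javadoc,
 * assuming the caller can supply a rough size estimate up front.
 *
 *   ConcurrentHashMapV8<String, Long> map =
 *       new ConcurrentHashMapV8<String, Long>(1024); // size hint avoids early resizes
 *   map.put("hits", 1L);                    // null keys/values are rejected
 *   Long v = map.get("hits");               // non-blocking; may overlap with writers
 *   map.putIfAbsent("hits", 0L);            // no effect here; "hits" is present
 *   for (Map.Entry<String, Long> e : map.entrySet())
 *       System.out.println(e);              // weakly consistent; never throws
 *                                           // ConcurrentModificationException
 */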
109
110 /**
111 * A partitionable iterator. A Spliterator can be traversed
112 * directly, but can also be partitioned (before traversal) by
113 * creating another Spliterator that covers a non-overlapping
114 * portion of the elements, and so may be amenable to parallel
115 * execution.
116 *
117 * <p> This interface exports a subset of expected JDK8
118 * functionality.
119 *
120 * <p>Sample usage: Here is one (of the several) ways to compute
121 * the sum of the values held in a map using the ForkJoin
122 * framework. As illustrated here, Spliterators are well suited to
123 * designs in which a task repeatedly splits off half its work
124 * into forked subtasks until small enough to process directly,
125 * and then joins these subtasks. Variants of this style can also
126 * be used in completion-based designs.
127 *
128 * <pre>
129 * {@code ConcurrentHashMapV8<String, Long> m = ...
130 * // split as if have 8 * parallelism, for load balance
131 * int n = m.size();
132 * int p = aForkJoinPool.getParallelism() * 8;
133 * int split = (n < p)? n : p;
134 * long sum = aForkJoinPool.invoke(new SumValues(m.valueSpliterator(), split, null));
135 * // ...
136 * static class SumValues extends RecursiveTask<Long> {
137 * final Spliterator<Long> s;
138 * final int split; // split while > 1
139 * final SumValues nextJoin; // records forked subtasks to join
140 * SumValues(Spliterator<Long> s, int split, SumValues nextJoin) {
141 * this.s = s; this.split = split; this.nextJoin = nextJoin;
142 * }
143 * public Long compute() {
144 * long sum = 0;
145 * SumValues subtasks = null; // fork subtasks
146 * for (int h = split >>> 1; h > 0; h >>>= 1)
147 * (subtasks = new SumValues(s.split(), h, subtasks)).fork();
148 * while (s.hasNext()) // directly process remaining elements
149 * sum += s.next();
150 * for (SumValues t = subtasks; t != null; t = t.nextJoin)
151 * sum += t.join(); // collect subtask results
152 * return sum;
153 * }
154 * }
155 * }</pre>
156 */
157 public static interface Spliterator<T> extends Iterator<T> {
158 /**
159 * Returns a Spliterator covering approximately half of the
160 * elements, guaranteed not to overlap with those subsequently
161 * returned by this Spliterator. After invoking this method,
162 * the current Spliterator will <em>not</em> produce any of
163 * the elements of the returned Spliterator, but the two
164 * Spliterators together will produce all of the elements that
165 * would have been produced by this Spliterator had this
166 * method not been called. The exact number of elements
167 * produced by the returned Spliterator is not guaranteed, and
168 * may be zero (i.e., with {@code hasNext()} reporting {@code
169 * false}) if this Spliterator cannot be further split.
170 *
171 * @return a Spliterator covering approximately half of the
172 * elements
173 * @throws IllegalStateException if this Spliterator has
174 * already commenced traversing elements
175 */
176 Spliterator<T> split();
177 }
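/*
 * Illustrative only: a minimal sequential sketch of the split()
 * contract, reusing the map m and valueSpliterator() from the sample
 * usage above; split() must be called before traversal begins.
 *
 *   Spliterator<Long> s1 = m.valueSpliterator();
 *   Spliterator<Long> s2 = s1.split();      // may cover zero elements if unsplittable
 *   long sum = 0L;
 *   while (s1.hasNext()) sum += s1.next();  // s1 and s2 never overlap
 *   while (s2.hasNext()) sum += s2.next();  // together they cover all the elements
 */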
178
179 /*
180 * Overview:
181 *
182 * The primary design goal of this hash table is to maintain
183 * concurrent readability (typically method get(), but also
184 * iterators and related methods) while minimizing update
185 * contention. Secondary goals are to keep space consumption about
186 * the same or better than java.util.HashMap, and to support high
187 * initial insertion rates on an empty table by many threads.
188 *
189 * Each key-value mapping is held in a Node. Because Node fields
190 * can contain special values, they are defined using plain Object
191 * types. Similarly in turn, all internal methods that use them
192 * work off Object types. And similarly, so do the internal
193 * methods of auxiliary iterator and view classes. All public
194 * generic typed methods relay in/out of these internal methods,
195 * supplying null-checks and casts as needed. This also allows
196 * many of the public methods to be factored into a smaller number
197 * of internal methods (although sadly not so for the five
198 * variants of put-related operations). The validation-based
199 * approach explained below leads to a lot of code sprawl because
200 * retry-control precludes factoring into smaller methods.
201 *
202 * The table is lazily initialized to a power-of-two size upon the
203 * first insertion. Each bin in the table normally contains a
204 * list of Nodes (most often, the list has only zero or one Node).
205 * Table accesses require volatile/atomic reads, writes, and
206 * CASes. Because there is no other way to arrange this without
207 * adding further indirections, we use intrinsics
208 * (sun.misc.Unsafe) operations. The lists of nodes within bins
209 * are always accurately traversable under volatile reads, so long
210 * as lookups check hash code and non-nullness of value before
211 * checking key equality.
212 *
213 * We use the top two bits of Node hash fields for control
214 * purposes -- they are available anyway because of addressing
215 * constraints. As explained further below, these top bits are
216 * used as follows:
217 * 00 - Normal
218 * 01 - Locked
219 * 11 - Locked and may have a thread waiting for lock
220 * 10 - Node is a forwarding node
221 *
222 * The lower 30 bits of each Node's hash field contain a
223 * transformation of the key's hash code, except for forwarding
224 * nodes, for which the lower bits are zero (and so always have
225 * hash field == MOVED).
226 *
227 * Insertion (via put or its variants) of the first node in an
228 * empty bin is performed by just CASing it to the bin. This is
229 * by far the most common case for put operations under most
230 * key/hash distributions. Other update operations (insert,
231 * delete, and replace) require locks. We do not want to waste
232 * the space required to associate a distinct lock object with
233 * each bin, so instead use the first node of a bin list itself as
234 * a lock. Blocking support for these locks relies on the builtin
235 * "synchronized" monitors. However, we also need a tryLock
236 * construction, so we overlay these by using bits of the Node
237 * hash field for lock control (see above), and so normally use
238 * builtin monitors only for blocking and signalling using
239 * wait/notifyAll constructions. See Node.tryAwaitLock.
240 *
241 * Using the first node of a list as a lock does not by itself
242 * suffice though: When a node is locked, any update must first
243 * validate that it is still the first node after locking it, and
244 * retry if not. Because new nodes are always appended to lists,
245 * once a node is first in a bin, it remains first until deleted
246 * or the bin becomes invalidated (upon resizing). However,
247 * operations that only conditionally update may inspect nodes
248 * until the point of update. This is a converse of sorts to the
249 * lazy locking technique described by Herlihy & Shavit.
250 *
251 * The main disadvantage of per-bin locks is that other update
252 * operations on other nodes in a bin list protected by the same
253 * lock can stall, for example when user equals() or mapping
254 * functions take a long time. However, statistically, under
255 * random hash codes, this is not a common problem. Ideally, the
256 * frequency of nodes in bins follows a Poisson distribution
257 * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
258 * parameter of about 0.5 on average, given the resizing threshold
259 * of 0.75, although with a large variance because of resizing
260 * granularity. Ignoring variance, the expected occurrences of
261 * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
262 * first values are:
263 *
264 * 0: 0.60653066
265 * 1: 0.30326533
266 * 2: 0.07581633
267 * 3: 0.01263606
268 * 4: 0.00157952
269 * 5: 0.00015795
270 * 6: 0.00001316
271 * 7: 0.00000094
272 * 8: 0.00000006
273 * more: less than 1 in ten million
274 *
275 * Lock contention probability for two threads accessing distinct
276 * elements is roughly 1 / (8 * #elements) under random hashes.
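 *
 * (Illustrative check, not used by the implementation: the listed
 * frequencies follow directly from the Poisson pmf, e.g.
 *   double p = Math.exp(-0.5);              // k = 0 term: 0.60653066
 *   for (int k = 1; k <= 8; ++k) {
 *       p = p * 0.5 / k;                    // exp(-0.5) * pow(0.5, k) / factorial(k)
 *       System.out.printf("%d: %.8f%n", k, p);
 *   }
 * reproduces the table above.)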
277 *
278 * Actual hash code distributions encountered in practice
279 * sometimes deviate significantly from uniform randomness. This
280 * includes the case when N > (1<<30), so some keys MUST collide.
281 * Similarly for dumb or hostile usages in which multiple keys are
282 * designed to have identical hash codes. Also, although we guard
283 * against the worst effects of this (see method spread), sets of
284 * hashes may differ only in bits that do not impact their bin
285 * index for a given power-of-two mask. So we use a secondary
286 * strategy that applies when the number of nodes in a bin exceeds
287 * a threshold, and at least one of the keys implements
288 * Comparable. These TreeBins use a balanced tree to hold nodes
289 * (a specialized form of red-black trees), bounding search time
290 * to O(log N). Each search step in a TreeBin is around twice as
291 * slow as in a regular list, but given that N cannot exceed
292 * (1<<64) (before running out of addresses) this bounds search
293 * steps, lock hold times, etc, to reasonable constants (roughly
294 * 100 nodes inspected per operation worst case) so long as keys
295 * are Comparable (which is very common -- String, Long, etc).
296 * TreeBin nodes (TreeNodes) also maintain the same "next"
297 * traversal pointers as regular nodes, so can be traversed in
298 * iterators in the same way.
299 *
300 * The table is resized when occupancy exceeds a percentage
301 * threshold (nominally, 0.75, but see below). Only a single
302 * thread performs the resize (using field "sizeCtl", to arrange
303 * exclusion), but the table otherwise remains usable for reads
304 * and updates. Resizing proceeds by transferring bins, one by
305 * one, from the table to the next table. Because we are using
306 * power-of-two expansion, the elements from each bin must either
307 * stay at same index, or move with a power of two offset. We
308 * eliminate unnecessary node creation by catching cases where old
309 * nodes can be reused because their next fields won't change. On
310 * average, only about one-sixth of them need cloning when a table
311 * doubles. The nodes they replace will be garbage collectable as
312 * soon as they are no longer referenced by any reader thread that
313 * may be in the midst of concurrently traversing table. Upon
314 * transfer, the old table bin contains only a special forwarding
315 * node (with hash field "MOVED") that contains the next table as
316 * its key. On encountering a forwarding node, access and update
317 * operations restart, using the new table.
318 *
319 * Each bin transfer requires its bin lock. However, unlike other
320 * cases, a transfer can skip a bin if it fails to acquire its
321 * lock, and revisit it later (unless it is a TreeBin). Method
322 * rebuild maintains a buffer of TRANSFER_BUFFER_SIZE bins that
323 * have been skipped because of failure to acquire a lock, and
324 * blocks only if none are available (i.e., only very rarely).
325 * The transfer operation must also ensure that all accessible
326 * bins in both the old and new table are usable by any traversal.
327 * When there are no lock acquisition failures, this is arranged
328 * simply by proceeding from the last bin (table.length - 1) up
329 * towards the first. Upon seeing a forwarding node, traversals
330 * (see class Iter) arrange to move to the new table
331 * without revisiting nodes. However, when any node is skipped
332 * during a transfer, all earlier table bins may have become
333 * visible, so are initialized with a reverse-forwarding node back
334 * to the old table until the new ones are established. (This
335 * sometimes requires transiently locking a forwarding node, which
336 * is possible under the above encoding.) These more expensive
337 * mechanics trigger only when necessary.
338 *
339 * The traversal scheme also applies to partial traversals of
340 * ranges of bins (via an alternate Traverser constructor)
341 * to support partitioned aggregate operations. Also, read-only
342 * operations give up if ever forwarded to a null table, which
343 * provides support for shutdown-style clearing, which is also not
344 * currently implemented.
345 *
346 * Lazy table initialization minimizes footprint until first use,
347 * and also avoids resizings when the first operation is from a
348 * putAll, constructor with map argument, or deserialization.
349 * These cases attempt to override the initial capacity settings,
350 * but harmlessly fail to take effect in cases of races.
351 *
352 * The element count is maintained using a LongAdder, which avoids
353 * contention on updates but can encounter cache thrashing if read
354 * too frequently during concurrent access. To avoid reading so
355 * often, resizing is attempted either when a bin lock is
356 * contended, or upon adding to a bin already holding two or more
357 * nodes (checked before adding in the xIfAbsent methods, after
358 * adding in others). Under uniform hash distributions, the
359 * probability of this occurring at threshold is around 13%,
360 * meaning that only about 1 in 8 puts check threshold (and after
361 * resizing, many fewer do so). But this approximation has high
362 * variance for small table sizes, so we check on any collision
363 * for sizes <= 64. The bulk putAll operation further reduces
364 * contention by only committing count updates upon these size
365 * checks.
366 *
367 * Maintaining API and serialization compatibility with previous
368 * versions of this class introduces several oddities. Mainly: We
369 * leave untouched but unused constructor arguments referring to
370 * concurrencyLevel. We accept a loadFactor constructor argument,
371 * but apply it only to initial table capacity (which is the only
372 * time that we can guarantee to honor it.) We also declare an
373 * unused "Segment" class that is instantiated in minimal form
374 * only when serializing.
375 */
376
377 /* ---------------- Constants -------------- */
378
379 /**
380 * The largest possible table capacity. This value must be
381 * exactly 1<<30 to stay within Java array allocation and indexing
382 * bounds for power of two table sizes, and is further required
383 * because the top two bits of 32bit hash fields are used for
384 * control purposes.
385 */
386 private static final int MAXIMUM_CAPACITY = 1 << 30;
387
388 /**
389 * The default initial table capacity. Must be a power of 2
390 * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
391 */
392 private static final int DEFAULT_CAPACITY = 16;
393
394 /**
395 * The largest possible (non-power of two) array size.
396 * Needed by toArray and related methods.
397 */
398 static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
399
400 /**
401 * The default concurrency level for this table. Unused but
402 * defined for compatibility with previous versions of this class.
403 */
404 private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
405
406 /**
407 * The load factor for this table. Overrides of this value in
408 * constructors affect only the initial table capacity. The
409 * actual floating point value isn't normally used -- it is
410 * simpler to use expressions such as {@code n - (n >>> 2)} for
411 * the associated resizing threshold.
412 */
413 private static final float LOAD_FACTOR = 0.75f;
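// Worked example (illustrative): for capacity n = 16, the resizing
// threshold n - (n >>> 2) = 16 - 4 = 12, i.e. 0.75 * 16.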
414
415 /**
416 * The buffer size for skipped bins during transfers. The
417 * value is arbitrary but should be large enough to avoid
418 * most locking stalls during resizes.
419 */
420 private static final int TRANSFER_BUFFER_SIZE = 32;
421
422 /**
423 * The bin count threshold for using a tree rather than list for a
424 * bin. The value reflects the approximate break-even point for
425 * using tree-based operations.
426 */
427 private static final int TREE_THRESHOLD = 8;
428
429 /*
430 * Encodings for special uses of Node hash fields. See above for
431 * explanation.
432 */
433 static final int MOVED = 0x80000000; // hash field for forwarding nodes
434 static final int LOCKED = 0x40000000; // set/tested only as a bit
435 static final int WAITING = 0xc0000000; // both bits set/tested together
436 static final int HASH_BITS = 0x3fffffff; // usable bits of normal node hash
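/*
 * Illustrative only: how these encodings realize the "top two bits"
 * scheme in the overview. For a spread key hash nh (top two bits 00):
 *   int locked  = nh | LOCKED;   // top bits 01: locked
 *   int waiting = nh | WAITING;  // top bits 11: locked, with a waiter
 * A forwarding node's hash is exactly MOVED (top bits 10, low bits 0),
 * and (hash & HASH_BITS) recovers the 30 usable hash bits otherwise.
 */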
437
438 /* ---------------- Fields -------------- */
439
440 /**
441 * The array of bins. Lazily initialized upon first insertion.
442 * Size is always a power of two. Accessed directly by iterators.
443 */
444 transient volatile Node[] table;
445
446 /**
447 * The counter maintaining number of elements.
448 */
449 private transient final LongAdder counter;
450
451 /**
452 * Table initialization and resizing control. When negative, the
453 * table is being initialized or resized. Otherwise, when table is
454 * null, holds the initial table size to use upon creation, or 0
455 * for default. After initialization, holds the next element count
456 * value upon which to resize the table.
457 */
458 private transient volatile int sizeCtl;
459
460 // views
461 private transient KeySet<K,V> keySet;
462 private transient Values<K,V> values;
463 private transient EntrySet<K,V> entrySet;
464
465 /** For serialization compatibility. Null unless serialized; see below */
466 private Segment<K,V>[] segments;
467
468 /* ---------------- Table element access -------------- */
469
470 /*
471 * Volatile access methods are used for table elements as well as
472 * elements of in-progress next table while resizing. Uses are
473 * null checked by callers, and implicitly bounds-checked, relying
474 * on the invariants that tab arrays have non-zero size, and all
475 * indices are masked with (tab.length - 1) which is never
476 * negative and always less than length. Note that, to be correct
477 * wrt arbitrary concurrency errors by users, bounds checks must
478 * operate on local variables, which accounts for some odd-looking
479 * inline assignments below.
480 */
481
482 static final Node tabAt(Node[] tab, int i) { // used by Iter
483 return (Node)UNSAFE.getObjectVolatile(tab, ((long)i<<ASHIFT)+ABASE);
484 }
485
486 private static final boolean casTabAt(Node[] tab, int i, Node c, Node v) {
487 return UNSAFE.compareAndSwapObject(tab, ((long)i<<ASHIFT)+ABASE, c, v);
488 }
489
490 private static final void setTabAt(Node[] tab, int i, Node v) {
491 UNSAFE.putObjectVolatile(tab, ((long)i<<ASHIFT)+ABASE, v);
492 }
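/*
 * Illustrative note: the offset arithmetic above is the Unsafe
 * spelling of a volatile array element access; ABASE and ASHIFT
 * (initialized with the other Unsafe mechanics) correspond to
 * arrayBaseOffset(Node[].class) and the log2 of arrayIndexScale, so
 * ((long)i << ASHIFT) + ABASE addresses element i. Functionally,
 * tabAt/casTabAt/setTabAt behave like the get/compareAndSet/set
 * methods of an AtomicReferenceArray<Node>, without the extra
 * indirection of a wrapper object.
 */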
493
494 /* ---------------- Nodes -------------- */
495
496 /**
497 * Key-value entry. Note that this is never exported out as a
498 * user-visible Map.Entry (see MapEntry below). Nodes with a hash
499 * field of MOVED are special, and do not contain user keys or
500 * values. Otherwise, keys are never null, and null val fields
501 * indicate that a node is in the process of being deleted or
502 * created. For purposes of read-only access, a key may be read
503 * before a val, but can only be used after checking val to be
504 * non-null.
505 */
506 static class Node {
507 volatile int hash;
508 final Object key;
509 volatile Object val;
510 volatile Node next;
511
512 Node(int hash, Object key, Object val, Node next) {
513 this.hash = hash;
514 this.key = key;
515 this.val = val;
516 this.next = next;
517 }
518
519 /** CompareAndSet the hash field */
520 final boolean casHash(int cmp, int val) {
521 return UNSAFE.compareAndSwapInt(this, hashOffset, cmp, val);
522 }
523
524 /** The number of spins before blocking for a lock */
525 static final int MAX_SPINS =
526 Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1;
527
528 /**
529 * Spins a while if LOCKED bit set and this node is the first
530 * of its bin, and then sets WAITING bits on hash field and
531 * blocks (once) if they are still set. It is OK for this
532 * method to return even if lock is not available upon exit,
533 * which enables these simple single-wait mechanics.
534 *
535 * The corresponding signalling operation is performed within
536 * callers: Upon detecting that WAITING has been set when
537 * unlocking lock (via a failed CAS from non-waiting LOCKED
538 * state), unlockers acquire the sync lock and perform a
539 * notifyAll.
540 */
541 final void tryAwaitLock(Node[] tab, int i) {
542 if (tab != null && i >= 0 && i < tab.length) { // bounds check
543 int r = ThreadLocalRandom.current().nextInt(); // randomize spins
544 int spins = MAX_SPINS, h;
545 while (tabAt(tab, i) == this && ((h = hash) & LOCKED) != 0) {
546 if (spins >= 0) {
547 r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift
548 if (r >= 0 && --spins == 0)
549 Thread.yield(); // yield before block
550 }
551 else if (casHash(h, h | WAITING)) {
552 synchronized (this) {
553 if (tabAt(tab, i) == this &&
554 (hash & WAITING) == WAITING) {
555 try {
556 wait();
557 } catch (InterruptedException ie) {
558 Thread.currentThread().interrupt();
559 }
560 }
561 else
562 notifyAll(); // possibly won race vs signaller
563 }
564 break;
565 }
566 }
567 }
568 }
569
570 // Unsafe mechanics for casHash
571 private static final sun.misc.Unsafe UNSAFE;
572 private static final long hashOffset;
573
574 static {
575 try {
576 UNSAFE = getUnsafe();
577 Class<?> k = Node.class;
578 hashOffset = UNSAFE.objectFieldOffset
579 (k.getDeclaredField("hash"));
580 } catch (Exception e) {
581 throw new Error(e);
582 }
583 }
584 }
585
586 /* ---------------- TreeBins -------------- */
587
588 /**
589 * Nodes for use in TreeBins
590 */
591 static final class TreeNode extends Node {
592 TreeNode parent; // red-black tree links
593 TreeNode left;
594 TreeNode right;
595 TreeNode prev; // needed to unlink next upon deletion
596 boolean red;
597
598 TreeNode(int hash, Object key, Object val, Node next, TreeNode parent) {
599 super(hash, key, val, next);
600 this.parent = parent;
601 }
602 }
603
604 /**
605 * A specialized form of red-black tree for use in bins
606 * whose size exceeds a threshold.
607 *
608 * TreeBins use a special form of comparison for search and
609 * related operations (which is the main reason we cannot use
610 * existing collections such as TreeMaps). TreeBins contain
611 * Comparable elements, but may contain others, as well as
612 * elements that are Comparable but not necessarily Comparable<T>
613 * for the same T, so we cannot invoke compareTo among them. To
614 * handle this, the tree is ordered primarily by hash value, then
615 * by getClass().getName() order, and then by Comparator order
616 * among elements of the same class. On lookup at a node, if
617 * elements are not comparable or compare as 0, both left and
618 * right children may need to be searched in the case of tied hash
619 * values. (This corresponds to the full list search that would be
620 * necessary if all elements were non-Comparable and had tied
621 * hashes.) The red-black balancing code is updated from
622 * pre-jdk-collections
623 * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
624 * based in turn on Cormen, Leiserson, and Rivest "Introduction to
625 * Algorithms" (CLR).
626 *
627 * TreeBins also maintain a separate locking discipline from
628 * regular bins. Because they are forwarded via special MOVED
629 * nodes at bin heads (which can never change once established),
630 * we cannot use those nodes as locks. Instead, TreeBin
631 * extends AbstractQueuedSynchronizer to support a simple form of
632 * read-write lock. For update operations and table validation,
633 * the exclusive form of lock behaves in the same way as bin-head
634 * locks. However, lookups use shared read-lock mechanics to allow
635 * multiple readers in the absence of writers. Additionally,
636 * these lookups do not ever block: While the lock is not
637 * available, they proceed along the slow traversal path (via
638 * next-pointers) until the lock becomes available or the list is
639 * exhausted, whichever comes first. (These cases are not fast,
640 * but maximize aggregate expected throughput.) The AQS mechanics
641 * for doing this are straightforward. The lock state is held as
642 * AQS getState(). Read counts are negative; the write count (1)
643 * is positive. There are no signalling preferences among readers
644 * and writers. Since we don't need to export full Lock API, we
645 * just override the minimal AQS methods and use them directly.
646 */
647 static final class TreeBin extends AbstractQueuedSynchronizer {
648 private static final long serialVersionUID = 2249069246763182397L;
649 transient TreeNode root; // root of tree
650 transient TreeNode first; // head of next-pointer list
651
652 /* AQS overrides */
653 public final boolean isHeldExclusively() { return getState() > 0; }
654 public final boolean tryAcquire(int ignore) {
655 if (compareAndSetState(0, 1)) {
656 setExclusiveOwnerThread(Thread.currentThread());
657 return true;
658 }
659 return false;
660 }
661 public final boolean tryRelease(int ignore) {
662 setExclusiveOwnerThread(null);
663 setState(0);
664 return true;
665 }
666 public final int tryAcquireShared(int ignore) {
667 for (int c;;) {
668 if ((c = getState()) > 0)
669 return -1;
670 if (compareAndSetState(c, c - 1))
671 return 1;
672 }
673 }
674 public final boolean tryReleaseShared(int ignore) {
675 int c;
676 do {} while (!compareAndSetState(c = getState(), c + 1));
677 return c == -1;
678 }
679
680 /** From CLR */
681 private void rotateLeft(TreeNode p) {
682 if (p != null) {
683 TreeNode r = p.right, pp, rl;
684 if ((rl = p.right = r.left) != null)
685 rl.parent = p;
686 if ((pp = r.parent = p.parent) == null)
687 root = r;
688 else if (pp.left == p)
689 pp.left = r;
690 else
691 pp.right = r;
692 r.left = p;
693 p.parent = r;
694 }
695 }
696
697 /** From CLR */
698 private void rotateRight(TreeNode p) {
699 if (p != null) {
700 TreeNode l = p.left, pp, lr;
701 if ((lr = p.left = l.right) != null)
702 lr.parent = p;
703 if ((pp = l.parent = p.parent) == null)
704 root = l;
705 else if (pp.right == p)
706 pp.right = l;
707 else
708 pp.left = l;
709 l.right = p;
710 p.parent = l;
711 }
712 }
713
714 /**
715 * Returns the TreeNode (or null if not found) for the given key
716 * starting at given root.
717 */
718 @SuppressWarnings("unchecked") // suppress Comparable cast warning
719 final TreeNode getTreeNode(int h, Object k, TreeNode p) {
720 Class<?> c = k.getClass();
721 while (p != null) {
722 int dir, ph; Object pk; Class<?> pc;
723 if ((ph = p.hash) == h) {
724 if ((pk = p.key) == k || k.equals(pk))
725 return p;
726 if (c != (pc = pk.getClass()) ||
727 !(k instanceof Comparable) ||
728 (dir = ((Comparable)k).compareTo((Comparable)pk)) == 0) {
729 dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName());
730 TreeNode r = null, s = null, pl, pr;
731 if (dir >= 0) {
732 if ((pl = p.left) != null && h <= pl.hash)
733 s = pl;
734 }
735 else if ((pr = p.right) != null && h >= pr.hash)
736 s = pr;
737 if (s != null && (r = getTreeNode(h, k, s)) != null)
738 return r;
739 }
740 }
741 else
742 dir = (h < ph) ? -1 : 1;
743 p = (dir > 0) ? p.right : p.left;
744 }
745 return null;
746 }
747
748 /**
749 * Wrapper for getTreeNode used by CHM.get. Tries to obtain
750 * read-lock to call getTreeNode, but during failure to get
751 * lock, searches along next links.
752 */
753 final Object getValue(int h, Object k) {
754 Node r = null;
755 int c = getState(); // Must read lock state first
756 for (Node e = first; e != null; e = e.next) {
757 if (c <= 0 && compareAndSetState(c, c - 1)) {
758 try {
759 r = getTreeNode(h, k, root);
760 } finally {
761 releaseShared(0);
762 }
763 break;
764 }
765 else if ((e.hash & HASH_BITS) == h && k.equals(e.key)) {
766 r = e;
767 break;
768 }
769 else
770 c = getState();
771 }
772 return r == null ? null : r.val;
773 }
774
775 /**
776 * Finds or adds a node.
777 * @return null if added
778 */
779 @SuppressWarnings("unchecked") // suppress Comparable cast warning
780 final TreeNode putTreeNode(int h, Object k, Object v) {
781 Class<?> c = k.getClass();
782 TreeNode pp = root, p = null;
783 int dir = 0;
784 while (pp != null) { // find existing node or leaf to insert at
785 int ph; Object pk; Class<?> pc;
786 p = pp;
787 if ((ph = p.hash) == h) {
788 if ((pk = p.key) == k || k.equals(pk))
789 return p;
790 if (c != (pc = pk.getClass()) ||
791 !(k instanceof Comparable) ||
792 (dir = ((Comparable)k).compareTo((Comparable)pk)) == 0) {
793 dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName());
794 TreeNode r = null, s = null, pl, pr;
795 if (dir >= 0) {
796 if ((pl = p.left) != null && h <= pl.hash)
797 s = pl;
798 }
799 else if ((pr = p.right) != null && h >= pr.hash)
800 s = pr;
801 if (s != null && (r = getTreeNode(h, k, s)) != null)
802 return r;
803 }
804 }
805 else
806 dir = (h < ph) ? -1 : 1;
807 pp = (dir > 0) ? p.right : p.left;
808 }
809
810 TreeNode f = first;
811 TreeNode x = first = new TreeNode(h, k, v, f, p);
812 if (p == null)
813 root = x;
814 else { // attach and rebalance; adapted from CLR
815 TreeNode xp, xpp;
816 if (f != null)
817 f.prev = x;
818 if (dir <= 0)
819 p.left = x;
820 else
821 p.right = x;
822 x.red = true;
823 while (x != null && (xp = x.parent) != null && xp.red &&
824 (xpp = xp.parent) != null) {
825 TreeNode xppl = xpp.left;
826 if (xp == xppl) {
827 TreeNode y = xpp.right;
828 if (y != null && y.red) {
829 y.red = false;
830 xp.red = false;
831 xpp.red = true;
832 x = xpp;
833 }
834 else {
835 if (x == xp.right) {
836 rotateLeft(x = xp);
837 xpp = (xp = x.parent) == null ? null : xp.parent;
838 }
839 if (xp != null) {
840 xp.red = false;
841 if (xpp != null) {
842 xpp.red = true;
843 rotateRight(xpp);
844 }
845 }
846 }
847 }
848 else {
849 TreeNode y = xppl;
850 if (y != null && y.red) {
851 y.red = false;
852 xp.red = false;
853 xpp.red = true;
854 x = xpp;
855 }
856 else {
857 if (x == xp.left) {
858 rotateRight(x = xp);
859 xpp = (xp = x.parent) == null ? null : xp.parent;
860 }
861 if (xp != null) {
862 xp.red = false;
863 if (xpp != null) {
864 xpp.red = true;
865 rotateLeft(xpp);
866 }
867 }
868 }
869 }
870 }
871 TreeNode r = root;
872 if (r != null && r.red)
873 r.red = false;
874 }
875 return null;
876 }
877
878 /**
879 * Removes the given node, which must be present before this
880 * call. This is messier than typical red-black deletion code
881 * because we cannot swap the contents of an interior node
882 * with a leaf successor that is pinned by "next" pointers
883 * that are accessible independently of lock. So instead we
884 * swap the tree linkages.
885 */
886 final void deleteTreeNode(TreeNode p) {
887 TreeNode next = (TreeNode)p.next; // unlink traversal pointers
888 TreeNode pred = p.prev;
889 if (pred == null)
890 first = next;
891 else
892 pred.next = next;
893 if (next != null)
894 next.prev = pred;
895 TreeNode replacement;
896 TreeNode pl = p.left;
897 TreeNode pr = p.right;
898 if (pl != null && pr != null) {
899 TreeNode s = pr, sl;
900 while ((sl = s.left) != null) // find successor
901 s = sl;
902 boolean c = s.red; s.red = p.red; p.red = c; // swap colors
903 TreeNode sr = s.right;
904 TreeNode pp = p.parent;
905 if (s == pr) { // p was s's direct parent
906 p.parent = s;
907 s.right = p;
908 }
909 else {
910 TreeNode sp = s.parent;
911 if ((p.parent = sp) != null) {
912 if (s == sp.left)
913 sp.left = p;
914 else
915 sp.right = p;
916 }
917 if ((s.right = pr) != null)
918 pr.parent = s;
919 }
920 p.left = null;
921 if ((p.right = sr) != null)
922 sr.parent = p;
923 if ((s.left = pl) != null)
924 pl.parent = s;
925 if ((s.parent = pp) == null)
926 root = s;
927 else if (p == pp.left)
928 pp.left = s;
929 else
930 pp.right = s;
931 replacement = sr;
932 }
933 else
934 replacement = (pl != null) ? pl : pr;
935 TreeNode pp = p.parent;
936 if (replacement == null) {
937 if (pp == null) {
938 root = null;
939 return;
940 }
941 replacement = p;
942 }
943 else {
944 replacement.parent = pp;
945 if (pp == null)
946 root = replacement;
947 else if (p == pp.left)
948 pp.left = replacement;
949 else
950 pp.right = replacement;
951 p.left = p.right = p.parent = null;
952 }
953 if (!p.red) { // rebalance, from CLR
954 TreeNode x = replacement;
955 while (x != null) {
956 TreeNode xp, xpl;
957 if (x.red || (xp = x.parent) == null) {
958 x.red = false;
959 break;
960 }
961 if (x == (xpl = xp.left)) {
962 TreeNode sib = xp.right;
963 if (sib != null && sib.red) {
964 sib.red = false;
965 xp.red = true;
966 rotateLeft(xp);
967 sib = (xp = x.parent) == null ? null : xp.right;
968 }
969 if (sib == null)
970 x = xp;
971 else {
972 TreeNode sl = sib.left, sr = sib.right;
973 if ((sr == null || !sr.red) &&
974 (sl == null || !sl.red)) {
975 sib.red = true;
976 x = xp;
977 }
978 else {
979 if (sr == null || !sr.red) {
980 if (sl != null)
981 sl.red = false;
982 sib.red = true;
983 rotateRight(sib);
984 sib = (xp = x.parent) == null ? null : xp.right;
985 }
986 if (sib != null) {
987 sib.red = (xp == null) ? false : xp.red;
988 if ((sr = sib.right) != null)
989 sr.red = false;
990 }
991 if (xp != null) {
992 xp.red = false;
993 rotateLeft(xp);
994 }
995 x = root;
996 }
997 }
998 }
999 else { // symmetric
1000 TreeNode sib = xpl;
1001 if (sib != null && sib.red) {
1002 sib.red = false;
1003 xp.red = true;
1004 rotateRight(xp);
1005 sib = (xp = x.parent) == null ? null : xp.left;
1006 }
1007 if (sib == null)
1008 x = xp;
1009 else {
1010 TreeNode sl = sib.left, sr = sib.right;
1011 if ((sl == null || !sl.red) &&
1012 (sr == null || !sr.red)) {
1013 sib.red = true;
1014 x = xp;
1015 }
1016 else {
1017 if (sl == null || !sl.red) {
1018 if (sr != null)
1019 sr.red = false;
1020 sib.red = true;
1021 rotateLeft(sib);
1022 sib = (xp = x.parent) == null ? null : xp.left;
1023 }
1024 if (sib != null) {
1025 sib.red = (xp == null) ? false : xp.red;
1026 if ((sl = sib.left) != null)
1027 sl.red = false;
1028 }
1029 if (xp != null) {
1030 xp.red = false;
1031 rotateRight(xp);
1032 }
1033 x = root;
1034 }
1035 }
1036 }
1037 }
1038 }
1039 if (p == replacement && (pp = p.parent) != null) {
1040 if (p == pp.left) // detach pointers
1041 pp.left = null;
1042 else if (p == pp.right)
1043 pp.right = null;
1044 p.parent = null;
1045 }
1046 }
1047 }
1048
1049 /* ---------------- Collision reduction methods -------------- */
1050
1051 /**
1052 * Spreads higher bits to lower, and also forces top 2 bits to 0.
1053 * Because the table uses power-of-two masking, sets of hashes
1054 * that vary only in bits above the current mask will always
1055 * collide. (Among known examples are sets of Float keys holding
1056 * consecutive whole numbers in small tables.) To counter this,
1057 * we apply a transform that spreads the impact of higher bits
1058 * downward. There is a tradeoff between speed, utility, and
1059 * quality of bit-spreading. Because many common sets of hashes
1060 * are already reasonably distributed across bits (so don't benefit
1061 * from spreading), and because we use trees to handle large sets
1062 * of collisions in bins, we don't need excessively high quality.
1063 */
1064 private static final int spread(int h) {
1065 h ^= (h >>> 18) ^ (h >>> 12);
1066 return (h ^ (h >>> 10)) & HASH_BITS;
1067 }
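/*
 * Illustrative only: the Float example mentioned above. Consecutive
 * whole-number Floats hash to values differing only in high bits:
 *   Float.valueOf(1f).hashCode() == 0x3f800000
 *   Float.valueOf(2f).hashCode() == 0x40000000
 *   Float.valueOf(3f).hashCode() == 0x40400000
 * so in a 16-bin table (hashCode() & 15) is 0 for all three, while
 * (spread(hashCode()) & 15) places them in distinct bins.
 */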
1068
1069 /**
1070 * Replaces a list bin with a tree bin. Call only when locked.
1071 * Fails to replace if the given key is non-comparable or table
1072 * is, or needs, resizing.
1073 */
1074 private final void replaceWithTreeBin(Node[] tab, int index, Object key) {
1075 if ((key instanceof Comparable) &&
1076 (tab.length >= MAXIMUM_CAPACITY || counter.sum() < (long)sizeCtl)) {
1077 TreeBin t = new TreeBin();
1078 for (Node e = tabAt(tab, index); e != null; e = e.next)
1079 t.putTreeNode(e.hash & HASH_BITS, e.key, e.val);
1080 setTabAt(tab, index, new Node(MOVED, t, null, null));
1081 }
1082 }
1083
1084 /* ---------------- Internal access and update methods -------------- */
1085
1086 /** Implementation for get and containsKey */
1087 private final Object internalGet(Object k) {
1088 int h = spread(k.hashCode());
1089 retry: for (Node[] tab = table; tab != null;) {
1090 Node e, p; Object ek, ev; int eh; // locals to read fields once
1091 for (e = tabAt(tab, (tab.length - 1) & h); e != null; e = e.next) {
1092 if ((eh = e.hash) == MOVED) {
1093 if ((ek = e.key) instanceof TreeBin) // search TreeBin
1094 return ((TreeBin)ek).getValue(h, k);
1095 else { // restart with new table
1096 tab = (Node[])ek;
1097 continue retry;
1098 }
1099 }
1100 else if ((eh & HASH_BITS) == h && (ev = e.val) != null &&
1101 ((ek = e.key) == k || k.equals(ek)))
1102 return ev;
1103 }
1104 break;
1105 }
1106 return null;
1107 }
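/*
 * Illustrative relay (a minimal sketch; per the overview, public
 * methods only add null checks and casts around these internals):
 *
 *   public V get(Object key) {
 *       if (key == null)
 *           throw new NullPointerException();
 *       return (V)internalGet(key);
 *   }
 */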
1108
1109 /**
1110 * Implementation for the four public remove/replace methods:
1111 * Replaces node value with v, conditional upon match of cv if
1112 * non-null. If resulting value is null, delete.
1113 */
1114 private final Object internalReplace(Object k, Object v, Object cv) {
1115 int h = spread(k.hashCode());
1116 Object oldVal = null;
1117 for (Node[] tab = table;;) {
1118 Node f; int i, fh; Object fk;
1119 if (tab == null ||
1120 (f = tabAt(tab, i = (tab.length - 1) & h)) == null)
1121 break;
1122 else if ((fh = f.hash) == MOVED) {
1123 if ((fk = f.key) instanceof TreeBin) {
1124 TreeBin t = (TreeBin)fk;
1125 boolean validated = false;
1126 boolean deleted = false;
1127 t.acquire(0);
1128 try {
1129 if (tabAt(tab, i) == f) {
1130 validated = true;
1131 TreeNode p = t.getTreeNode(h, k, t.root);
1132 if (p != null) {
1133 Object pv = p.val;
1134 if (cv == null || cv == pv || cv.equals(pv)) {
1135 oldVal = pv;
1136 if ((p.val = v) == null) {
1137 deleted = true;
1138 t.deleteTreeNode(p);
1139 }
1140 }
1141 }
1142 }
1143 } finally {
1144 t.release(0);
1145 }
1146 if (validated) {
1147 if (deleted)
1148 counter.add(-1L);
1149 break;
1150 }
1151 }
1152 else
1153 tab = (Node[])fk;
1154 }
1155 else if ((fh & HASH_BITS) != h && f.next == null) // precheck
1156 break; // rules out possible existence
1157 else if ((fh & LOCKED) != 0) {
1158 checkForResize(); // try resizing if can't get lock
1159 f.tryAwaitLock(tab, i);
1160 }
1161 else if (f.casHash(fh, fh | LOCKED)) {
1162 boolean validated = false;
1163 boolean deleted = false;
1164 try {
1165 if (tabAt(tab, i) == f) {
1166 validated = true;
1167 for (Node e = f, pred = null;;) {
1168 Object ek, ev;
1169 if ((e.hash & HASH_BITS) == h &&
1170 ((ev = e.val) != null) &&
1171 ((ek = e.key) == k || k.equals(ek))) {
1172 if (cv == null || cv == ev || cv.equals(ev)) {
1173 oldVal = ev;
1174 if ((e.val = v) == null) {
1175 deleted = true;
1176 Node en = e.next;
1177 if (pred != null)
1178 pred.next = en;
1179 else
1180 setTabAt(tab, i, en);
1181 }
1182 }
1183 break;
1184 }
1185 pred = e;
1186 if ((e = e.next) == null)
1187 break;
1188 }
1189 }
1190 } finally {
1191 if (!f.casHash(fh | LOCKED, fh)) {
1192 f.hash = fh;
1193 synchronized (f) { f.notifyAll(); };
1194 }
1195 }
1196 if (validated) {
1197 if (deleted)
1198 counter.add(-1L);
1199 break;
1200 }
1201 }
1202 }
1203 return oldVal;
1204 }
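/*
 * Illustrative relays (a minimal sketch of how the four public
 * remove/replace methods map onto internalReplace; null checks
 * omitted for brevity):
 *
 *   public V remove(Object k)                 { return (V)internalReplace(k, null, null); }
 *   public boolean remove(Object k, Object v) { return v != null && internalReplace(k, null, v) != null; }
 *   public V replace(K k, V v)                { return (V)internalReplace(k, v, null); }
 *   public boolean replace(K k, V ov, V nv)   { return internalReplace(k, nv, ov) != null; }
 */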
1205
1206 /*
1207 * Internal versions of the five insertion methods, each a
1208 * little more complicated than the last. All have
1209 * the same basic structure as the first (internalPut):
1210 * 1. If table uninitialized, create
1211 * 2. If bin empty, try to CAS new node
1212 * 3. If bin stale, use new table
1213 * 4. If bin converted to TreeBin, validate and relay to TreeBin methods
1214 * 5. Lock and validate; if valid, scan and add or update
1215 *
1216 * The others interweave other checks and/or alternative actions:
1217 * * Plain put checks for and performs resize after insertion.
1218 * * putIfAbsent prescans for mapping without lock (and fails to add
1219 * if present), which also makes pre-emptive resize checks worthwhile.
1220 * computeIfAbsent extends the form used in putIfAbsent with additional
1221 * mechanics to deal with calls, potential exceptions, and null
1222 * returns from the function call.
1223 * * compute uses the same function-call mechanics, but without
1224 * the prescans
1225 * * putAll attempts to pre-allocate enough table space
1226 * and more lazily performs count updates and checks.
1227 *
1228 * Someday when details settle down a bit more, it might be worth
1229 * some factoring to reduce sprawl.
1230 */
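/*
 * Illustrative relay (a minimal sketch; the public put simply
 * null-checks and casts around the internal form below):
 *
 *   public V put(K key, V value) {
 *       if (key == null || value == null)
 *           throw new NullPointerException();
 *       return (V)internalPut(key, value);
 *   }
 */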
1231
1232 /** Implementation for put */
1233 private final Object internalPut(Object k, Object v) {
1234 int h = spread(k.hashCode());
1235 int count = 0;
1236 for (Node[] tab = table;;) {
1237 int i; Node f; int fh; Object fk;
1238 if (tab == null)
1239 tab = initTable();
1240 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1241 if (casTabAt(tab, i, null, new Node(h, k, v, null)))
1242 break; // no lock when adding to empty bin
1243 }
1244 else if ((fh = f.hash) == MOVED) {
1245 if ((fk = f.key) instanceof TreeBin) {
1246 TreeBin t = (TreeBin)fk;
1247 Object oldVal = null;
1248 t.acquire(0);
1249 try {
1250 if (tabAt(tab, i) == f) {
1251 count = 2;
1252 TreeNode p = t.putTreeNode(h, k, v);
1253 if (p != null) {
1254 oldVal = p.val;
1255 p.val = v;
1256 }
1257 }
1258 } finally {
1259 t.release(0);
1260 }
1261 if (count != 0) {
1262 if (oldVal != null)
1263 return oldVal;
1264 break;
1265 }
1266 }
1267 else
1268 tab = (Node[])fk;
1269 }
1270 else if ((fh & LOCKED) != 0) {
1271 checkForResize();
1272 f.tryAwaitLock(tab, i);
1273 }
1274 else if (f.casHash(fh, fh | LOCKED)) {
1275 Object oldVal = null;
1276 try { // needed in case equals() throws
1277 if (tabAt(tab, i) == f) {
1278 count = 1;
1279 for (Node e = f;; ++count) {
1280 Object ek, ev;
1281 if ((e.hash & HASH_BITS) == h &&
1282 (ev = e.val) != null &&
1283 ((ek = e.key) == k || k.equals(ek))) {
1284 oldVal = ev;
1285 e.val = v;
1286 break;
1287 }
1288 Node last = e;
1289 if ((e = e.next) == null) {
1290 last.next = new Node(h, k, v, null);
1291 if (count >= TREE_THRESHOLD)
1292 replaceWithTreeBin(tab, i, k);
1293 break;
1294 }
1295 }
1296 }
1297 } finally { // unlock and signal if needed
1298 if (!f.casHash(fh | LOCKED, fh)) {
1299 f.hash = fh;
1300 synchronized (f) { f.notifyAll(); };
1301 }
1302 }
1303 if (count != 0) {
1304 if (oldVal != null)
1305 return oldVal;
1306 if (tab.length <= 64)
1307 count = 2;
1308 break;
1309 }
1310 }
1311 }
1312 counter.add(1L);
1313 if (count > 1)
1314 checkForResize();
1315 return null;
1316 }
1317
1318 /** Implementation for putIfAbsent */
1319 private final Object internalPutIfAbsent(Object k, Object v) {
1320 int h = spread(k.hashCode());
1321 int count = 0;
1322 for (Node[] tab = table;;) {
1323 int i; Node f; int fh; Object fk, fv;
1324 if (tab == null)
1325 tab = initTable();
1326 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1327 if (casTabAt(tab, i, null, new Node(h, k, v, null)))
1328 break;
1329 }
1330 else if ((fh = f.hash) == MOVED) {
1331 if ((fk = f.key) instanceof TreeBin) {
1332 TreeBin t = (TreeBin)fk;
1333 Object oldVal = null;
1334 t.acquire(0);
1335 try {
1336 if (tabAt(tab, i) == f) {
1337 count = 2;
1338 TreeNode p = t.putTreeNode(h, k, v);
1339 if (p != null)
1340 oldVal = p.val;
1341 }
1342 } finally {
1343 t.release(0);
1344 }
1345 if (count != 0) {
1346 if (oldVal != null)
1347 return oldVal;
1348 break;
1349 }
1350 }
1351 else
1352 tab = (Node[])fk;
1353 }
1354 else if ((fh & HASH_BITS) == h && (fv = f.val) != null &&
1355 ((fk = f.key) == k || k.equals(fk)))
1356 return fv;
1357 else {
1358 Node g = f.next;
1359 if (g != null) { // at least 2 nodes -- search and maybe resize
1360 for (Node e = g;;) {
1361 Object ek, ev;
1362 if ((e.hash & HASH_BITS) == h && (ev = e.val) != null &&
1363 ((ek = e.key) == k || k.equals(ek)))
1364 return ev;
1365 if ((e = e.next) == null) {
1366 checkForResize();
1367 break;
1368 }
1369 }
1370 }
1371 if (((fh = f.hash) & LOCKED) != 0) {
1372 checkForResize();
1373 f.tryAwaitLock(tab, i);
1374 }
1375 else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) {
1376 Object oldVal = null;
1377 try {
1378 if (tabAt(tab, i) == f) {
1379 count = 1;
1380 for (Node e = f;; ++count) {
1381 Object ek, ev;
1382 if ((e.hash & HASH_BITS) == h &&
1383 (ev = e.val) != null &&
1384 ((ek = e.key) == k || k.equals(ek))) {
1385 oldVal = ev;
1386 break;
1387 }
1388 Node last = e;
1389 if ((e = e.next) == null) {
1390 last.next = new Node(h, k, v, null);
1391 if (count >= TREE_THRESHOLD)
1392 replaceWithTreeBin(tab, i, k);
1393 break;
1394 }
1395 }
1396 }
1397 } finally {
1398 if (!f.casHash(fh | LOCKED, fh)) {
1399 f.hash = fh;
1400 synchronized (f) { f.notifyAll(); };
1401 }
1402 }
1403 if (count != 0) {
1404 if (oldVal != null)
1405 return oldVal;
1406 if (tab.length <= 64)
1407 count = 2;
1408 break;
1409 }
1410 }
1411 }
1412 }
1413 counter.add(1L);
1414 if (count > 1)
1415 checkForResize();
1416 return null;
1417 }
1418
1419 /** Implementation for computeIfAbsent */
1420 private final Object internalComputeIfAbsent(K k,
1421 Fun<? super K, ?> mf) {
1422 int h = spread(k.hashCode());
1423 Object val = null;
1424 int count = 0;
1425 for (Node[] tab = table;;) {
1426 Node f; int i, fh; Object fk, fv;
1427 if (tab == null)
1428 tab = initTable();
1429 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1430 Node node = new Node(fh = h | LOCKED, k, null, null);
1431 if (casTabAt(tab, i, null, node)) {
1432 count = 1;
1433 try {
1434 if ((val = mf.apply(k)) != null)
1435 node.val = val;
1436 } finally {
1437 if (val == null)
1438 setTabAt(tab, i, null);
1439 if (!node.casHash(fh, h)) {
1440 node.hash = h;
1441 synchronized (node) { node.notifyAll(); };
1442 }
1443 }
1444 }
1445 if (count != 0)
1446 break;
1447 }
1448 else if ((fh = f.hash) == MOVED) {
1449 if ((fk = f.key) instanceof TreeBin) {
1450 TreeBin t = (TreeBin)fk;
1451 boolean added = false;
1452 t.acquire(0);
1453 try {
1454 if (tabAt(tab, i) == f) {
1455 count = 1;
1456 TreeNode p = t.getTreeNode(h, k, t.root);
1457 if (p != null)
1458 val = p.val;
1459 else if ((val = mf.apply(k)) != null) {
1460 added = true;
1461 count = 2;
1462 t.putTreeNode(h, k, val);
1463 }
1464 }
1465 } finally {
1466 t.release(0);
1467 }
1468 if (count != 0) {
1469 if (!added)
1470 return val;
1471 break;
1472 }
1473 }
1474 else
1475 tab = (Node[])fk;
1476 }
1477 else if ((fh & HASH_BITS) == h && (fv = f.val) != null &&
1478 ((fk = f.key) == k || k.equals(fk)))
1479 return fv;
1480 else {
1481 Node g = f.next;
1482 if (g != null) {
1483 for (Node e = g;;) {
1484 Object ek, ev;
1485 if ((e.hash & HASH_BITS) == h && (ev = e.val) != null &&
1486 ((ek = e.key) == k || k.equals(ek)))
1487 return ev;
1488 if ((e = e.next) == null) {
1489 checkForResize();
1490 break;
1491 }
1492 }
1493 }
1494 if (((fh = f.hash) & LOCKED) != 0) {
1495 checkForResize();
1496 f.tryAwaitLock(tab, i);
1497 }
1498 else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) {
1499 boolean added = false;
1500 try {
1501 if (tabAt(tab, i) == f) {
1502 count = 1;
1503 for (Node e = f;; ++count) {
1504 Object ek, ev;
1505 if ((e.hash & HASH_BITS) == h &&
1506 (ev = e.val) != null &&
1507 ((ek = e.key) == k || k.equals(ek))) {
1508 val = ev;
1509 break;
1510 }
1511 Node last = e;
1512 if ((e = e.next) == null) {
1513 if ((val = mf.apply(k)) != null) {
1514 added = true;
1515 last.next = new Node(h, k, val, null);
1516 if (count >= TREE_THRESHOLD)
1517 replaceWithTreeBin(tab, i, k);
1518 }
1519 break;
1520 }
1521 }
1522 }
1523 } finally {
1524 if (!f.casHash(fh | LOCKED, fh)) {
1525 f.hash = fh;
1526 synchronized (f) { f.notifyAll(); };
1527 }
1528 }
1529 if (count != 0) {
1530 if (!added)
1531 return val;
1532 if (tab.length <= 64)
1533 count = 2;
1534 break;
1535 }
1536 }
1537 }
1538 }
1539 if (val != null) {
1540 counter.add(1L);
1541 if (count > 1)
1542 checkForResize();
1543 }
1544 return val;
1545 }
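/*
 * Illustrative use (a sketch assuming the nested Fun interface noted
 * in the class javadoc, with a single method T apply(A a)):
 *
 *   ConcurrentHashMapV8<String, Long> lengths = new ConcurrentHashMapV8<String, Long>();
 *   Long len = lengths.computeIfAbsent("example", new Fun<String, Long>() {
 *       public Long apply(String s) { return Long.valueOf(s.length()); }
 *   });
 *   // if "example" is absent, apply() runs under the bin lock and its
 *   // non-null result is installed; otherwise the existing value is returned
 */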
1546
1547 /** Implementation for compute */
1548 @SuppressWarnings("unchecked")
1549 private final Object internalCompute(K k, boolean onlyIfPresent,
1550 BiFun<? super K, ? super V, ? extends V> mf) {
1551 int h = spread(k.hashCode());
1552 Object val = null;
1553 int delta = 0;
1554 int count = 0;
1555 for (Node[] tab = table;;) {
1556 Node f; int i, fh; Object fk;
1557 if (tab == null)
1558 tab = initTable();
1559 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1560 if (onlyIfPresent)
1561 break;
1562 Node node = new Node(fh = h | LOCKED, k, null, null);
1563 if (casTabAt(tab, i, null, node)) {
1564 try {
1565 count = 1;
1566 if ((val = mf.apply(k, null)) != null) {
1567 node.val = val;
1568 delta = 1;
1569 }
1570 } finally {
1571 if (delta == 0)
1572 setTabAt(tab, i, null);
1573 if (!node.casHash(fh, h)) {
1574 node.hash = h;
1575 synchronized (node) { node.notifyAll(); };
1576 }
1577 }
1578 }
1579 if (count != 0)
1580 break;
1581 }
1582 else if ((fh = f.hash) == MOVED) {
1583 if ((fk = f.key) instanceof TreeBin) {
1584 TreeBin t = (TreeBin)fk;
1585 t.acquire(0);
1586 try {
1587 if (tabAt(tab, i) == f) {
1588 count = 1;
1589 TreeNode p = t.getTreeNode(h, k, t.root);
1590 Object pv = (p == null) ? null : p.val;
1591 if ((val = mf.apply(k, (V)pv)) != null) {
1592 if (p != null)
1593 p.val = val;
1594 else {
1595 count = 2;
1596 delta = 1;
1597 t.putTreeNode(h, k, val);
1598 }
1599 }
1600 else if (p != null) {
1601 delta = -1;
1602 t.deleteTreeNode(p);
1603 }
1604 }
1605 } finally {
1606 t.release(0);
1607 }
1608 if (count != 0)
1609 break;
1610 }
1611 else
1612 tab = (Node[])fk;
1613 }
1614 else if ((fh & LOCKED) != 0) {
1615 checkForResize();
1616 f.tryAwaitLock(tab, i);
1617 }
1618 else if (f.casHash(fh, fh | LOCKED)) {
1619 try {
1620 if (tabAt(tab, i) == f) {
1621 count = 1;
1622 for (Node e = f, pred = null;; ++count) {
1623 Object ek, ev;
1624 if ((e.hash & HASH_BITS) == h &&
1625 (ev = e.val) != null &&
1626 ((ek = e.key) == k || k.equals(ek))) {
1627 val = mf.apply(k, (V)ev);
1628 if (val != null)
1629 e.val = val;
1630 else {
1631 delta = -1;
1632 Node en = e.next;
1633 if (pred != null)
1634 pred.next = en;
1635 else
1636 setTabAt(tab, i, en);
1637 }
1638 break;
1639 }
1640 pred = e;
1641 if ((e = e.next) == null) {
1642 if (!onlyIfPresent && (val = mf.apply(k, null)) != null) {
1643 pred.next = new Node(h, k, val, null);
1644 delta = 1;
1645 if (count >= TREE_THRESHOLD)
1646 replaceWithTreeBin(tab, i, k);
1647 }
1648 break;
1649 }
1650 }
1651 }
1652 } finally {
1653 if (!f.casHash(fh | LOCKED, fh)) {
1654 f.hash = fh;
1655 synchronized (f) { f.notifyAll(); };
1656 }
1657 }
1658 if (count != 0) {
1659 if (tab.length <= 64)
1660 count = 2;
1661 break;
1662 }
1663 }
1664 }
1665 if (delta != 0) {
1666 counter.add((long)delta);
1667 if (count > 1)
1668 checkForResize();
1669 }
1670 return val;
1671 }
1672
1673 private final Object internalMerge(K k, V v,
1674 BiFun<? super V, ? super V, ? extends V> mf) {
1675 int h = spread(k.hashCode());
1676 Object val = null;
1677 int delta = 0;
1678 int count = 0;
1679 for (Node[] tab = table;;) {
1680 int i; Node f; int fh; Object fk, fv;
1681 if (tab == null)
1682 tab = initTable();
1683 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1684 if (casTabAt(tab, i, null, new Node(h, k, v, null))) {
1685 delta = 1;
1686 val = v;
1687 break;
1688 }
1689 }
1690 else if ((fh = f.hash) == MOVED) {
1691 if ((fk = f.key) instanceof TreeBin) {
1692 TreeBin t = (TreeBin)fk;
1693 t.acquire(0);
1694 try {
1695 if (tabAt(tab, i) == f) {
1696 count = 1;
1697 TreeNode p = t.getTreeNode(h, k, t.root);
1698 val = (p == null) ? v : mf.apply((V)p.val, v);
1699 if (val != null) {
1700 if (p != null)
1701 p.val = val;
1702 else {
1703 count = 2;
1704 delta = 1;
1705 t.putTreeNode(h, k, val);
1706 }
1707 }
1708 else if (p != null) {
1709 delta = -1;
1710 t.deleteTreeNode(p);
1711 }
1712 }
1713 } finally {
1714 t.release(0);
1715 }
1716 if (count != 0)
1717 break;
1718 }
1719 else
1720 tab = (Node[])fk;
1721 }
1722 else if ((fh & LOCKED) != 0) {
1723 checkForResize();
1724 f.tryAwaitLock(tab, i);
1725 }
1726 else if (f.casHash(fh, fh | LOCKED)) {
1727 try {
1728 if (tabAt(tab, i) == f) {
1729 count = 1;
1730 for (Node e = f, pred = null;; ++count) {
1731 Object ek, ev;
1732 if ((e.hash & HASH_BITS) == h &&
1733 (ev = e.val) != null &&
1734 ((ek = e.key) == k || k.equals(ek))) {
1735 val = mf.apply((V)ev, v); // apply(oldValue, newValue), matching the tree-bin path and the merge javadoc
1736 if (val != null)
1737 e.val = val;
1738 else {
1739 delta = -1;
1740 Node en = e.next;
1741 if (pred != null)
1742 pred.next = en;
1743 else
1744 setTabAt(tab, i, en);
1745 }
1746 break;
1747 }
1748 pred = e;
1749 if ((e = e.next) == null) {
1750 val = v;
1751 pred.next = new Node(h, k, val, null);
1752 delta = 1;
1753 if (count >= TREE_THRESHOLD)
1754 replaceWithTreeBin(tab, i, k);
1755 break;
1756 }
1757 }
1758 }
1759 } finally {
1760 if (!f.casHash(fh | LOCKED, fh)) {
1761 f.hash = fh;
1762 synchronized (f) { f.notifyAll(); };
1763 }
1764 }
1765 if (count != 0) {
1766 if (tab.length <= 64)
1767 count = 2;
1768 break;
1769 }
1770 }
1771 }
1772 if (delta != 0) {
1773 counter.add((long)delta);
1774 if (count > 1)
1775 checkForResize();
1776 }
1777 return val;
1778 }
1779
1780 /** Implementation for putAll */
1781 private final void internalPutAll(Map<?, ?> m) {
1782 tryPresize(m.size());
1783 long delta = 0L; // number of uncommitted additions
1784 boolean npe = false; // to throw exception on exit for nulls
1785 try { // to clean up counts on other exceptions
1786 for (Map.Entry<?, ?> entry : m.entrySet()) {
1787 Object k, v;
1788 if (entry == null || (k = entry.getKey()) == null ||
1789 (v = entry.getValue()) == null) {
1790 npe = true;
1791 break;
1792 }
1793 int h = spread(k.hashCode());
1794 for (Node[] tab = table;;) {
1795 int i; Node f; int fh; Object fk;
1796 if (tab == null)
1797 tab = initTable();
1798 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null){
1799 if (casTabAt(tab, i, null, new Node(h, k, v, null))) {
1800 ++delta;
1801 break;
1802 }
1803 }
1804 else if ((fh = f.hash) == MOVED) {
1805 if ((fk = f.key) instanceof TreeBin) {
1806 TreeBin t = (TreeBin)fk;
1807 boolean validated = false;
1808 t.acquire(0);
1809 try {
1810 if (tabAt(tab, i) == f) {
1811 validated = true;
1812 TreeNode p = t.getTreeNode(h, k, t.root);
1813 if (p != null)
1814 p.val = v;
1815 else {
1816 t.putTreeNode(h, k, v);
1817 ++delta;
1818 }
1819 }
1820 } finally {
1821 t.release(0);
1822 }
1823 if (validated)
1824 break;
1825 }
1826 else
1827 tab = (Node[])fk;
1828 }
1829 else if ((fh & LOCKED) != 0) {
1830 counter.add(delta);
1831 delta = 0L;
1832 checkForResize();
1833 f.tryAwaitLock(tab, i);
1834 }
1835 else if (f.casHash(fh, fh | LOCKED)) {
1836 int count = 0;
1837 try {
1838 if (tabAt(tab, i) == f) {
1839 count = 1;
1840 for (Node e = f;; ++count) {
1841 Object ek, ev;
1842 if ((e.hash & HASH_BITS) == h &&
1843 (ev = e.val) != null &&
1844 ((ek = e.key) == k || k.equals(ek))) {
1845 e.val = v;
1846 break;
1847 }
1848 Node last = e;
1849 if ((e = e.next) == null) {
1850 ++delta;
1851 last.next = new Node(h, k, v, null);
1852 if (count >= TREE_THRESHOLD)
1853 replaceWithTreeBin(tab, i, k);
1854 break;
1855 }
1856 }
1857 }
1858 } finally {
1859 if (!f.casHash(fh | LOCKED, fh)) {
1860 f.hash = fh;
1861 synchronized (f) { f.notifyAll(); };
1862 }
1863 }
1864 if (count != 0) {
1865 if (count > 1) {
1866 counter.add(delta);
1867 delta = 0L;
1868 checkForResize();
1869 }
1870 break;
1871 }
1872 }
1873 }
1874 }
1875 } finally {
1876 if (delta != 0)
1877 counter.add(delta);
1878 }
1879 if (npe)
1880 throw new NullPointerException();
1881 }
1882
1883 /* ---------------- Table Initialization and Resizing -------------- */
1884
1885 /**
1886 * Returns a power of two table size for the given desired capacity.
1887 * See Hacker's Delight, sec 3.2
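* For example (illustrative): tableSizeFor(17) starts with n = 16; the
* shifts set all lower bits, giving n = 31, so n + 1 = 32 is returned.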
1888 */
1889 private static final int tableSizeFor(int c) {
1890 int n = c - 1;
1891 n |= n >>> 1;
1892 n |= n >>> 2;
1893 n |= n >>> 4;
1894 n |= n >>> 8;
1895 n |= n >>> 16;
1896 return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
1897 }
1898
1899 /**
1900 * Initializes table, using the size recorded in sizeCtl.
1901 */
1902 private final Node[] initTable() {
1903 Node[] tab; int sc;
1904 while ((tab = table) == null) {
1905 if ((sc = sizeCtl) < 0)
1906 Thread.yield(); // lost initialization race; just spin
1907 else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
1908 try {
1909 if ((tab = table) == null) {
1910 int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
1911 tab = table = new Node[n];
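// sc becomes 0.75 * n, the occupancy at which checkForResize will
// trigger the next resize (published to sizeCtl in the finally block)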
1912 sc = n - (n >>> 2);
1913 }
1914 } finally {
1915 sizeCtl = sc;
1916 }
1917 break;
1918 }
1919 }
1920 return tab;
1921 }
1922
1923 /**
1924 * If table is too small and not already resizing, creates next
1925 * table and transfers bins. Rechecks occupancy after a transfer
1926 * to see if another resize is already needed because resizings
1927 * are lagging additions.
1928 */
1929 private final void checkForResize() {
1930 Node[] tab; int n, sc;
1931 while ((tab = table) != null &&
1932 (n = tab.length) < MAXIMUM_CAPACITY &&
1933 (sc = sizeCtl) >= 0 && counter.sum() >= (long)sc &&
1934 UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
1935 try {
1936 if (tab == table) {
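// rebuild doubles the capacity to 2*n; the new threshold
// (n << 1) - (n >>> 1) equals 1.5*n, i.e. 0.75 of the doubled size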
1937 table = rebuild(tab);
1938 sc = (n << 1) - (n >>> 1);
1939 }
1940 } finally {
1941 sizeCtl = sc;
1942 }
1943 }
1944 }
1945
1946 /**
1947 * Tries to presize table to accommodate the given number of elements.
1948 *
1949 * @param size number of elements (doesn't need to be perfectly accurate)
1950 */
1951 private final void tryPresize(int size) {
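// unless the request is already huge, pad it by ~50% so that 'size'
// mappings stay below the resulting table's 0.75 resize threshold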
1952 int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
1953 tableSizeFor(size + (size >>> 1) + 1);
1954 int sc;
1955 while ((sc = sizeCtl) >= 0) {
1956 Node[] tab = table; int n;
1957 if (tab == null || (n = tab.length) == 0) {
1958 n = (sc > c) ? sc : c;
1959 if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
1960 try {
1961 if (table == tab) {
1962 table = new Node[n];
1963 sc = n - (n >>> 2);
1964 }
1965 } finally {
1966 sizeCtl = sc;
1967 }
1968 }
1969 }
1970 else if (c <= sc || n >= MAXIMUM_CAPACITY)
1971 break;
1972 else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
1973 try {
1974 if (table == tab) {
1975 table = rebuild(tab);
1976 sc = (n << 1) - (n >>> 1);
1977 }
1978 } finally {
1979 sizeCtl = sc;
1980 }
1981 }
1982 }
1983 }
1984
1985 /**
1986 * Moves and/or copies the nodes in each bin to the new table. See
1987 * above for explanation.
1988 *
1989 * @return the new table
1990 */
1991 private static final Node[] rebuild(Node[] tab) {
1992 int n = tab.length;
1993 Node[] nextTab = new Node[n << 1];
1994 Node fwd = new Node(MOVED, nextTab, null, null);
1995 int[] buffer = null; // holds bins to revisit; null until needed
1996 Node rev = null; // reverse forwarder; null until needed
1997 int nbuffered = 0; // the number of bins in buffer list
1998 int bufferIndex = 0; // buffer index of current buffered bin
1999 int bin = n - 1; // current non-buffered bin or -1 if none
2000
2001 for (int i = bin;;) { // start upwards sweep
2002 int fh; Node f;
2003 if ((f = tabAt(tab, i)) == null) {
2004 if (bin >= 0) { // no lock needed (or available)
2005 if (!casTabAt(tab, i, f, fwd))
2006 continue;
2007 }
2008 else { // transiently use a locked forwarding node
2009 Node g = new Node(MOVED|LOCKED, nextTab, null, null);
2010 if (!casTabAt(tab, i, f, g))
2011 continue;
2012 setTabAt(nextTab, i, null);
2013 setTabAt(nextTab, i + n, null);
2014 setTabAt(tab, i, fwd);
2015 if (!g.casHash(MOVED|LOCKED, MOVED)) {
2016 g.hash = MOVED;
2017 synchronized (g) { g.notifyAll(); }
2018 }
2019 }
2020 }
2021 else if ((fh = f.hash) == MOVED) {
2022 Object fk = f.key;
2023 if (fk instanceof TreeBin) {
2024 TreeBin t = (TreeBin)fk;
2025 boolean validated = false;
2026 t.acquire(0);
2027 try {
2028 if (tabAt(tab, i) == f) {
2029 validated = true;
2030 splitTreeBin(nextTab, i, t);
2031 setTabAt(tab, i, fwd);
2032 }
2033 } finally {
2034 t.release(0);
2035 }
2036 if (!validated)
2037 continue;
2038 }
2039 }
2040 else if ((fh & LOCKED) == 0 && f.casHash(fh, fh|LOCKED)) {
2041 boolean validated = false;
2042 try { // split to lo and hi lists; copying as needed
2043 if (tabAt(tab, i) == f) {
2044 validated = true;
2045 splitBin(nextTab, i, f);
2046 setTabAt(tab, i, fwd);
2047 }
2048 } finally {
2049 if (!f.casHash(fh | LOCKED, fh)) {
2050 f.hash = fh;
2051 synchronized (f) { f.notifyAll(); };
2052 }
2053 }
2054 if (!validated)
2055 continue;
2056 }
2057 else {
2058 if (buffer == null) // initialize buffer for revisits
2059 buffer = new int[TRANSFER_BUFFER_SIZE];
2060 if (bin < 0 && bufferIndex > 0) {
2061 int j = buffer[--bufferIndex];
2062 buffer[bufferIndex] = i;
2063 i = j; // swap with another bin
2064 continue;
2065 }
2066 if (bin < 0 || nbuffered >= TRANSFER_BUFFER_SIZE) {
2067 f.tryAwaitLock(tab, i);
2068 continue; // no other options -- block
2069 }
2070 if (rev == null) // initialize reverse-forwarder
2071 rev = new Node(MOVED, tab, null, null);
2072 if (tabAt(tab, i) != f || (f.hash & LOCKED) == 0)
2073 continue; // recheck before adding to list
2074 buffer[nbuffered++] = i;
2075 setTabAt(nextTab, i, rev); // install place-holders
2076 setTabAt(nextTab, i + n, rev);
2077 }
2078
2079 if (bin > 0)
2080 i = --bin;
2081 else if (buffer != null && nbuffered > 0) {
2082 bin = -1;
2083 i = buffer[bufferIndex = --nbuffered];
2084 }
2085 else
2086 return nextTab;
2087 }
2088 }
2089
2090 /**
2091 * Splits a normal bin with list headed by e into lo and hi parts;
2092 * installs in given table.
2093 */
2094 private static void splitBin(Node[] nextTab, int i, Node e) {
2095 int bit = nextTab.length >>> 1; // bit to split on
2096 int runBit = e.hash & bit;
2097 Node lastRun = e, lo = null, hi = null;
2098 for (Node p = e.next; p != null; p = p.next) {
2099 int b = p.hash & bit;
2100 if (b != runBit) {
2101 runBit = b;
2102 lastRun = p;
2103 }
2104 }
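// all nodes from lastRun onward map to the same half of the new table,
// so that trailing run is reused as-is; only the nodes before lastRun
// are cloned in the loop below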
2105 if (runBit == 0)
2106 lo = lastRun;
2107 else
2108 hi = lastRun;
2109 for (Node p = e; p != lastRun; p = p.next) {
2110 int ph = p.hash & HASH_BITS;
2111 Object pk = p.key, pv = p.val;
2112 if ((ph & bit) == 0)
2113 lo = new Node(ph, pk, pv, lo);
2114 else
2115 hi = new Node(ph, pk, pv, hi);
2116 }
2117 setTabAt(nextTab, i, lo);
2118 setTabAt(nextTab, i + bit, hi);
2119 }
2120
2121 /**
2122 * Splits a tree bin into lo and hi parts; installs in given table.
2123 */
2124 private static void splitTreeBin(Node[] nextTab, int i, TreeBin t) {
2125 int bit = nextTab.length >>> 1;
2126 TreeBin lt = new TreeBin();
2127 TreeBin ht = new TreeBin();
2128 int lc = 0, hc = 0;
2129 for (Node e = t.first; e != null; e = e.next) {
2130 int h = e.hash & HASH_BITS;
2131 Object k = e.key, v = e.val;
2132 if ((h & bit) == 0) {
2133 ++lc;
2134 lt.putTreeNode(h, k, v);
2135 }
2136 else {
2137 ++hc;
2138 ht.putTreeNode(h, k, v);
2139 }
2140 }
2141 Node ln, hn; // throw away trees if too small
2142 if (lc <= (TREE_THRESHOLD >>> 1)) {
2143 ln = null;
2144 for (Node p = lt.first; p != null; p = p.next)
2145 ln = new Node(p.hash, p.key, p.val, ln);
2146 }
2147 else
2148 ln = new Node(MOVED, lt, null, null);
2149 setTabAt(nextTab, i, ln);
2150 if (hc <= (TREE_THRESHOLD >>> 1)) {
2151 hn = null;
2152 for (Node p = ht.first; p != null; p = p.next)
2153 hn = new Node(p.hash, p.key, p.val, hn);
2154 }
2155 else
2156 hn = new Node(MOVED, ht, null, null);
2157 setTabAt(nextTab, i + bit, hn);
2158 }
2159
2160 /**
2161 * Implementation for clear. Steps through each bin, removing all
2162 * nodes.
2163 */
2164 private final void internalClear() {
2165 long delta = 0L; // negative number of deletions
2166 int i = 0;
2167 Node[] tab = table;
2168 while (tab != null && i < tab.length) {
2169 int fh; Object fk;
2170 Node f = tabAt(tab, i);
2171 if (f == null)
2172 ++i;
2173 else if ((fh = f.hash) == MOVED) {
2174 if ((fk = f.key) instanceof TreeBin) {
2175 TreeBin t = (TreeBin)fk;
2176 t.acquire(0);
2177 try {
2178 if (tabAt(tab, i) == f) {
2179 for (Node p = t.first; p != null; p = p.next) {
2180 p.val = null;
2181 --delta;
2182 }
2183 t.first = null;
2184 t.root = null;
2185 ++i;
2186 }
2187 } finally {
2188 t.release(0);
2189 }
2190 }
2191 else
2192 tab = (Node[])fk;
2193 }
2194 else if ((fh & LOCKED) != 0) {
2195 counter.add(delta); // opportunistically update count
2196 delta = 0L;
2197 f.tryAwaitLock(tab, i);
2198 }
2199 else if (f.casHash(fh, fh | LOCKED)) {
2200 try {
2201 if (tabAt(tab, i) == f) {
2202 for (Node e = f; e != null; e = e.next) {
2203 e.val = null;
2204 --delta;
2205 }
2206 setTabAt(tab, i, null);
2207 ++i;
2208 }
2209 } finally {
2210 if (!f.casHash(fh | LOCKED, fh)) {
2211 f.hash = fh;
2212 synchronized (f) { f.notifyAll(); };
2213 }
2214 }
2215 }
2216 }
2217 if (delta != 0)
2218 counter.add(delta);
2219 }
2220
2221 /* ----------------Table Traversal -------------- */
2222
2223 /**
2224 * Encapsulates traversal for methods such as containsValue; also
2225 * serves as a base class for other iterators.
2226 *
2227 * At each step, the iterator snapshots the key ("nextKey") and
2228 * value ("nextVal") of a valid node (i.e., one that, at point of
2229 * snapshot, has a non-null user value). Because val fields can
2230 * change (including to null, indicating deletion), field nextVal
2231 * might not be accurate at point of use, but still maintains the
2232 * weak consistency property of holding a value that was once
2233 * valid.
2234 *
2235 * Internal traversals directly access these fields, as in:
2236 * {@code while (it.advance() != null) { process(it.nextKey); }}
2237 *
2238 * Exported iterators must track whether the iterator has advanced
2239 * (in hasNext vs next) (by setting/checking/nulling field
2240 * nextVal), and then extract key, value, or key-value pairs as
2241 * return values of next().
2242 *
2243 * The iterator visits once each still-valid node that was
2244 * reachable upon iterator construction. It might miss some that
2245 * were added to a bin after the bin was visited, which is OK wrt
2246 * consistency guarantees. Maintaining this property in the face
2247 * of possible ongoing resizes requires a fair amount of
2248 * bookkeeping state that is difficult to optimize away amidst
2249 * volatile accesses. Even so, traversal maintains reasonable
2250 * throughput.
2251 *
2252 * Normally, iteration proceeds bin-by-bin traversing lists.
2253 * However, if the table has been resized, then all future steps
2254 * must traverse both the bin at the current index as well as at
2255 * (index + baseSize); and so on for further resizings. To
2256 * paranoically cope with potential sharing by users of iterators
2257 * across threads, iteration terminates if a bounds check fails
2258 * for a table read.
2259 *
2260 * This class extends ForkJoinTask to streamline parallel
2261 * iteration in bulk operations (see BulkTask). This adds only an
2262 * int of space overhead, which is close enough to negligible that it
2263 * is not worth worrying about in cases where it is not needed.
2264 */
2265 static class Traverser<K,V,R> extends ForkJoinTask<R> {
2266 final ConcurrentHashMapV8<K, V> map;
2267 Node next; // the next entry to use
2268 Node last; // the last entry used
2269 Object nextKey; // cached key field of next
2270 Object nextVal; // cached val field of next
2271 Node[] tab; // current table; updated if resized
2272 int index; // index of bin to use next
2273 int baseIndex; // current index of initial table
2274 int baseLimit; // index bound for initial table
2275 final int baseSize; // initial table size
2276
2277 /** Creates iterator for all entries in the table. */
2278 Traverser(ConcurrentHashMapV8<K, V> map) {
2279 this.tab = (this.map = map).table;
2280 baseLimit = baseSize = (tab == null) ? 0 : tab.length;
2281 }
2282
2283 /** Creates iterator for split() methods */
2284 Traverser(Traverser<K,V,?> it, boolean split) {
2285 this.map = it.map;
2286 this.tab = it.tab;
2287 this.baseSize = it.baseSize;
2288 int lo = it.baseIndex;
2289 int hi = this.baseLimit = it.baseLimit;
2290 int i;
2291 if (split) // adjust parent
2292 i = it.baseLimit = (lo + hi + 1) >>> 1;
2293 else // clone parent
2294 i = lo;
2295 this.index = this.baseIndex = i;
2296 }
2297
2298 /**
2299 * Advances next; returns nextVal or null if terminated.
2300 * See above for explanation.
2301 */
2302 final Object advance() {
2303 Node e = last = next;
2304 Object ev = null;
2305 outer: do {
2306 if (e != null) // advance past used/skipped node
2307 e = e.next;
2308 while (e == null) { // get to next non-null bin
2309 Node[] t; int b, i, n; Object ek; // checks must use locals
2310 if ((b = baseIndex) >= baseLimit || (i = index) < 0 ||
2311 (t = tab) == null || i >= (n = t.length))
2312 break outer;
2313 else if ((e = tabAt(t, i)) != null && e.hash == MOVED) {
2314 if ((ek = e.key) instanceof TreeBin)
2315 e = ((TreeBin)ek).first;
2316 else {
2317 tab = (Node[])ek;
2318 continue; // restarts due to null val
2319 }
2320 } // visit upper slots if present
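// after resizing, entries that belonged to base bin b may now live in
// slots b, b + baseSize, b + 2*baseSize, ... of the larger table; visit
// those upper slots before advancing to base index b + 1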
2321 index = (i += baseSize) < n ? i : (baseIndex = b + 1);
2322 }
2323 nextKey = e.key;
2324 } while ((ev = e.val) == null); // skip deleted or special nodes
2325 next = e;
2326 return nextVal = ev;
2327 }
2328
2329 public final void remove() {
2330 if (nextVal == null && last == null)
2331 advance();
2332 Node e = last;
2333 if (e == null)
2334 throw new IllegalStateException();
2335 last = null;
2336 map.remove(e.key);
2337 }
2338
2339 public final boolean hasNext() {
2340 return nextVal != null || advance() != null;
2341 }
2342
2343 public final boolean hasMoreElements() { return hasNext(); }
2344 public final void setRawResult(Object x) { }
2345 public R getRawResult() { return null; }
2346 public boolean exec() { return true; }
2347 }
2348
2349 /* ---------------- Public operations -------------- */
2350
2351 /**
2352 * Creates a new, empty map with the default initial table size (16).
2353 */
2354 public ConcurrentHashMapV8() {
2355 this.counter = new LongAdder();
2356 }
2357
2358 /**
2359 * Creates a new, empty map with an initial table size
2360 * accommodating the specified number of elements without the need
2361 * to dynamically resize.
2362 *
2363 * @param initialCapacity The implementation performs internal
2364 * sizing to accommodate this many elements.
2365 * @throws IllegalArgumentException if the initial capacity of
2366 * elements is negative
2367 */
2368 public ConcurrentHashMapV8(int initialCapacity) {
2369 if (initialCapacity < 0)
2370 throw new IllegalArgumentException();
2371 int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
2372 MAXIMUM_CAPACITY :
2373 tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
2374 this.counter = new LongAdder();
2375 this.sizeCtl = cap;
2376 }
2377
2378 /**
2379 * Creates a new map with the same mappings as the given map.
2380 *
2381 * @param m the map
2382 */
2383 public ConcurrentHashMapV8(Map<? extends K, ? extends V> m) {
2384 this.counter = new LongAdder();
2385 this.sizeCtl = DEFAULT_CAPACITY;
2386 internalPutAll(m);
2387 }
2388
2389 /**
2390 * Creates a new, empty map with an initial table size based on
2391 * the given number of elements ({@code initialCapacity}) and
2392 * initial table density ({@code loadFactor}).
2393 *
2394 * @param initialCapacity the initial capacity. The implementation
2395 * performs internal sizing to accommodate this many elements,
2396 * given the specified load factor.
2397 * @param loadFactor the load factor (table density) for
2398 * establishing the initial table size
2399 * @throws IllegalArgumentException if the initial capacity of
2400 * elements is negative or the load factor is nonpositive
2401 *
2402 * @since 1.6
2403 */
2404 public ConcurrentHashMapV8(int initialCapacity, float loadFactor) {
2405 this(initialCapacity, loadFactor, 1);
2406 }
2407
2408 /**
2409 * Creates a new, empty map with an initial table size based on
2410 * the given number of elements ({@code initialCapacity}), table
2411 * density ({@code loadFactor}), and number of concurrently
2412 * updating threads ({@code concurrencyLevel}).
2413 *
2414 * @param initialCapacity the initial capacity. The implementation
2415 * performs internal sizing to accommodate this many elements,
2416 * given the specified load factor.
2417 * @param loadFactor the load factor (table density) for
2418 * establishing the initial table size
2419 * @param concurrencyLevel the estimated number of concurrently
2420 * updating threads. The implementation may use this value as
2421 * a sizing hint.
2422 * @throws IllegalArgumentException if the initial capacity is
2423 * negative or the load factor or concurrencyLevel are
2424 * nonpositive
2425 */
2426 public ConcurrentHashMapV8(int initialCapacity,
2427 float loadFactor, int concurrencyLevel) {
2428 if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
2429 throw new IllegalArgumentException();
2430 if (initialCapacity < concurrencyLevel) // Use at least as many bins
2431 initialCapacity = concurrencyLevel; // as estimated threads
2432 long size = (long)(1.0 + (long)initialCapacity / loadFactor);
2433 int cap = (size >= (long)MAXIMUM_CAPACITY) ?
2434 MAXIMUM_CAPACITY : tableSizeFor((int)size);
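// worked example (illustrative): initialCapacity = 16, loadFactor = 0.75f
// and concurrencyLevel <= 16 give size = (long)(1.0 + 16 / 0.75) = 22,
// so cap = tableSizeFor(22) = 32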
2435 this.counter = new LongAdder();
2436 this.sizeCtl = cap;
2437 }
2438
2439 /**
2440 * {@inheritDoc}
2441 */
2442 public boolean isEmpty() {
2443 return counter.sum() <= 0L; // ignore transient negative values
2444 }
2445
2446 /**
2447 * {@inheritDoc}
2448 */
2449 public int size() {
2450 long n = counter.sum();
2451 return ((n < 0L) ? 0 :
2452 (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
2453 (int)n);
2454 }
2455
2456 /**
2457 * Returns the number of mappings. This method should be used
2458 * instead of {@link #size} because a ConcurrentHashMap may
2459 * contain more mappings than can be represented as an int. The
2460 * value returned is a snapshot; the actual count may differ if
2461 * there are ongoing concurrent insertions or removals.
2462 *
2463 * @return the number of mappings
2464 */
2465 public long mappingCount() {
2466 long n = counter.sum();
2467 return (n < 0L) ? 0L : n;
2468 }
2469
2470 /**
2471 * Returns the value to which the specified key is mapped,
2472 * or {@code null} if this map contains no mapping for the key.
2473 *
2474 * <p>More formally, if this map contains a mapping from a key
2475 * {@code k} to a value {@code v} such that {@code key.equals(k)},
2476 * then this method returns {@code v}; otherwise it returns
2477 * {@code null}. (There can be at most one such mapping.)
2478 *
2479 * @throws NullPointerException if the specified key is null
2480 */
2481 @SuppressWarnings("unchecked")
2482 public V get(Object key) {
2483 if (key == null)
2484 throw new NullPointerException();
2485 return (V)internalGet(key);
2486 }
2487
2488 /**
2489 * Tests if the specified object is a key in this table.
2490 *
2491 * @param key possible key
2492 * @return {@code true} if and only if the specified object
2493 * is a key in this table, as determined by the
2494 * {@code equals} method; {@code false} otherwise
2495 * @throws NullPointerException if the specified key is null
2496 */
2497 public boolean containsKey(Object key) {
2498 if (key == null)
2499 throw new NullPointerException();
2500 return internalGet(key) != null;
2501 }
2502
2503 /**
2504 * Returns {@code true} if this map maps one or more keys to the
2505 * specified value. Note: This method may require a full traversal
2506 * of the map, and is much slower than method {@code containsKey}.
2507 *
2508 * @param value value whose presence in this map is to be tested
2509 * @return {@code true} if this map maps one or more keys to the
2510 * specified value
2511 * @throws NullPointerException if the specified value is null
2512 */
2513 public boolean containsValue(Object value) {
2514 if (value == null)
2515 throw new NullPointerException();
2516 Object v;
2517 Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
2518 while ((v = it.advance()) != null) {
2519 if (v == value || value.equals(v))
2520 return true;
2521 }
2522 return false;
2523 }
2524
2525 /**
2526 * Legacy method testing if some key maps into the specified value
2527 * in this table. This method is identical in functionality to
2528 * {@link #containsValue}, and exists solely to ensure
2529 * full compatibility with class {@link java.util.Hashtable},
2530 * which supported this method prior to introduction of the
2531 * Java Collections framework.
2532 *
2533 * @param value a value to search for
2534 * @return {@code true} if and only if some key maps to the
2535 * {@code value} argument in this table as
2536 * determined by the {@code equals} method;
2537 * {@code false} otherwise
2538 * @throws NullPointerException if the specified value is null
2539 */
2540 public boolean contains(Object value) {
2541 return containsValue(value);
2542 }
2543
2544 /**
2545 * Maps the specified key to the specified value in this table.
2546 * Neither the key nor the value can be null.
2547 *
2548 * <p> The value can be retrieved by calling the {@code get} method
2549 * with a key that is equal to the original key.
2550 *
2551 * @param key key with which the specified value is to be associated
2552 * @param value value to be associated with the specified key
2553 * @return the previous value associated with {@code key}, or
2554 * {@code null} if there was no mapping for {@code key}
2555 * @throws NullPointerException if the specified key or value is null
2556 */
2557 @SuppressWarnings("unchecked")
2558 public V put(K key, V value) {
2559 if (key == null || value == null)
2560 throw new NullPointerException();
2561 return (V)internalPut(key, value);
2562 }
2563
2564 /**
2565 * {@inheritDoc}
2566 *
2567 * @return the previous value associated with the specified key,
2568 * or {@code null} if there was no mapping for the key
2569 * @throws NullPointerException if the specified key or value is null
2570 */
2571 @SuppressWarnings("unchecked")
2572 public V putIfAbsent(K key, V value) {
2573 if (key == null || value == null)
2574 throw new NullPointerException();
2575 return (V)internalPutIfAbsent(key, value);
2576 }
2577
2578 /**
2579 * Copies all of the mappings from the specified map to this one.
2580 * These mappings replace any mappings that this map had for any of the
2581 * keys currently in the specified map.
2582 *
2583 * @param m mappings to be stored in this map
2584 */
2585 public void putAll(Map<? extends K, ? extends V> m) {
2586 internalPutAll(m);
2587 }
2588
2589 /**
2590 * If the specified key is not already associated with a value,
2591 * computes its value using the given mappingFunction and enters
2592 * it into the map unless null. This is equivalent to
2593 * <pre> {@code
2594 * if (map.containsKey(key))
2595 * return map.get(key);
2596 * value = mappingFunction.apply(key);
2597 * if (value != null)
2598 * map.put(key, value);
2599 * return value;}</pre>
2600 *
2601 * except that the action is performed atomically. If the
2602 * function returns {@code null} no mapping is recorded. If the
2603 * function itself throws an (unchecked) exception, the exception
2604 * is rethrown to its caller, and no mapping is recorded. Some
2605 * attempted update operations on this map by other threads may be
2606 * blocked while computation is in progress, so the computation
2607 * should be short and simple, and must not attempt to update any
2608 * other mappings of this Map. The most appropriate usage is to
2609 * construct a new object serving as an initial mapped value, or
2610 * memoized result, as in:
2611 *
2612 * <pre> {@code
2613 * map.computeIfAbsent(key, new Fun<K, V>() {
2614 * public V apply(K k) { return new Value(f(k)); }});}</pre>
2615 *
2616 * @param key key with which the specified value is to be associated
2617 * @param mappingFunction the function to compute a value
2618 * @return the current (existing or computed) value associated with
2619 * the specified key, or null if the computed value is null.
2620 * @throws NullPointerException if the specified key or mappingFunction
2621 * is null
2622 * @throws IllegalStateException if the computation detectably
2623 * attempts a recursive update to this map that would
2624 * otherwise never complete
2625 * @throws RuntimeException or Error if the mappingFunction does so,
2626 * in which case the mapping is left unestablished
2627 */
2628 @SuppressWarnings("unchecked")
2629 public V computeIfAbsent(K key, Fun<? super K, ? extends V> mappingFunction) {
2630 if (key == null || mappingFunction == null)
2631 throw new NullPointerException();
2632 return (V)internalComputeIfAbsent(key, mappingFunction);
2633 }
2634
2635 /**
2636 * If the given key is present, computes a new mapping value given a key and
2637 * its current mapped value. This is equivalent to
2638 * <pre> {@code
2639 * if (map.containsKey(key)) {
2640 * value = remappingFunction.apply(key, map.get(key));
2641 * if (value != null)
2642 * map.put(key, value);
2643 * else
2644 * map.remove(key);
2645 * }
2646 * }</pre>
2647 *
2648 * except that the action is performed atomically. If the
2649 * function returns {@code null}, the mapping is removed. If the
2650 * function itself throws an (unchecked) exception, the exception
2651 * is rethrown to its caller, and the current mapping is left
2652 * unchanged. Some attempted update operations on this map by
2653 * other threads may be blocked while computation is in progress,
2654 * so the computation should be short and simple, and must not
2655 * attempt to update any other mappings of this Map.
2657 *
2658 * @param key key with which the specified value is to be associated
2659 * @param remappingFunction the function to compute a value
2660 * @return the new value associated with the specified key, or null if none
2661 * @throws NullPointerException if the specified key or remappingFunction
2662 * is null
2663 * @throws IllegalStateException if the computation detectably
2664 * attempts a recursive update to this map that would
2665 * otherwise never complete
2666 * @throws RuntimeException or Error if the remappingFunction does so,
2667 * in which case the mapping is unchanged
2668 */
2669 public V computeIfPresent(K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
2670 if (key == null || remappingFunction == null)
2671 throw new NullPointerException();
2672 return (V)internalCompute(key, true, remappingFunction);
2673 }
2674
2675 /**
2676 * Computes a new mapping value given a key and
2677 * its current mapped value (or {@code null} if there is no current
2678 * mapping). This is equivalent to
2679 * <pre> {@code
2680 * value = remappingFunction.apply(key, map.get(key));
2681 * if (value != null)
2682 * map.put(key, value);
2683 * else
2684 * map.remove(key);
2685 * }</pre>
2686 *
2687 * except that the action is performed atomically. If the
2688 * function returns {@code null}, the mapping is removed. If the
2689 * function itself throws an (unchecked) exception, the exception
2690 * is rethrown to its caller, and the current mapping is left
2691 * unchanged. Some attempted update operations on this map by
2692 * other threads may be blocked while computation is in progress,
2693 * so the computation should be short and simple, and must not
2694 * attempt to update any other mappings of this Map. For example,
2695 * to either create or append new messages to a value mapping:
2696 *
2697 * <pre> {@code
2698 * Map<Key, String> map = ...;
2699 * final String msg = ...;
2700 * map.compute(key, new BiFun<Key, String, String>() {
2701 * public String apply(Key k, String v) {
2702 * return (v == null) ? msg : v + msg; }});}</pre>
2703 *
2704 * @param key key with which the specified value is to be associated
2705 * @param remappingFunction the function to compute a value
2706 * @return the new value associated with the specified key, or null if none
2707 * @throws NullPointerException if the specified key or remappingFunction
2708 * is null
2709 * @throws IllegalStateException if the computation detectably
2710 * attempts a recursive update to this map that would
2711 * otherwise never complete
2712 * @throws RuntimeException or Error if the remappingFunction does so,
2713 * in which case the mapping is unchanged
2714 */
2715 // @SuppressWarnings("unchecked")
2716 public V compute(K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
2717 if (key == null || remappingFunction == null)
2718 throw new NullPointerException();
2719 return (V)internalCompute(key, false, remappingFunction);
2720 }
2721
2722 /**
2723 * If the specified key is not already associated
2724 * with a value, associates it with the given value.
2725 * Otherwise, replaces the value with the results of
2726 * the given remapping function. This is equivalent to:
2727 * <pre> {@code
2728 * if (!map.containsKey(key))
2729 * map.put(key, value);
2730 * else {
2731 * newValue = remappingFunction.apply(map.get(key), value);
2732 * if (newValue != null)
2733 * map.put(key, newValue);
2734 * else
2735 * map.remove(key);
2736 * }
2737 * }</pre>
2738 * except that the action is performed atomically. If the
2739 * function returns {@code null}, the mapping is removed. If the
2740 * function itself throws an (unchecked) exception, the exception
2741 * is rethrown to its caller, and the current mapping is left
2742 * unchanged. Some attempted update operations on this map by
2743 * other threads may be blocked while computation is in progress,
2744 * so the computation should be short and simple, and must not
2745 * attempt to update any other mappings of this Map.
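*
* <p>For example (an illustrative sketch, reusing the placeholder
* {@code Key} type and {@code msg} variable of the {@link #compute}
* example), to either create or append new messages:
*
* <pre> {@code
* ConcurrentHashMapV8<Key, String> map = ...;
* final String msg = ...;
* map.merge(key, msg, new BiFun<String, String, String>() {
*   public String apply(String oldVal, String newVal) {
*     return oldVal + newVal; }});}</pre>
*
* @param key key with which the specified value is to be associated
* @param value the value to use if absent
* @param remappingFunction the function to recompute a value if present
* @return the new value associated with the specified key, or null if none
* @throws NullPointerException if the specified key or value or
*         remappingFunction is null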
2746 */
2747 // @SuppressWarnings("unchecked")
2748 public V merge(K key, V value, BiFun<? super V, ? super V, ? extends V> remappingFunction) {
2749 if (key == null || value == null || remappingFunction == null)
2750 throw new NullPointerException();
2751 return (V)internalMerge(key, value, remappingFunction);
2752 }
2753
2754 /**
2755 * Removes the key (and its corresponding value) from this map.
2756 * This method does nothing if the key is not in the map.
2757 *
2758 * @param key the key that needs to be removed
2759 * @return the previous value associated with {@code key}, or
2760 * {@code null} if there was no mapping for {@code key}
2761 * @throws NullPointerException if the specified key is null
2762 */
2763 @SuppressWarnings("unchecked")
2764 public V remove(Object key) {
2765 if (key == null)
2766 throw new NullPointerException();
2767 return (V)internalReplace(key, null, null);
2768 }
2769
2770 /**
2771 * {@inheritDoc}
2772 *
2773 * @throws NullPointerException if the specified key is null
2774 */
2775 public boolean remove(Object key, Object value) {
2776 if (key == null)
2777 throw new NullPointerException();
2778 if (value == null)
2779 return false;
2780 return internalReplace(key, null, value) != null;
2781 }
2782
2783 /**
2784 * {@inheritDoc}
2785 *
2786 * @throws NullPointerException if any of the arguments are null
2787 */
2788 public boolean replace(K key, V oldValue, V newValue) {
2789 if (key == null || oldValue == null || newValue == null)
2790 throw new NullPointerException();
2791 return internalReplace(key, newValue, oldValue) != null;
2792 }
2793
2794 /**
2795 * {@inheritDoc}
2796 *
2797 * @return the previous value associated with the specified key,
2798 * or {@code null} if there was no mapping for the key
2799 * @throws NullPointerException if the specified key or value is null
2800 */
2801 @SuppressWarnings("unchecked")
2802 public V replace(K key, V value) {
2803 if (key == null || value == null)
2804 throw new NullPointerException();
2805 return (V)internalReplace(key, value, null);
2806 }
2807
2808 /**
2809 * Removes all of the mappings from this map.
2810 */
2811 public void clear() {
2812 internalClear();
2813 }
2814
2815 /**
2816 * Returns a {@link Set} view of the keys contained in this map.
2817 * The set is backed by the map, so changes to the map are
2818 * reflected in the set, and vice-versa. The set supports element
2819 * removal, which removes the corresponding mapping from this map,
2820 * via the {@code Iterator.remove}, {@code Set.remove},
2821 * {@code removeAll}, {@code retainAll}, and {@code clear}
2822 * operations. It does not support the {@code add} or
2823 * {@code addAll} operations.
2824 *
2825 * <p>The view's {@code iterator} is a "weakly consistent" iterator
2826 * that will never throw {@link ConcurrentModificationException},
2827 * and guarantees to traverse elements as they existed upon
2828 * construction of the iterator, and may (but is not guaranteed to)
2829 * reflect any modifications subsequent to construction.
2830 */
2831 public Set<K> keySet() {
2832 KeySet<K,V> ks = keySet;
2833 return (ks != null) ? ks : (keySet = new KeySet<K,V>(this));
2834 }
2835
2836 /**
2837 * Returns a {@link Collection} view of the values contained in this map.
2838 * The collection is backed by the map, so changes to the map are
2839 * reflected in the collection, and vice-versa. The collection
2840 * supports element removal, which removes the corresponding
2841 * mapping from this map, via the {@code Iterator.remove},
2842 * {@code Collection.remove}, {@code removeAll},
2843 * {@code retainAll}, and {@code clear} operations. It does not
2844 * support the {@code add} or {@code addAll} operations.
2845 *
2846 * <p>The view's {@code iterator} is a "weakly consistent" iterator
2847 * that will never throw {@link ConcurrentModificationException},
2848 * and guarantees to traverse elements as they existed upon
2849 * construction of the iterator, and may (but is not guaranteed to)
2850 * reflect any modifications subsequent to construction.
2851 */
2852 public Collection<V> values() {
2853 Values<K,V> vs = values;
2854 return (vs != null) ? vs : (values = new Values<K,V>(this));
2855 }
2856
2857 /**
2858 * Returns a {@link Set} view of the mappings contained in this map.
2859 * The set is backed by the map, so changes to the map are
2860 * reflected in the set, and vice-versa. The set supports element
2861 * removal, which removes the corresponding mapping from the map,
2862 * via the {@code Iterator.remove}, {@code Set.remove},
2863 * {@code removeAll}, {@code retainAll}, and {@code clear}
2864 * operations. It does not support the {@code add} or
2865 * {@code addAll} operations.
2866 *
2867 * <p>The view's {@code iterator} is a "weakly consistent" iterator
2868 * that will never throw {@link ConcurrentModificationException},
2869 * and guarantees to traverse elements as they existed upon
2870 * construction of the iterator, and may (but is not guaranteed to)
2871 * reflect any modifications subsequent to construction.
2872 */
2873 public Set<Map.Entry<K,V>> entrySet() {
2874 EntrySet<K,V> es = entrySet;
2875 return (es != null) ? es : (entrySet = new EntrySet<K,V>(this));
2876 }
2877
2878 /**
2879 * Returns an enumeration of the keys in this table.
2880 *
2881 * @return an enumeration of the keys in this table
2882 * @see #keySet()
2883 */
2884 public Enumeration<K> keys() {
2885 return new KeyIterator<K,V>(this);
2886 }
2887
2888 /**
2889 * Returns an enumeration of the values in this table.
2890 *
2891 * @return an enumeration of the values in this table
2892 * @see #values()
2893 */
2894 public Enumeration<V> elements() {
2895 return new ValueIterator<K,V>(this);
2896 }
2897
2898 /**
2899 * Returns a partitionable iterator of the keys in this map.
2900 *
2901 * @return a partitionable iterator of the keys in this map
2902 */
2903 public Spliterator<K> keySpliterator() {
2904 return new KeyIterator<K,V>(this);
2905 }
2906
2907 /**
2908 * Returns a partitionable iterator of the values in this map.
2909 *
2910 * @return a partitionable iterator of the values in this map
2911 */
2912 public Spliterator<V> valueSpliterator() {
2913 return new ValueIterator<K,V>(this);
2914 }
2915
2916 /**
2917 * Returns a partitionable iterator of the entries in this map.
2918 *
2919 * @return a partitionable iterator of the entries in this map
2920 */
2921 public Spliterator<Map.Entry<K,V>> entrySpliterator() {
2922 return new EntryIterator<K,V>(this);
2923 }
2924
2925 /**
2926 * Returns the hash code value for this {@link Map}, i.e.,
2927 * the sum of, for each key-value pair in the map,
2928 * {@code key.hashCode() ^ value.hashCode()}.
2929 *
2930 * @return the hash code value for this map
2931 */
2932 public int hashCode() {
2933 int h = 0;
2934 Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
2935 Object v;
2936 while ((v = it.advance()) != null) {
2937 h += it.nextKey.hashCode() ^ v.hashCode();
2938 }
2939 return h;
2940 }
2941
2942 /**
2943 * Returns a string representation of this map. The string
2944 * representation consists of a list of key-value mappings (in no
2945 * particular order) enclosed in braces ("{@code {}}"). Adjacent
2946 * mappings are separated by the characters {@code ", "} (comma
2947 * and space). Each key-value mapping is rendered as the key
2948 * followed by an equals sign ("{@code =}") followed by the
2949 * associated value.
2950 *
2951 * @return a string representation of this map
2952 */
2953 public String toString() {
2954 Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
2955 StringBuilder sb = new StringBuilder();
2956 sb.append('{');
2957 Object v;
2958 if ((v = it.advance()) != null) {
2959 for (;;) {
2960 Object k = it.nextKey;
2961 sb.append(k == this ? "(this Map)" : k);
2962 sb.append('=');
2963 sb.append(v == this ? "(this Map)" : v);
2964 if ((v = it.advance()) == null)
2965 break;
2966 sb.append(',').append(' ');
2967 }
2968 }
2969 return sb.append('}').toString();
2970 }
2971
2972 /**
2973 * Compares the specified object with this map for equality.
2974 * Returns {@code true} if the given object is a map with the same
2975 * mappings as this map. This operation may return misleading
2976 * results if either map is concurrently modified during execution
2977 * of this method.
2978 *
2979 * @param o object to be compared for equality with this map
2980 * @return {@code true} if the specified object is equal to this map
2981 */
2982 public boolean equals(Object o) {
2983 if (o != this) {
2984 if (!(o instanceof Map))
2985 return false;
2986 Map<?,?> m = (Map<?,?>) o;
2987 Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
2988 Object val;
2989 while ((val = it.advance()) != null) {
2990 Object v = m.get(it.nextKey);
2991 if (v == null || (v != val && !v.equals(val)))
2992 return false;
2993 }
2994 for (Map.Entry<?,?> e : m.entrySet()) {
2995 Object mk, mv, v;
2996 if ((mk = e.getKey()) == null ||
2997 (mv = e.getValue()) == null ||
2998 (v = internalGet(mk)) == null ||
2999 (mv != v && !mv.equals(v)))
3000 return false;
3001 }
3002 }
3003 return true;
3004 }
3005
3006 /* ----------------Iterators -------------- */
3007
3008 static final class KeyIterator<K,V> extends Traverser<K,V,Object>
3009 implements Spliterator<K>, Enumeration<K> {
3010 KeyIterator(ConcurrentHashMapV8<K, V> map) { super(map); }
3011 KeyIterator(Traverser<K,V,Object> it, boolean split) {
3012 super(it, split);
3013 }
3014 public KeyIterator<K,V> split() {
3015 if (last != null || (next != null && nextVal == null))
3016 throw new IllegalStateException();
3017 return new KeyIterator<K,V>(this, true);
3018 }
3019 @SuppressWarnings("unchecked")
3020 public final K next() {
3021 if (nextVal == null && advance() == null)
3022 throw new NoSuchElementException();
3023 Object k = nextKey;
3024 nextVal = null;
3025 return (K) k;
3026 }
3027
3028 public final K nextElement() { return next(); }
3029 }
3030
3031 static final class ValueIterator<K,V> extends Traverser<K,V,Object>
3032 implements Spliterator<V>, Enumeration<V> {
3033 ValueIterator(ConcurrentHashMapV8<K, V> map) { super(map); }
3034 ValueIterator(Traverser<K,V,Object> it, boolean split) {
3035 super(it, split);
3036 }
3037 public ValueIterator<K,V> split() {
3038 if (last != null || (next != null && nextVal == null))
3039 throw new IllegalStateException();
3040 return new ValueIterator<K,V>(this, true);
3041 }
3042
3043 @SuppressWarnings("unchecked")
3044 public final V next() {
3045 Object v;
3046 if ((v = nextVal) == null && (v = advance()) == null)
3047 throw new NoSuchElementException();
3048 nextVal = null;
3049 return (V) v;
3050 }
3051
3052 public final V nextElement() { return next(); }
3053 }
3054
3055 static final class EntryIterator<K,V> extends Traverser<K,V,Object>
3056 implements Spliterator<Map.Entry<K,V>> {
3057 EntryIterator(ConcurrentHashMapV8<K, V> map) { super(map); }
3058 EntryIterator(Traverser<K,V,Object> it, boolean split) {
3059 super(it, split);
3060 }
3061 public EntryIterator<K,V> split() {
3062 if (last != null || (next != null && nextVal == null))
3063 throw new IllegalStateException();
3064 return new EntryIterator<K,V>(this, true);
3065 }
3066
3067 @SuppressWarnings("unchecked")
3068 public final Map.Entry<K,V> next() {
3069 Object v;
3070 if ((v = nextVal) == null && (v = advance()) == null)
3071 throw new NoSuchElementException();
3072 Object k = nextKey;
3073 nextVal = null;
3074 return new MapEntry<K,V>((K)k, (V)v, map);
3075 }
3076 }
3077
3078 /**
3079 * Exported Entry for iterators
3080 */
3081 static final class MapEntry<K,V> implements Map.Entry<K, V> {
3082 final K key; // non-null
3083 V val; // non-null
3084 final ConcurrentHashMapV8<K, V> map;
3085 MapEntry(K key, V val, ConcurrentHashMapV8<K, V> map) {
3086 this.key = key;
3087 this.val = val;
3088 this.map = map;
3089 }
3090 public final K getKey() { return key; }
3091 public final V getValue() { return val; }
3092 public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
3093 public final String toString(){ return key + "=" + val; }
3094
3095 public final boolean equals(Object o) {
3096 Object k, v; Map.Entry<?,?> e;
3097 return ((o instanceof Map.Entry) &&
3098 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3099 (v = e.getValue()) != null &&
3100 (k == key || k.equals(key)) &&
3101 (v == val || v.equals(val)));
3102 }
3103
3104 /**
3105 * Sets our entry's value and writes through to the map. The
3106 * value to return is somewhat arbitrary here. Since we do not
3107 * necessarily track asynchronous changes, the most recent
3108 * "previous" value could be different from what we return (or
3109 * could even have been removed, in which case the put will
3110 * re-establish it). We do not and cannot guarantee more.
3111 */
3112 public final V setValue(V value) {
3113 if (value == null) throw new NullPointerException();
3114 V v = val;
3115 val = value;
3116 map.put(key, value);
3117 return v;
3118 }
3119 }
3120
3121 /* ----------------Views -------------- */
3122
3123 /**
3124 * Base class for views.
3125 */
3126 static abstract class CHMView<K, V> {
3127 final ConcurrentHashMapV8<K, V> map;
3128 CHMView(ConcurrentHashMapV8<K, V> map) { this.map = map; }
3129 public final int size() { return map.size(); }
3130 public final boolean isEmpty() { return map.isEmpty(); }
3131 public final void clear() { map.clear(); }
3132
3133 // implementations below rely on concrete classes supplying these
3134 abstract public Iterator<?> iterator();
3135 abstract public boolean contains(Object o);
3136 abstract public boolean remove(Object o);
3137
3138 private static final String oomeMsg = "Required array size too large";
3139
3140 public final Object[] toArray() {
3141 long sz = map.mappingCount();
3142 if (sz > (long)(MAX_ARRAY_SIZE))
3143 throw new OutOfMemoryError(oomeMsg);
3144 int n = (int)sz;
3145 Object[] r = new Object[n];
3146 int i = 0;
3147 Iterator<?> it = iterator();
3148 while (it.hasNext()) {
3149 if (i == n) {
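// size estimate was too small; grow the array by about 50%,
// clamping at MAX_ARRAY_SIZE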
3150 if (n >= MAX_ARRAY_SIZE)
3151 throw new OutOfMemoryError(oomeMsg);
3152 if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
3153 n = MAX_ARRAY_SIZE;
3154 else
3155 n += (n >>> 1) + 1;
3156 r = Arrays.copyOf(r, n);
3157 }
3158 r[i++] = it.next();
3159 }
3160 return (i == n) ? r : Arrays.copyOf(r, i);
3161 }
3162
3163 @SuppressWarnings("unchecked")
3164 public final <T> T[] toArray(T[] a) {
3165 long sz = map.mappingCount();
3166 if (sz > (long)(MAX_ARRAY_SIZE))
3167 throw new OutOfMemoryError(oomeMsg);
3168 int m = (int)sz;
3169 T[] r = (a.length >= m) ? a :
3170 (T[])java.lang.reflect.Array
3171 .newInstance(a.getClass().getComponentType(), m);
3172 int n = r.length;
3173 int i = 0;
3174 Iterator<?> it = iterator();
3175 while (it.hasNext()) {
3176 if (i == n) {
3177 if (n >= MAX_ARRAY_SIZE)
3178 throw new OutOfMemoryError(oomeMsg);
3179 if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
3180 n = MAX_ARRAY_SIZE;
3181 else
3182 n += (n >>> 1) + 1;
3183 r = Arrays.copyOf(r, n);
3184 }
3185 r[i++] = (T)it.next();
3186 }
3187 if (a == r && i < n) {
3188 r[i] = null; // null-terminate
3189 return r;
3190 }
3191 return (i == n) ? r : Arrays.copyOf(r, i);
3192 }
3193
3194 public final int hashCode() {
3195 int h = 0;
3196 for (Iterator<?> it = iterator(); it.hasNext();)
3197 h += it.next().hashCode();
3198 return h;
3199 }
3200
3201 public final String toString() {
3202 StringBuilder sb = new StringBuilder();
3203 sb.append('[');
3204 Iterator<?> it = iterator();
3205 if (it.hasNext()) {
3206 for (;;) {
3207 Object e = it.next();
3208 sb.append(e == this ? "(this Collection)" : e);
3209 if (!it.hasNext())
3210 break;
3211 sb.append(',').append(' ');
3212 }
3213 }
3214 return sb.append(']').toString();
3215 }
3216
3217 public final boolean containsAll(Collection<?> c) {
3218 if (c != this) {
3219 for (Iterator<?> it = c.iterator(); it.hasNext();) {
3220 Object e = it.next();
3221 if (e == null || !contains(e))
3222 return false;
3223 }
3224 }
3225 return true;
3226 }
3227
3228 public final boolean removeAll(Collection<?> c) {
3229 boolean modified = false;
3230 for (Iterator<?> it = iterator(); it.hasNext();) {
3231 if (c.contains(it.next())) {
3232 it.remove();
3233 modified = true;
3234 }
3235 }
3236 return modified;
3237 }
3238
3239 public final boolean retainAll(Collection<?> c) {
3240 boolean modified = false;
3241 for (Iterator<?> it = iterator(); it.hasNext();) {
3242 if (!c.contains(it.next())) {
3243 it.remove();
3244 modified = true;
3245 }
3246 }
3247 return modified;
3248 }
3249
3250 }
3251
3252 static final class KeySet<K,V> extends CHMView<K,V> implements Set<K> {
3253 KeySet(ConcurrentHashMapV8<K, V> map) {
3254 super(map);
3255 }
3256 public final boolean contains(Object o) { return map.containsKey(o); }
3257 public final boolean remove(Object o) { return map.remove(o) != null; }
3258 public final Iterator<K> iterator() {
3259 return new KeyIterator<K,V>(map);
3260 }
3261 public final boolean add(K e) {
3262 throw new UnsupportedOperationException();
3263 }
3264 public final boolean addAll(Collection<? extends K> c) {
3265 throw new UnsupportedOperationException();
3266 }
3267 public boolean equals(Object o) {
3268 Set<?> c;
3269 return ((o instanceof Set) &&
3270 ((c = (Set<?>)o) == this ||
3271 (containsAll(c) && c.containsAll(this))));
3272 }
3273 }
3274
3275
3276 static final class Values<K,V> extends CHMView<K,V>
3277 implements Collection<V> {
3278 Values(ConcurrentHashMapV8<K, V> map) { super(map); }
3279 public final boolean contains(Object o) { return map.containsValue(o); }
3280 public final boolean remove(Object o) {
3281 if (o != null) {
3282 Iterator<V> it = new ValueIterator<K,V>(map);
3283 while (it.hasNext()) {
3284 if (o.equals(it.next())) {
3285 it.remove();
3286 return true;
3287 }
3288 }
3289 }
3290 return false;
3291 }
3292 public final Iterator<V> iterator() {
3293 return new ValueIterator<K,V>(map);
3294 }
3295 public final boolean add(V e) {
3296 throw new UnsupportedOperationException();
3297 }
3298 public final boolean addAll(Collection<? extends V> c) {
3299 throw new UnsupportedOperationException();
3300 }
3301
3302 }
3303
3304 static final class EntrySet<K,V> extends CHMView<K,V>
3305 implements Set<Map.Entry<K,V>> {
3306 EntrySet(ConcurrentHashMapV8<K, V> map) { super(map); }
3307 public final boolean contains(Object o) {
3308 Object k, v, r; Map.Entry<?,?> e;
3309 return ((o instanceof Map.Entry) &&
3310 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3311 (r = map.get(k)) != null &&
3312 (v = e.getValue()) != null &&
3313 (v == r || v.equals(r)));
3314 }
3315 public final boolean remove(Object o) {
3316 Object k, v; Map.Entry<?,?> e;
3317 return ((o instanceof Map.Entry) &&
3318 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3319 (v = e.getValue()) != null &&
3320 map.remove(k, v));
3321 }
3322 public final Iterator<Map.Entry<K,V>> iterator() {
3323 return new EntryIterator<K,V>(map);
3324 }
3325 public final boolean add(Entry<K,V> e) {
3326 throw new UnsupportedOperationException();
3327 }
3328 public final boolean addAll(Collection<? extends Entry<K,V>> c) {
3329 throw new UnsupportedOperationException();
3330 }
3331 public boolean equals(Object o) {
3332 Set<?> c;
3333 return ((o instanceof Set) &&
3334 ((c = (Set<?>)o) == this ||
3335 (containsAll(c) && c.containsAll(this))));
3336 }
3337 }
3338
3339 /* ---------------- Serialization Support -------------- */
3340
3341 /**
3342 * Stripped-down version of helper class used in previous version,
3343 * declared for the sake of serialization compatibility
3344 */
3345 static class Segment<K,V> implements Serializable {
3346 private static final long serialVersionUID = 2249069246763182397L;
3347 final float loadFactor;
3348 Segment(float lf) { this.loadFactor = lf; }
3349 }
3350
3351 /**
3352 * Saves the state of the {@code ConcurrentHashMapV8} instance to a
3353 * stream (i.e., serializes it).
3354 * @param s the stream
3355 * @serialData
3356 * the key (Object) and value (Object)
3357 * for each key-value mapping, followed by a null pair.
3358 * The key-value mappings are emitted in no particular order.
3359 */
3360 @SuppressWarnings("unchecked")
3361 private void writeObject(java.io.ObjectOutputStream s)
3362 throws java.io.IOException {
3363 if (segments == null) { // for serialization compatibility
3364 segments = (Segment<K,V>[])
3365 new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
3366 for (int i = 0; i < segments.length; ++i)
3367 segments[i] = new Segment<K,V>(LOAD_FACTOR);
3368 }
3369 s.defaultWriteObject();
3370 Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3371 Object v;
3372 while ((v = it.advance()) != null) {
3373 s.writeObject(it.nextKey);
3374 s.writeObject(v);
3375 }
3376 s.writeObject(null);
3377 s.writeObject(null);
3378 segments = null; // throw away
3379 }
3380
3381 /**
3382 * Reconstitutes the instance from a stream (that is, deserializes it).
3383 * @param s the stream
3384 */
3385 @SuppressWarnings("unchecked")
3386 private void readObject(java.io.ObjectInputStream s)
3387 throws java.io.IOException, ClassNotFoundException {
3388 s.defaultReadObject();
3389 this.segments = null; // unneeded
3390 // initialize transient final field
3391 UNSAFE.putObjectVolatile(this, counterOffset, new LongAdder());
3392
3393 // Create all nodes, then place in table once size is known
3394 long size = 0L;
3395 Node p = null;
3396 for (;;) {
3397 K k = (K) s.readObject();
3398 V v = (V) s.readObject();
3399 if (k != null && v != null) {
3400 int h = spread(k.hashCode());
3401 p = new Node(h, k, v, p);
3402 ++size;
3403 }
3404 else
3405 break;
3406 }
3407 if (p != null) {
3408 boolean init = false;
3409 int n;
3410 if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
3411 n = MAXIMUM_CAPACITY;
3412 else {
3413 int sz = (int)size;
3414 n = tableSizeFor(sz + (sz >>> 1) + 1);
3415 }
3416 int sc = sizeCtl;
3417 boolean collide = false;
3418 if (n > sc &&
3419 UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
3420 try {
3421 if (table == null) {
3422 init = true;
3423 Node[] tab = new Node[n];
3424 int mask = n - 1;
3425 while (p != null) {
3426 int j = p.hash & mask;
3427 Node next = p.next;
3428 Node q = p.next = tabAt(tab, j);
3429 setTabAt(tab, j, p);
3430 if (!collide && q != null && q.hash == p.hash)
3431 collide = true;
3432 p = next;
3433 }
3434 table = tab;
3435 counter.add(size);
3436 sc = n - (n >>> 2);
3437 }
3438 } finally {
3439 sizeCtl = sc;
3440 }
3441 if (collide) { // rescan and convert to TreeBins
3442 Node[] tab = table;
3443 for (int i = 0; i < tab.length; ++i) {
3444 int c = 0;
3445 for (Node e = tabAt(tab, i); e != null; e = e.next) {
3446 if (++c > TREE_THRESHOLD &&
3447 (e.key instanceof Comparable)) {
3448 replaceWithTreeBin(tab, i, e.key);
3449 break;
3450 }
3451 }
3452 }
3453 }
3454 }
3455 if (!init) { // Can only happen if unsafely published.
3456 while (p != null) {
3457 internalPut(p.key, p.val);
3458 p = p.next;
3459 }
3460 }
3461 }
3462 }
3463
3464
3465 // -------------------------------------------------------
3466
3467 // SAMs (single-abstract-method interfaces)
3468 /** Interface describing a void action of one argument */
3469 public interface Action<A> { void apply(A a); }
3470 /** Interface describing a void action of two arguments */
3471 public interface BiAction<A,B> { void apply(A a, B b); }
3472 /** Interface describing a function of one argument */
3473 public interface Fun<A,T> { T apply(A a); }
3474 /** Interface describing a function of two arguments */
3475 public interface BiFun<A,B,T> { T apply(A a, B b); }
3476 /** Interface describing a function of no arguments */
3477 public interface Generator<T> { T apply(); }
3478 /** Interface describing a function mapping its argument to a double */
3479 public interface ObjectToDouble<A> { double apply(A a); }
3480 /** Interface describing a function mapping its argument to a long */
3481 public interface ObjectToLong<A> { long apply(A a); }
3482 /** Interface describing a function mapping its argument to an int */
3483 public interface ObjectToInt<A> { int apply(A a); }
3484 /** Interface describing a function mapping two arguments to a double */
3485 public interface ObjectByObjectToDouble<A,B> { double apply(A a, B b); }
3486 /** Interface describing a function mapping two arguments to a long */
3487 public interface ObjectByObjectToLong<A,B> { long apply(A a, B b); }
3488 /** Interface describing a function mapping two arguments to an int */
3489 public interface ObjectByObjectToInt<A,B> { int apply(A a, B b); }
3490 /** Interface describing a function mapping a double to a double */
3491 public interface DoubleToDouble { double apply(double a); }
3492 /** Interface describing a function mapping a long to a long */
3493 public interface LongToLong { long apply(long a); }
3494 /** Interface describing a function mapping an int to an int */
3495 public interface IntToInt { int apply(int a); }
3496 /** Interface describing a function mapping two doubles to a double */
3497 public interface DoubleByDoubleToDouble { double apply(double a, double b); }
3498 /** Interface describing a function mapping two longs to a long */
3499 public interface LongByLongToLong { long apply(long a, long b); }
3500 /** Interface describing a function mapping two ints to an int */
3501 public interface IntByIntToInt { int apply(int a, int b); }
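// Illustrative sketch only: during the transition noted in the Parallel
// class javadoc, these interfaces stand in for JDK8 lambda shapes and are
// normally supplied as anonymous classes, for example:
//
//   Fun<String,Integer> length = new Fun<String,Integer>() {
//       public Integer apply(String s) { return s.length(); }
//   };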
3502
3503
3504 // -------------------------------------------------------
3505
3506 /**
3507 * Returns an extended {@link Parallel} view of this map using the
3508 * given executor for bulk parallel operations.
3509 *
3510 * @param executor the executor
3511 * @return a parallel view
3512 */
3513 public Parallel parallel(ForkJoinPool executor) {
3514 return new Parallel(executor);
3515 }
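// Usage sketch (illustrative only): the map "m" of type
// ConcurrentHashMapV8<String,Long> and the caller-managed "pool" are
// assumptions, not part of this class.
//
//   ForkJoinPool pool = new ForkJoinPool();
//   Long total = m.parallel(pool).reduceValues(
//       new BiFun<Long,Long,Long>() {
//           public Long apply(Long a, Long b) { return a + b; }
//       });   // null if the map is empty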
3516
3517 /**
3518 * An extended view of a ConcurrentHashMap supporting bulk
3519 * parallel operations. These operations are designed to be
3520 * safely, and often sensibly, applied even with maps that are
3521 * being concurrently updated by other threads; for example, when
3522 * computing a snapshot summary of the values in a shared
3523 * registry. There are three kinds of operation, each with four
3524 * forms, accepting functions with Keys, Values, Entries, and
3525 * (Key, Value) arguments and/or return values. Because the
3526 * elements of a ConcurrentHashMap are not ordered in any
3527 * particular way, and may be processed in different orders in
3528 * different parallel executions, the correctness of supplied
3529 * functions should not depend on any ordering, or on any other
3530 * objects or values that may transiently change while computation
3531 * is in progress; and except for forEach actions, should ideally
3532 * be side-effect-free.
3533 *
3534 * <ul>
3535 * <li> forEach: Perform a given action on each element.
3536 * A variant form applies a given transformation on each element
3537 * before performing the action.</li>
3538 *
3539 * <li> search: Return the first available non-null result of
3540 * applying a given function on each element; skipping further
3541 * search when a result is found.</li>
3542 *
3543 * <li> reduce: Accumulate each element. The supplied reduction
3544 * function cannot rely on ordering (more formally, it should be
3545 * both associative and commutative). There are five variants:
3546 *
3547 * <ul>
3548 *
3549 * <li> Plain reductions. (There is not a form of this method for
3550 * (key, value) function arguments since there is no corresponding
3551 * return type.)</li>
3552 *
3553 * <li> Mapped reductions that accumulate the results of a given
3554 * function applied to each element.</li>
3555 *
3556 * <li> Reductions to scalar doubles, longs, and ints, using a
3557 * given basis value.</li>
3558 *
3559 * </ul>
3560 * </li>
3561 * </ul>
3562 *
3563 * <p>The concurrency properties of the bulk operations follow
3564 * from those of ConcurrentHashMap: Any non-null result returned
3565 * from {@code get(key)} and related access methods bears a
3566 * happens-before relation with the associated insertion or
3567 * update. The result of any bulk operation reflects the
3568 * composition of these per-element relations (but is not
3569 * necessarily atomic with respect to the map as a whole unless it
3570 * is somehow known to be quiescent). Conversely, because keys
3571 * and values in the map are never null, null serves as a reliable
3572 * atomic indicator of the current lack of any result. To
3573 * maintain this property, null serves as an implicit basis for
3574 * all non-scalar reduction operations. For the double, long, and
3575 * int versions, the basis should be one that, when combined with
3576 * any other value, returns that other value (more formally, it
3577 * should be the identity element for the reduction). Most common
3578 * reductions have these properties; for example, computing a sum
3579 * with basis 0 or a minimum with basis MAX_VALUE.
3580 *
3581 * <p>Search and transformation functions provided as arguments
3582 * should similarly return null to indicate the lack of any result
3583 * (in which case it is not used). In the case of mapped
3584 * reductions, this also enables transformations to serve as
3585 * filters, returning null (or, in the case of primitive
3586 * specializations, the identity basis) if the element should not
3587 * be combined. You can create compound transformations and
3588 * filterings by composing them yourself under this "null means
3589 * there is nothing there now" rule before using them in search or
3590 * reduce operations.
3591 *
3592 * <p>Methods accepting and/or returning Entry arguments maintain
3593 * key-value associations. They may be useful for example when
3594 * finding the key for the greatest value. Note that "plain" Entry
3595 * arguments can be supplied using {@code new
3596 * AbstractMap.SimpleEntry(k,v)}.
3597 *
3598 * <p> Bulk operations may complete abruptly, throwing an
3599 * exception encountered in the application of a supplied
3600 * function. Bear in mind when handling such exceptions that other
3601 * concurrently executing functions could also have thrown
3602 * exceptions, or would have done so if the first exception had
3603 * not occurred.
3604 *
3605 * <p>Parallel speedups compared to sequential processing are
3606 * common but not guaranteed. Operations involving brief
3607 * functions on small maps may execute more slowly than sequential
3608 * loops if the underlying work to parallelize the computation is
3609 * more expensive than the computation itself. Similarly,
3610 * parallelization may not lead to much actual parallelism if all
3611 * processors are busy performing unrelated tasks.
3612 *
3613 * <p> All arguments to all task methods must be non-null.
3614 *
3615 * <p><em>jsr166e note: During transition, this class
3616 * uses nested functional interfaces with different names but the
3617 * same forms as those expected for JDK8.</em>
3618 */
3619 public class Parallel {
3620 final ForkJoinPool fjp;
3621
3622 /**
3623 * Returns an extended view of this map using the given
3624 * executor for bulk parallel operations.
3625 *
3626 * @param executor the executor
3627 */
3628 public Parallel(ForkJoinPool executor) {
3629 this.fjp = executor;
3630 }
3631
3632 /**
3633 * Performs the given action for each (key, value).
3634 *
3635 * @param action the action
3636 */
3637 public void forEach(BiAction<K,V> action) {
3638 fjp.invoke(ForkJoinTasks.forEach
3639 (ConcurrentHashMapV8.this, action));
3640 }
3641
3642 /**
3643 * Performs the given action for each non-null transformation
3644 * of each (key, value).
3645 *
3646 * @param transformer a function returning the transformation
3647 * for an element, or null if there is no transformation (in
3648 * which case the action is not applied).
3649 * @param action the action
3650 */
3651 public <U> void forEach(BiFun<? super K, ? super V, ? extends U> transformer,
3652 Action<U> action) {
3653 fjp.invoke(ForkJoinTasks.forEach
3654 (ConcurrentHashMapV8.this, transformer, action));
3655 }
3656
3657 /**
3658 * Returns a non-null result from applying the given search
3659 * function on each (key, value), or null if none. Further
3660 * element processing is suppressed upon success. However,
3661 * this method does not return until other in-progress
3662 * parallel invocations of the search function also complete.
3663 *
3664 * @param searchFunction a function returning a non-null
3665 * result on success, else null
3666 * @return a non-null result from applying the given search
3667 * function on each (key, value), or null if none
3668 */
3669 public <U> U search(BiFun<? super K, ? super V, ? extends U> searchFunction) {
3670 return fjp.invoke(ForkJoinTasks.search
3671 (ConcurrentHashMapV8.this, searchFunction));
3672 }
3673
3674 /**
3675 * Returns the result of accumulating the given transformation
3676 * of all (key, value) pairs using the given reducer to
3677 * combine values, or null if none.
3678 *
3679 * @param transformer a function returning the transformation
3680 * for an element, or null if there is no transformation (in
3681 * which case it is not combined).
3682 * @param reducer a commutative associative combining function
3683 * @return the result of accumulating the given transformation
3684 * of all (key, value) pairs
3685 */
3686 public <U> U reduce(BiFun<? super K, ? super V, ? extends U> transformer,
3687 BiFun<? super U, ? super U, ? extends U> reducer) {
3688 return fjp.invoke(ForkJoinTasks.reduce
3689 (ConcurrentHashMapV8.this, transformer, reducer));
3690 }
3691
3692 /**
3693 * Returns the result of accumulating the given transformation
3694 * of all (key, value) pairs using the given reducer to
3695 * combine values, and the given basis as an identity value.
3696 *
3697 * @param transformer a function returning the transformation
3698 * for an element
3699 * @param basis the identity (initial default value) for the reduction
3700 * @param reducer a commutative associative combining function
3701 * @return the result of accumulating the given transformation
3702 * of all (key, value) pairs
3703 */
3704 public double reduceToDouble(ObjectByObjectToDouble<? super K, ? super V> transformer,
3705 double basis,
3706 DoubleByDoubleToDouble reducer) {
3707 return fjp.invoke(ForkJoinTasks.reduceToDouble
3708 (ConcurrentHashMapV8.this, transformer, basis, reducer));
3709 }
3710
3711 /**
3712 * Returns the result of accumulating the given transformation
3713 * of all (key, value) pairs using the given reducer to
3714 * combine values, and the given basis as an identity value.
3715 *
3716 * @param transformer a function returning the transformation
3717 * for an element
3718 * @param basis the identity (initial default value) for the reduction
3719 * @param reducer a commutative associative combining function
3720 * @return the result of accumulating the given transformation
3721 * of all (key, value) pairs using the given reducer to
3722 * combine values, and the given basis as an identity value.
3723 */
3724 public long reduceToLong(ObjectByObjectToLong<? super K, ? super V> transformer,
3725 long basis,
3726 LongByLongToLong reducer) {
3727 return fjp.invoke(ForkJoinTasks.reduceToLong
3728 (ConcurrentHashMapV8.this, transformer, basis, reducer));
3729 }
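// Sketch (illustrative only; reuses the hypothetical "m" and "pool" from
// the earlier sketch): summing key lengths with basis 0 as the identity.
//
//   long totalKeyChars = m.parallel(pool).reduceToLong(
//       new ObjectByObjectToLong<String,Long>() {
//           public long apply(String k, Long v) { return k.length(); }
//       },
//       0L,
//       new LongByLongToLong() {
//           public long apply(long a, long b) { return a + b; }
//       });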
3730
3731 /**
3732 * Returns the result of accumulating the given transformation
3733 * of all (key, value) pairs using the given reducer to
3734 * combine values, and the given basis as an identity value.
3735 *
3736 * @param transformer a function returning the transformation
3737 * for an element
3738 * @param basis the identity (initial default value) for the reduction
3739 * @param reducer a commutative associative combining function
3740 * @return the result of accumulating the given transformation
3741 * of all (key, value) pairs
3742 */
3743 public int reduceToInt(ObjectByObjectToInt<? super K, ? super V> transformer,
3744 int basis,
3745 IntByIntToInt reducer) {
3746 return fjp.invoke(ForkJoinTasks.reduceToInt
3747 (ConcurrentHashMapV8.this, transformer, basis, reducer));
3748 }
3749
3750 /**
3751 * Performs the given action for each key.
3752 *
3753 * @param action the action
3754 */
3755 public void forEachKey(Action<K> action) {
3756 fjp.invoke(ForkJoinTasks.forEachKey
3757 (ConcurrentHashMapV8.this, action));
3758 }
3759
3760 /**
3761 * Performs the given action for each non-null transformation
3762 * of each key.
3763 *
3764 * @param transformer a function returning the transformation
3765 * for an element, or null if there is no transformation (in
3766 * which case the action is not applied).
3767 * @param action the action
3768 */
3769 public <U> void forEachKey(Fun<? super K, ? extends U> transformer,
3770 Action<U> action) {
3771 fjp.invoke(ForkJoinTasks.forEachKey
3772 (ConcurrentHashMapV8.this, transformer, action));
3773 }
3774
3775 /**
3776 * Returns a non-null result from applying the given search
3777 * function on each key, or null if none. Further element
3778 * processing is suppressed upon success. However, this method
3779 * does not return until other in-progress parallel
3780 * invocations of the search function also complete.
3781 *
3782 * @param searchFunction a function returning a non-null
3783 * result on success, else null
3784 * @return a non-null result from applying the given search
3785 * function on each key, or null if none
3786 */
3787 public <U> U searchKeys(Fun<? super K, ? extends U> searchFunction) {
3788 return fjp.invoke(ForkJoinTasks.searchKeys
3789 (ConcurrentHashMapV8.this, searchFunction));
3790 }
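// Sketch (illustrative only; hypothetical "m" and "pool" as before):
// returns some key longer than 8 characters, or null if none is found.
//
//   String hit = m.parallel(pool).searchKeys(
//       new Fun<String,String>() {
//           public String apply(String k) { return (k.length() > 8) ? k : null; }
//       });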
3791
3792 /**
3793 * Returns the result of accumulating all keys using the given
3794 * reducer to combine values, or null if none.
3795 *
3796 * @param reducer a commutative associative combining function
3797 * @return the result of accumulating all keys using the given
3798 * reducer to combine values, or null if none
3799 */
3800 public K reduceKeys(BiFun<? super K, ? super K, ? extends K> reducer) {
3801 return fjp.invoke(ForkJoinTasks.reduceKeys
3802 (ConcurrentHashMapV8.this, reducer));
3803 }
3804
3805 /**
3806 * Returns the result of accumulating the given transformation
3807 * of all keys using the given reducer to combine values, or
3808 * null if none.
3809 *
3810 * @param transformer a function returning the transformation
3811 * for an element, or null if there is no transformation (in
3812 * which case it is not combined).
3813 * @param reducer a commutative associative combining function
3814 * @return the result of accumulating the given transformation
3815 * of all keys
3816 */
3817 public <U> U reduceKeys(Fun<? super K, ? extends U> transformer,
3818 BiFun<? super U, ? super U, ? extends U> reducer) {
3819 return fjp.invoke(ForkJoinTasks.reduceKeys
3820 (ConcurrentHashMapV8.this, transformer, reducer));
3821 }
3822
3823 /**
3824 * Returns the result of accumulating the given transformation
3825 * of all keys using the given reducer to combine values, and
3826 * the given basis as an identity value.
3827 *
3828 * @param transformer a function returning the transformation
3829 * for an element
3830 * @param basis the identity (initial default value) for the reduction
3831 * @param reducer a commutative associative combining function
3832 * @return the result of accumulating the given transformation
3833 * of all keys
3834 */
3835 public double reduceKeysToDouble(ObjectToDouble<? super K> transformer,
3836 double basis,
3837 DoubleByDoubleToDouble reducer) {
3838 return fjp.invoke(ForkJoinTasks.reduceKeysToDouble
3839 (ConcurrentHashMapV8.this, transformer, basis, reducer));
3840 }
3841
3842 /**
3843 * Returns the result of accumulating the given transformation
3844 * of all keys using the given reducer to combine values, and
3845 * the given basis as an identity value.
3846 *
3847 * @param transformer a function returning the transformation
3848 * for an element
3849 * @param basis the identity (initial default value) for the reduction
3850 * @param reducer a commutative associative combining function
3851 * @return the result of accumulating the given transformation
3852 * of all keys
3853 */
3854 public long reduceKeysToLong(ObjectToLong<? super K> transformer,
3855 long basis,
3856 LongByLongToLong reducer) {
3857 return fjp.invoke(ForkJoinTasks.reduceKeysToLong
3858 (ConcurrentHashMapV8.this, transformer, basis, reducer));
3859 }
3860
3861 /**
3862 * Returns the result of accumulating the given transformation
3863 * of all keys using the given reducer to combine values, and
3864 * the given basis as an identity value.
3865 *
3866 * @param transformer a function returning the transformation
3867 * for an element
3868 * @param basis the identity (initial default value) for the reduction
3869 * @param reducer a commutative associative combining function
3870 * @return the result of accumulating the given transformation
3871 * of all keys
3872 */
3873 public int reduceKeysToInt(ObjectToInt<? super K> transformer,
3874 int basis,
3875 IntByIntToInt reducer) {
3876 return fjp.invoke(ForkJoinTasks.reduceKeysToInt
3877 (ConcurrentHashMapV8.this, transformer, basis, reducer));
3878 }
3879
3880 /**
3881 * Performs the given action for each value.
3882 *
3883 * @param action the action
3884 */
3885 public void forEachValue(Action<V> action) {
3886 fjp.invoke(ForkJoinTasks.forEachValue
3887 (ConcurrentHashMapV8.this, action));
3888 }
3889
3890 /**
3891 * Performs the given action for each non-null transformation
3892 * of each value.
3893 *
3894 * @param transformer a function returning the transformation
3895 * for an element, or null if there is no transformation (in
3896 * which case the action is not applied).
3897 */
3898 public <U> void forEachValue(Fun<? super V, ? extends U> transformer,
3899 Action<U> action) {
3900 fjp.invoke(ForkJoinTasks.forEachValue
3901 (ConcurrentHashMapV8.this, transformer, action));
3902 }
3903
3904 /**
3905 * Returns a non-null result from applying the given search
3906 * function on each value, or null if none. Further element
3907 * processing is suppressed upon success. However, this method
3908 * does not return until other in-progress parallel
3909 * invocations of the search function also complete.
3910 *
3911 * @param searchFunction a function returning a non-null
3912 * result on success, else null
3913 * @return a non-null result from applying the given search
3914 * function on each value, or null if none
3915 *
3916 */
3917 public <U> U searchValues(Fun<? super V, ? extends U> searchFunction) {
3918 return fjp.invoke(ForkJoinTasks.searchValues
3919 (ConcurrentHashMapV8.this, searchFunction));
3920 }
3921
3922 /**
3923 * Returns the result of accumulating all values using the
3924 * given reducer to combine values, or null if none.
3925 *
3926 * @param reducer a commutative associative combining function
3927 * @return the result of accumulating all values
3928 */
3929 public V reduceValues(BiFun<? super V, ? super V, ? extends V> reducer) {
3930 return fjp.invoke(ForkJoinTasks.reduceValues
3931 (ConcurrentHashMapV8.this, reducer));
3932 }
3933
3934 /**
3935 * Returns the result of accumulating the given transformation
3936 * of all values using the given reducer to combine values, or
3937 * null if none.
3938 *
3939 * @param transformer a function returning the transformation
3940 * for an element, or null if there is no transformation (in
3941 * which case it is not combined).
3942 * @param reducer a commutative associative combining function
3943 * @return the result of accumulating the given transformation
3944 * of all values
3945 */
3946 public <U> U reduceValues(Fun<? super V, ? extends U> transformer,
3947 BiFun<? super U, ? super U, ? extends U> reducer) {
3948 return fjp.invoke(ForkJoinTasks.reduceValues
3949 (ConcurrentHashMapV8.this, transformer, reducer));
3950 }
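// Sketch (illustrative only; hypothetical "m" and "pool" as before): a
// mapped reduction whose transformer doubles as a filter under the class
// javadoc's "null means there is nothing there now" rule, yielding the
// largest value above 100, or null if none.
//
//   Long maxLarge = m.parallel(pool).reduceValues(
//       new Fun<Long,Long>() {
//           public Long apply(Long v) { return (v > 100L) ? v : null; }
//       },
//       new BiFun<Long,Long,Long>() {
//           public Long apply(Long a, Long b) { return (a >= b) ? a : b; }
//       });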
3951
3952 /**
3953 * Returns the result of accumulating the given transformation
3954 * of all values using the given reducer to combine values,
3955 * and the given basis as an identity value.
3956 *
3957 * @param transformer a function returning the transformation
3958 * for an element
3959 * @param basis the identity (initial default value) for the reduction
3960 * @param reducer a commutative associative combining function
3961 * @return the result of accumulating the given transformation
3962 * of all values
3963 */
3964 public double reduceValuesToDouble(ObjectToDouble<? super V> transformer,
3965 double basis,
3966 DoubleByDoubleToDouble reducer) {
3967 return fjp.invoke(ForkJoinTasks.reduceValuesToDouble
3968 (ConcurrentHashMapV8.this, transformer, basis, reducer));
3969 }
3970
3971 /**
3972 * Returns the result of accumulating the given transformation
3973 * of all values using the given reducer to combine values,
3974 * and the given basis as an identity value.
3975 *
3976 * @param transformer a function returning the transformation
3977 * for an element
3978 * @param basis the identity (initial default value) for the reduction
3979 * @param reducer a commutative associative combining function
3980 * @return the result of accumulating the given transformation
3981 * of all values
3982 */
3983 public long reduceValuesToLong(ObjectToLong<? super V> transformer,
3984 long basis,
3985 LongByLongToLong reducer) {
3986 return fjp.invoke(ForkJoinTasks.reduceValuesToLong
3987 (ConcurrentHashMapV8.this, transformer, basis, reducer));
3988 }
3989
3990 /**
3991 * Returns the result of accumulating the given transformation
3992 * of all values using the given reducer to combine values,
3993 * and the given basis as an identity value.
3994 *
3995 * @param transformer a function returning the transformation
3996 * for an element
3997 * @param basis the identity (initial default value) for the reduction
3998 * @param reducer a commutative associative combining function
3999 * @return the result of accumulating the given transformation
4000 * of all values
4001 */
4002 public int reduceValuesToInt(ObjectToInt<? super V> transformer,
4003 int basis,
4004 IntByIntToInt reducer) {
4005 return fjp.invoke(ForkJoinTasks.reduceValuesToInt
4006 (ConcurrentHashMapV8.this, transformer, basis, reducer));
4007 }
4008
4009 /**
4010 * Performs the given action for each entry.
4011 *
4012 * @param action the action
4013 */
4014 public void forEachEntry(Action<Map.Entry<K,V>> action) {
4015 fjp.invoke(ForkJoinTasks.forEachEntry
4016 (ConcurrentHashMapV8.this, action));
4017 }
4018
4019 /**
4020 * Performs the given action for each non-null transformation
4021 * of each entry.
4022 *
4023 * @param transformer a function returning the transformation
4024 * for an element, or null if there is no transformation (in
4025 * which case the action is not applied).
4026 * @param action the action
4027 */
4028 public <U> void forEachEntry(Fun<Map.Entry<K,V>, ? extends U> transformer,
4029 Action<U> action) {
4030 fjp.invoke(ForkJoinTasks.forEachEntry
4031 (ConcurrentHashMapV8.this, transformer, action));
4032 }
4033
4034 /**
4035 * Returns a non-null result from applying the given search
4036 * function on each entry, or null if none. Further element
4037 * processing is suppressed upon success. However, this method
4038 * does not return until other in-progress parallel
4039 * invocations of the search function also complete.
4040 *
4041 * @param searchFunction a function returning a non-null
4042 * result on success, else null
4043 * @return a non-null result from applying the given search
4044 * function on each entry, or null if none
4045 */
4046 public <U> U searchEntries(Fun<Map.Entry<K,V>, ? extends U> searchFunction) {
4047 return fjp.invoke(ForkJoinTasks.searchEntries
4048 (ConcurrentHashMapV8.this, searchFunction));
4049 }
4050
4051 /**
4052 * Returns the result of accumulating all entries using the
4053 * given reducer to combine values, or null if none.
4054 *
4055 * @param reducer a commutative associative combining function
4056 * @return the result of accumulating all entries
4057 */
4058 public Map.Entry<K,V> reduceEntries(BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4059 return fjp.invoke(ForkJoinTasks.reduceEntries
4060 (ConcurrentHashMapV8.this, reducer));
4061 }
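// Sketch (illustrative only; hypothetical "m" and "pool" as before) of the
// "key for the greatest value" use mentioned in the class javadoc; returns
// null for an empty map.
//
//   Map.Entry<String,Long> max = m.parallel(pool).reduceEntries(
//       new BiFun<Map.Entry<String,Long>, Map.Entry<String,Long>,
//                 Map.Entry<String,Long>>() {
//           public Map.Entry<String,Long> apply(Map.Entry<String,Long> a,
//                                               Map.Entry<String,Long> b) {
//               return (a.getValue() >= b.getValue()) ? a : b;
//           }
//       });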
4062
4063 /**
4064 * Returns the result of accumulating the given transformation
4065 * of all entries using the given reducer to combine values,
4066 * or null if none.
4067 *
4068 * @param transformer a function returning the transformation
4069 * for an element, or null if there is no transformation (in
4070 * which case it is not combined).
4071 * @param reducer a commutative associative combining function
4072 * @return the result of accumulating the given transformation
4073 * of all entries
4074 */
4075 public <U> U reduceEntries(Fun<Map.Entry<K,V>, ? extends U> transformer,
4076 BiFun<? super U, ? super U, ? extends U> reducer) {
4077 return fjp.invoke(ForkJoinTasks.reduceEntries
4078 (ConcurrentHashMapV8.this, transformer, reducer));
4079 }
4080
4081 /**
4082 * Returns the result of accumulating the given transformation
4083 * of all entries using the given reducer to combine values,
4084 * and the given basis as an identity value.
4085 *
4086 * @param transformer a function returning the transformation
4087 * for an element
4088 * @param basis the identity (initial default value) for the reduction
4089 * @param reducer a commutative associative combining function
4090 * @return the result of accumulating the given transformation
4091 * of all entries
4092 */
4093 public double reduceEntriesToDouble(ObjectToDouble<Map.Entry<K,V>> transformer,
4094 double basis,
4095 DoubleByDoubleToDouble reducer) {
4096 return fjp.invoke(ForkJoinTasks.reduceEntriesToDouble
4097 (ConcurrentHashMapV8.this, transformer, basis, reducer));
4098 }
4099
4100 /**
4101 * Returns the result of accumulating the given transformation
4102 * of all entries using the given reducer to combine values,
4103 * and the given basis as an identity value.
4104 *
4105 * @param transformer a function returning the transformation
4106 * for an element
4107 * @param basis the identity (initial default value) for the reduction
4108 * @param reducer a commutative associative combining function
4109 * @return the result of accumulating the given transformation
4110 * of all entries
4111 */
4112 public long reduceEntriesToLong(ObjectToLong<Map.Entry<K,V>> transformer,
4113 long basis,
4114 LongByLongToLong reducer) {
4115 return fjp.invoke(ForkJoinTasks.reduceEntriesToLong
4116 (ConcurrentHashMapV8.this, transformer, basis, reducer));
4117 }
4118
4119 /**
4120 * Returns the result of accumulating the given transformation
4121 * of all entries using the given reducer to combine values,
4122 * and the given basis as an identity value.
4123 *
4124 * @param transformer a function returning the transformation
4125 * for an element
4126 * @param basis the identity (initial default value) for the reduction
4127 * @param reducer a commutative associative combining function
4128 * @return the result of accumulating the given transformation
4129 * of all entries
4130 */
4131 public int reduceEntriesToInt(ObjectToInt<Map.Entry<K,V>> transformer,
4132 int basis,
4133 IntByIntToInt reducer) {
4134 return fjp.invoke(ForkJoinTasks.reduceEntriesToInt
4135 (ConcurrentHashMapV8.this, transformer, basis, reducer));
4136 }
4137 }
4138
4139 // ---------------------------------------------------------------------
4140
4141 /**
4142 * Predefined tasks for performing bulk parallel operations on
4143 * ConcurrentHashMaps. These tasks follow the forms and rules used
4144 * in class {@link Parallel}. Each method has the same name, but
4145 * returns a task rather than invoking it. These methods may be
4146 * useful in custom applications such as submitting a task without
4147 * waiting for completion, or combining with other tasks.
4148 */
4149 public static class ForkJoinTasks {
4150 private ForkJoinTasks() {}
4151
4152 /**
4153 * Returns a task that when invoked, performs the given
4154 * action for each (key, value)
4155 *
4156 * @param map the map
4157 * @param action the action
4158 * @return the task
4159 */
4160 public static <K,V> ForkJoinTask<Void> forEach
4161 (ConcurrentHashMapV8<K,V> map,
4162 BiAction<K,V> action) {
4163 if (action == null) throw new NullPointerException();
4164 return new ForEachMappingTask<K,V>(map, action);
4165 }
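// Sketch (illustrative only; hypothetical "m" and "pool" as before) of the
// submit-without-waiting use mentioned in the class javadoc:
//
//   ForkJoinTask<Void> task = ConcurrentHashMapV8.ForkJoinTasks.forEach(
//       m,
//       new BiAction<String,Long>() {
//           public void apply(String k, Long v) {
//               System.out.println(k + "=" + v);
//           }
//       });
//   pool.submit(task);
//   // ... other work ...
//   task.join();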
4166
4167 /**
4168 * Returns a task that when invoked, performs the given
4169 * action for each non-null transformation of each (key, value)
4170 *
4171 * @param map the map
4172 * @param transformer a function returning the transformation
4173 * for an element, or null if there is no transformation (in
4174 * which case the action is not applied).
4175 * @param action the action
4176 * @return the task
4177 */
4178 public static <K,V,U> ForkJoinTask<Void> forEach
4179 (ConcurrentHashMapV8<K,V> map,
4180 BiFun<? super K, ? super V, ? extends U> transformer,
4181 Action<U> action) {
4182 if (transformer == null || action == null)
4183 throw new NullPointerException();
4184 return new ForEachTransformedMappingTask<K,V,U>
4185 (map, transformer, action);
4186 }
4187
4188 /**
4189 * Returns a task that when invoked, returns a non-null
4190 * result from applying the given search function on each
4191 * (key, value), or null if none. Further element processing
4192 * is suppressed upon success. However, this method does not
4193 * return until other in-progress parallel invocations of the
4194 * search function also complete.
4195 *
4196 * @param map the map
4197 * @param searchFunction a function returning a non-null
4198 * result on success, else null
4199 * @return the task
4200 */
4201 public static <K,V,U> ForkJoinTask<U> search
4202 (ConcurrentHashMapV8<K,V> map,
4203 BiFun<? super K, ? super V, ? extends U> searchFunction) {
4204 if (searchFunction == null) throw new NullPointerException();
4205 return new SearchMappingsTask<K,V,U>
4206 (map, searchFunction,
4207 new AtomicReference<U>());
4208 }
4209
4210 /**
4211 * Returns a task that when invoked, returns the result of
4212 * accumulating the given transformation of all (key, value) pairs
4213 * using the given reducer to combine values, or null if none.
4214 *
4215 * @param map the map
4216 * @param transformer a function returning the transformation
4217 * for an element, or null if there is no transformation (in
4218 * which case it is not combined).
4219 * @param reducer a commutative associative combining function
4220 * @return the task
4221 */
4222 public static <K,V,U> ForkJoinTask<U> reduce
4223 (ConcurrentHashMapV8<K,V> map,
4224 BiFun<? super K, ? super V, ? extends U> transformer,
4225 BiFun<? super U, ? super U, ? extends U> reducer) {
4226 if (transformer == null || reducer == null)
4227 throw new NullPointerException();
4228 return new MapReduceMappingsTask<K,V,U>
4229 (map, transformer, reducer);
4230 }
4231
4232 /**
4233 * Returns a task that when invoked, returns the result of
4234 * accumulating the given transformation of all (key, value) pairs
4235 * using the given reducer to combine values, and the given
4236 * basis as an identity value.
4237 *
4238 * @param map the map
4239 * @param transformer a function returning the transformation
4240 * for an element
4241 * @param basis the identity (initial default value) for the reduction
4242 * @param reducer a commutative associative combining function
4243 * @return the task
4244 */
4245 public static <K,V> ForkJoinTask<Double> reduceToDouble
4246 (ConcurrentHashMapV8<K,V> map,
4247 ObjectByObjectToDouble<? super K, ? super V> transformer,
4248 double basis,
4249 DoubleByDoubleToDouble reducer) {
4250 if (transformer == null || reducer == null)
4251 throw new NullPointerException();
4252 return new MapReduceMappingsToDoubleTask<K,V>
4253 (map, transformer, basis, reducer);
4254 }
4255
4256 /**
4257 * Returns a task that when invoked, returns the result of
4258 * accumulating the given transformation of all (key, value) pairs
4259 * using the given reducer to combine values, and the given
4260 * basis as an identity value.
4261 *
4262 * @param map the map
4263 * @param transformer a function returning the transformation
4264 * for an element
4265 * @param basis the identity (initial default value) for the reduction
4266 * @param reducer a commutative associative combining function
4267 * @return the task
4268 */
4269 public static <K,V> ForkJoinTask<Long> reduceToLong
4270 (ConcurrentHashMapV8<K,V> map,
4271 ObjectByObjectToLong<? super K, ? super V> transformer,
4272 long basis,
4273 LongByLongToLong reducer) {
4274 if (transformer == null || reducer == null)
4275 throw new NullPointerException();
4276 return new MapReduceMappingsToLongTask<K,V>
4277 (map, transformer, basis, reducer);
4278 }
4279
4280 /**
4281 * Returns a task that when invoked, returns the result of
4282 * accumulating the given transformation of all (key, value) pairs
4283 * using the given reducer to combine values, and the given
4284 * basis as an identity value.
4285 *
4286 * @param transformer a function returning the transformation
4287 * for an element
4288 * @param basis the identity (initial default value) for the reduction
4289 * @param reducer a commutative associative combining function
4290 * @return the task
4291 */
4292 public static <K,V> ForkJoinTask<Integer> reduceToInt
4293 (ConcurrentHashMapV8<K,V> map,
4294 ObjectByObjectToInt<? super K, ? super V> transformer,
4295 int basis,
4296 IntByIntToInt reducer) {
4297 if (transformer == null || reducer == null)
4298 throw new NullPointerException();
4299 return new MapReduceMappingsToIntTask<K,V>
4300 (map, transformer, basis, reducer);
4301 }
4302
4303 /**
4304 * Returns a task that when invoked, performs the given action
4305 * for each key.
4306 *
4307 * @param map the map
4308 * @param action the action
4309 * @return the task
4310 */
4311 public static <K,V> ForkJoinTask<Void> forEachKey
4312 (ConcurrentHashMapV8<K,V> map,
4313 Action<K> action) {
4314 if (action == null) throw new NullPointerException();
4315 return new ForEachKeyTask<K,V>(map, action);
4316 }
4317
4318 /**
4319 * Returns a task that when invoked, performs the given action
4320 * for each non-null transformation of each key.
4321 *
4322 * @param map the map
4323 * @param transformer a function returning the transformation
4324 * for an element, or null if there is no transformation (in
4325 * which case the action is not applied).
4326 * @param action the action
4327 * @return the task
4328 */
4329 public static <K,V,U> ForkJoinTask<Void> forEachKey
4330 (ConcurrentHashMapV8<K,V> map,
4331 Fun<? super K, ? extends U> transformer,
4332 Action<U> action) {
4333 if (transformer == null || action == null)
4334 throw new NullPointerException();
4335 return new ForEachTransformedKeyTask<K,V,U>
4336 (map, transformer, action);
4337 }
4338
4339 /**
4340 * Returns a task that when invoked, returns a non-null result
4341 * from applying the given search function on each key, or
4342 * null if none. Further element processing is suppressed
4343 * upon success. However, this method does not return until
4344 * other in-progress parallel invocations of the search
4345 * function also complete.
4346 *
4347 * @param map the map
4348 * @param searchFunction a function returning a non-null
4349 * result on success, else null
4350 * @return the task
4351 */
4352 public static <K,V,U> ForkJoinTask<U> searchKeys
4353 (ConcurrentHashMapV8<K,V> map,
4354 Fun<? super K, ? extends U> searchFunction) {
4355 if (searchFunction == null) throw new NullPointerException();
4356 return new SearchKeysTask<K,V,U>
4357 (map, searchFunction,
4358 new AtomicReference<U>());
4359 }
4360
4361 /**
4362 * Returns a task that when invoked, returns the result of
4363 * accumulating all keys using the given reducer to combine
4364 * values, or null if none.
4365 *
4366 * @param map the map
4367 * @param reducer a commutative associative combining function
4368 * @return the task
4369 */
4370 public static <K,V> ForkJoinTask<K> reduceKeys
4371 (ConcurrentHashMapV8<K,V> map,
4372 BiFun<? super K, ? super K, ? extends K> reducer) {
4373 if (reducer == null) throw new NullPointerException();
4374 return new ReduceKeysTask<K,V>
4375 (map, reducer);
4376 }
4377
4378 /**
4379 * Returns a task that when invoked, returns the result of
4380 * accumulating the given transformation of all keys using the given
4381 * reducer to combine values, or null if none.
4382 *
4383 * @param map the map
4384 * @param transformer a function returning the transformation
4385 * for an element, or null if there is no transformation (in
4386 * which case it is not combined).
4387 * @param reducer a commutative associative combining function
4388 * @return the task
4389 */
4390 public static <K,V,U> ForkJoinTask<U> reduceKeys
4391 (ConcurrentHashMapV8<K,V> map,
4392 Fun<? super K, ? extends U> transformer,
4393 BiFun<? super U, ? super U, ? extends U> reducer) {
4394 if (transformer == null || reducer == null)
4395 throw new NullPointerException();
4396 return new MapReduceKeysTask<K,V,U>
4397 (map, transformer, reducer);
4398 }
4399
4400 /**
4401 * Returns a task that when invoked, returns the result of
4402 * accumulating the given transformation of all keys using the given
4403 * reducer to combine values, and the given basis as an
4404 * identity value.
4405 *
4406 * @param map the map
4407 * @param transformer a function returning the transformation
4408 * for an element
4409 * @param basis the identity (initial default value) for the reduction
4410 * @param reducer a commutative associative combining function
4411 * @return the task
4412 */
4413 public static <K,V> ForkJoinTask<Double> reduceKeysToDouble
4414 (ConcurrentHashMapV8<K,V> map,
4415 ObjectToDouble<? super K> transformer,
4416 double basis,
4417 DoubleByDoubleToDouble reducer) {
4418 if (transformer == null || reducer == null)
4419 throw new NullPointerException();
4420 return new MapReduceKeysToDoubleTask<K,V>
4421 (map, transformer, basis, reducer);
4422 }
4423
4424 /**
4425 * Returns a task that when invoked, returns the result of
4426 * accumulating the given transformation of all keys using the given
4427 * reducer to combine values, and the given basis as an
4428 * identity value.
4429 *
4430 * @param map the map
4431 * @param transformer a function returning the transformation
4432 * for an element
4433 * @param basis the identity (initial default value) for the reduction
4434 * @param reducer a commutative associative combining function
4435 * @return the task
4436 */
4437 public static <K,V> ForkJoinTask<Long> reduceKeysToLong
4438 (ConcurrentHashMapV8<K,V> map,
4439 ObjectToLong<? super K> transformer,
4440 long basis,
4441 LongByLongToLong reducer) {
4442 if (transformer == null || reducer == null)
4443 throw new NullPointerException();
4444 return new MapReduceKeysToLongTask<K,V>
4445 (map, transformer, basis, reducer);
4446 }
4447
4448 /**
4449 * Returns a task that when invoked, returns the result of
4450 * accumulating the given transformation of all keys using the given
4451 * reducer to combine values, and the given basis as an
4452 * identity value.
4453 *
4454 * @param map the map
4455 * @param transformer a function returning the transformation
4456 * for an element
4457 * @param basis the identity (initial default value) for the reduction
4458 * @param reducer a commutative associative combining function
4459 * @return the task
4460 */
4461 public static <K,V> ForkJoinTask<Integer> reduceKeysToInt
4462 (ConcurrentHashMapV8<K,V> map,
4463 ObjectToInt<? super K> transformer,
4464 int basis,
4465 IntByIntToInt reducer) {
4466 if (transformer == null || reducer == null)
4467 throw new NullPointerException();
4468 return new MapReduceKeysToIntTask<K,V>
4469 (map, transformer, basis, reducer);
4470 }
4471
4472 /**
4473 * Returns a task that when invoked, performs the given action
4474 * for each value.
4475 *
4476 * @param map the map
4477 * @param action the action
4478 */
4479 public static <K,V> ForkJoinTask<Void> forEachValue
4480 (ConcurrentHashMapV8<K,V> map,
4481 Action<V> action) {
4482 if (action == null) throw new NullPointerException();
4483 return new ForEachValueTask<K,V>(map, action);
4484 }
4485
4486 /**
4487 * Returns a task that when invoked, performs the given action
4488 * for each non-null transformation of each value.
4489 *
4490 * @param map the map
4491 * @param transformer a function returning the transformation
4492 * for an element, or null if there is no transformation (in
4493 * which case the action is not applied).
4494 * @param action the action
4495 */
4496 public static <K,V,U> ForkJoinTask<Void> forEachValue
4497 (ConcurrentHashMapV8<K,V> map,
4498 Fun<? super V, ? extends U> transformer,
4499 Action<U> action) {
4500 if (transformer == null || action == null)
4501 throw new NullPointerException();
4502 return new ForEachTransformedValueTask<K,V,U>
4503 (map, transformer, action);
4504 }
4505
4506 /**
4507 * Returns a task that when invoked, returns a non-null result
4508 * from applying the given search function on each value, or
4509 * null if none. Further element processing is suppressed
4510 * upon success. However, this method does not return until
4511 * other in-progress parallel invocations of the search
4512 * function also complete.
4513 *
4514 * @param map the map
4515 * @param searchFunction a function returning a non-null
4516 * result on success, else null
4517 * @return the task
4518 *
4519 */
4520 public static <K,V,U> ForkJoinTask<U> searchValues
4521 (ConcurrentHashMapV8<K,V> map,
4522 Fun<? super V, ? extends U> searchFunction) {
4523 if (searchFunction == null) throw new NullPointerException();
4524 return new SearchValuesTask<K,V,U>
4525 (map, searchFunction,
4526 new AtomicReference<U>());
4527 }
4528
4529 /**
4530 * Returns a task that when invoked, returns the result of
4531 * accumulating all values using the given reducer to combine
4532 * values, or null if none.
4533 *
4534 * @param map the map
4535 * @param reducer a commutative associative combining function
4536 * @return the task
4537 */
4538 public static <K,V> ForkJoinTask<V> reduceValues
4539 (ConcurrentHashMapV8<K,V> map,
4540 BiFun<? super V, ? super V, ? extends V> reducer) {
4541 if (reducer == null) throw new NullPointerException();
4542 return new ReduceValuesTask<K,V>
4543 (map, reducer);
4544 }
4545
4546 /**
4547 * Returns a task that when invoked, returns the result of
4548 * accumulating the given transformation of all values using the
4549 * given reducer to combine values, or null if none.
4550 *
4551 * @param map the map
4552 * @param transformer a function returning the transformation
4553 * for an element, or null if there is no transformation (in
4554 * which case it is not combined).
4555 * @param reducer a commutative associative combining function
4556 * @return the task
4557 */
4558 public static <K,V,U> ForkJoinTask<U> reduceValues
4559 (ConcurrentHashMapV8<K,V> map,
4560 Fun<? super V, ? extends U> transformer,
4561 BiFun<? super U, ? super U, ? extends U> reducer) {
4562 if (transformer == null || reducer == null)
4563 throw new NullPointerException();
4564 return new MapReduceValuesTask<K,V,U>
4565 (map, transformer, reducer);
4566 }
4567
4568 /**
4569 * Returns a task that when invoked, returns the result of
4570 * accumulating the given transformation of all values using the
4571 * given reducer to combine values, and the given basis as an
4572 * identity value.
4573 *
4574 * @param map the map
4575 * @param transformer a function returning the transformation
4576 * for an element
4577 * @param basis the identity (initial default value) for the reduction
4578 * @param reducer a commutative associative combining function
4579 * @return the task
4580 */
4581 public static <K,V> ForkJoinTask<Double> reduceValuesToDouble
4582 (ConcurrentHashMapV8<K,V> map,
4583 ObjectToDouble<? super V> transformer,
4584 double basis,
4585 DoubleByDoubleToDouble reducer) {
4586 if (transformer == null || reducer == null)
4587 throw new NullPointerException();
4588 return new MapReduceValuesToDoubleTask<K,V>
4589 (map, transformer, basis, reducer);
4590 }
4591
4592 /**
4593 * Returns a task that when invoked, returns the result of
4594 * accumulating the given transformation of all values using the
4595 * given reducer to combine values, and the given basis as an
4596 * identity value.
4597 *
4598 * @param map the map
4599 * @param transformer a function returning the transformation
4600 * for an element
4601 * @param basis the identity (initial default value) for the reduction
4602 * @param reducer a commutative associative combining function
4603 * @return the task
4604 */
4605 public static <K,V> ForkJoinTask<Long> reduceValuesToLong
4606 (ConcurrentHashMapV8<K,V> map,
4607 ObjectToLong<? super V> transformer,
4608 long basis,
4609 LongByLongToLong reducer) {
4610 if (transformer == null || reducer == null)
4611 throw new NullPointerException();
4612 return new MapReduceValuesToLongTask<K,V>
4613 (map, transformer, basis, reducer);
4614 }
4615
4616 /**
4617 * Returns a task that when invoked, returns the result of
4618 * accumulating the given transformation of all values using the
4619 * given reducer to combine values, and the given basis as an
4620 * identity value.
4621 *
4622 * @param map the map
4623 * @param transformer a function returning the transformation
4624 * for an element
4625 * @param basis the identity (initial default value) for the reduction
4626 * @param reducer a commutative associative combining function
4627 * @return the task
4628 */
4629 public static <K,V> ForkJoinTask<Integer> reduceValuesToInt
4630 (ConcurrentHashMapV8<K,V> map,
4631 ObjectToInt<? super V> transformer,
4632 int basis,
4633 IntByIntToInt reducer) {
4634 if (transformer == null || reducer == null)
4635 throw new NullPointerException();
4636 return new MapReduceValuesToIntTask<K,V>
4637 (map, transformer, basis, reducer);
4638 }
4639
4640 /**
4641 * Returns a task that when invoked, performs the given action
4642 * for each entry.
4643 *
4644 * @param map the map
4645 * @param action the action
4646 */
4647 public static <K,V> ForkJoinTask<Void> forEachEntry
4648 (ConcurrentHashMapV8<K,V> map,
4649 Action<Map.Entry<K,V>> action) {
4650 if (action == null) throw new NullPointerException();
4651 return new ForEachEntryTask<K,V>(map, action);
4652 }
4653
4654 /**
4655 * Returns a task that when invoked, performs the given action
4656 * for each non-null transformation of each entry.
4657 *
4658 * @param map the map
4659 * @param transformer a function returning the transformation
4660 * for an element, or null if there is no transformation (in
4661 * which case the action is not applied).
4662 * @param action the action
4663 */
4664 public static <K,V,U> ForkJoinTask<Void> forEachEntry
4665 (ConcurrentHashMapV8<K,V> map,
4666 Fun<Map.Entry<K,V>, ? extends U> transformer,
4667 Action<U> action) {
4668 if (transformer == null || action == null)
4669 throw new NullPointerException();
4670 return new ForEachTransformedEntryTask<K,V,U>
4671 (map, transformer, action);
4672 }
4673
4674 /**
4675 * Returns a task that when invoked, returns a non-null result
4676 * from applying the given search function on each entry, or
4677 * null if none. Further element processing is suppressed
4678 * upon success. However, this method does not return until
4679 * other in-progress parallel invocations of the search
4680 * function also complete.
4681 *
4682 * @param map the map
4683 * @param searchFunction a function returning a non-null
4684 * result on success, else null
4685 * @return the task
4686 *
4687 */
4688 public static <K,V,U> ForkJoinTask<U> searchEntries
4689 (ConcurrentHashMapV8<K,V> map,
4690 Fun<Map.Entry<K,V>, ? extends U> searchFunction) {
4691 if (searchFunction == null) throw new NullPointerException();
4692 return new SearchEntriesTask<K,V,U>
4693 (map, searchFunction,
4694 new AtomicReference<U>());
4695 }
4696
4697 /**
4698 * Returns a task that when invoked, returns the result of
4699 * accumulating all entries using the given reducer to combine
4700 * values, or null if none.
4701 *
4702 * @param map the map
4703 * @param reducer a commutative associative combining function
4704 * @return the task
4705 */
4706 public static <K,V> ForkJoinTask<Map.Entry<K,V>> reduceEntries
4707 (ConcurrentHashMapV8<K,V> map,
4708 BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4709 if (reducer == null) throw new NullPointerException();
4710 return new ReduceEntriesTask<K,V>
4711 (map, reducer);
4712 }
4713
4714 /**
4715 * Returns a task that when invoked, returns the result of
4716 * accumulating the given transformation of all entries using the
4717 * given reducer to combine values, or null if none.
4718 *
4719 * @param map the map
4720 * @param transformer a function returning the transformation
4721 * for an element, or null if there is no transformation (in
4722 * which case it is not combined).
4723 * @param reducer a commutative associative combining function
4724 * @return the task
4725 */
4726 public static <K,V,U> ForkJoinTask<U> reduceEntries
4727 (ConcurrentHashMapV8<K,V> map,
4728 Fun<Map.Entry<K,V>, ? extends U> transformer,
4729 BiFun<? super U, ? super U, ? extends U> reducer) {
4730 if (transformer == null || reducer == null)
4731 throw new NullPointerException();
4732 return new MapReduceEntriesTask<K,V,U>
4733 (map, transformer, reducer);
4734 }
4735
4736 /**
4737 * Returns a task that when invoked, returns the result of
4738 * accumulating the given transformation of all entries using the
4739 * given reducer to combine values, and the given basis as an
4740 * identity value.
4741 *
4742 * @param map the map
4743 * @param transformer a function returning the transformation
4744 * for an element
4745 * @param basis the identity (initial default value) for the reduction
4746 * @param reducer a commutative associative combining function
4747 * @return the task
4748 */
4749 public static <K,V> ForkJoinTask<Double> reduceEntriesToDouble
4750 (ConcurrentHashMapV8<K,V> map,
4751 ObjectToDouble<Map.Entry<K,V>> transformer,
4752 double basis,
4753 DoubleByDoubleToDouble reducer) {
4754 if (transformer == null || reducer == null)
4755 throw new NullPointerException();
4756 return new MapReduceEntriesToDoubleTask<K,V>
4757 (map, transformer, basis, reducer);
4758 }
4759
4760 /**
4761 * Returns a task that when invoked, returns the result of
4762 * accumulating the given transformation of all entries using the
4763 * given reducer to combine values, and the given basis as an
4764 * identity value.
4765 *
4766 * @param map the map
4767 * @param transformer a function returning the transformation
4768 * for an element
4769 * @param basis the identity (initial default value) for the reduction
4770 * @param reducer a commutative associative combining function
4771 * @return the task
4772 */
4773 public static <K,V> ForkJoinTask<Long> reduceEntriesToLong
4774 (ConcurrentHashMapV8<K,V> map,
4775 ObjectToLong<Map.Entry<K,V>> transformer,
4776 long basis,
4777 LongByLongToLong reducer) {
4778 if (transformer == null || reducer == null)
4779 throw new NullPointerException();
4780 return new MapReduceEntriesToLongTask<K,V>
4781 (map, transformer, basis, reducer);
4782 }
4783
4784 /**
4785 * Returns a task that when invoked, returns the result of
4786 * accumulating the given transformation of all entries using the
4787 * given reducer to combine values, and the given basis as an
4788 * identity value.
4789 *
4790 * @param map the map
4791 * @param transformer a function returning the transformation
4792 * for an element
4793 * @param basis the identity (initial default value) for the reduction
4794 * @param reducer a commutative associative combining function
4795 * @return the task
4796 */
4797 public static <K,V> ForkJoinTask<Integer> reduceEntriesToInt
4798 (ConcurrentHashMapV8<K,V> map,
4799 ObjectToInt<Map.Entry<K,V>> transformer,
4800 int basis,
4801 IntByIntToInt reducer) {
4802 if (transformer == null || reducer == null)
4803 throw new NullPointerException();
4804 return new MapReduceEntriesToIntTask<K,V>
4805 (map, transformer, basis, reducer);
4806 }
4807 }
4808
4809 // -------------------------------------------------------
4810
4811 /**
4812 * Base for FJ tasks for bulk operations. This adds a variant of
4813 * CountedCompleters and some split and merge bookkeeping to
4814 * iterator functionality. The forEach and reduce methods are
4815 * similar to those illustrated in CountedCompleter documentation,
4816 * except that bottom-up reduction completions perform them within
4817 * their compute methods. The search methods are like forEach
4818 * except they continually poll for success and exit early. Also,
4819 * exceptions are handled in a simpler manner, by just trying to
4820 * complete the root task exceptionally.
4821 */
4822 static abstract class BulkTask<K,V,R> extends Traverser<K,V,R> {
4823 final BulkTask<K,V,?> parent; // completion target
4824 int batch; // split control
4825 int pending; // completion control
4826
4827 /** Constructor for root tasks */
4828 BulkTask(ConcurrentHashMapV8<K,V> map) {
4829 super(map);
4830 this.parent = null;
4831 this.batch = -1; // force call to batch() on execution
4832 }
4833
4834 /** Constructor for subtasks */
4835 BulkTask(BulkTask<K,V,?> parent, int batch, boolean split) {
4836 super(parent, split);
4837 this.parent = parent;
4838 this.batch = batch;
4839 }
4840
4841 // FJ methods
4842
4843 /**
4844 * Propagates completion. Note that all reduce actions
4845 * bypass this method to combine while completing.
4846 */
4847 final void tryComplete() {
4848 BulkTask<K,V,?> a = this, s = a;
4849 for (int c;;) {
4850 if ((c = a.pending) == 0) {
4851 if ((a = (s = a).parent) == null) {
4852 s.quietlyComplete();
4853 break;
4854 }
4855 }
4856 else if (U.compareAndSwapInt(a, PENDING, c, c - 1))
4857 break;
4858 }
4859 }
4860
4861 /**
4862 * Forces root task to throw exception unless already complete.
4863 */
4864 final void tryAbortComputation(Throwable ex) {
4865 for (BulkTask<K,V,?> a = this;;) {
4866 BulkTask<K,V,?> p = a.parent;
4867 if (p == null) {
4868 a.completeExceptionally(ex);
4869 break;
4870 }
4871 a = p;
4872 }
4873 }
4874
4875 public final boolean exec() {
4876 try {
4877 compute();
4878 }
4879 catch (Throwable ex) {
4880 tryAbortComputation(ex);
4881 }
4882 return false;
4883 }
4884
4885 public abstract void compute();
4886
4887 // utilities
4888
4889 /** CompareAndSet pending count */
4890 final boolean casPending(int cmp, int val) {
4891 return U.compareAndSwapInt(this, PENDING, cmp, val);
4892 }
4893
4894 /**
4895 * Returns approx exp2 of the number of times (minus one) to
4896 * split task by two before executing leaf action. This value
4897 * is faster to compute and more convenient to use as a guide
4898 * to splitting than is the depth, since it is used while
4899 * dividing by two anyway.
4900 */
4901 final int batch() {
4902 int b = batch;
4903 if (b < 0) {
4904 long n = map.counter.sum();
4905 int sp = getPool().getParallelism() << 3; // slack of 8
4906 b = batch = (n <= 0L) ? 0 : (n < (long)sp) ? (int)n : sp;
4907 }
4908 return b;
4909 }
4910
4911 /**
4912 * Error message for hoisted null checks of functions
4913 */
4914 static final String NullFunctionMessage =
4915 "Unexpected null function";
4916
4917 /**
4918 * Returns exportable snapshot entry.
4919 */
4920 static <K,V> AbstractMap.SimpleEntry<K,V> entryFor(K k, V v) {
4921 return new AbstractMap.SimpleEntry<K,V>(k, v);
4922 }
4923
4924 // Unsafe mechanics
4925 private static final sun.misc.Unsafe U;
4926 private static final long PENDING;
4927 static {
4928 try {
4929 U = sun.misc.Unsafe.getUnsafe();
4930 PENDING = U.objectFieldOffset
4931 (BulkTask.class.getDeclaredField("pending"));
4932 } catch (Exception e) {
4933 throw new Error(e);
4934 }
4935 }
4936 }
4937
4938 /*
4939 * Task classes. Coded in a regular but ugly format/style to
4940 * simplify checks that each variant differs in the right way from
4941 * others.
4942 */
4943
4944 static final class ForEachKeyTask<K,V>
4945 extends BulkTask<K,V,Void> {
4946 final Action<K> action;
4947 ForEachKeyTask
4948 (ConcurrentHashMapV8<K,V> m,
4949 Action<K> action) {
4950 super(m);
4951 this.action = action;
4952 }
4953 ForEachKeyTask
4954 (BulkTask<K,V,?> p, int b, boolean split,
4955 Action<K> action) {
4956 super(p, b, split);
4957 this.action = action;
4958 }
4959 public final void compute() {
4960 final Action<K> action = this.action;
4961 if (action == null)
4962 throw new Error(NullFunctionMessage);
4963 int b = batch(), c;
4964 while (b > 1 && baseIndex != baseLimit) {
4965 do {} while (!casPending(c = pending, c+1));
4966 new ForEachKeyTask<K,V>(this, b >>>= 1, true, action).fork();
4967 }
4968 while (advance() != null)
4969 action.apply((K)nextKey);
4970 tryComplete();
4971 }
4972 }
4973
4974 static final class ForEachValueTask<K,V>
4975 extends BulkTask<K,V,Void> {
4976 final Action<V> action;
4977 ForEachValueTask
4978 (ConcurrentHashMapV8<K,V> m,
4979 Action<V> action) {
4980 super(m);
4981 this.action = action;
4982 }
4983 ForEachValueTask
4984 (BulkTask<K,V,?> p, int b, boolean split,
4985 Action<V> action) {
4986 super(p, b, split);
4987 this.action = action;
4988 }
4989 public final void compute() {
4990 final Action<V> action = this.action;
4991 if (action == null)
4992 throw new Error(NullFunctionMessage);
4993 int b = batch(), c;
4994 while (b > 1 && baseIndex != baseLimit) {
4995 do {} while (!casPending(c = pending, c+1));
4996 new ForEachValueTask<K,V>(this, b >>>= 1, true, action).fork();
4997 }
4998 Object v;
4999 while ((v = advance()) != null)
5000 action.apply((V)v);
5001 tryComplete();
5002 }
5003 }
5004
5005 static final class ForEachEntryTask<K,V>
5006 extends BulkTask<K,V,Void> {
5007 final Action<Entry<K,V>> action;
5008 ForEachEntryTask
5009 (ConcurrentHashMapV8<K,V> m,
5010 Action<Entry<K,V>> action) {
5011 super(m);
5012 this.action = action;
5013 }
5014 ForEachEntryTask
5015 (BulkTask<K,V,?> p, int b, boolean split,
5016 Action<Entry<K,V>> action) {
5017 super(p, b, split);
5018 this.action = action;
5019 }
5020 public final void compute() {
5021 final Action<Entry<K,V>> action = this.action;
5022 if (action == null)
5023 throw new Error(NullFunctionMessage);
5024 int b = batch(), c;
5025 while (b > 1 && baseIndex != baseLimit) {
5026 do {} while (!casPending(c = pending, c+1));
5027 new ForEachEntryTask<K,V>(this, b >>>= 1, true, action).fork();
5028 }
5029 Object v;
5030 while ((v = advance()) != null)
5031 action.apply(entryFor((K)nextKey, (V)v));
5032 tryComplete();
5033 }
5034 }
5035
5036 static final class ForEachMappingTask<K,V>
5037 extends BulkTask<K,V,Void> {
5038 final BiAction<K,V> action;
5039 ForEachMappingTask
5040 (ConcurrentHashMapV8<K,V> m,
5041 BiAction<K,V> action) {
5042 super(m);
5043 this.action = action;
5044 }
5045 ForEachMappingTask
5046 (BulkTask<K,V,?> p, int b, boolean split,
5047 BiAction<K,V> action) {
5048 super(p, b, split);
5049 this.action = action;
5050 }
5051
5052 public final void compute() {
5053 final BiAction<K,V> action = this.action;
5054 if (action == null)
5055 throw new Error(NullFunctionMessage);
5056 int b = batch(), c;
5057 while (b > 1 && baseIndex != baseLimit) {
5058 do {} while (!casPending(c = pending, c+1));
5059 new ForEachMappingTask<K,V>(this, b >>>= 1, true,
5060 action).fork();
5061 }
5062 Object v;
5063 while ((v = advance()) != null)
5064 action.apply((K)nextKey, (V)v);
5065 tryComplete();
5066 }
5067 }
5068
5069 static final class ForEachTransformedKeyTask<K,V,U>
5070 extends BulkTask<K,V,Void> {
5071 final Fun<? super K, ? extends U> transformer;
5072 final Action<U> action;
5073 ForEachTransformedKeyTask
5074 (ConcurrentHashMapV8<K,V> m,
5075 Fun<? super K, ? extends U> transformer,
5076 Action<U> action) {
5077 super(m);
5078 this.transformer = transformer;
5079 this.action = action;
5081 }
5082 ForEachTransformedKeyTask
5083 (BulkTask<K,V,?> p, int b, boolean split,
5084 Fun<? super K, ? extends U> transformer,
5085 Action<U> action) {
5086 super(p, b, split);
5087 this.transformer = transformer;
5088 this.action = action;
5089 }
5090 public final void compute() {
5091 final Fun<? super K, ? extends U> transformer =
5092 this.transformer;
5093 final Action<U> action = this.action;
5094 if (transformer == null || action == null)
5095 throw new Error(NullFunctionMessage);
5096 int b = batch(), c;
5097 while (b > 1 && baseIndex != baseLimit) {
5098 do {} while (!casPending(c = pending, c+1));
5099 new ForEachTransformedKeyTask<K,V,U>
5100 (this, b >>>= 1, true, transformer, action).fork();
5101 }
5102 U u;
5103 while (advance() != null) {
5104 if ((u = transformer.apply((K)nextKey)) != null)
5105 action.apply(u);
5106 }
5107 tryComplete();
5108 }
5109 }
5110
5111 static final class ForEachTransformedValueTask<K,V,U>
5112 extends BulkTask<K,V,Void> {
5113 final Fun<? super V, ? extends U> transformer;
5114 final Action<U> action;
5115 ForEachTransformedValueTask
5116 (ConcurrentHashMapV8<K,V> m,
5117 Fun<? super V, ? extends U> transformer,
5118 Action<U> action) {
5119 super(m);
5120 this.transformer = transformer;
5121 this.action = action;
5123 }
5124 ForEachTransformedValueTask
5125 (BulkTask<K,V,?> p, int b, boolean split,
5126 Fun<? super V, ? extends U> transformer,
5127 Action<U> action) {
5128 super(p, b, split);
5129 this.transformer = transformer;
5130 this.action = action;
5131 }
5132 public final void compute() {
5133 final Fun<? super V, ? extends U> transformer =
5134 this.transformer;
5135 final Action<U> action = this.action;
5136 if (transformer == null || action == null)
5137 throw new Error(NullFunctionMessage);
5138 int b = batch(), c;
5139 while (b > 1 && baseIndex != baseLimit) {
5140 do {} while (!casPending(c = pending, c+1));
5141 new ForEachTransformedValueTask<K,V,U>
5142 (this, b >>>= 1, true, transformer, action).fork();
5143 }
5144 Object v; U u;
5145 while ((v = advance()) != null) {
5146 if ((u = transformer.apply((V)v)) != null)
5147 action.apply(u);
5148 }
5149 tryComplete();
5150 }
5151 }
5152
5153 static final class ForEachTransformedEntryTask<K,V,U>
5154 extends BulkTask<K,V,Void> {
5155 final Fun<Map.Entry<K,V>, ? extends U> transformer;
5156 final Action<U> action;
5157 ForEachTransformedEntryTask
5158 (ConcurrentHashMapV8<K,V> m,
5159 Fun<Map.Entry<K,V>, ? extends U> transformer,
5160 Action<U> action) {
5161 super(m);
5162 this.transformer = transformer;
5163 this.action = action;
5165 }
5166 ForEachTransformedEntryTask
5167 (BulkTask<K,V,?> p, int b, boolean split,
5168 Fun<Map.Entry<K,V>, ? extends U> transformer,
5169 Action<U> action) {
5170 super(p, b, split);
5171 this.transformer = transformer;
5172 this.action = action;
5173 }
5174 public final void compute() {
5175 final Fun<Map.Entry<K,V>, ? extends U> transformer =
5176 this.transformer;
5177 final Action<U> action = this.action;
5178 if (transformer == null || action == null)
5179 throw new Error(NullFunctionMessage);
5180 int b = batch(), c;
5181 while (b > 1 && baseIndex != baseLimit) {
5182 do {} while (!casPending(c = pending, c+1));
5183 new ForEachTransformedEntryTask<K,V,U>
5184 (this, b >>>= 1, true, transformer, action).fork();
5185 }
5186 Object v; U u;
5187 while ((v = advance()) != null) {
5188 if ((u = transformer.apply(entryFor((K)nextKey, (V)v))) != null)
5189 action.apply(u);
5190 }
5191 tryComplete();
5192 }
5193 }
5194
5195 static final class ForEachTransformedMappingTask<K,V,U>
5196 extends BulkTask<K,V,Void> {
5197 final BiFun<? super K, ? super V, ? extends U> transformer;
5198 final Action<U> action;
5199 ForEachTransformedMappingTask
5200 (ConcurrentHashMapV8<K,V> m,
5201 BiFun<? super K, ? super V, ? extends U> transformer,
5202 Action<U> action) {
5203 super(m);
5204 this.transformer = transformer;
5205 this.action = action;
5207 }
5208 ForEachTransformedMappingTask
5209 (BulkTask<K,V,?> p, int b, boolean split,
5210 BiFun<? super K, ? super V, ? extends U> transformer,
5211 Action<U> action) {
5212 super(p, b, split);
5213 this.transformer = transformer;
5214 this.action = action;
5215 }
5216 public final void compute() {
5217 final BiFun<? super K, ? super V, ? extends U> transformer =
5218 this.transformer;
5219 final Action<U> action = this.action;
5220 if (transformer == null || action == null)
5221 throw new Error(NullFunctionMessage);
5222 int b = batch(), c;
5223 while (b > 1 && baseIndex != baseLimit) {
5224 do {} while (!casPending(c = pending, c+1));
5225 new ForEachTransformedMappingTask<K,V,U>
5226 (this, b >>>= 1, true, transformer, action).fork();
5227 }
5228 Object v; U u;
5229 while ((v = advance()) != null) {
5230 if ((u = transformer.apply((K)nextKey, (V)v)) != null)
5231 action.apply(u);
5232 }
5233 tryComplete();
5234 }
5235 }
5236
5237 static final class SearchKeysTask<K,V,U>
5238 extends BulkTask<K,V,U> {
5239 final Fun<? super K, ? extends U> searchFunction;
5240 final AtomicReference<U> result;
5241 SearchKeysTask
5242 (ConcurrentHashMapV8<K,V> m,
5243 Fun<? super K, ? extends U> searchFunction,
5244 AtomicReference<U> result) {
5245 super(m);
5246 this.searchFunction = searchFunction; this.result = result;
5247 }
5248 SearchKeysTask
5249 (BulkTask<K,V,?> p, int b, boolean split,
5250 Fun<? super K, ? extends U> searchFunction,
5251 AtomicReference<U> result) {
5252 super(p, b, split);
5253 this.searchFunction = searchFunction; this.result = result;
5254 }
5255 public final void compute() {
5256 AtomicReference<U> result = this.result;
5257 final Fun<? super K, ? extends U> searchFunction =
5258 this.searchFunction;
5259 if (searchFunction == null || result == null)
5260 throw new Error(NullFunctionMessage);
5261 int b = batch(), c;
5262 while (b > 1 && baseIndex != baseLimit && result.get() == null) {
5263 do {} while (!casPending(c = pending, c+1));
5264 new SearchKeysTask<K,V,U>(this, b >>>= 1, true,
5265 searchFunction, result).fork();
5266 }
5267 U u;
5268 while (result.get() == null && advance() != null) {
5269 if ((u = searchFunction.apply((K)nextKey)) != null) {
5270 result.compareAndSet(null, u);
5271 break;
5272 }
5273 }
5274 tryComplete();
5275 }
5276 public final U getRawResult() { return result.get(); }
5277 }
5278
5279 static final class SearchValuesTask<K,V,U>
5280 extends BulkTask<K,V,U> {
5281 final Fun<? super V, ? extends U> searchFunction;
5282 final AtomicReference<U> result;
5283 SearchValuesTask
5284 (ConcurrentHashMapV8<K,V> m,
5285 Fun<? super V, ? extends U> searchFunction,
5286 AtomicReference<U> result) {
5287 super(m);
5288 this.searchFunction = searchFunction; this.result = result;
5289 }
5290 SearchValuesTask
5291 (BulkTask<K,V,?> p, int b, boolean split,
5292 Fun<? super V, ? extends U> searchFunction,
5293 AtomicReference<U> result) {
5294 super(p, b, split);
5295 this.searchFunction = searchFunction; this.result = result;
5296 }
5297 public final void compute() {
5298 AtomicReference<U> result = this.result;
5299 final Fun<? super V, ? extends U> searchFunction =
5300 this.searchFunction;
5301 if (searchFunction == null || result == null)
5302 throw new Error(NullFunctionMessage);
5303 int b = batch(), c;
5304 while (b > 1 && baseIndex != baseLimit && result.get() == null) {
5305 do {} while (!casPending(c = pending, c+1));
5306 new SearchValuesTask<K,V,U>(this, b >>>= 1, true,
5307 searchFunction, result).fork();
5308 }
5309 Object v; U u;
5310 while (result.get() == null && (v = advance()) != null) {
5311 if ((u = searchFunction.apply((V)v)) != null) {
5312 result.compareAndSet(null, u);
5313 break;
5314 }
5315 }
5316 tryComplete();
5317 }
5318 public final U getRawResult() { return result.get(); }
5319 }
5320
5321 static final class SearchEntriesTask<K,V,U>
5322 extends BulkTask<K,V,U> {
5323 final Fun<Entry<K,V>, ? extends U> searchFunction;
5324 final AtomicReference<U> result;
5325 SearchEntriesTask
5326 (ConcurrentHashMapV8<K,V> m,
5327 Fun<Entry<K,V>, ? extends U> searchFunction,
5328 AtomicReference<U> result) {
5329 super(m);
5330 this.searchFunction = searchFunction; this.result = result;
5331 }
5332 SearchEntriesTask
5333 (BulkTask<K,V,?> p, int b, boolean split,
5334 Fun<Entry<K,V>, ? extends U> searchFunction,
5335 AtomicReference<U> result) {
5336 super(p, b, split);
5337 this.searchFunction = searchFunction; this.result = result;
5338 }
5339 public final void compute() {
5340 AtomicReference<U> result = this.result;
5341 final Fun<Entry<K,V>, ? extends U> searchFunction =
5342 this.searchFunction;
5343 if (searchFunction == null || result == null)
5344 throw new Error(NullFunctionMessage);
5345 int b = batch(), c;
5346 while (b > 1 && baseIndex != baseLimit && result.get() == null) {
5347 do {} while (!casPending(c = pending, c+1));
5348 new SearchEntriesTask<K,V,U>(this, b >>>= 1, true,
5349 searchFunction, result).fork();
5350 }
5351 Object v; U u;
5352 while (result.get() == null && (v = advance()) != null) {
5353 if ((u = searchFunction.apply(entryFor((K)nextKey, (V)v))) != null) {
5354 result.compareAndSet(null, u);
5355 break;
5356 }
5357 }
5358 tryComplete();
5359 }
5360 public final U getRawResult() { return result.get(); }
5361 }
5362
5363 static final class SearchMappingsTask<K,V,U>
5364 extends BulkTask<K,V,U> {
5365 final BiFun<? super K, ? super V, ? extends U> searchFunction;
5366 final AtomicReference<U> result;
5367 SearchMappingsTask
5368 (ConcurrentHashMapV8<K,V> m,
5369 BiFun<? super K, ? super V, ? extends U> searchFunction,
5370 AtomicReference<U> result) {
5371 super(m);
5372 this.searchFunction = searchFunction; this.result = result;
5373 }
5374 SearchMappingsTask
5375 (BulkTask<K,V,?> p, int b, boolean split,
5376 BiFun<? super K, ? super V, ? extends U> searchFunction,
5377 AtomicReference<U> result) {
5378 super(p, b, split);
5379 this.searchFunction = searchFunction; this.result = result;
5380 }
5381 public final void compute() {
5382 AtomicReference<U> result = this.result;
5383 final BiFun<? super K, ? super V, ? extends U> searchFunction =
5384 this.searchFunction;
5385 if (searchFunction == null || result == null)
5386 throw new Error(NullFunctionMessage);
5387 int b = batch(), c;
5388 while (b > 1 && baseIndex != baseLimit && result.get() == null) {
5389 do {} while (!casPending(c = pending, c+1));
5390 new SearchMappingsTask<K,V,U>(this, b >>>= 1, true,
5391 searchFunction, result).fork();
5392 }
5393 Object v; U u;
5394 while (result.get() == null && (v = advance()) != null) {
5395 if ((u = searchFunction.apply((K)nextKey, (V)v)) != null) {
5396 result.compareAndSet(null, u);
5397 break;
5398 }
5399 }
5400 tryComplete();
5401 }
5402 public final U getRawResult() { return result.get(); }
5403 }
5404
5405 static final class ReduceKeysTask<K,V>
5406 extends BulkTask<K,V,K> {
5407 final BiFun<? super K, ? super K, ? extends K> reducer;
5408 K result;
5409 ReduceKeysTask<K,V> sibling;
5410 ReduceKeysTask
5411 (ConcurrentHashMapV8<K,V> m,
5412 BiFun<? super K, ? super K, ? extends K> reducer) {
5413 super(m);
5414 this.reducer = reducer;
5415 }
5416 ReduceKeysTask
5417 (BulkTask<K,V,?> p, int b, boolean split,
5418 BiFun<? super K, ? super K, ? extends K> reducer) {
5419 super(p, b, split);
5420 this.reducer = reducer;
5421 }
5422
5423 public final void compute() {
5424 ReduceKeysTask<K,V> t = this;
5425 final BiFun<? super K, ? super K, ? extends K> reducer =
5426 this.reducer;
5427 if (reducer == null)
5428 throw new Error(NullFunctionMessage);
5429 int b = batch();
5430 while (b > 1 && t.baseIndex != t.baseLimit) {
5431 b >>>= 1;
5432 t.pending = 1;
5433 ReduceKeysTask<K,V> rt =
5434 new ReduceKeysTask<K,V>
5435 (t, b, true, reducer);
5436 t = new ReduceKeysTask<K,V>
5437 (t, b, false, reducer);
5438 t.sibling = rt;
5439 rt.sibling = t;
5440 rt.fork();
5441 }
5442 K r = null;
5443 while (t.advance() != null) {
5444 K u = (K)t.nextKey;
5445 r = (r == null) ? u : reducer.apply(r, u);
5446 }
5447 t.result = r;
5448 for (;;) {
5449 int c; BulkTask<K,V,?> par; ReduceKeysTask<K,V> s, p; K u;
5450 if ((par = t.parent) == null ||
5451 !(par instanceof ReduceKeysTask)) {
5452 t.quietlyComplete();
5453 break;
5454 }
5455 else if ((c = (p = (ReduceKeysTask<K,V>)par).pending) == 0) {
5456 if ((s = t.sibling) != null && (u = s.result) != null)
5457 r = (r == null) ? u : reducer.apply(r, u);
5458 (t = p).result = r;
5459 }
5460 else if (p.casPending(c, 0))
5461 break;
5462 }
5463 }
5464 public final K getRawResult() { return result; }
5465 }
5466
5467 static final class ReduceValuesTask<K,V>
5468 extends BulkTask<K,V,V> {
5469 final BiFun<? super V, ? super V, ? extends V> reducer;
5470 V result;
5471 ReduceValuesTask<K,V> sibling;
5472 ReduceValuesTask
5473 (ConcurrentHashMapV8<K,V> m,
5474 BiFun<? super V, ? super V, ? extends V> reducer) {
5475 super(m);
5476 this.reducer = reducer;
5477 }
5478 ReduceValuesTask
5479 (BulkTask<K,V,?> p, int b, boolean split,
5480 BiFun<? super V, ? super V, ? extends V> reducer) {
5481 super(p, b, split);
5482 this.reducer = reducer;
5483 }
5484
5485 public final void compute() {
5486 ReduceValuesTask<K,V> t = this;
5487 final BiFun<? super V, ? super V, ? extends V> reducer =
5488 this.reducer;
5489 if (reducer == null)
5490 throw new Error(NullFunctionMessage);
5491 int b = batch();
5492 while (b > 1 && t.baseIndex != t.baseLimit) {
5493 b >>>= 1;
5494 t.pending = 1;
5495 ReduceValuesTask<K,V> rt =
5496 new ReduceValuesTask<K,V>
5497 (t, b, true, reducer);
5498 t = new ReduceValuesTask<K,V>
5499 (t, b, false, reducer);
5500 t.sibling = rt;
5501 rt.sibling = t;
5502 rt.fork();
5503 }
5504 V r = null;
5505 Object v;
5506 while ((v = t.advance()) != null) {
5507 V u = (V)v;
5508 r = (r == null) ? u : reducer.apply(r, u);
5509 }
5510 t.result = r;
5511 for (;;) {
5512 int c; BulkTask<K,V,?> par; ReduceValuesTask<K,V> s, p; V u;
5513 if ((par = t.parent) == null ||
5514 !(par instanceof ReduceValuesTask)) {
5515 t.quietlyComplete();
5516 break;
5517 }
5518 else if ((c = (p = (ReduceValuesTask<K,V>)par).pending) == 0) {
5519 if ((s = t.sibling) != null && (u = s.result) != null)
5520 r = (r == null) ? u : reducer.apply(r, u);
5521 (t = p).result = r;
5522 }
5523 else if (p.casPending(c, 0))
5524 break;
5525 }
5526 }
5527 public final V getRawResult() { return result; }
5528 }
5529
5530 static final class ReduceEntriesTask<K,V>
5531 extends BulkTask<K,V,Map.Entry<K,V>> {
5532 final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
5533 Map.Entry<K,V> result;
5534 ReduceEntriesTask<K,V> sibling;
5535 ReduceEntriesTask
5536 (ConcurrentHashMapV8<K,V> m,
5537 BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
5538 super(m);
5539 this.reducer = reducer;
5540 }
5541 ReduceEntriesTask
5542 (BulkTask<K,V,?> p, int b, boolean split,
5543 BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
5544 super(p, b, split);
5545 this.reducer = reducer;
5546 }
5547
5548 public final void compute() {
5549 ReduceEntriesTask<K,V> t = this;
5550 final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer =
5551 this.reducer;
5552 if (reducer == null)
5553 throw new Error(NullFunctionMessage);
5554 int b = batch();
5555 while (b > 1 && t.baseIndex != t.baseLimit) {
5556 b >>>= 1;
5557 t.pending = 1;
5558 ReduceEntriesTask<K,V> rt =
5559 new ReduceEntriesTask<K,V>
5560 (t, b, true, reducer);
5561 t = new ReduceEntriesTask<K,V>
5562 (t, b, false, reducer);
5563 t.sibling = rt;
5564 rt.sibling = t;
5565 rt.fork();
5566 }
5567 Map.Entry<K,V> r = null;
5568 Object v;
5569 while ((v = t.advance()) != null) {
5570 Map.Entry<K,V> u = entryFor((K)t.nextKey, (V)v);
5571 r = (r == null) ? u : reducer.apply(r, u);
5572 }
5573 t.result = r;
5574 for (;;) {
5575 int c; BulkTask<K,V,?> par; ReduceEntriesTask<K,V> s, p;
5576 Map.Entry<K,V> u;
5577 if ((par = t.parent) == null ||
5578 !(par instanceof ReduceEntriesTask)) {
5579 t.quietlyComplete();
5580 break;
5581 }
5582 else if ((c = (p = (ReduceEntriesTask<K,V>)par).pending) == 0) {
5583 if ((s = t.sibling) != null && (u = s.result) != null)
5584 r = (r == null) ? u : reducer.apply(r, u);
5585 (t = p).result = r;
5586 }
5587 else if (p.casPending(c, 0))
5588 break;
5589 }
5590 }
5591 public final Map.Entry<K,V> getRawResult() { return result; }
5592 }
5593
5594 static final class MapReduceKeysTask<K,V,U>
5595 extends BulkTask<K,V,U> {
5596 final Fun<? super K, ? extends U> transformer;
5597 final BiFun<? super U, ? super U, ? extends U> reducer;
5598 U result;
5599 MapReduceKeysTask<K,V,U> sibling;
5600 MapReduceKeysTask
5601 (ConcurrentHashMapV8<K,V> m,
5602 Fun<? super K, ? extends U> transformer,
5603 BiFun<? super U, ? super U, ? extends U> reducer) {
5604 super(m);
5605 this.transformer = transformer;
5606 this.reducer = reducer;
5607 }
5608 MapReduceKeysTask
5609 (BulkTask<K,V,?> p, int b, boolean split,
5610 Fun<? super K, ? extends U> transformer,
5611 BiFun<? super U, ? super U, ? extends U> reducer) {
5612 super(p, b, split);
5613 this.transformer = transformer;
5614 this.reducer = reducer;
5615 }
5616 public final void compute() {
5617 MapReduceKeysTask<K,V,U> t = this;
5618 final Fun<? super K, ? extends U> transformer =
5619 this.transformer;
5620 final BiFun<? super U, ? super U, ? extends U> reducer =
5621 this.reducer;
5622 if (transformer == null || reducer == null)
5623 throw new Error(NullFunctionMessage);
5624 int b = batch();
5625 while (b > 1 && t.baseIndex != t.baseLimit) {
5626 b >>>= 1;
5627 t.pending = 1;
5628 MapReduceKeysTask<K,V,U> rt =
5629 new MapReduceKeysTask<K,V,U>
5630 (t, b, true, transformer, reducer);
5631 t = new MapReduceKeysTask<K,V,U>
5632 (t, b, false, transformer, reducer);
5633 t.sibling = rt;
5634 rt.sibling = t;
5635 rt.fork();
5636 }
5637 U r = null, u;
5638 while (t.advance() != null) {
5639 if ((u = transformer.apply((K)t.nextKey)) != null)
5640 r = (r == null) ? u : reducer.apply(r, u);
5641 }
5642 t.result = r;
5643 for (;;) {
5644 int c; BulkTask<K,V,?> par; MapReduceKeysTask<K,V,U> s, p;
5645 if ((par = t.parent) == null ||
5646 !(par instanceof MapReduceKeysTask)) {
5647 t.quietlyComplete();
5648 break;
5649 }
5650 else if ((c = (p = (MapReduceKeysTask<K,V,U>)par).pending) == 0) {
5651 if ((s = t.sibling) != null && (u = s.result) != null)
5652 r = (r == null) ? u : reducer.apply(r, u);
5653 (t = p).result = r;
5654 }
5655 else if (p.casPending(c, 0))
5656 break;
5657 }
5658 }
5659 public final U getRawResult() { return result; }
5660 }
5661
5662 static final class MapReduceValuesTask<K,V,U>
5663 extends BulkTask<K,V,U> {
5664 final Fun<? super V, ? extends U> transformer;
5665 final BiFun<? super U, ? super U, ? extends U> reducer;
5666 U result;
5667 MapReduceValuesTask<K,V,U> sibling;
5668 MapReduceValuesTask
5669 (ConcurrentHashMapV8<K,V> m,
5670 Fun<? super V, ? extends U> transformer,
5671 BiFun<? super U, ? super U, ? extends U> reducer) {
5672 super(m);
5673 this.transformer = transformer;
5674 this.reducer = reducer;
5675 }
5676 MapReduceValuesTask
5677 (BulkTask<K,V,?> p, int b, boolean split,
5678 Fun<? super V, ? extends U> transformer,
5679 BiFun<? super U, ? super U, ? extends U> reducer) {
5680 super(p, b, split);
5681 this.transformer = transformer;
5682 this.reducer = reducer;
5683 }
5684 public final void compute() {
5685 MapReduceValuesTask<K,V,U> t = this;
5686 final Fun<? super V, ? extends U> transformer =
5687 this.transformer;
5688 final BiFun<? super U, ? super U, ? extends U> reducer =
5689 this.reducer;
5690 if (transformer == null || reducer == null)
5691 throw new Error(NullFunctionMessage);
5692 int b = batch();
5693 while (b > 1 && t.baseIndex != t.baseLimit) {
5694 b >>>= 1;
5695 t.pending = 1;
5696 MapReduceValuesTask<K,V,U> rt =
5697 new MapReduceValuesTask<K,V,U>
5698 (t, b, true, transformer, reducer);
5699 t = new MapReduceValuesTask<K,V,U>
5700 (t, b, false, transformer, reducer);
5701 t.sibling = rt;
5702 rt.sibling = t;
5703 rt.fork();
5704 }
5705 U r = null, u;
5706 Object v;
5707 while ((v = t.advance()) != null) {
5708 if ((u = transformer.apply((V)v)) != null)
5709 r = (r == null) ? u : reducer.apply(r, u);
5710 }
5711 t.result = r;
5712 for (;;) {
5713 int c; BulkTask<K,V,?> par; MapReduceValuesTask<K,V,U> s, p;
5714 if ((par = t.parent) == null ||
5715 !(par instanceof MapReduceValuesTask)) {
5716 t.quietlyComplete();
5717 break;
5718 }
5719 else if ((c = (p = (MapReduceValuesTask<K,V,U>)par).pending) == 0) {
5720 if ((s = t.sibling) != null && (u = s.result) != null)
5721 r = (r == null) ? u : reducer.apply(r, u);
5722 (t = p).result = r;
5723 }
5724 else if (p.casPending(c, 0))
5725 break;
5726 }
5727 }
5728 public final U getRawResult() { return result; }
5729 }
5730
5731 static final class MapReduceEntriesTask<K,V,U>
5732 extends BulkTask<K,V,U> {
5733 final Fun<Map.Entry<K,V>, ? extends U> transformer;
5734 final BiFun<? super U, ? super U, ? extends U> reducer;
5735 U result;
5736 MapReduceEntriesTask<K,V,U> sibling;
5737 MapReduceEntriesTask
5738 (ConcurrentHashMapV8<K,V> m,
5739 Fun<Map.Entry<K,V>, ? extends U> transformer,
5740 BiFun<? super U, ? super U, ? extends U> reducer) {
5741 super(m);
5742 this.transformer = transformer;
5743 this.reducer = reducer;
5744 }
5745 MapReduceEntriesTask
5746 (BulkTask<K,V,?> p, int b, boolean split,
5747 Fun<Map.Entry<K,V>, ? extends U> transformer,
5748 BiFun<? super U, ? super U, ? extends U> reducer) {
5749 super(p, b, split);
5750 this.transformer = transformer;
5751 this.reducer = reducer;
5752 }
5753 public final void compute() {
5754 MapReduceEntriesTask<K,V,U> t = this;
5755 final Fun<Map.Entry<K,V>, ? extends U> transformer =
5756 this.transformer;
5757 final BiFun<? super U, ? super U, ? extends U> reducer =
5758 this.reducer;
5759 if (transformer == null || reducer == null)
5760 throw new Error(NullFunctionMessage);
5761 int b = batch();
5762 while (b > 1 && t.baseIndex != t.baseLimit) {
5763 b >>>= 1;
5764 t.pending = 1;
5765 MapReduceEntriesTask<K,V,U> rt =
5766 new MapReduceEntriesTask<K,V,U>
5767 (t, b, true, transformer, reducer);
5768 t = new MapReduceEntriesTask<K,V,U>
5769 (t, b, false, transformer, reducer);
5770 t.sibling = rt;
5771 rt.sibling = t;
5772 rt.fork();
5773 }
5774 U r = null, u;
5775 Object v;
5776 while ((v = t.advance()) != null) {
5777 if ((u = transformer.apply(entryFor((K)t.nextKey, (V)v))) != null)
5778 r = (r == null) ? u : reducer.apply(r, u);
5779 }
5780 t.result = r;
5781 for (;;) {
5782 int c; BulkTask<K,V,?> par; MapReduceEntriesTask<K,V,U> s, p;
5783 if ((par = t.parent) == null ||
5784 !(par instanceof MapReduceEntriesTask)) {
5785 t.quietlyComplete();
5786 break;
5787 }
5788 else if ((c = (p = (MapReduceEntriesTask<K,V,U>)par).pending) == 0) {
5789 if ((s = t.sibling) != null && (u = s.result) != null)
5790 r = (r == null) ? u : reducer.apply(r, u);
5791 (t = p).result = r;
5792 }
5793 else if (p.casPending(c, 0))
5794 break;
5795 }
5796 }
5797 public final U getRawResult() { return result; }
5798 }
5799
5800 static final class MapReduceMappingsTask<K,V,U>
5801 extends BulkTask<K,V,U> {
5802 final BiFun<? super K, ? super V, ? extends U> transformer;
5803 final BiFun<? super U, ? super U, ? extends U> reducer;
5804 U result;
5805 MapReduceMappingsTask<K,V,U> sibling;
5806 MapReduceMappingsTask
5807 (ConcurrentHashMapV8<K,V> m,
5808 BiFun<? super K, ? super V, ? extends U> transformer,
5809 BiFun<? super U, ? super U, ? extends U> reducer) {
5810 super(m);
5811 this.transformer = transformer;
5812 this.reducer = reducer;
5813 }
5814 MapReduceMappingsTask
5815 (BulkTask<K,V,?> p, int b, boolean split,
5816 BiFun<? super K, ? super V, ? extends U> transformer,
5817 BiFun<? super U, ? super U, ? extends U> reducer) {
5818 super(p, b, split);
5819 this.transformer = transformer;
5820 this.reducer = reducer;
5821 }
5822 public final void compute() {
5823 MapReduceMappingsTask<K,V,U> t = this;
5824 final BiFun<? super K, ? super V, ? extends U> transformer =
5825 this.transformer;
5826 final BiFun<? super U, ? super U, ? extends U> reducer =
5827 this.reducer;
5828 if (transformer == null || reducer == null)
5829 throw new Error(NullFunctionMessage);
5830 int b = batch();
5831 while (b > 1 && t.baseIndex != t.baseLimit) {
5832 b >>>= 1;
5833 t.pending = 1;
5834 MapReduceMappingsTask<K,V,U> rt =
5835 new MapReduceMappingsTask<K,V,U>
5836 (t, b, true, transformer, reducer);
5837 t = new MapReduceMappingsTask<K,V,U>
5838 (t, b, false, transformer, reducer);
5839 t.sibling = rt;
5840 rt.sibling = t;
5841 rt.fork();
5842 }
5843 U r = null, u;
5844 Object v;
5845 while ((v = t.advance()) != null) {
5846 if ((u = transformer.apply((K)t.nextKey, (V)v)) != null)
5847 r = (r == null) ? u : reducer.apply(r, u);
5848 }
t.result = r;
5849 for (;;) {
5850 int c; BulkTask<K,V,?> par; MapReduceMappingsTask<K,V,U> s, p;
5851 if ((par = t.parent) == null ||
5852 !(par instanceof MapReduceMappingsTask)) {
5853 t.quietlyComplete();
5854 break;
5855 }
5856 else if ((c = (p = (MapReduceMappingsTask<K,V,U>)par).pending) == 0) {
5857 if ((s = t.sibling) != null && (u = s.result) != null)
5858 r = (r == null) ? u : reducer.apply(r, u);
5859 (t = p).result = r;
5860 }
5861 else if (p.casPending(c, 0))
5862 break;
5863 }
5864 }
5865 public final U getRawResult() { return result; }
5866 }
5867
5868 static final class MapReduceKeysToDoubleTask<K,V>
5869 extends BulkTask<K,V,Double> {
5870 final ObjectToDouble<? super K> transformer;
5871 final DoubleByDoubleToDouble reducer;
5872 final double basis;
5873 double result;
5874 MapReduceKeysToDoubleTask<K,V> sibling;
5875 MapReduceKeysToDoubleTask
5876 (ConcurrentHashMapV8<K,V> m,
5877 ObjectToDouble<? super K> transformer,
5878 double basis,
5879 DoubleByDoubleToDouble reducer) {
5880 super(m);
5881 this.transformer = transformer;
5882 this.basis = basis; this.reducer = reducer;
5883 }
5884 MapReduceKeysToDoubleTask
5885 (BulkTask<K,V,?> p, int b, boolean split,
5886 ObjectToDouble<? super K> transformer,
5887 double basis,
5888 DoubleByDoubleToDouble reducer) {
5889 super(p, b, split);
5890 this.transformer = transformer;
5891 this.basis = basis; this.reducer = reducer;
5892 }
5893 public final void compute() {
5894 MapReduceKeysToDoubleTask<K,V> t = this;
5895 final ObjectToDouble<? super K> transformer =
5896 this.transformer;
5897 final DoubleByDoubleToDouble reducer = this.reducer;
5898 if (transformer == null || reducer == null)
5899 throw new Error(NullFunctionMessage);
5900 final double id = this.basis;
5901 int b = batch();
5902 while (b > 1 && t.baseIndex != t.baseLimit) {
5903 b >>>= 1;
5904 t.pending = 1;
5905 MapReduceKeysToDoubleTask<K,V> rt =
5906 new MapReduceKeysToDoubleTask<K,V>
5907 (t, b, true, transformer, id, reducer);
5908 t = new MapReduceKeysToDoubleTask<K,V>
5909 (t, b, false, transformer, id, reducer);
5910 t.sibling = rt;
5911 rt.sibling = t;
5912 rt.fork();
5913 }
5914 double r = id;
5915 while (t.advance() != null)
5916 r = reducer.apply(r, transformer.apply((K)t.nextKey));
5917 t.result = r;
5918 for (;;) {
5919 int c; BulkTask<K,V,?> par; MapReduceKeysToDoubleTask<K,V> s, p;
5920 if ((par = t.parent) == null ||
5921 !(par instanceof MapReduceKeysToDoubleTask)) {
5922 t.quietlyComplete();
5923 break;
5924 }
5925 else if ((c = (p = (MapReduceKeysToDoubleTask<K,V>)par).pending) == 0) {
5926 if ((s = t.sibling) != null)
5927 r = reducer.apply(r, s.result);
5928 (t = p).result = r;
5929 }
5930 else if (p.casPending(c, 0))
5931 break;
5932 }
5933 }
5934 public final Double getRawResult() { return result; }
5935 }
5936
5937 static final class MapReduceValuesToDoubleTask<K,V>
5938 extends BulkTask<K,V,Double> {
5939 final ObjectToDouble<? super V> transformer;
5940 final DoubleByDoubleToDouble reducer;
5941 final double basis;
5942 double result;
5943 MapReduceValuesToDoubleTask<K,V> sibling;
5944 MapReduceValuesToDoubleTask
5945 (ConcurrentHashMapV8<K,V> m,
5946 ObjectToDouble<? super V> transformer,
5947 double basis,
5948 DoubleByDoubleToDouble reducer) {
5949 super(m);
5950 this.transformer = transformer;
5951 this.basis = basis; this.reducer = reducer;
5952 }
5953 MapReduceValuesToDoubleTask
5954 (BulkTask<K,V,?> p, int b, boolean split,
5955 ObjectToDouble<? super V> transformer,
5956 double basis,
5957 DoubleByDoubleToDouble reducer) {
5958 super(p, b, split);
5959 this.transformer = transformer;
5960 this.basis = basis; this.reducer = reducer;
5961 }
5962 public final void compute() {
5963 MapReduceValuesToDoubleTask<K,V> t = this;
5964 final ObjectToDouble<? super V> transformer =
5965 this.transformer;
5966 final DoubleByDoubleToDouble reducer = this.reducer;
5967 if (transformer == null || reducer == null)
5968 throw new Error(NullFunctionMessage);
5969 final double id = this.basis;
5970 int b = batch();
5971 while (b > 1 && t.baseIndex != t.baseLimit) {
5972 b >>>= 1;
5973 t.pending = 1;
5974 MapReduceValuesToDoubleTask<K,V> rt =
5975 new MapReduceValuesToDoubleTask<K,V>
5976 (t, b, true, transformer, id, reducer);
5977 t = new MapReduceValuesToDoubleTask<K,V>
5978 (t, b, false, transformer, id, reducer);
5979 t.sibling = rt;
5980 rt.sibling = t;
5981 rt.fork();
5982 }
5983 double r = id;
5984 Object v;
5985 while ((v = t.advance()) != null)
5986 r = reducer.apply(r, transformer.apply((V)v));
5987 t.result = r;
5988 for (;;) {
5989 int c; BulkTask<K,V,?> par; MapReduceValuesToDoubleTask<K,V> s, p;
5990 if ((par = t.parent) == null ||
5991 !(par instanceof MapReduceValuesToDoubleTask)) {
5992 t.quietlyComplete();
5993 break;
5994 }
5995 else if ((c = (p = (MapReduceValuesToDoubleTask<K,V>)par).pending) == 0) {
5996 if ((s = t.sibling) != null)
5997 r = reducer.apply(r, s.result);
5998 (t = p).result = r;
5999 }
6000 else if (p.casPending(c, 0))
6001 break;
6002 }
6003 }
6004 public final Double getRawResult() { return result; }
6005 }
6006
6007 static final class MapReduceEntriesToDoubleTask<K,V>
6008 extends BulkTask<K,V,Double> {
6009 final ObjectToDouble<Map.Entry<K,V>> transformer;
6010 final DoubleByDoubleToDouble reducer;
6011 final double basis;
6012 double result;
6013 MapReduceEntriesToDoubleTask<K,V> sibling;
6014 MapReduceEntriesToDoubleTask
6015 (ConcurrentHashMapV8<K,V> m,
6016 ObjectToDouble<Map.Entry<K,V>> transformer,
6017 double basis,
6018 DoubleByDoubleToDouble reducer) {
6019 super(m);
6020 this.transformer = transformer;
6021 this.basis = basis; this.reducer = reducer;
6022 }
6023 MapReduceEntriesToDoubleTask
6024 (BulkTask<K,V,?> p, int b, boolean split,
6025 ObjectToDouble<Map.Entry<K,V>> transformer,
6026 double basis,
6027 DoubleByDoubleToDouble reducer) {
6028 super(p, b, split);
6029 this.transformer = transformer;
6030 this.basis = basis; this.reducer = reducer;
6031 }
6032 public final void compute() {
6033 MapReduceEntriesToDoubleTask<K,V> t = this;
6034 final ObjectToDouble<Map.Entry<K,V>> transformer =
6035 this.transformer;
6036 final DoubleByDoubleToDouble reducer = this.reducer;
6037 if (transformer == null || reducer == null)
6038 throw new Error(NullFunctionMessage);
6039 final double id = this.basis;
6040 int b = batch();
6041 while (b > 1 && t.baseIndex != t.baseLimit) {
6042 b >>>= 1;
6043 t.pending = 1;
6044 MapReduceEntriesToDoubleTask<K,V> rt =
6045 new MapReduceEntriesToDoubleTask<K,V>
6046 (t, b, true, transformer, id, reducer);
6047 t = new MapReduceEntriesToDoubleTask<K,V>
6048 (t, b, false, transformer, id, reducer);
6049 t.sibling = rt;
6050 rt.sibling = t;
6051 rt.fork();
6052 }
6053 double r = id;
6054 Object v;
6055 while ((v = t.advance()) != null)
6056 r = reducer.apply(r, transformer.apply(entryFor((K)t.nextKey, (V)v)));
6057 t.result = r;
6058 for (;;) {
6059 int c; BulkTask<K,V,?> par; MapReduceEntriesToDoubleTask<K,V> s, p;
6060 if ((par = t.parent) == null ||
6061 !(par instanceof MapReduceEntriesToDoubleTask)) {
6062 t.quietlyComplete();
6063 break;
6064 }
6065 else if ((c = (p = (MapReduceEntriesToDoubleTask<K,V>)par).pending) == 0) {
6066 if ((s = t.sibling) != null)
6067 r = reducer.apply(r, s.result);
6068 (t = p).result = r;
6069 }
6070 else if (p.casPending(c, 0))
6071 break;
6072 }
6073 }
6074 public final Double getRawResult() { return result; }
6075 }
6076
6077 static final class MapReduceMappingsToDoubleTask<K,V>
6078 extends BulkTask<K,V,Double> {
6079 final ObjectByObjectToDouble<? super K, ? super V> transformer;
6080 final DoubleByDoubleToDouble reducer;
6081 final double basis;
6082 double result;
6083 MapReduceMappingsToDoubleTask<K,V> sibling;
6084 MapReduceMappingsToDoubleTask
6085 (ConcurrentHashMapV8<K,V> m,
6086 ObjectByObjectToDouble<? super K, ? super V> transformer,
6087 double basis,
6088 DoubleByDoubleToDouble reducer) {
6089 super(m);
6090 this.transformer = transformer;
6091 this.basis = basis; this.reducer = reducer;
6092 }
6093 MapReduceMappingsToDoubleTask
6094 (BulkTask<K,V,?> p, int b, boolean split,
6095 ObjectByObjectToDouble<? super K, ? super V> transformer,
6096 double basis,
6097 DoubleByDoubleToDouble reducer) {
6098 super(p, b, split);
6099 this.transformer = transformer;
6100 this.basis = basis; this.reducer = reducer;
6101 }
6102 public final void compute() {
6103 MapReduceMappingsToDoubleTask<K,V> t = this;
6104 final ObjectByObjectToDouble<? super K, ? super V> transformer =
6105 this.transformer;
6106 final DoubleByDoubleToDouble reducer = this.reducer;
6107 if (transformer == null || reducer == null)
6108 throw new Error(NullFunctionMessage);
6109 final double id = this.basis;
6110 int b = batch();
6111 while (b > 1 && t.baseIndex != t.baseLimit) {
6112 b >>>= 1;
6113 t.pending = 1;
6114 MapReduceMappingsToDoubleTask<K,V> rt =
6115 new MapReduceMappingsToDoubleTask<K,V>
6116 (t, b, true, transformer, id, reducer);
6117 t = new MapReduceMappingsToDoubleTask<K,V>
6118 (t, b, false, transformer, id, reducer);
6119 t.sibling = rt;
6120 rt.sibling = t;
6121 rt.fork();
6122 }
6123 double r = id;
6124 Object v;
6125 while ((v = t.advance()) != null)
6126 r = reducer.apply(r, transformer.apply((K)t.nextKey, (V)v));
6127 t.result = r;
6128 for (;;) {
6129 int c; BulkTask<K,V,?> par; MapReduceMappingsToDoubleTask<K,V> s, p;
6130 if ((par = t.parent) == null ||
6131 !(par instanceof MapReduceMappingsToDoubleTask)) {
6132 t.quietlyComplete();
6133 break;
6134 }
6135 else if ((c = (p = (MapReduceMappingsToDoubleTask<K,V>)par).pending) == 0) {
6136 if ((s = t.sibling) != null)
6137 r = reducer.apply(r, s.result);
6138 (t = p).result = r;
6139 }
6140 else if (p.casPending(c, 0))
6141 break;
6142 }
6143 }
6144 public final Double getRawResult() { return result; }
6145 }
6146
6147 static final class MapReduceKeysToLongTask<K,V>
6148 extends BulkTask<K,V,Long> {
6149 final ObjectToLong<? super K> transformer;
6150 final LongByLongToLong reducer;
6151 final long basis;
6152 long result;
6153 MapReduceKeysToLongTask<K,V> sibling;
6154 MapReduceKeysToLongTask
6155 (ConcurrentHashMapV8<K,V> m,
6156 ObjectToLong<? super K> transformer,
6157 long basis,
6158 LongByLongToLong reducer) {
6159 super(m);
6160 this.transformer = transformer;
6161 this.basis = basis; this.reducer = reducer;
6162 }
6163 MapReduceKeysToLongTask
6164 (BulkTask<K,V,?> p, int b, boolean split,
6165 ObjectToLong<? super K> transformer,
6166 long basis,
6167 LongByLongToLong reducer) {
6168 super(p, b, split);
6169 this.transformer = transformer;
6170 this.basis = basis; this.reducer = reducer;
6171 }
6172 public final void compute() {
6173 MapReduceKeysToLongTask<K,V> t = this;
6174 final ObjectToLong<? super K> transformer =
6175 this.transformer;
6176 final LongByLongToLong reducer = this.reducer;
6177 if (transformer == null || reducer == null)
6178 throw new Error(NullFunctionMessage);
6179 final long id = this.basis;
6180 int b = batch();
6181 while (b > 1 && t.baseIndex != t.baseLimit) {
6182 b >>>= 1;
6183 t.pending = 1;
6184 MapReduceKeysToLongTask<K,V> rt =
6185 new MapReduceKeysToLongTask<K,V>
6186 (t, b, true, transformer, id, reducer);
6187 t = new MapReduceKeysToLongTask<K,V>
6188 (t, b, false, transformer, id, reducer);
6189 t.sibling = rt;
6190 rt.sibling = t;
6191 rt.fork();
6192 }
6193 long r = id;
6194 while (t.advance() != null)
6195 r = reducer.apply(r, transformer.apply((K)t.nextKey));
6196 t.result = r;
6197 for (;;) {
6198 int c; BulkTask<K,V,?> par; MapReduceKeysToLongTask<K,V> s, p;
6199 if ((par = t.parent) == null ||
6200 !(par instanceof MapReduceKeysToLongTask)) {
6201 t.quietlyComplete();
6202 break;
6203 }
6204 else if ((c = (p = (MapReduceKeysToLongTask<K,V>)par).pending) == 0) {
6205 if ((s = t.sibling) != null)
6206 r = reducer.apply(r, s.result);
6207 (t = p).result = r;
6208 }
6209 else if (p.casPending(c, 0))
6210 break;
6211 }
6212 }
6213 public final Long getRawResult() { return result; }
6214 }
6215
6216 static final class MapReduceValuesToLongTask<K,V>
6217 extends BulkTask<K,V,Long> {
6218 final ObjectToLong<? super V> transformer;
6219 final LongByLongToLong reducer;
6220 final long basis;
6221 long result;
6222 MapReduceValuesToLongTask<K,V> sibling;
6223 MapReduceValuesToLongTask
6224 (ConcurrentHashMapV8<K,V> m,
6225 ObjectToLong<? super V> transformer,
6226 long basis,
6227 LongByLongToLong reducer) {
6228 super(m);
6229 this.transformer = transformer;
6230 this.basis = basis; this.reducer = reducer;
6231 }
6232 MapReduceValuesToLongTask
6233 (BulkTask<K,V,?> p, int b, boolean split,
6234 ObjectToLong<? super V> transformer,
6235 long basis,
6236 LongByLongToLong reducer) {
6237 super(p, b, split);
6238 this.transformer = transformer;
6239 this.basis = basis; this.reducer = reducer;
6240 }
6241 public final void compute() {
6242 MapReduceValuesToLongTask<K,V> t = this;
6243 final ObjectToLong<? super V> transformer =
6244 this.transformer;
6245 final LongByLongToLong reducer = this.reducer;
6246 if (transformer == null || reducer == null)
6247 throw new Error(NullFunctionMessage);
6248 final long id = this.basis;
6249 int b = batch();
6250 while (b > 1 && t.baseIndex != t.baseLimit) {
6251 b >>>= 1;
6252 t.pending = 1;
6253 MapReduceValuesToLongTask<K,V> rt =
6254 new MapReduceValuesToLongTask<K,V>
6255 (t, b, true, transformer, id, reducer);
6256 t = new MapReduceValuesToLongTask<K,V>
6257 (t, b, false, transformer, id, reducer);
6258 t.sibling = rt;
6259 rt.sibling = t;
6260 rt.fork();
6261 }
6262 long r = id;
6263 Object v;
6264 while ((v = t.advance()) != null)
6265 r = reducer.apply(r, transformer.apply((V)v));
6266 t.result = r;
6267 for (;;) {
6268 int c; BulkTask<K,V,?> par; MapReduceValuesToLongTask<K,V> s, p;
6269 if ((par = t.parent) == null ||
6270 !(par instanceof MapReduceValuesToLongTask)) {
6271 t.quietlyComplete();
6272 break;
6273 }
6274 else if ((c = (p = (MapReduceValuesToLongTask<K,V>)par).pending) == 0) {
6275 if ((s = t.sibling) != null)
6276 r = reducer.apply(r, s.result);
6277 (t = p).result = r;
6278 }
6279 else if (p.casPending(c, 0))
6280 break;
6281 }
6282 }
6283 public final Long getRawResult() { return result; }
6284 }
6285
6286 static final class MapReduceEntriesToLongTask<K,V>
6287 extends BulkTask<K,V,Long> {
6288 final ObjectToLong<Map.Entry<K,V>> transformer;
6289 final LongByLongToLong reducer;
6290 final long basis;
6291 long result;
6292 MapReduceEntriesToLongTask<K,V> sibling;
6293 MapReduceEntriesToLongTask
6294 (ConcurrentHashMapV8<K,V> m,
6295 ObjectToLong<Map.Entry<K,V>> transformer,
6296 long basis,
6297 LongByLongToLong reducer) {
6298 super(m);
6299 this.transformer = transformer;
6300 this.basis = basis; this.reducer = reducer;
6301 }
6302 MapReduceEntriesToLongTask
6303 (BulkTask<K,V,?> p, int b, boolean split,
6304 ObjectToLong<Map.Entry<K,V>> transformer,
6305 long basis,
6306 LongByLongToLong reducer) {
6307 super(p, b, split);
6308 this.transformer = transformer;
6309 this.basis = basis; this.reducer = reducer;
6310 }
6311 public final void compute() {
6312 MapReduceEntriesToLongTask<K,V> t = this;
6313 final ObjectToLong<Map.Entry<K,V>> transformer =
6314 this.transformer;
6315 final LongByLongToLong reducer = this.reducer;
6316 if (transformer == null || reducer == null)
6317 throw new Error(NullFunctionMessage);
6318 final long id = this.basis;
6319 int b = batch();
6320 while (b > 1 && t.baseIndex != t.baseLimit) {
6321 b >>>= 1;
6322 t.pending = 1;
6323 MapReduceEntriesToLongTask<K,V> rt =
6324 new MapReduceEntriesToLongTask<K,V>
6325 (t, b, true, transformer, id, reducer);
6326 t = new MapReduceEntriesToLongTask<K,V>
6327 (t, b, false, transformer, id, reducer);
6328 t.sibling = rt;
6329 rt.sibling = t;
6330 rt.fork();
6331 }
6332 long r = id;
6333 Object v;
6334 while ((v = t.advance()) != null)
6335 r = reducer.apply(r, transformer.apply(entryFor((K)t.nextKey, (V)v)));
6336 t.result = r;
6337 for (;;) {
6338 int c; BulkTask<K,V,?> par; MapReduceEntriesToLongTask<K,V> s, p;
6339 if ((par = t.parent) == null ||
6340 !(par instanceof MapReduceEntriesToLongTask)) {
6341 t.quietlyComplete();
6342 break;
6343 }
6344 else if ((c = (p = (MapReduceEntriesToLongTask<K,V>)par).pending) == 0) {
6345 if ((s = t.sibling) != null)
6346 r = reducer.apply(r, s.result);
6347 (t = p).result = r;
6348 }
6349 else if (p.casPending(c, 0))
6350 break;
6351 }
6352 }
6353 public final Long getRawResult() { return result; }
6354 }
6355
6356 static final class MapReduceMappingsToLongTask<K,V>
6357 extends BulkTask<K,V,Long> {
6358 final ObjectByObjectToLong<? super K, ? super V> transformer;
6359 final LongByLongToLong reducer;
6360 final long basis;
6361 long result;
6362 MapReduceMappingsToLongTask<K,V> sibling;
6363 MapReduceMappingsToLongTask
6364 (ConcurrentHashMapV8<K,V> m,
6365 ObjectByObjectToLong<? super K, ? super V> transformer,
6366 long basis,
6367 LongByLongToLong reducer) {
6368 super(m);
6369 this.transformer = transformer;
6370 this.basis = basis; this.reducer = reducer;
6371 }
6372 MapReduceMappingsToLongTask
6373 (BulkTask<K,V,?> p, int b, boolean split,
6374 ObjectByObjectToLong<? super K, ? super V> transformer,
6375 long basis,
6376 LongByLongToLong reducer) {
6377 super(p, b, split);
6378 this.transformer = transformer;
6379 this.basis = basis; this.reducer = reducer;
6380 }
6381 public final void compute() {
6382 MapReduceMappingsToLongTask<K,V> t = this;
6383 final ObjectByObjectToLong<? super K, ? super V> transformer =
6384 this.transformer;
6385 final LongByLongToLong reducer = this.reducer;
6386 if (transformer == null || reducer == null)
6387 throw new Error(NullFunctionMessage);
6388 final long id = this.basis;
6389 int b = batch();
6390 while (b > 1 && t.baseIndex != t.baseLimit) {
6391 b >>>= 1;
6392 t.pending = 1;
6393 MapReduceMappingsToLongTask<K,V> rt =
6394 new MapReduceMappingsToLongTask<K,V>
6395 (t, b, true, transformer, id, reducer);
6396 t = new MapReduceMappingsToLongTask<K,V>
6397 (t, b, false, transformer, id, reducer);
6398 t.sibling = rt;
6399 rt.sibling = t;
6400 rt.fork();
6401 }
6402 long r = id;
6403 Object v;
6404 while ((v = t.advance()) != null)
6405 r = reducer.apply(r, transformer.apply((K)t.nextKey, (V)v));
6406 t.result = r;
6407 for (;;) {
6408 int c; BulkTask<K,V,?> par; MapReduceMappingsToLongTask<K,V> s, p;
6409 if ((par = t.parent) == null ||
6410 !(par instanceof MapReduceMappingsToLongTask)) {
6411 t.quietlyComplete();
6412 break;
6413 }
6414 else if ((c = (p = (MapReduceMappingsToLongTask<K,V>)par).pending) == 0) {
6415 if ((s = t.sibling) != null)
6416 r = reducer.apply(r, s.result);
6417 (t = p).result = r;
6418 }
6419 else if (p.casPending(c, 0))
6420 break;
6421 }
6422 }
6423 public final Long getRawResult() { return result; }
6424 }
6425
6426 static final class MapReduceKeysToIntTask<K,V>
6427 extends BulkTask<K,V,Integer> {
6428 final ObjectToInt<? super K> transformer;
6429 final IntByIntToInt reducer;
6430 final int basis;
6431 int result;
6432 MapReduceKeysToIntTask<K,V> sibling;
6433 MapReduceKeysToIntTask
6434 (ConcurrentHashMapV8<K,V> m,
6435 ObjectToInt<? super K> transformer,
6436 int basis,
6437 IntByIntToInt reducer) {
6438 super(m);
6439 this.transformer = transformer;
6440 this.basis = basis; this.reducer = reducer;
6441 }
6442 MapReduceKeysToIntTask
6443 (BulkTask<K,V,?> p, int b, boolean split,
6444 ObjectToInt<? super K> transformer,
6445 int basis,
6446 IntByIntToInt reducer) {
6447 super(p, b, split);
6448 this.transformer = transformer;
6449 this.basis = basis; this.reducer = reducer;
6450 }
6451 public final void compute() {
6452 MapReduceKeysToIntTask<K,V> t = this;
6453 final ObjectToInt<? super K> transformer =
6454 this.transformer;
6455 final IntByIntToInt reducer = this.reducer;
6456 if (transformer == null || reducer == null)
6457 throw new Error(NullFunctionMessage);
6458 final int id = this.basis;
6459 int b = batch();
6460 while (b > 1 && t.baseIndex != t.baseLimit) {
6461 b >>>= 1;
6462 t.pending = 1;
6463 MapReduceKeysToIntTask<K,V> rt =
6464 new MapReduceKeysToIntTask<K,V>
6465 (t, b, true, transformer, id, reducer);
6466 t = new MapReduceKeysToIntTask<K,V>
6467 (t, b, false, transformer, id, reducer);
6468 t.sibling = rt;
6469 rt.sibling = t;
6470 rt.fork();
6471 }
6472 int r = id;
6473 while (t.advance() != null)
6474 r = reducer.apply(r, transformer.apply((K)t.nextKey));
6475 t.result = r;
6476 for (;;) {
6477 int c; BulkTask<K,V,?> par; MapReduceKeysToIntTask<K,V> s, p;
6478 if ((par = t.parent) == null ||
6479 !(par instanceof MapReduceKeysToIntTask)) {
6480 t.quietlyComplete();
6481 break;
6482 }
6483 else if ((c = (p = (MapReduceKeysToIntTask<K,V>)par).pending) == 0) {
6484 if ((s = t.sibling) != null)
6485 r = reducer.apply(r, s.result);
6486 (t = p).result = r;
6487 }
6488 else if (p.casPending(c, 0))
6489 break;
6490 }
6491 }
6492 public final Integer getRawResult() { return result; }
6493 }
6494
6495 static final class MapReduceValuesToIntTask<K,V>
6496 extends BulkTask<K,V,Integer> {
6497 final ObjectToInt<? super V> transformer;
6498 final IntByIntToInt reducer;
6499 final int basis;
6500 int result;
6501 MapReduceValuesToIntTask<K,V> sibling;
6502 MapReduceValuesToIntTask
6503 (ConcurrentHashMapV8<K,V> m,
6504 ObjectToInt<? super V> transformer,
6505 int basis,
6506 IntByIntToInt reducer) {
6507 super(m);
6508 this.transformer = transformer;
6509 this.basis = basis; this.reducer = reducer;
6510 }
6511 MapReduceValuesToIntTask
6512 (BulkTask<K,V,?> p, int b, boolean split,
6513 ObjectToInt<? super V> transformer,
6514 int basis,
6515 IntByIntToInt reducer) {
6516 super(p, b, split);
6517 this.transformer = transformer;
6518 this.basis = basis; this.reducer = reducer;
6519 }
6520 public final void compute() {
6521 MapReduceValuesToIntTask<K,V> t = this;
6522 final ObjectToInt<? super V> transformer =
6523 this.transformer;
6524 final IntByIntToInt reducer = this.reducer;
6525 if (transformer == null || reducer == null)
6526 throw new Error(NullFunctionMessage);
6527 final int id = this.basis;
6528 int b = batch();
6529 while (b > 1 && t.baseIndex != t.baseLimit) {
6530 b >>>= 1;
6531 t.pending = 1;
6532 MapReduceValuesToIntTask<K,V> rt =
6533 new MapReduceValuesToIntTask<K,V>
6534 (t, b, true, transformer, id, reducer);
6535 t = new MapReduceValuesToIntTask<K,V>
6536 (t, b, false, transformer, id, reducer);
6537 t.sibling = rt;
6538 rt.sibling = t;
6539 rt.fork();
6540 }
6541 int r = id;
6542 Object v;
6543 while ((v = t.advance()) != null)
6544 r = reducer.apply(r, transformer.apply((V)v));
6545 t.result = r;
6546 for (;;) {
6547 int c; BulkTask<K,V,?> par; MapReduceValuesToIntTask<K,V> s, p;
6548 if ((par = t.parent) == null ||
6549 !(par instanceof MapReduceValuesToIntTask)) {
6550 t.quietlyComplete();
6551 break;
6552 }
6553 else if ((c = (p = (MapReduceValuesToIntTask<K,V>)par).pending) == 0) {
6554 if ((s = t.sibling) != null)
6555 r = reducer.apply(r, s.result);
6556 (t = p).result = r;
6557 }
6558 else if (p.casPending(c, 0))
6559 break;
6560 }
6561 }
6562 public final Integer getRawResult() { return result; }
6563 }
6564
6565 static final class MapReduceEntriesToIntTask<K,V>
6566 extends BulkTask<K,V,Integer> {
6567 final ObjectToInt<Map.Entry<K,V>> transformer;
6568 final IntByIntToInt reducer;
6569 final int basis;
6570 int result;
6571 MapReduceEntriesToIntTask<K,V> sibling;
6572 MapReduceEntriesToIntTask
6573 (ConcurrentHashMapV8<K,V> m,
6574 ObjectToInt<Map.Entry<K,V>> transformer,
6575 int basis,
6576 IntByIntToInt reducer) {
6577 super(m);
6578 this.transformer = transformer;
6579 this.basis = basis; this.reducer = reducer;
6580 }
6581 MapReduceEntriesToIntTask
6582 (BulkTask<K,V,?> p, int b, boolean split,
6583 ObjectToInt<Map.Entry<K,V>> transformer,
6584 int basis,
6585 IntByIntToInt reducer) {
6586 super(p, b, split);
6587 this.transformer = transformer;
6588 this.basis = basis; this.reducer = reducer;
6589 }
6590 public final void compute() {
6591 MapReduceEntriesToIntTask<K,V> t = this;
6592 final ObjectToInt<Map.Entry<K,V>> transformer =
6593 this.transformer;
6594 final IntByIntToInt reducer = this.reducer;
6595 if (transformer == null || reducer == null)
6596 throw new Error(NullFunctionMessage);
6597 final int id = this.basis;
6598 int b = batch();
6599 while (b > 1 && t.baseIndex != t.baseLimit) {
6600 b >>>= 1;
6601 t.pending = 1;
6602 MapReduceEntriesToIntTask<K,V> rt =
6603 new MapReduceEntriesToIntTask<K,V>
6604 (t, b, true, transformer, id, reducer);
6605 t = new MapReduceEntriesToIntTask<K,V>
6606 (t, b, false, transformer, id, reducer);
6607 t.sibling = rt;
6608 rt.sibling = t;
6609 rt.fork();
6610 }
6611 int r = id;
6612 Object v;
6613 while ((v = t.advance()) != null)
6614 r = reducer.apply(r, transformer.apply(entryFor((K)t.nextKey, (V)v)));
6615 t.result = r;
6616 for (;;) {
6617 int c; BulkTask<K,V,?> par; MapReduceEntriesToIntTask<K,V> s, p;
6618 if ((par = t.parent) == null ||
6619 !(par instanceof MapReduceEntriesToIntTask)) {
6620 t.quietlyComplete();
6621 break;
6622 }
6623 else if ((c = (p = (MapReduceEntriesToIntTask<K,V>)par).pending) == 0) {
6624 if ((s = t.sibling) != null)
6625 r = reducer.apply(r, s.result);
6626 (t = p).result = r;
6627 }
6628 else if (p.casPending(c, 0))
6629 break;
6630 }
6631 }
6632 public final Integer getRawResult() { return result; }
6633 }
6634
6635 static final class MapReduceMappingsToIntTask<K,V>
6636 extends BulkTask<K,V,Integer> {
6637 final ObjectByObjectToInt<? super K, ? super V> transformer;
6638 final IntByIntToInt reducer;
6639 final int basis;
6640 int result;
6641 MapReduceMappingsToIntTask<K,V> sibling;
6642 MapReduceMappingsToIntTask
6643 (ConcurrentHashMapV8<K,V> m,
6644 ObjectByObjectToInt<? super K, ? super V> transformer,
6645 int basis,
6646 IntByIntToInt reducer) {
6647 super(m);
6648 this.transformer = transformer;
6649 this.basis = basis; this.reducer = reducer;
6650 }
6651 MapReduceMappingsToIntTask
6652 (BulkTask<K,V,?> p, int b, boolean split,
6653 ObjectByObjectToInt<? super K, ? super V> transformer,
6654 int basis,
6655 IntByIntToInt reducer) {
6656 super(p, b, split);
6657 this.transformer = transformer;
6658 this.basis = basis; this.reducer = reducer;
6659 }
6660 public final void compute() {
6661 MapReduceMappingsToIntTask<K,V> t = this;
6662 final ObjectByObjectToInt<? super K, ? super V> transformer =
6663 this.transformer;
6664 final IntByIntToInt reducer = this.reducer;
6665 if (transformer == null || reducer == null)
6666 throw new Error(NullFunctionMessage);
6667 final int id = this.basis;
6668 int b = batch();
6669 while (b > 1 && t.baseIndex != t.baseLimit) {
6670 b >>>= 1;
6671 t.pending = 1;
6672 MapReduceMappingsToIntTask<K,V> rt =
6673 new MapReduceMappingsToIntTask<K,V>
6674 (t, b, true, transformer, id, reducer);
6675 t = new MapReduceMappingsToIntTask<K,V>
6676 (t, b, false, transformer, id, reducer);
6677 t.sibling = rt;
6678 rt.sibling = t;
6679 rt.fork();
6680 }
6681 int r = id;
6682 Object v;
6683 while ((v = t.advance()) != null)
6684 r = reducer.apply(r, transformer.apply((K)t.nextKey, (V)v));
6685 t.result = r;
6686 for (;;) {
6687 int c; BulkTask<K,V,?> par; MapReduceMappingsToIntTask<K,V> s, p;
6688 if ((par = t.parent) == null ||
6689 !(par instanceof MapReduceMappingsToIntTask)) {
6690 t.quietlyComplete();
6691 break;
6692 }
6693 else if ((c = (p = (MapReduceMappingsToIntTask<K,V>)par).pending) == 0) {
6694 if ((s = t.sibling) != null)
6695 r = reducer.apply(r, s.result);
6696 (t = p).result = r;
6697 }
6698 else if (p.casPending(c, 0))
6699 break;
6700 }
6701 }
6702 public final Integer getRawResult() { return result; }
6703 }
6704
6705
6706 // Unsafe mechanics
6707 private static final sun.misc.Unsafe UNSAFE;
6708 private static final long counterOffset;
6709 private static final long sizeCtlOffset;
6710 private static final long ABASE;
6711 private static final int ASHIFT;
6712
6713 static {
6714 int ss;
6715 try {
6716 UNSAFE = getUnsafe();
6717 Class<?> k = ConcurrentHashMapV8.class;
6718 counterOffset = UNSAFE.objectFieldOffset
6719 (k.getDeclaredField("counter"));
6720 sizeCtlOffset = UNSAFE.objectFieldOffset
6721 (k.getDeclaredField("sizeCtl"));
6722 Class<?> sc = Node[].class;
6723 ABASE = UNSAFE.arrayBaseOffset(sc);
6724 ss = UNSAFE.arrayIndexScale(sc);
6725 } catch (Exception e) {
6726 throw new Error(e);
6727 }
6728 if ((ss & (ss-1)) != 0)
6729 throw new Error("data type scale not a power of two");
6730 ASHIFT = 31 - Integer.numberOfLeadingZeros(ss);
6731 }
6732
6733 /**
6734 * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
6735 * Replace with a simple call to Unsafe.getUnsafe when integrating
6736 * into a jdk.
6737 *
6738 * @return a sun.misc.Unsafe
6739 */
6740 private static sun.misc.Unsafe getUnsafe() {
6741 try {
6742 return sun.misc.Unsafe.getUnsafe();
6743 } catch (SecurityException se) {
6744 try {
6745 return java.security.AccessController.doPrivileged
6746 (new java.security
6747 .PrivilegedExceptionAction<sun.misc.Unsafe>() {
6748 public sun.misc.Unsafe run() throws Exception {
6749 java.lang.reflect.Field f = sun.misc
6750 .Unsafe.class.getDeclaredField("theUnsafe");
6751 f.setAccessible(true);
6752 return (sun.misc.Unsafe) f.get(null);
6753 }});
6754 } catch (java.security.PrivilegedActionException e) {
6755 throw new RuntimeException("Could not initialize intrinsics",
6756 e.getCause());
6757 }
6758 }
6759 }
6760 }