root/jsr166/jsr166/src/jsr166e/ConcurrentHashMapV8.java
Revision: 1.70
Committed: Sun Oct 28 22:35:45 2012 UTC by dl
Branch: MAIN
Changes since 1.69: +874 -690 lines
Log Message:
Introduce ForkJoinPool.commonPool

File Contents

1 /*
2 * Written by Doug Lea with assistance from members of JCP JSR-166
3 * Expert Group and released to the public domain, as explained at
4 * http://creativecommons.org/publicdomain/zero/1.0/
5 */
6
7 package jsr166e;
8 import jsr166e.LongAdder;
9 import jsr166e.ForkJoinPool;
10 import jsr166e.ForkJoinTask;
11 import java.util.Comparator;
12 import java.util.Arrays;
13 import java.util.Map;
14 import java.util.Set;
15 import java.util.Collection;
16 import java.util.AbstractMap;
17 import java.util.AbstractSet;
18 import java.util.AbstractCollection;
19 import java.util.Hashtable;
20 import java.util.HashMap;
21 import java.util.Iterator;
22 import java.util.Enumeration;
23 import java.util.ConcurrentModificationException;
24 import java.util.NoSuchElementException;
25 import java.util.concurrent.ConcurrentMap;
26 import java.util.concurrent.ThreadLocalRandom;
27 import java.util.concurrent.locks.LockSupport;
28 import java.util.concurrent.locks.AbstractQueuedSynchronizer;
29 import java.util.concurrent.atomic.AtomicReference;
30
31 import java.io.Serializable;
32
33 /**
34 * A hash table supporting full concurrency of retrievals and
35 * high expected concurrency for updates. This class obeys the
36 * same functional specification as {@link java.util.Hashtable}, and
37 * includes versions of methods corresponding to each method of
38 * {@code Hashtable}. However, even though all operations are
39 * thread-safe, retrieval operations do <em>not</em> entail locking,
40 * and there is <em>not</em> any support for locking the entire table
41 * in a way that prevents all access. This class is fully
42 * interoperable with {@code Hashtable} in programs that rely on its
43 * thread safety but not on its synchronization details.
44 *
45 * <p> Retrieval operations (including {@code get}) generally do not
46 * block, so may overlap with update operations (including {@code put}
47 * and {@code remove}). Retrievals reflect the results of the most
48 * recently <em>completed</em> update operations holding upon their
49 * onset. (More formally, an update operation for a given key bears a
50 * <em>happens-before</em> relation with any (non-null) retrieval for
51 * that key reporting the updated value.) For aggregate operations
52 * such as {@code putAll} and {@code clear}, concurrent retrievals may
53 * reflect insertion or removal of only some entries. Similarly,
54 * Iterators and Enumerations return elements reflecting the state of
55 * the hash table at some point at or since the creation of the
56 * iterator/enumeration. They do <em>not</em> throw {@link
57 * ConcurrentModificationException}. However, iterators are designed
58 * to be used by only one thread at a time. Bear in mind that the
59 * results of aggregate status methods including {@code size}, {@code
60 * isEmpty}, and {@code containsValue} are typically useful only when
61 * a map is not undergoing concurrent updates in other threads.
62 * Otherwise the results of these methods reflect transient states
63 * that may be adequate for monitoring or estimation purposes, but not
64 * for program control.
65 *
66 * <p> The table is dynamically expanded when there are too many
67 * collisions (i.e., keys that have distinct hash codes but fall into
68 * the same slot modulo the table size), with the expected average
69 * effect of maintaining roughly two bins per mapping (corresponding
70 * to a 0.75 load factor threshold for resizing). There may be much
71 * variance around this average as mappings are added and removed, but
72 * overall, this maintains a commonly accepted time/space tradeoff for
73 * hash tables. However, resizing this or any other kind of hash
74 * table may be a relatively slow operation. When possible, it is a
75 * good idea to provide a size estimate as an optional {@code
76 * initialCapacity} constructor argument. An additional optional
77 * {@code loadFactor} constructor argument provides a further means of
78 * customizing initial table capacity by specifying the table density
79 * to be used in calculating the amount of space to allocate for the
80 * given number of elements. Also, for compatibility with previous
81 * versions of this class, constructors may optionally specify an
82 * expected {@code concurrencyLevel} as an additional hint for
83 * internal sizing. Note that using many keys with exactly the same
84 * {@code hashCode()} is a sure way to slow down performance of any
85 * hash table.
86 *
87 * <p> A {@link Set} projection of a ConcurrentHashMapV8 may be created
88 * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
89 * (using {@link #keySet(Object)}) when only keys are of interest, and the
90 * mapped values are (perhaps transiently) not used or all take the
91 * same mapping value.
92 *
93 * <p> A ConcurrentHashMapV8 can be used as a scalable frequency map (a
94 * form of histogram or multiset) by using {@link LongAdder} values
95 * and initializing via {@link #computeIfAbsent}. For example, to add
96 * a count to a {@code ConcurrentHashMapV8<String,LongAdder> freqs}, you
97 * can use {@code freqs.computeIfAbsent(k, x -> new
98 * LongAdder()).increment();}
99 *
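 * <p>Since this jsr166e version predates JDK8 lambda syntax, the same
 * update can also be written with the nested functional interface form
 * used throughout this class; a minimal sketch (assuming the nested
 * {@code Fun} interface accepted by {@code computeIfAbsent}):
 *
 * <pre> {@code
 * freqs.computeIfAbsent(k, new Fun<String, LongAdder>() {
 *     public LongAdder apply(String key) { return new LongAdder(); }
 * }).increment();}</pre>
 *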
100 * <p>This class and its views and iterators implement all of the
101 * <em>optional</em> methods of the {@link Map} and {@link Iterator}
102 * interfaces.
103 *
104 * <p> Like {@link Hashtable} but unlike {@link HashMap}, this class
105 * does <em>not</em> allow {@code null} to be used as a key or value.
106 *
107 * <p>ConcurrentHashMapV8s support parallel operations using the {@link
108 * ForkJoinPool#commonPool}. (Tasks that may be used in other contexts
109 * are available in class {@link ForkJoinTasks}). These operations are
110 * designed to be safely, and often sensibly, applied even with maps
111 * that are being concurrently updated by other threads; for example,
112 * when computing a snapshot summary of the values in a shared
113 * registry. There are three kinds of operation, each with four
114 * forms, accepting functions with Keys, Values, Entries, and (Key,
115 * Value) arguments and/or return values. Because the elements of a
116 * ConcurrentHashMapV8 are not ordered in any particular way, and may be
117 * processed in different orders in different parallel executions, the
118 * correctness of supplied functions should not depend on any
119 * ordering, or on any other objects or values that may transiently
120 * change while computation is in progress; and except for forEach
121 * actions, should ideally be side-effect-free.
122 *
123 * <ul>
124 * <li> forEach: Perform a given action on each element.
125 * A variant form applies a given transformation on each element
126 * before performing the action.</li>
127 *
128 * <li> search: Return the first available non-null result of
129 * applying a given function on each element; skipping further
130 * search when a result is found.</li>
131 *
132 * <li> reduce: Accumulate each element. The supplied reduction
133 * function cannot rely on ordering (more formally, it should be
134 * both associative and commutative). There are five variants:
135 *
136 * <ul>
137 *
138 * <li> Plain reductions. (There is not a form of this method for
139 * (key, value) function arguments since there is no corresponding
140 * return type.)</li>
141 *
142 * <li> Mapped reductions that accumulate the results of a given
143 * function applied to each element.</li>
144 *
145 * <li> Reductions to scalar doubles, longs, and ints, using a
146 * given basis value.</li>
147 *
148 * </ul>
149 * </li>
150 * </ul>
151 *
152 * <p>The concurrency properties of bulk operations follow
153 * from those of ConcurrentHashMapV8: Any non-null result returned
154 * from {@code get(key)} and related access methods bears a
155 * happens-before relation with the associated insertion or
156 * update. The result of any bulk operation reflects the
157 * composition of these per-element relations (but is not
158 * necessarily atomic with respect to the map as a whole unless it
159 * is somehow known to be quiescent). Conversely, because keys
160 * and values in the map are never null, null serves as a reliable
161 * atomic indicator of the current lack of any result. To
162 * maintain this property, null serves as an implicit basis for
163 * all non-scalar reduction operations. For the double, long, and
164 * int versions, the basis should be one that, when combined with
165 * any other value, returns that other value (more formally, it
166 * should be the identity element for the reduction). Most common
167 * reductions have these properties; for example, computing a sum
168 * with basis 0 or a minimum with basis MAX_VALUE.
169 *
170 * <p>Search and transformation functions provided as arguments
171 * should similarly return null to indicate the lack of any result
172 * (in which case it is not used). In the case of mapped
173 * reductions, this also enables transformations to serve as
174 * filters, returning null (or, in the case of primitive
175 * specializations, the identity basis) if the element should not
176 * be combined. You can create compound transformations and
177 * filterings by composing them yourself under this "null means
178 * there is nothing there now" rule before using them in search or
179 * reduce operations.
180 *
181 * <p>Methods accepting and/or returning Entry arguments maintain
182 * key-value associations. They may be useful for example when
183 * finding the key for the greatest value. Note that "plain" Entry
184 * arguments can be supplied using {@code new
185 * AbstractMap.SimpleEntry(k,v)}.
186 *
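 * <p>For illustration, such a "plain" entry can be constructed from
 * standard {@code java.util} types only, for example:
 *
 * <pre> {@code
 * Map.Entry<String,Long> e =
 *     new AbstractMap.SimpleEntry<String,Long>("key", 1L);}</pre>
 *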
187 * <p> Bulk operations may complete abruptly, throwing an
188 * exception encountered in the application of a supplied
189 * function. Bear in mind when handling such exceptions that other
190 * concurrently executing functions could also have thrown
191 * exceptions, or would have done so if the first exception had
192 * not occurred.
193 *
194 * <p>Parallel speedups for bulk operations compared to sequential
195 * processing are common but not guaranteed. Operations involving
196 * brief functions on small maps may execute more slowly than
197 * sequential loops if the underlying work to parallelize the
198 * computation is more expensive than the computation
199 * itself. Similarly, parallelization may not lead to much actual
200 * parallelism if all processors are busy performing unrelated tasks.
201 *
202 * <p> All arguments to all task methods must be non-null.
203 *
204 * <p><em>jsr166e note: During transition, this class
205 * uses nested functional interfaces with different names but the
206 * same forms as those expected for JDK8.</em>
207 *
208 * <p>This class is a member of the
209 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
210 * Java Collections Framework</a>.
211 *
212 * @since 1.5
213 * @author Doug Lea
214 * @param <K> the type of keys maintained by this map
215 * @param <V> the type of mapped values
216 */
217 public class ConcurrentHashMapV8<K, V>
218 implements ConcurrentMap<K, V>, Serializable {
219 private static final long serialVersionUID = 7249069246763182397L;
220
221 /**
222 * A partitionable iterator. A Spliterator can be traversed
223 * directly, but can also be partitioned (before traversal) by
224 * creating another Spliterator that covers a non-overlapping
225 * portion of the elements, and so may be amenable to parallel
226 * execution.
227 *
228 * <p> This interface exports a subset of expected JDK8
229 * functionality.
230 *
231 * <p>Sample usage: Here is one (of the several) ways to compute
232 * the sum of the values held in a map using the ForkJoin
233 * framework. As illustrated here, Spliterators are well suited to
234 * designs in which a task repeatedly splits off half its work
235 * into forked subtasks until small enough to process directly,
236 * and then joins these subtasks. Variants of this style can also
237 * be used in completion-based designs.
238 *
239 * <pre>
240 * {@code ConcurrentHashMapV8<String, Long> m = ...
241 * // split as if have 8 * parallelism, for load balance
242 * int n = m.size();
243 * int p = aForkJoinPool.getParallelism() * 8;
244 * int split = (n < p)? n : p;
245 * long sum = aForkJoinPool.invoke(new SumValues(m.valueSpliterator(), split, null));
246 * // ...
247 * static class SumValues extends RecursiveTask<Long> {
248 * final Spliterator<Long> s;
249 * final int split; // split while > 1
250 * final SumValues nextJoin; // records forked subtasks to join
251 * SumValues(Spliterator<Long> s, int split, SumValues nextJoin) {
252 * this.s = s; this.split = split; this.nextJoin = nextJoin;
253 * }
254 * public Long compute() {
255 * long sum = 0;
256 * SumValues subtasks = null; // fork subtasks
257 * for (int h = split >>> 1; h > 0; h >>>= 1)
258 * (subtasks = new SumValues(s.split(), h, subtasks)).fork();
259 * while (s.hasNext()) // directly process remaining elements
260 * sum += s.next();
261 * for (SumValues t = subtasks; t != null; t = t.nextJoin)
262 * sum += t.join(); // collect subtask results
263 * return sum;
264 * }
265 * }
266 * }</pre>
267 */
268 public static interface Spliterator<T> extends Iterator<T> {
269 /**
270 * Returns a Spliterator covering approximately half of the
271 * elements, guaranteed not to overlap with those subsequently
272 * returned by this Spliterator. After invoking this method,
273 * the current Spliterator will <em>not</em> produce any of
274 * the elements of the returned Spliterator, but the two
275 * Spliterators together will produce all of the elements that
276 * would have been produced by this Spliterator had this
277 * method not been called. The exact number of elements
278 * produced by the returned Spliterator is not guaranteed, and
279 * may be zero (i.e., with {@code hasNext()} reporting {@code
280 * false}) if this Spliterator cannot be further split.
281 *
282 * @return a Spliterator covering approximately half of the
283 * elements
284 * @throws IllegalStateException if this Spliterator has
285 * already commenced traversing elements
286 */
287 Spliterator<T> split();
288 }
289
290 /**
291 * A view of a ConcurrentHashMapV8 as a {@link Set} of keys, in
292 * which additions may optionally be enabled by mapping to a
293 * common value. This class cannot be directly instantiated. See
294 * {@link #keySet}, {@link #keySet(Object)}, {@link #newKeySet()},
295 * {@link #newKeySet(int)}.
296 *
297 * <p>The view's {@code iterator} is a "weakly consistent" iterator
298 * that will never throw {@link ConcurrentModificationException},
299 * and guarantees to traverse elements as they existed upon
300 * construction of the iterator, and may (but is not guaranteed to)
301 * reflect any modifications subsequent to construction.
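 *
 * <p>A minimal usage sketch (assuming the {@link #keySet(Object)}
 * factory described above, which enables additions by mapping new keys
 * to the given common value):
 *
 * <pre> {@code
 * ConcurrentHashMapV8<String,Integer> m = new ConcurrentHashMapV8<String,Integer>();
 * Set<String> s = m.keySet(1); // live view of m; add() maps new keys to 1
 * s.add("a");                  // in effect, m.putIfAbsent("a", 1)
 * }</pre>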
302 */
303 public static class KeySetView<K,V> extends CHMView<K,V> implements Set<K>, java.io.Serializable {
304 private static final long serialVersionUID = 7249069246763182397L;
305 private final V value;
306 KeySetView(ConcurrentHashMapV8<K, V> map, V value) { // non-public
307 super(map);
308 this.value = value;
309 }
310
311 /**
312 * Returns the map backing this view.
313 *
314 * @return the map backing this view
315 */
316 public ConcurrentHashMapV8<K,V> getMap() { return map; }
317
318 /**
319 * Returns the default mapped value for additions,
320 * or {@code null} if additions are not supported.
321 *
322 * @return the default mapped value for additions, or {@code null}
323 * if not supported.
324 */
325 public V getMappedValue() { return value; }
326
327 // implement Set API
328
329 public boolean contains(Object o) { return map.containsKey(o); }
330 public boolean remove(Object o) { return map.remove(o) != null; }
331 public Iterator<K> iterator() { return new KeyIterator<K,V>(map); }
332 public boolean add(K e) {
333 V v;
334 if ((v = value) == null)
335 throw new UnsupportedOperationException();
336 if (e == null)
337 throw new NullPointerException();
338 return map.internalPutIfAbsent(e, v) == null;
339 }
340 public boolean addAll(Collection<? extends K> c) {
341 boolean added = false;
342 V v;
343 if ((v = value) == null)
344 throw new UnsupportedOperationException();
345 for (K e : c) {
346 if (e == null)
347 throw new NullPointerException();
348 if (map.internalPutIfAbsent(e, v) == null)
349 added = true;
350 }
351 return added;
352 }
353 public boolean equals(Object o) {
354 Set<?> c;
355 return ((o instanceof Set) &&
356 ((c = (Set<?>)o) == this ||
357 (containsAll(c) && c.containsAll(this))));
358 }
359 }
360
361 /*
362 * Overview:
363 *
364 * The primary design goal of this hash table is to maintain
365 * concurrent readability (typically method get(), but also
366 * iterators and related methods) while minimizing update
367 * contention. Secondary goals are to keep space consumption about
368 * the same or better than java.util.HashMap, and to support high
369 * initial insertion rates on an empty table by many threads.
370 *
371 * Each key-value mapping is held in a Node. Because Node fields
372 * can contain special values, they are defined using plain Object
373 * types. Similarly in turn, all internal methods that use them
374 * work off Object types. And similarly, so do the internal
375 * methods of auxiliary iterator and view classes. All public
376 * generic typed methods relay in/out of these internal methods,
377 * supplying null-checks and casts as needed. This also allows
378 * many of the public methods to be factored into a smaller number
379 * of internal methods (although sadly not so for the five
380 * variants of put-related operations). The validation-based
381 * approach explained below leads to a lot of code sprawl because
382 * retry-control precludes factoring into smaller methods.
383 *
384 * The table is lazily initialized to a power-of-two size upon the
385 * first insertion. Each bin in the table normally contains a
386 * list of Nodes (most often, the list has only zero or one Node).
387 * Table accesses require volatile/atomic reads, writes, and
388 * CASes. Because there is no other way to arrange this without
389 * adding further indirections, we use intrinsics
390 * (sun.misc.Unsafe) operations. The lists of nodes within bins
391 * are always accurately traversable under volatile reads, so long
392 * as lookups check hash code and non-nullness of value before
393 * checking key equality.
394 *
395 * We use the top two bits of Node hash fields for control
396 * purposes -- they are available anyway because of addressing
397 * constraints. As explained further below, these top bits are
398 * used as follows:
399 * 00 - Normal
400 * 01 - Locked
401 * 11 - Locked and may have a thread waiting for lock
402 * 10 - Node is a forwarding node
403 *
404 * The lower 30 bits of each Node's hash field contain a
405 * transformation of the key's hash code, except for forwarding
406 * nodes, for which the lower bits are zero (and so always have
407 * hash field == MOVED).
408 *
409 * Insertion (via put or its variants) of the first node in an
410 * empty bin is performed by just CASing it to the bin. This is
411 * by far the most common case for put operations under most
412 * key/hash distributions. Other update operations (insert,
413 * delete, and replace) require locks. We do not want to waste
414 * the space required to associate a distinct lock object with
415 * each bin, so instead use the first node of a bin list itself as
416 * a lock. Blocking support for these locks relies on the builtin
417 * "synchronized" monitors. However, we also need a tryLock
418 * construction, so we overlay these by using bits of the Node
419 * hash field for lock control (see above), and so normally use
420 * builtin monitors only for blocking and signalling using
421 * wait/notifyAll constructions. See Node.tryAwaitLock.
422 *
423 * Using the first node of a list as a lock does not by itself
424 * suffice though: When a node is locked, any update must first
425 * validate that it is still the first node after locking it, and
426 * retry if not. Because new nodes are always appended to lists,
427 * once a node is first in a bin, it remains first until deleted
428 * or the bin becomes invalidated (upon resizing). However,
429 * operations that only conditionally update may inspect nodes
430 * until the point of update. This is a converse of sorts to the
431 * lazy locking technique described by Herlihy & Shavit.
432 *
433 * The main disadvantage of per-bin locks is that other update
434 * operations on other nodes in a bin list protected by the same
435 * lock can stall, for example when user equals() or mapping
436 * functions take a long time. However, statistically, under
437 * random hash codes, this is not a common problem. Ideally, the
438 * frequency of nodes in bins follows a Poisson distribution
439 * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
440 * parameter of about 0.5 on average, given the resizing threshold
441 * of 0.75, although with a large variance because of resizing
442 * granularity. Ignoring variance, the expected occurrences of
443 * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
444 * first values are:
445 *
446 * 0: 0.60653066
447 * 1: 0.30326533
448 * 2: 0.07581633
449 * 3: 0.01263606
450 * 4: 0.00157952
451 * 5: 0.00015795
452 * 6: 0.00001316
453 * 7: 0.00000094
454 * 8: 0.00000006
455 * more: less than 1 in ten million
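 *
 * (As a worked check of the formula above, for k = 2:
 * exp(-0.5) * pow(0.5, 2) / 2! = 0.60653066 * 0.25 / 2 ~= 0.0758,
 * matching the table entry.)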
456 *
457 * Lock contention probability for two threads accessing distinct
458 * elements is roughly 1 / (8 * #elements) under random hashes.
459 *
460 * Actual hash code distributions encountered in practice
461 * sometimes deviate significantly from uniform randomness. This
462 * includes the case when N > (1<<30), so some keys MUST collide.
463 * Similarly for dumb or hostile usages in which multiple keys are
464 * designed to have identical hash codes. Also, although we guard
465 * against the worst effects of this (see method spread), sets of
466 * hashes may differ only in bits that do not impact their bin
467 * index for a given power-of-two mask. So we use a secondary
468 * strategy that applies when the number of nodes in a bin exceeds
469 * a threshold, and at least one of the keys implements
470 * Comparable. These TreeBins use a balanced tree to hold nodes
471 * (a specialized form of red-black trees), bounding search time
472 * to O(log N). Each search step in a TreeBin is around twice as
473 * slow as in a regular list, but given that N cannot exceed
474 * (1<<64) (before running out of addresses) this bounds search
475 * steps, lock hold times, etc, to reasonable constants (roughly
476 * 100 nodes inspected per operation worst case) so long as keys
477 * are Comparable (which is very common -- String, Long, etc).
478 * TreeBin nodes (TreeNodes) also maintain the same "next"
479 * traversal pointers as regular nodes, so can be traversed in
480 * iterators in the same way.
481 *
482 * The table is resized when occupancy exceeds a percentage
483 * threshold (nominally, 0.75, but see below). Only a single
484 * thread performs the resize (using field "sizeCtl", to arrange
485 * exclusion), but the table otherwise remains usable for reads
486 * and updates. Resizing proceeds by transferring bins, one by
487 * one, from the table to the next table. Because we are using
488 * power-of-two expansion, the elements from each bin must either
489 * stay at same index, or move with a power of two offset. We
490 * eliminate unnecessary node creation by catching cases where old
491 * nodes can be reused because their next fields won't change. On
492 * average, only about one-sixth of them need cloning when a table
493 * doubles. The nodes they replace will be garbage collectable as
494 * soon as they are no longer referenced by any reader thread that
495 * may be in the midst of concurrently traversing table. Upon
496 * transfer, the old table bin contains only a special forwarding
497 * node (with hash field "MOVED") that contains the next table as
498 * its key. On encountering a forwarding node, access and update
499 * operations restart, using the new table.
500 *
501 * Each bin transfer requires its bin lock. However, unlike other
502 * cases, a transfer can skip a bin if it fails to acquire its
503 * lock, and revisit it later (unless it is a TreeBin). Method
504 * rebuild maintains a buffer of TRANSFER_BUFFER_SIZE bins that
505 * have been skipped because of failure to acquire a lock, and
506 * blocks only if none are available (i.e., only very rarely).
507 * The transfer operation must also ensure that all accessible
508 * bins in both the old and new table are usable by any traversal.
509 * When there are no lock acquisition failures, this is arranged
510 * simply by proceeding from the last bin (table.length - 1) up
511 * towards the first. Upon seeing a forwarding node, traversals
512 * (see class Iter) arrange to move to the new table
513 * without revisiting nodes. However, when any node is skipped
514 * during a transfer, all earlier table bins may have become
515 * visible, so are initialized with a reverse-forwarding node back
516 * to the old table until the new ones are established. (This
517 * sometimes requires transiently locking a forwarding node, which
518 * is possible under the above encoding.) These more expensive
519 * mechanics trigger only when necessary.
520 *
521 * The traversal scheme also applies to partial traversals of
522 * ranges of bins (via an alternate Traverser constructor)
523 * to support partitioned aggregate operations. Also, read-only
524 * operations give up if ever forwarded to a null table, which
525 * provides support for shutdown-style clearing, which is also not
526 * currently implemented.
527 *
528 * Lazy table initialization minimizes footprint until first use,
529 * and also avoids resizings when the first operation is from a
530 * putAll, constructor with map argument, or deserialization.
531 * These cases attempt to override the initial capacity settings,
532 * but harmlessly fail to take effect in cases of races.
533 *
534 * The element count is maintained using a LongAdder, which avoids
535 * contention on updates but can encounter cache thrashing if read
536 * too frequently during concurrent access. To avoid reading so
537 * often, resizing is attempted either when a bin lock is
538 * contended, or upon adding to a bin already holding two or more
539 * nodes (checked before adding in the xIfAbsent methods, after
540 * adding in others). Under uniform hash distributions, the
541 * probability of this occurring at threshold is around 13%,
542 * meaning that only about 1 in 8 puts check threshold (and after
543 * resizing, many fewer do so). But this approximation has high
544 * variance for small table sizes, so we check on any collision
545 * for sizes <= 64. The bulk putAll operation further reduces
546 * contention by only committing count updates upon these size
547 * checks.
548 *
549 * Maintaining API and serialization compatibility with previous
550 * versions of this class introduces several oddities. Mainly: We
551 * leave untouched but unused constructor arguments referring to
552 * concurrencyLevel. We accept a loadFactor constructor argument,
553 * but apply it only to initial table capacity (which is the only
554 * time that we can guarantee to honor it.) We also declare an
555 * unused "Segment" class that is instantiated in minimal form
556 * only when serializing.
557 */
558
559 /* ---------------- Constants -------------- */
560
561 /**
562 * The largest possible table capacity. This value must be
563 * exactly 1<<30 to stay within Java array allocation and indexing
564 * bounds for power of two table sizes, and is further required
565 * because the top two bits of 32bit hash fields are used for
566 * control purposes.
567 */
568 private static final int MAXIMUM_CAPACITY = 1 << 30;
569
570 /**
571 * The default initial table capacity. Must be a power of 2
572 * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
573 */
574 private static final int DEFAULT_CAPACITY = 16;
575
576 /**
577 * The largest possible (non-power of two) array size.
578 * Needed by toArray and related methods.
579 */
580 static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
581
582 /**
583 * The default concurrency level for this table. Unused but
584 * defined for compatibility with previous versions of this class.
585 */
586 private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
587
588 /**
589 * The load factor for this table. Overrides of this value in
590 * constructors affect only the initial table capacity. The
591 * actual floating point value isn't normally used -- it is
592 * simpler to use expressions such as {@code n - (n >>> 2)} for
593 * the associated resizing threshold.
594 */
595 private static final float LOAD_FACTOR = 0.75f;
596
597 /**
598 * The buffer size for skipped bins during transfers. The
599 * value is arbitrary but should be large enough to avoid
600 * most locking stalls during resizes.
601 */
602 private static final int TRANSFER_BUFFER_SIZE = 32;
603
604 /**
605 * The bin count threshold for using a tree rather than list for a
606 * bin. The value reflects the approximate break-even point for
607 * using tree-based operations.
608 */
609 private static final int TREE_THRESHOLD = 8;
610
611 /*
612 * Encodings for special uses of Node hash fields. See above for
613 * explanation.
614 */
615 static final int MOVED = 0x80000000; // hash field for forwarding nodes
616 static final int LOCKED = 0x40000000; // set/tested only as a bit
617 static final int WAITING = 0xc0000000; // both bits set/tested together
618 static final int HASH_BITS = 0x3fffffff; // usable bits of normal node hash
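    // For illustration (a sketch restating the encoding above): a normal
    // node's hash is (spread(key.hashCode()) & HASH_BITS); locking a bin
    // head CASes its hash from h to (h | LOCKED); a forwarding node's
    // hash is exactly MOVED (top bits 10, low 30 bits zero).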
619
620 /* ---------------- Fields -------------- */
621
622 /**
623 * The array of bins. Lazily initialized upon first insertion.
624 * Size is always a power of two. Accessed directly by iterators.
625 */
626 transient volatile Node[] table;
627
628 /**
629 * The counter maintaining number of elements.
630 */
631 private transient final LongAdder counter;
632
633 /**
634 * Table initialization and resizing control. When negative, the
635 * table is being initialized or resized. Otherwise, when table is
636 * null, holds the initial table size to use upon creation, or 0
637 * for default. After initialization, holds the next element count
638 * value upon which to resize the table.
639 */
640 private transient volatile int sizeCtl;
641
642 // views
643 private transient KeySetView<K,V> keySet;
644 private transient Values<K,V> values;
645 private transient EntrySet<K,V> entrySet;
646
647 /** For serialization compatibility. Null unless serialized; see below */
648 private Segment<K,V>[] segments;
649
650 /* ---------------- Table element access -------------- */
651
652 /*
653 * Volatile access methods are used for table elements as well as
654 * elements of in-progress next table while resizing. Uses are
655 * null checked by callers, and implicitly bounds-checked, relying
656 * on the invariants that tab arrays have non-zero size, and all
657 * indices are masked with (tab.length - 1) which is never
658 * negative and always less than length. Note that, to be correct
659 * wrt arbitrary concurrency errors by users, bounds checks must
660 * operate on local variables, which accounts for some odd-looking
661 * inline assignments below.
662 */
663
664 static final Node tabAt(Node[] tab, int i) { // used by Iter
665 return (Node)UNSAFE.getObjectVolatile(tab, ((long)i<<ASHIFT)+ABASE);
666 }
667
668 private static final boolean casTabAt(Node[] tab, int i, Node c, Node v) {
669 return UNSAFE.compareAndSwapObject(tab, ((long)i<<ASHIFT)+ABASE, c, v);
670 }
671
672 private static final void setTabAt(Node[] tab, int i, Node v) {
673 UNSAFE.putObjectVolatile(tab, ((long)i<<ASHIFT)+ABASE, v);
674 }
675
676 /* ---------------- Nodes -------------- */
677
678 /**
679 * Key-value entry. Note that this is never exported out as a
680 * user-visible Map.Entry (see MapEntry below). Nodes with a hash
681 * field of MOVED are special, and do not contain user keys or
682 * values. Otherwise, keys are never null, and null val fields
683 * indicate that a node is in the process of being deleted or
684 * created. For purposes of read-only access, a key may be read
685 * before a val, but can only be used after checking val to be
686 * non-null.
687 */
688 static class Node {
689 volatile int hash;
690 final Object key;
691 volatile Object val;
692 volatile Node next;
693
694 Node(int hash, Object key, Object val, Node next) {
695 this.hash = hash;
696 this.key = key;
697 this.val = val;
698 this.next = next;
699 }
700
701 /** CompareAndSet the hash field */
702 final boolean casHash(int cmp, int val) {
703 return UNSAFE.compareAndSwapInt(this, hashOffset, cmp, val);
704 }
705
706 /** The number of spins before blocking for a lock */
707 static final int MAX_SPINS =
708 Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1;
709
710 /**
711 * Spins a while if LOCKED bit set and this node is the first
712 * of its bin, and then sets WAITING bits on hash field and
713 * blocks (once) if they are still set. It is OK for this
714 * method to return even if lock is not available upon exit,
715 * which enables these simple single-wait mechanics.
716 *
717 * The corresponding signalling operation is performed within
718 * callers: Upon detecting that WAITING has been set when
719 * unlocking lock (via a failed CAS from non-waiting LOCKED
720 * state), unlockers acquire the sync lock and perform a
721 * notifyAll.
722 *
723 * The initial sanity check on tab and bounds is not currently
724 * necessary in the only usages of this method, but enables
725 * use in other future contexts.
726 */
727 final void tryAwaitLock(Node[] tab, int i) {
728 if (tab != null && i >= 0 && i < tab.length) { // sanity check
729 int r = ThreadLocalRandom.current().nextInt(); // randomize spins
730 int spins = MAX_SPINS, h;
731 while (tabAt(tab, i) == this && ((h = hash) & LOCKED) != 0) {
732 if (spins >= 0) {
733 r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift
734 if (r >= 0 && --spins == 0)
735 Thread.yield(); // yield before block
736 }
737 else if (casHash(h, h | WAITING)) {
738 synchronized (this) {
739 if (tabAt(tab, i) == this &&
740 (hash & WAITING) == WAITING) {
741 try {
742 wait();
743 } catch (InterruptedException ie) {
744 Thread.currentThread().interrupt();
745 }
746 }
747 else
748 notifyAll(); // possibly won race vs signaller
749 }
750 break;
751 }
752 }
753 }
754 }
755
756 // Unsafe mechanics for casHash
757 private static final sun.misc.Unsafe UNSAFE;
758 private static final long hashOffset;
759
760 static {
761 try {
762 UNSAFE = getUnsafe();
763 Class<?> k = Node.class;
764 hashOffset = UNSAFE.objectFieldOffset
765 (k.getDeclaredField("hash"));
766 } catch (Exception e) {
767 throw new Error(e);
768 }
769 }
770 }
771
772 /* ---------------- TreeBins -------------- */
773
774 /**
775 * Nodes for use in TreeBins
776 */
777 static final class TreeNode extends Node {
778 TreeNode parent; // red-black tree links
779 TreeNode left;
780 TreeNode right;
781 TreeNode prev; // needed to unlink next upon deletion
782 boolean red;
783
784 TreeNode(int hash, Object key, Object val, Node next, TreeNode parent) {
785 super(hash, key, val, next);
786 this.parent = parent;
787 }
788 }
789
790 /**
791 * A specialized form of red-black tree for use in bins
792 * whose size exceeds a threshold.
793 *
794 * TreeBins use a special form of comparison for search and
795 * related operations (which is the main reason we cannot use
796 * existing collections such as TreeMaps). TreeBins contain
797 * Comparable elements, but may contain others, as well as
798 * elements that are Comparable but not necessarily Comparable<T>
799 * for the same T, so we cannot invoke compareTo among them. To
800 * handle this, the tree is ordered primarily by hash value, then
801 * by getClass().getName() order, and then by Comparator order
802 * among elements of the same class. On lookup at a node, if
803 * elements are not comparable or compare as 0, both left and
804 * right children may need to be searched in the case of tied hash
805 * values. (This corresponds to the full list search that would be
806 * necessary if all elements were non-Comparable and had tied
807 * hashes.) The red-black balancing code is updated from
808 * pre-jdk-collections
809 * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
810 * based in turn on Cormen, Leiserson, and Rivest "Introduction to
811 * Algorithms" (CLR).
812 *
813 * TreeBins also maintain a separate locking discipline from
814 * regular bins. Because they are forwarded via special MOVED
815 * nodes at bin heads (which can never change once established),
816 * we cannot use those nodes as locks. Instead, TreeBin
817 * extends AbstractQueuedSynchronizer to support a simple form of
818 * read-write lock. For update operations and table validation,
819 * the exclusive form of lock behaves in the same way as bin-head
820 * locks. However, lookups use shared read-lock mechanics to allow
821 * multiple readers in the absence of writers. Additionally,
822 * these lookups do not ever block: While the lock is not
823 * available, they proceed along the slow traversal path (via
824 * next-pointers) until the lock becomes available or the list is
825 * exhausted, whichever comes first. (These cases are not fast,
826 * but maximize aggregate expected throughput.) The AQS mechanics
827 * for doing this are straightforward. The lock state is held as
828 * AQS getState(). Read counts are negative; the write count (1)
829 * is positive. There are no signalling preferences among readers
830 * and writers. Since we don't need to export full Lock API, we
831 * just override the minimal AQS methods and use them directly.
832 */
833 static final class TreeBin extends AbstractQueuedSynchronizer {
834 private static final long serialVersionUID = 2249069246763182397L;
835 transient TreeNode root; // root of tree
836 transient TreeNode first; // head of next-pointer list
837
838 /* AQS overrides */
839 public final boolean isHeldExclusively() { return getState() > 0; }
840 public final boolean tryAcquire(int ignore) {
841 if (compareAndSetState(0, 1)) {
842 setExclusiveOwnerThread(Thread.currentThread());
843 return true;
844 }
845 return false;
846 }
847 public final boolean tryRelease(int ignore) {
848 setExclusiveOwnerThread(null);
849 setState(0);
850 return true;
851 }
852 public final int tryAcquireShared(int ignore) {
853 for (int c;;) {
854 if ((c = getState()) > 0)
855 return -1;
856 if (compareAndSetState(c, c - 1))
857 return 1;
858 }
859 }
860 public final boolean tryReleaseShared(int ignore) {
861 int c;
862 do {} while (!compareAndSetState(c = getState(), c + 1));
863 return c == -1;
864 }
865
866 /** From CLR */
867 private void rotateLeft(TreeNode p) {
868 if (p != null) {
869 TreeNode r = p.right, pp, rl;
870 if ((rl = p.right = r.left) != null)
871 rl.parent = p;
872 if ((pp = r.parent = p.parent) == null)
873 root = r;
874 else if (pp.left == p)
875 pp.left = r;
876 else
877 pp.right = r;
878 r.left = p;
879 p.parent = r;
880 }
881 }
882
883 /** From CLR */
884 private void rotateRight(TreeNode p) {
885 if (p != null) {
886 TreeNode l = p.left, pp, lr;
887 if ((lr = p.left = l.right) != null)
888 lr.parent = p;
889 if ((pp = l.parent = p.parent) == null)
890 root = l;
891 else if (pp.right == p)
892 pp.right = l;
893 else
894 pp.left = l;
895 l.right = p;
896 p.parent = l;
897 }
898 }
899
900 /**
901 * Returns the TreeNode (or null if not found) for the given key
902 * starting at given root.
903 */
904 @SuppressWarnings("unchecked") final TreeNode getTreeNode
905 (int h, Object k, TreeNode p) {
906 Class<?> c = k.getClass();
907 while (p != null) {
908 int dir, ph; Object pk; Class<?> pc;
909 if ((ph = p.hash) == h) {
910 if ((pk = p.key) == k || k.equals(pk))
911 return p;
912 if (c != (pc = pk.getClass()) ||
913 !(k instanceof Comparable) ||
914 (dir = ((Comparable)k).compareTo((Comparable)pk)) == 0) {
915 dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName());
916 TreeNode r = null, s = null, pl, pr;
917 if (dir >= 0) {
918 if ((pl = p.left) != null && h <= pl.hash)
919 s = pl;
920 }
921 else if ((pr = p.right) != null && h >= pr.hash)
922 s = pr;
923 if (s != null && (r = getTreeNode(h, k, s)) != null)
924 return r;
925 }
926 }
927 else
928 dir = (h < ph) ? -1 : 1;
929 p = (dir > 0) ? p.right : p.left;
930 }
931 return null;
932 }
933
934 /**
935 * Wrapper for getTreeNode used by CHM.get. Tries to obtain
936 * read-lock to call getTreeNode, but during failure to get
937 * lock, searches along next links.
938 */
939 final Object getValue(int h, Object k) {
940 Node r = null;
941 int c = getState(); // Must read lock state first
942 for (Node e = first; e != null; e = e.next) {
943 if (c <= 0 && compareAndSetState(c, c - 1)) {
944 try {
945 r = getTreeNode(h, k, root);
946 } finally {
947 releaseShared(0);
948 }
949 break;
950 }
951 else if ((e.hash & HASH_BITS) == h && k.equals(e.key)) {
952 r = e;
953 break;
954 }
955 else
956 c = getState();
957 }
958 return r == null ? null : r.val;
959 }
960
961 /**
962 * Finds or adds a node.
963 * @return null if added
964 */
965 @SuppressWarnings("unchecked") final TreeNode putTreeNode
966 (int h, Object k, Object v) {
967 Class<?> c = k.getClass();
968 TreeNode pp = root, p = null;
969 int dir = 0;
970 while (pp != null) { // find existing node or leaf to insert at
971 int ph; Object pk; Class<?> pc;
972 p = pp;
973 if ((ph = p.hash) == h) {
974 if ((pk = p.key) == k || k.equals(pk))
975 return p;
976 if (c != (pc = pk.getClass()) ||
977 !(k instanceof Comparable) ||
978 (dir = ((Comparable)k).compareTo((Comparable)pk)) == 0) {
979 dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName());
980 TreeNode r = null, s = null, pl, pr;
981 if (dir >= 0) {
982 if ((pl = p.left) != null && h <= pl.hash)
983 s = pl;
984 }
985 else if ((pr = p.right) != null && h >= pr.hash)
986 s = pr;
987 if (s != null && (r = getTreeNode(h, k, s)) != null)
988 return r;
989 }
990 }
991 else
992 dir = (h < ph) ? -1 : 1;
993 pp = (dir > 0) ? p.right : p.left;
994 }
995
996 TreeNode f = first;
997 TreeNode x = first = new TreeNode(h, k, v, f, p);
998 if (p == null)
999 root = x;
1000 else { // attach and rebalance; adapted from CLR
1001 TreeNode xp, xpp;
1002 if (f != null)
1003 f.prev = x;
1004 if (dir <= 0)
1005 p.left = x;
1006 else
1007 p.right = x;
1008 x.red = true;
1009 while (x != null && (xp = x.parent) != null && xp.red &&
1010 (xpp = xp.parent) != null) {
1011 TreeNode xppl = xpp.left;
1012 if (xp == xppl) {
1013 TreeNode y = xpp.right;
1014 if (y != null && y.red) {
1015 y.red = false;
1016 xp.red = false;
1017 xpp.red = true;
1018 x = xpp;
1019 }
1020 else {
1021 if (x == xp.right) {
1022 rotateLeft(x = xp);
1023 xpp = (xp = x.parent) == null ? null : xp.parent;
1024 }
1025 if (xp != null) {
1026 xp.red = false;
1027 if (xpp != null) {
1028 xpp.red = true;
1029 rotateRight(xpp);
1030 }
1031 }
1032 }
1033 }
1034 else {
1035 TreeNode y = xppl;
1036 if (y != null && y.red) {
1037 y.red = false;
1038 xp.red = false;
1039 xpp.red = true;
1040 x = xpp;
1041 }
1042 else {
1043 if (x == xp.left) {
1044 rotateRight(x = xp);
1045 xpp = (xp = x.parent) == null ? null : xp.parent;
1046 }
1047 if (xp != null) {
1048 xp.red = false;
1049 if (xpp != null) {
1050 xpp.red = true;
1051 rotateLeft(xpp);
1052 }
1053 }
1054 }
1055 }
1056 }
1057 TreeNode r = root;
1058 if (r != null && r.red)
1059 r.red = false;
1060 }
1061 return null;
1062 }
1063
1064 /**
1065 * Removes the given node, that must be present before this
1066 * call. This is messier than typical red-black deletion code
1067 * because we cannot swap the contents of an interior node
1068 * with a leaf successor that is pinned by "next" pointers
1069 * that are accessible independently of lock. So instead we
1070 * swap the tree linkages.
1071 */
1072 final void deleteTreeNode(TreeNode p) {
1073 TreeNode next = (TreeNode)p.next; // unlink traversal pointers
1074 TreeNode pred = p.prev;
1075 if (pred == null)
1076 first = next;
1077 else
1078 pred.next = next;
1079 if (next != null)
1080 next.prev = pred;
1081 TreeNode replacement;
1082 TreeNode pl = p.left;
1083 TreeNode pr = p.right;
1084 if (pl != null && pr != null) {
1085 TreeNode s = pr, sl;
1086 while ((sl = s.left) != null) // find successor
1087 s = sl;
1088 boolean c = s.red; s.red = p.red; p.red = c; // swap colors
1089 TreeNode sr = s.right;
1090 TreeNode pp = p.parent;
1091 if (s == pr) { // p was s's direct parent
1092 p.parent = s;
1093 s.right = p;
1094 }
1095 else {
1096 TreeNode sp = s.parent;
1097 if ((p.parent = sp) != null) {
1098 if (s == sp.left)
1099 sp.left = p;
1100 else
1101 sp.right = p;
1102 }
1103 if ((s.right = pr) != null)
1104 pr.parent = s;
1105 }
1106 p.left = null;
1107 if ((p.right = sr) != null)
1108 sr.parent = p;
1109 if ((s.left = pl) != null)
1110 pl.parent = s;
1111 if ((s.parent = pp) == null)
1112 root = s;
1113 else if (p == pp.left)
1114 pp.left = s;
1115 else
1116 pp.right = s;
1117 replacement = sr;
1118 }
1119 else
1120 replacement = (pl != null) ? pl : pr;
1121 TreeNode pp = p.parent;
1122 if (replacement == null) {
1123 if (pp == null) {
1124 root = null;
1125 return;
1126 }
1127 replacement = p;
1128 }
1129 else {
1130 replacement.parent = pp;
1131 if (pp == null)
1132 root = replacement;
1133 else if (p == pp.left)
1134 pp.left = replacement;
1135 else
1136 pp.right = replacement;
1137 p.left = p.right = p.parent = null;
1138 }
1139 if (!p.red) { // rebalance, from CLR
1140 TreeNode x = replacement;
1141 while (x != null) {
1142 TreeNode xp, xpl;
1143 if (x.red || (xp = x.parent) == null) {
1144 x.red = false;
1145 break;
1146 }
1147 if (x == (xpl = xp.left)) {
1148 TreeNode sib = xp.right;
1149 if (sib != null && sib.red) {
1150 sib.red = false;
1151 xp.red = true;
1152 rotateLeft(xp);
1153 sib = (xp = x.parent) == null ? null : xp.right;
1154 }
1155 if (sib == null)
1156 x = xp;
1157 else {
1158 TreeNode sl = sib.left, sr = sib.right;
1159 if ((sr == null || !sr.red) &&
1160 (sl == null || !sl.red)) {
1161 sib.red = true;
1162 x = xp;
1163 }
1164 else {
1165 if (sr == null || !sr.red) {
1166 if (sl != null)
1167 sl.red = false;
1168 sib.red = true;
1169 rotateRight(sib);
1170 sib = (xp = x.parent) == null ? null : xp.right;
1171 }
1172 if (sib != null) {
1173 sib.red = (xp == null) ? false : xp.red;
1174 if ((sr = sib.right) != null)
1175 sr.red = false;
1176 }
1177 if (xp != null) {
1178 xp.red = false;
1179 rotateLeft(xp);
1180 }
1181 x = root;
1182 }
1183 }
1184 }
1185 else { // symmetric
1186 TreeNode sib = xpl;
1187 if (sib != null && sib.red) {
1188 sib.red = false;
1189 xp.red = true;
1190 rotateRight(xp);
1191 sib = (xp = x.parent) == null ? null : xp.left;
1192 }
1193 if (sib == null)
1194 x = xp;
1195 else {
1196 TreeNode sl = sib.left, sr = sib.right;
1197 if ((sl == null || !sl.red) &&
1198 (sr == null || !sr.red)) {
1199 sib.red = true;
1200 x = xp;
1201 }
1202 else {
1203 if (sl == null || !sl.red) {
1204 if (sr != null)
1205 sr.red = false;
1206 sib.red = true;
1207 rotateLeft(sib);
1208 sib = (xp = x.parent) == null ? null : xp.left;
1209 }
1210 if (sib != null) {
1211 sib.red = (xp == null) ? false : xp.red;
1212 if ((sl = sib.left) != null)
1213 sl.red = false;
1214 }
1215 if (xp != null) {
1216 xp.red = false;
1217 rotateRight(xp);
1218 }
1219 x = root;
1220 }
1221 }
1222 }
1223 }
1224 }
1225 if (p == replacement && (pp = p.parent) != null) {
1226 if (p == pp.left) // detach pointers
1227 pp.left = null;
1228 else if (p == pp.right)
1229 pp.right = null;
1230 p.parent = null;
1231 }
1232 }
1233 }
1234
1235 /* ---------------- Collision reduction methods -------------- */
1236
1237 /**
1238 * Spreads higher bits to lower, and also forces top 2 bits to 0.
1239 * Because the table uses power-of-two masking, sets of hashes
1240 * that vary only in bits above the current mask will always
1241 * collide. (Among known examples are sets of Float keys holding
1242 * consecutive whole numbers in small tables.) To counter this,
1243 * we apply a transform that spreads the impact of higher bits
1244 * downward. There is a tradeoff between speed, utility, and
1245 * quality of bit-spreading. Because many common sets of hashes
1246 * are already reasonably distributed across bits (so don't benefit
1247 * from spreading), and because we use trees to handle large sets
1248 * of collisions in bins, we don't need excessively high quality.
1249 */
1250 private static final int spread(int h) {
1251 h ^= (h >>> 18) ^ (h >>> 12);
1252 return (h ^ (h >>> 10)) & HASH_BITS;
1253 }
1254
1255 /**
1256 * Replaces a list bin with a tree bin. Call only when locked.
1257 * Fails to replace if the given key is non-comparable or table
1258 * is, or needs, resizing.
1259 */
1260 private final void replaceWithTreeBin(Node[] tab, int index, Object key) {
1261 if ((key instanceof Comparable) &&
1262 (tab.length >= MAXIMUM_CAPACITY || counter.sum() < (long)sizeCtl)) {
1263 TreeBin t = new TreeBin();
1264 for (Node e = tabAt(tab, index); e != null; e = e.next)
1265 t.putTreeNode(e.hash & HASH_BITS, e.key, e.val);
1266 setTabAt(tab, index, new Node(MOVED, t, null, null));
1267 }
1268 }
1269
1270 /* ---------------- Internal access and update methods -------------- */
1271
1272 /** Implementation for get and containsKey */
1273 private final Object internalGet(Object k) {
1274 int h = spread(k.hashCode());
1275 retry: for (Node[] tab = table; tab != null;) {
1276 Node e, p; Object ek, ev; int eh; // locals to read fields once
1277 for (e = tabAt(tab, (tab.length - 1) & h); e != null; e = e.next) {
1278 if ((eh = e.hash) == MOVED) {
1279 if ((ek = e.key) instanceof TreeBin) // search TreeBin
1280 return ((TreeBin)ek).getValue(h, k);
1281 else { // restart with new table
1282 tab = (Node[])ek;
1283 continue retry;
1284 }
1285 }
1286 else if ((eh & HASH_BITS) == h && (ev = e.val) != null &&
1287 ((ek = e.key) == k || k.equals(ek)))
1288 return ev;
1289 }
1290 break;
1291 }
1292 return null;
1293 }
1294
1295 /**
1296 * Implementation for the four public remove/replace methods:
1297 * Replaces node value with v, conditional upon match of cv if
1298 * non-null. If resulting value is null, delete.
1299 */
1300 private final Object internalReplace(Object k, Object v, Object cv) {
1301 int h = spread(k.hashCode());
1302 Object oldVal = null;
1303 for (Node[] tab = table;;) {
1304 Node f; int i, fh; Object fk;
1305 if (tab == null ||
1306 (f = tabAt(tab, i = (tab.length - 1) & h)) == null)
1307 break;
1308 else if ((fh = f.hash) == MOVED) {
1309 if ((fk = f.key) instanceof TreeBin) {
1310 TreeBin t = (TreeBin)fk;
1311 boolean validated = false;
1312 boolean deleted = false;
1313 t.acquire(0);
1314 try {
1315 if (tabAt(tab, i) == f) {
1316 validated = true;
1317 TreeNode p = t.getTreeNode(h, k, t.root);
1318 if (p != null) {
1319 Object pv = p.val;
1320 if (cv == null || cv == pv || cv.equals(pv)) {
1321 oldVal = pv;
1322 if ((p.val = v) == null) {
1323 deleted = true;
1324 t.deleteTreeNode(p);
1325 }
1326 }
1327 }
1328 }
1329 } finally {
1330 t.release(0);
1331 }
1332 if (validated) {
1333 if (deleted)
1334 counter.add(-1L);
1335 break;
1336 }
1337 }
1338 else
1339 tab = (Node[])fk;
1340 }
1341 else if ((fh & HASH_BITS) != h && f.next == null) // precheck
1342 break; // rules out possible existence
1343 else if ((fh & LOCKED) != 0) {
1344 checkForResize(); // try resizing if can't get lock
1345 f.tryAwaitLock(tab, i);
1346 }
1347 else if (f.casHash(fh, fh | LOCKED)) {
1348 boolean validated = false;
1349 boolean deleted = false;
1350 try {
1351 if (tabAt(tab, i) == f) {
1352 validated = true;
1353 for (Node e = f, pred = null;;) {
1354 Object ek, ev;
1355 if ((e.hash & HASH_BITS) == h &&
1356 ((ev = e.val) != null) &&
1357 ((ek = e.key) == k || k.equals(ek))) {
1358 if (cv == null || cv == ev || cv.equals(ev)) {
1359 oldVal = ev;
1360 if ((e.val = v) == null) {
1361 deleted = true;
1362 Node en = e.next;
1363 if (pred != null)
1364 pred.next = en;
1365 else
1366 setTabAt(tab, i, en);
1367 }
1368 }
1369 break;
1370 }
1371 pred = e;
1372 if ((e = e.next) == null)
1373 break;
1374 }
1375 }
1376 } finally {
1377 if (!f.casHash(fh | LOCKED, fh)) {
1378 f.hash = fh;
1379 synchronized (f) { f.notifyAll(); };
1380 }
1381 }
1382 if (validated) {
1383 if (deleted)
1384 counter.add(-1L);
1385 break;
1386 }
1387 }
1388 }
1389 return oldVal;
1390 }
1391
1392 /*
1393 * Internal versions of the six insertion methods, each a
1394 * little more complicated than the last. All have
1395 * the same basic structure as the first (internalPut):
1396 * 1. If table uninitialized, create
1397 * 2. If bin empty, try to CAS new node
1398 * 3. If bin stale, use new table
1399 * 4. if bin converted to TreeBin, validate and relay to TreeBin methods
1400 * 5. Lock and validate; if valid, scan and add or update
1401 *
1402 * The others interweave other checks and/or alternative actions:
1403 * * Plain put checks for and performs resize after insertion.
1404 * * putIfAbsent prescans for mapping without lock (and fails to add
1405 * if present), which also makes pre-emptive resize checks worthwhile.
1406 * * computeIfAbsent extends form used in putIfAbsent with additional
1407 * mechanics to deal with calls, potential exceptions, and null
1408 * returns from the function call.
1409 * * compute uses the same function-call mechanics, but without
1410 * the prescans
1411 * * merge acts as putIfAbsent in the absent case, but invokes the
1412 * update function if present
1413 * * putAll attempts to pre-allocate enough table space
1414 * and more lazily performs count updates and checks.
1415 *
1416 * Someday when details settle down a bit more, it might be worth
1417 * some factoring to reduce sprawl.
1418 */
1419
1420 /** Implementation for put */
1421 private final Object internalPut(Object k, Object v) {
1422 int h = spread(k.hashCode());
1423 int count = 0;
1424 for (Node[] tab = table;;) {
1425 int i; Node f; int fh; Object fk;
1426 if (tab == null)
1427 tab = initTable();
1428 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1429 if (casTabAt(tab, i, null, new Node(h, k, v, null)))
1430 break; // no lock when adding to empty bin
1431 }
1432 else if ((fh = f.hash) == MOVED) {
1433 if ((fk = f.key) instanceof TreeBin) {
1434 TreeBin t = (TreeBin)fk;
1435 Object oldVal = null;
1436 t.acquire(0);
1437 try {
1438 if (tabAt(tab, i) == f) {
1439 count = 2;
1440 TreeNode p = t.putTreeNode(h, k, v);
1441 if (p != null) {
1442 oldVal = p.val;
1443 p.val = v;
1444 }
1445 }
1446 } finally {
1447 t.release(0);
1448 }
1449 if (count != 0) {
1450 if (oldVal != null)
1451 return oldVal;
1452 break;
1453 }
1454 }
1455 else
1456 tab = (Node[])fk;
1457 }
1458 else if ((fh & LOCKED) != 0) {
1459 checkForResize();
1460 f.tryAwaitLock(tab, i);
1461 }
1462 else if (f.casHash(fh, fh | LOCKED)) {
1463 Object oldVal = null;
1464 try { // needed in case equals() throws
1465 if (tabAt(tab, i) == f) {
1466 count = 1;
1467 for (Node e = f;; ++count) {
1468 Object ek, ev;
1469 if ((e.hash & HASH_BITS) == h &&
1470 (ev = e.val) != null &&
1471 ((ek = e.key) == k || k.equals(ek))) {
1472 oldVal = ev;
1473 e.val = v;
1474 break;
1475 }
1476 Node last = e;
1477 if ((e = e.next) == null) {
1478 last.next = new Node(h, k, v, null);
1479 if (count >= TREE_THRESHOLD)
1480 replaceWithTreeBin(tab, i, k);
1481 break;
1482 }
1483 }
1484 }
1485 } finally { // unlock and signal if needed
1486 if (!f.casHash(fh | LOCKED, fh)) {
1487 f.hash = fh;
1488 synchronized (f) { f.notifyAll(); };
1489 }
1490 }
1491 if (count != 0) {
1492 if (oldVal != null)
1493 return oldVal;
1494 if (tab.length <= 64)
1495 count = 2;
1496 break;
1497 }
1498 }
1499 }
1500 counter.add(1L);
1501 if (count > 1)
1502 checkForResize();
1503 return null;
1504 }
1505
1506 /** Implementation for putIfAbsent */
1507 private final Object internalPutIfAbsent(Object k, Object v) {
1508 int h = spread(k.hashCode());
1509 int count = 0;
1510 for (Node[] tab = table;;) {
1511 int i; Node f; int fh; Object fk, fv;
1512 if (tab == null)
1513 tab = initTable();
1514 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1515 if (casTabAt(tab, i, null, new Node(h, k, v, null)))
1516 break;
1517 }
1518 else if ((fh = f.hash) == MOVED) {
1519 if ((fk = f.key) instanceof TreeBin) {
1520 TreeBin t = (TreeBin)fk;
1521 Object oldVal = null;
1522 t.acquire(0);
1523 try {
1524 if (tabAt(tab, i) == f) {
1525 count = 2;
1526 TreeNode p = t.putTreeNode(h, k, v);
1527 if (p != null)
1528 oldVal = p.val;
1529 }
1530 } finally {
1531 t.release(0);
1532 }
1533 if (count != 0) {
1534 if (oldVal != null)
1535 return oldVal;
1536 break;
1537 }
1538 }
1539 else
1540 tab = (Node[])fk;
1541 }
1542 else if ((fh & HASH_BITS) == h && (fv = f.val) != null &&
1543 ((fk = f.key) == k || k.equals(fk)))
1544 return fv;
1545 else {
1546 Node g = f.next;
1547 if (g != null) { // at least 2 nodes -- search and maybe resize
1548 for (Node e = g;;) {
1549 Object ek, ev;
1550 if ((e.hash & HASH_BITS) == h && (ev = e.val) != null &&
1551 ((ek = e.key) == k || k.equals(ek)))
1552 return ev;
1553 if ((e = e.next) == null) {
1554 checkForResize();
1555 break;
1556 }
1557 }
1558 }
1559 if (((fh = f.hash) & LOCKED) != 0) {
1560 checkForResize();
1561 f.tryAwaitLock(tab, i);
1562 }
1563 else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) {
1564 Object oldVal = null;
1565 try {
1566 if (tabAt(tab, i) == f) {
1567 count = 1;
1568 for (Node e = f;; ++count) {
1569 Object ek, ev;
1570 if ((e.hash & HASH_BITS) == h &&
1571 (ev = e.val) != null &&
1572 ((ek = e.key) == k || k.equals(ek))) {
1573 oldVal = ev;
1574 break;
1575 }
1576 Node last = e;
1577 if ((e = e.next) == null) {
1578 last.next = new Node(h, k, v, null);
1579 if (count >= TREE_THRESHOLD)
1580 replaceWithTreeBin(tab, i, k);
1581 break;
1582 }
1583 }
1584 }
1585 } finally {
1586 if (!f.casHash(fh | LOCKED, fh)) {
1587 f.hash = fh;
1588 synchronized (f) { f.notifyAll(); };
1589 }
1590 }
1591 if (count != 0) {
1592 if (oldVal != null)
1593 return oldVal;
1594 if (tab.length <= 64)
1595 count = 2;
1596 break;
1597 }
1598 }
1599 }
1600 }
1601 counter.add(1L);
1602 if (count > 1)
1603 checkForResize();
1604 return null;
1605 }
1606
1607 /** Implementation for computeIfAbsent */
1608 private final Object internalComputeIfAbsent(K k,
1609 Fun<? super K, ?> mf) {
1610 int h = spread(k.hashCode());
1611 Object val = null;
1612 int count = 0;
1613 for (Node[] tab = table;;) {
1614 Node f; int i, fh; Object fk, fv;
1615 if (tab == null)
1616 tab = initTable();
1617 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1618 Node node = new Node(fh = h | LOCKED, k, null, null);
1619 if (casTabAt(tab, i, null, node)) {
1620 count = 1;
1621 try {
1622 if ((val = mf.apply(k)) != null)
1623 node.val = val;
1624 } finally {
1625 if (val == null)
1626 setTabAt(tab, i, null);
1627 if (!node.casHash(fh, h)) {
1628 node.hash = h;
1629 synchronized (node) { node.notifyAll(); };
1630 }
1631 }
1632 }
1633 if (count != 0)
1634 break;
1635 }
1636 else if ((fh = f.hash) == MOVED) {
1637 if ((fk = f.key) instanceof TreeBin) {
1638 TreeBin t = (TreeBin)fk;
1639 boolean added = false;
1640 t.acquire(0);
1641 try {
1642 if (tabAt(tab, i) == f) {
1643 count = 1;
1644 TreeNode p = t.getTreeNode(h, k, t.root);
1645 if (p != null)
1646 val = p.val;
1647 else if ((val = mf.apply(k)) != null) {
1648 added = true;
1649 count = 2;
1650 t.putTreeNode(h, k, val);
1651 }
1652 }
1653 } finally {
1654 t.release(0);
1655 }
1656 if (count != 0) {
1657 if (!added)
1658 return val;
1659 break;
1660 }
1661 }
1662 else
1663 tab = (Node[])fk;
1664 }
1665 else if ((fh & HASH_BITS) == h && (fv = f.val) != null &&
1666 ((fk = f.key) == k || k.equals(fk)))
1667 return fv;
1668 else {
1669 Node g = f.next;
1670 if (g != null) {
1671 for (Node e = g;;) {
1672 Object ek, ev;
1673 if ((e.hash & HASH_BITS) == h && (ev = e.val) != null &&
1674 ((ek = e.key) == k || k.equals(ek)))
1675 return ev;
1676 if ((e = e.next) == null) {
1677 checkForResize();
1678 break;
1679 }
1680 }
1681 }
1682 if (((fh = f.hash) & LOCKED) != 0) {
1683 checkForResize();
1684 f.tryAwaitLock(tab, i);
1685 }
1686 else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) {
1687 boolean added = false;
1688 try {
1689 if (tabAt(tab, i) == f) {
1690 count = 1;
1691 for (Node e = f;; ++count) {
1692 Object ek, ev;
1693 if ((e.hash & HASH_BITS) == h &&
1694 (ev = e.val) != null &&
1695 ((ek = e.key) == k || k.equals(ek))) {
1696 val = ev;
1697 break;
1698 }
1699 Node last = e;
1700 if ((e = e.next) == null) {
1701 if ((val = mf.apply(k)) != null) {
1702 added = true;
1703 last.next = new Node(h, k, val, null);
1704 if (count >= TREE_THRESHOLD)
1705 replaceWithTreeBin(tab, i, k);
1706 }
1707 break;
1708 }
1709 }
1710 }
1711 } finally {
1712 if (!f.casHash(fh | LOCKED, fh)) {
1713 f.hash = fh;
1714 synchronized (f) { f.notifyAll(); };
1715 }
1716 }
1717 if (count != 0) {
1718 if (!added)
1719 return val;
1720 if (tab.length <= 64)
1721 count = 2;
1722 break;
1723 }
1724 }
1725 }
1726 }
1727 if (val != null) {
1728 counter.add(1L);
1729 if (count > 1)
1730 checkForResize();
1731 }
1732 return val;
1733 }
1734
1735 /** Implementation for compute */
1736 @SuppressWarnings("unchecked") private final Object internalCompute
1737 (K k, boolean onlyIfPresent, BiFun<? super K, ? super V, ? extends V> mf) {
1738 int h = spread(k.hashCode());
1739 Object val = null;
1740 int delta = 0;
1741 int count = 0;
1742 for (Node[] tab = table;;) {
1743 Node f; int i, fh; Object fk;
1744 if (tab == null)
1745 tab = initTable();
1746 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1747 if (onlyIfPresent)
1748 break;
1749 Node node = new Node(fh = h | LOCKED, k, null, null);
1750 if (casTabAt(tab, i, null, node)) {
1751 try {
1752 count = 1;
1753 if ((val = mf.apply(k, null)) != null) {
1754 node.val = val;
1755 delta = 1;
1756 }
1757 } finally {
1758 if (delta == 0)
1759 setTabAt(tab, i, null);
1760 if (!node.casHash(fh, h)) {
1761 node.hash = h;
1762 synchronized (node) { node.notifyAll(); };
1763 }
1764 }
1765 }
1766 if (count != 0)
1767 break;
1768 }
1769 else if ((fh = f.hash) == MOVED) {
1770 if ((fk = f.key) instanceof TreeBin) {
1771 TreeBin t = (TreeBin)fk;
1772 t.acquire(0);
1773 try {
1774 if (tabAt(tab, i) == f) {
1775 count = 1;
1776 TreeNode p = t.getTreeNode(h, k, t.root);
1777 Object pv = (p == null) ? null : p.val;
1778 if ((val = mf.apply(k, (V)pv)) != null) {
1779 if (p != null)
1780 p.val = val;
1781 else {
1782 count = 2;
1783 delta = 1;
1784 t.putTreeNode(h, k, val);
1785 }
1786 }
1787 else if (p != null) {
1788 delta = -1;
1789 t.deleteTreeNode(p);
1790 }
1791 }
1792 } finally {
1793 t.release(0);
1794 }
1795 if (count != 0)
1796 break;
1797 }
1798 else
1799 tab = (Node[])fk;
1800 }
1801 else if ((fh & LOCKED) != 0) {
1802 checkForResize();
1803 f.tryAwaitLock(tab, i);
1804 }
1805 else if (f.casHash(fh, fh | LOCKED)) {
1806 try {
1807 if (tabAt(tab, i) == f) {
1808 count = 1;
1809 for (Node e = f, pred = null;; ++count) {
1810 Object ek, ev;
1811 if ((e.hash & HASH_BITS) == h &&
1812 (ev = e.val) != null &&
1813 ((ek = e.key) == k || k.equals(ek))) {
1814 val = mf.apply(k, (V)ev);
1815 if (val != null)
1816 e.val = val;
1817 else {
1818 delta = -1;
1819 Node en = e.next;
1820 if (pred != null)
1821 pred.next = en;
1822 else
1823 setTabAt(tab, i, en);
1824 }
1825 break;
1826 }
1827 pred = e;
1828 if ((e = e.next) == null) {
1829 if (!onlyIfPresent && (val = mf.apply(k, null)) != null) {
1830 pred.next = new Node(h, k, val, null);
1831 delta = 1;
1832 if (count >= TREE_THRESHOLD)
1833 replaceWithTreeBin(tab, i, k);
1834 }
1835 break;
1836 }
1837 }
1838 }
1839 } finally {
1840 if (!f.casHash(fh | LOCKED, fh)) {
1841 f.hash = fh;
1842 synchronized (f) { f.notifyAll(); };
1843 }
1844 }
1845 if (count != 0) {
1846 if (tab.length <= 64)
1847 count = 2;
1848 break;
1849 }
1850 }
1851 }
1852 if (delta != 0) {
1853 counter.add((long)delta);
1854 if (count > 1)
1855 checkForResize();
1856 }
1857 return val;
1858 }
1859
1860 /** Implementation for merge */
1861 @SuppressWarnings("unchecked") private final Object internalMerge
1862 (K k, V v, BiFun<? super V, ? super V, ? extends V> mf) {
1863 int h = spread(k.hashCode());
1864 Object val = null;
1865 int delta = 0;
1866 int count = 0;
1867 for (Node[] tab = table;;) {
1868 int i; Node f; int fh; Object fk, fv;
1869 if (tab == null)
1870 tab = initTable();
1871 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1872 if (casTabAt(tab, i, null, new Node(h, k, v, null))) {
1873 delta = 1;
1874 val = v;
1875 break;
1876 }
1877 }
1878 else if ((fh = f.hash) == MOVED) {
1879 if ((fk = f.key) instanceof TreeBin) {
1880 TreeBin t = (TreeBin)fk;
1881 t.acquire(0);
1882 try {
1883 if (tabAt(tab, i) == f) {
1884 count = 1;
1885 TreeNode p = t.getTreeNode(h, k, t.root);
1886 val = (p == null) ? v : mf.apply((V)p.val, v);
1887 if (val != null) {
1888 if (p != null)
1889 p.val = val;
1890 else {
1891 count = 2;
1892 delta = 1;
1893 t.putTreeNode(h, k, val);
1894 }
1895 }
1896 else if (p != null) {
1897 delta = -1;
1898 t.deleteTreeNode(p);
1899 }
1900 }
1901 } finally {
1902 t.release(0);
1903 }
1904 if (count != 0)
1905 break;
1906 }
1907 else
1908 tab = (Node[])fk;
1909 }
1910 else if ((fh & LOCKED) != 0) {
1911 checkForResize();
1912 f.tryAwaitLock(tab, i);
1913 }
1914 else if (f.casHash(fh, fh | LOCKED)) {
1915 try {
1916 if (tabAt(tab, i) == f) {
1917 count = 1;
1918 for (Node e = f, pred = null;; ++count) {
1919 Object ek, ev;
1920 if ((e.hash & HASH_BITS) == h &&
1921 (ev = e.val) != null &&
1922 ((ek = e.key) == k || k.equals(ek))) {
1923 val = mf.apply(v, (V)ev);
1924 if (val != null)
1925 e.val = val;
1926 else {
1927 delta = -1;
1928 Node en = e.next;
1929 if (pred != null)
1930 pred.next = en;
1931 else
1932 setTabAt(tab, i, en);
1933 }
1934 break;
1935 }
1936 pred = e;
1937 if ((e = e.next) == null) {
1938 val = v;
1939 pred.next = new Node(h, k, val, null);
1940 delta = 1;
1941 if (count >= TREE_THRESHOLD)
1942 replaceWithTreeBin(tab, i, k);
1943 break;
1944 }
1945 }
1946 }
1947 } finally {
1948 if (!f.casHash(fh | LOCKED, fh)) {
1949 f.hash = fh;
1950 synchronized (f) { f.notifyAll(); };
1951 }
1952 }
1953 if (count != 0) {
1954 if (tab.length <= 64)
1955 count = 2;
1956 break;
1957 }
1958 }
1959 }
1960 if (delta != 0) {
1961 counter.add((long)delta);
1962 if (count > 1)
1963 checkForResize();
1964 }
1965 return val;
1966 }
1967
1968 /** Implementation for putAll */
1969 private final void internalPutAll(Map<?, ?> m) {
1970 tryPresize(m.size());
1971 long delta = 0L; // number of uncommitted additions
1972 boolean npe = false; // to throw exception on exit for nulls
1973 try { // to clean up counts on other exceptions
1974 for (Map.Entry<?, ?> entry : m.entrySet()) {
1975 Object k, v;
1976 if (entry == null || (k = entry.getKey()) == null ||
1977 (v = entry.getValue()) == null) {
1978 npe = true;
1979 break;
1980 }
1981 int h = spread(k.hashCode());
1982 for (Node[] tab = table;;) {
1983 int i; Node f; int fh; Object fk;
1984 if (tab == null)
1985 tab = initTable();
1986 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null){
1987 if (casTabAt(tab, i, null, new Node(h, k, v, null))) {
1988 ++delta;
1989 break;
1990 }
1991 }
1992 else if ((fh = f.hash) == MOVED) {
1993 if ((fk = f.key) instanceof TreeBin) {
1994 TreeBin t = (TreeBin)fk;
1995 boolean validated = false;
1996 t.acquire(0);
1997 try {
1998 if (tabAt(tab, i) == f) {
1999 validated = true;
2000 TreeNode p = t.getTreeNode(h, k, t.root);
2001 if (p != null)
2002 p.val = v;
2003 else {
2004 t.putTreeNode(h, k, v);
2005 ++delta;
2006 }
2007 }
2008 } finally {
2009 t.release(0);
2010 }
2011 if (validated)
2012 break;
2013 }
2014 else
2015 tab = (Node[])fk;
2016 }
2017 else if ((fh & LOCKED) != 0) {
2018 counter.add(delta);
2019 delta = 0L;
2020 checkForResize();
2021 f.tryAwaitLock(tab, i);
2022 }
2023 else if (f.casHash(fh, fh | LOCKED)) {
2024 int count = 0;
2025 try {
2026 if (tabAt(tab, i) == f) {
2027 count = 1;
2028 for (Node e = f;; ++count) {
2029 Object ek, ev;
2030 if ((e.hash & HASH_BITS) == h &&
2031 (ev = e.val) != null &&
2032 ((ek = e.key) == k || k.equals(ek))) {
2033 e.val = v;
2034 break;
2035 }
2036 Node last = e;
2037 if ((e = e.next) == null) {
2038 ++delta;
2039 last.next = new Node(h, k, v, null);
2040 if (count >= TREE_THRESHOLD)
2041 replaceWithTreeBin(tab, i, k);
2042 break;
2043 }
2044 }
2045 }
2046 } finally {
2047 if (!f.casHash(fh | LOCKED, fh)) {
2048 f.hash = fh;
2049 synchronized (f) { f.notifyAll(); };
2050 }
2051 }
2052 if (count != 0) {
2053 if (count > 1) {
2054 counter.add(delta);
2055 delta = 0L;
2056 checkForResize();
2057 }
2058 break;
2059 }
2060 }
2061 }
2062 }
2063 } finally {
2064 if (delta != 0)
2065 counter.add(delta);
2066 }
2067 if (npe)
2068 throw new NullPointerException();
2069 }
2070
2071 /* ---------------- Table Initialization and Resizing -------------- */
2072
2073 /**
2074 * Returns a power of two table size for the given desired capacity.
2075 * See Hacker's Delight, sec 3.2
2076 */
2077 private static final int tableSizeFor(int c) {
2078 int n = c - 1;
2079 n |= n >>> 1;
2080 n |= n >>> 2;
2081 n |= n >>> 4;
2082 n |= n >>> 8;
2083 n |= n >>> 16;
2084 return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
2085 }
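    // Illustration: tableSizeFor(1) == 1, tableSizeFor(16) == 16, and
    // tableSizeFor(17) == 32; arguments of MAXIMUM_CAPACITY or more are
    // clamped to MAXIMUM_CAPACITY.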
2086
2087 /**
2088 * Initializes table, using the size recorded in sizeCtl.
2089 */
2090 private final Node[] initTable() {
2091 Node[] tab; int sc;
2092 while ((tab = table) == null) {
2093 if ((sc = sizeCtl) < 0)
2094 Thread.yield(); // lost initialization race; just spin
2095 else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
2096 try {
2097 if ((tab = table) == null) {
2098 int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
2099 tab = table = new Node[n];
2100 sc = n - (n >>> 2);
2101 }
2102 } finally {
2103 sizeCtl = sc;
2104 }
2105 break;
2106 }
2107 }
2108 return tab;
2109 }
2110
2111 /**
2112 * If table is too small and not already resizing, creates next
2113 * table and transfers bins. Rechecks occupancy after a transfer
2114 * to see if another resize is already needed because resizings
2115 * are lagging additions.
2116 */
2117 private final void checkForResize() {
2118 Node[] tab; int n, sc;
2119 while ((tab = table) != null &&
2120 (n = tab.length) < MAXIMUM_CAPACITY &&
2121 (sc = sizeCtl) >= 0 && counter.sum() >= (long)sc &&
2122 UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
2123 try {
2124 if (tab == table) {
2125 table = rebuild(tab);
2126 sc = (n << 1) - (n >>> 1);
2127 }
2128 } finally {
2129 sizeCtl = sc;
2130 }
2131 }
2132 }
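    // A note on the arithmetic above: since rebuild doubles the table
    // from n to 2*n bins, the next threshold (n << 1) - (n >>> 1) == 1.5*n
    // is three quarters of the new capacity, matching the n - (n >>> 2)
    // threshold set in initTable.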
2133
2134 /**
2135 * Tries to presize table to accommodate the given number of elements.
2136 *
2137 * @param size number of elements (doesn't need to be perfectly accurate)
2138 */
2139 private final void tryPresize(int size) {
2140 int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
2141 tableSizeFor(size + (size >>> 1) + 1);
2142 int sc;
2143 while ((sc = sizeCtl) >= 0) {
2144 Node[] tab = table; int n;
2145 if (tab == null || (n = tab.length) == 0) {
2146 n = (sc > c) ? sc : c;
2147 if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
2148 try {
2149 if (table == tab) {
2150 table = new Node[n];
2151 sc = n - (n >>> 2);
2152 }
2153 } finally {
2154 sizeCtl = sc;
2155 }
2156 }
2157 }
2158 else if (c <= sc || n >= MAXIMUM_CAPACITY)
2159 break;
2160 else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
2161 try {
2162 if (table == tab) {
2163 table = rebuild(tab);
2164 sc = (n << 1) - (n >>> 1);
2165 }
2166 } finally {
2167 sizeCtl = sc;
2168 }
2169 }
2170 }
2171 }
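    // For instance, tryPresize(100) requests capacity for 100 elements
    // plus slack: tableSizeFor(100 + 50 + 1) == 256 bins, so no resize
    // is needed until the element count reaches the resulting threshold.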
2172
2173 /**
2174 * Moves and/or copies the nodes in each bin to new table. See
2175 * above for explanation.
2176 *
2177 * @return the new table
2178 */
2179 private static final Node[] rebuild(Node[] tab) {
2180 int n = tab.length;
2181 Node[] nextTab = new Node[n << 1];
2182 Node fwd = new Node(MOVED, nextTab, null, null);
2183 int[] buffer = null; // holds bins to revisit; null until needed
2184 Node rev = null; // reverse forwarder; null until needed
2185 int nbuffered = 0; // the number of bins in buffer list
2186 int bufferIndex = 0; // buffer index of current buffered bin
2187 int bin = n - 1; // current non-buffered bin or -1 if none
2188
2189 for (int i = bin;;) { // start upwards sweep
2190 int fh; Node f;
2191 if ((f = tabAt(tab, i)) == null) {
2192 if (bin >= 0) { // Unbuffered; no lock needed (or available)
2193 if (!casTabAt(tab, i, f, fwd))
2194 continue;
2195 }
2196 else { // transiently use a locked forwarding node
2197 Node g = new Node(MOVED|LOCKED, nextTab, null, null);
2198 if (!casTabAt(tab, i, f, g))
2199 continue;
2200 setTabAt(nextTab, i, null);
2201 setTabAt(nextTab, i + n, null);
2202 setTabAt(tab, i, fwd);
2203 if (!g.casHash(MOVED|LOCKED, MOVED)) {
2204 g.hash = MOVED;
2205 synchronized (g) { g.notifyAll(); }
2206 }
2207 }
2208 }
2209 else if ((fh = f.hash) == MOVED) {
2210 Object fk = f.key;
2211 if (fk instanceof TreeBin) {
2212 TreeBin t = (TreeBin)fk;
2213 boolean validated = false;
2214 t.acquire(0);
2215 try {
2216 if (tabAt(tab, i) == f) {
2217 validated = true;
2218 splitTreeBin(nextTab, i, t);
2219 setTabAt(tab, i, fwd);
2220 }
2221 } finally {
2222 t.release(0);
2223 }
2224 if (!validated)
2225 continue;
2226 }
2227 }
2228 else if ((fh & LOCKED) == 0 && f.casHash(fh, fh|LOCKED)) {
2229 boolean validated = false;
2230 try { // split to lo and hi lists; copying as needed
2231 if (tabAt(tab, i) == f) {
2232 validated = true;
2233 splitBin(nextTab, i, f);
2234 setTabAt(tab, i, fwd);
2235 }
2236 } finally {
2237 if (!f.casHash(fh | LOCKED, fh)) {
2238 f.hash = fh;
2239 synchronized (f) { f.notifyAll(); };
2240 }
2241 }
2242 if (!validated)
2243 continue;
2244 }
2245 else {
2246 if (buffer == null) // initialize buffer for revisits
2247 buffer = new int[TRANSFER_BUFFER_SIZE];
2248 if (bin < 0 && bufferIndex > 0) {
2249 int j = buffer[--bufferIndex];
2250 buffer[bufferIndex] = i;
2251 i = j; // swap with another bin
2252 continue;
2253 }
2254 if (bin < 0 || nbuffered >= TRANSFER_BUFFER_SIZE) {
2255 f.tryAwaitLock(tab, i);
2256 continue; // no other options -- block
2257 }
2258 if (rev == null) // initialize reverse-forwarder
2259 rev = new Node(MOVED, tab, null, null);
2260 if (tabAt(tab, i) != f || (f.hash & LOCKED) == 0)
2261 continue; // recheck before adding to list
2262 buffer[nbuffered++] = i;
2263 setTabAt(nextTab, i, rev); // install place-holders
2264 setTabAt(nextTab, i + n, rev);
2265 }
2266
2267 if (bin > 0)
2268 i = --bin;
2269 else if (buffer != null && nbuffered > 0) {
2270 bin = -1;
2271 i = buffer[bufferIndex = --nbuffered];
2272 }
2273 else
2274 return nextTab;
2275 }
2276 }
2277
2278 /**
2279 * Splits a normal bin with list headed by e into lo and hi parts;
2280 * installs in given table.
2281 */
2282 private static void splitBin(Node[] nextTab, int i, Node e) {
2283 int bit = nextTab.length >>> 1; // bit to split on
2284 int runBit = e.hash & bit;
2285 Node lastRun = e, lo = null, hi = null;
2286 for (Node p = e.next; p != null; p = p.next) {
2287 int b = p.hash & bit;
2288 if (b != runBit) {
2289 runBit = b;
2290 lastRun = p;
2291 }
2292 }
2293 if (runBit == 0)
2294 lo = lastRun;
2295 else
2296 hi = lastRun;
2297 for (Node p = e; p != lastRun; p = p.next) {
2298 int ph = p.hash & HASH_BITS;
2299 Object pk = p.key, pv = p.val;
2300 if ((ph & bit) == 0)
2301 lo = new Node(ph, pk, pv, lo);
2302 else
2303 hi = new Node(ph, pk, pv, hi);
2304 }
2305 setTabAt(nextTab, i, lo);
2306 setTabAt(nextTab, i + bit, hi);
2307 }
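    // Example: when a 16-bin table is split into a 32-bin table, bit ==
    // 16, so a node formerly in bin i lands in bin i of nextTab when
    // (hash & 16) == 0 and in bin i + 16 otherwise. The trailing run of
    // nodes starting at lastRun all share the same (hash & bit) value
    // and are reused unchanged; only nodes before lastRun are copied.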
2308
2309 /**
2310 * Splits a tree bin into lo and hi parts; installs in given table.
2311 */
2312 private static void splitTreeBin(Node[] nextTab, int i, TreeBin t) {
2313 int bit = nextTab.length >>> 1;
2314 TreeBin lt = new TreeBin();
2315 TreeBin ht = new TreeBin();
2316 int lc = 0, hc = 0;
2317 for (Node e = t.first; e != null; e = e.next) {
2318 int h = e.hash & HASH_BITS;
2319 Object k = e.key, v = e.val;
2320 if ((h & bit) == 0) {
2321 ++lc;
2322 lt.putTreeNode(h, k, v);
2323 }
2324 else {
2325 ++hc;
2326 ht.putTreeNode(h, k, v);
2327 }
2328 }
2329 Node ln, hn; // throw away trees if too small
2330 if (lc <= (TREE_THRESHOLD >>> 1)) {
2331 ln = null;
2332 for (Node p = lt.first; p != null; p = p.next)
2333 ln = new Node(p.hash, p.key, p.val, ln);
2334 }
2335 else
2336 ln = new Node(MOVED, lt, null, null);
2337 setTabAt(nextTab, i, ln);
2338 if (hc <= (TREE_THRESHOLD >>> 1)) {
2339 hn = null;
2340 for (Node p = ht.first; p != null; p = p.next)
2341 hn = new Node(p.hash, p.key, p.val, hn);
2342 }
2343 else
2344 hn = new Node(MOVED, ht, null, null);
2345 setTabAt(nextTab, i + bit, hn);
2346 }
2347
2348 /**
2349 * Implementation for clear. Steps through each bin, removing all
2350 * nodes.
2351 */
2352 private final void internalClear() {
2353 long delta = 0L; // negative number of deletions
2354 int i = 0;
2355 Node[] tab = table;
2356 while (tab != null && i < tab.length) {
2357 int fh; Object fk;
2358 Node f = tabAt(tab, i);
2359 if (f == null)
2360 ++i;
2361 else if ((fh = f.hash) == MOVED) {
2362 if ((fk = f.key) instanceof TreeBin) {
2363 TreeBin t = (TreeBin)fk;
2364 t.acquire(0);
2365 try {
2366 if (tabAt(tab, i) == f) {
2367 for (Node p = t.first; p != null; p = p.next) {
2368 if (p.val != null) { // (currently always true)
2369 p.val = null;
2370 --delta;
2371 }
2372 }
2373 t.first = null;
2374 t.root = null;
2375 ++i;
2376 }
2377 } finally {
2378 t.release(0);
2379 }
2380 }
2381 else
2382 tab = (Node[])fk;
2383 }
2384 else if ((fh & LOCKED) != 0) {
2385 counter.add(delta); // opportunistically update count
2386 delta = 0L;
2387 f.tryAwaitLock(tab, i);
2388 }
2389 else if (f.casHash(fh, fh | LOCKED)) {
2390 try {
2391 if (tabAt(tab, i) == f) {
2392 for (Node e = f; e != null; e = e.next) {
2393 if (e.val != null) { // (currently always true)
2394 e.val = null;
2395 --delta;
2396 }
2397 }
2398 setTabAt(tab, i, null);
2399 ++i;
2400 }
2401 } finally {
2402 if (!f.casHash(fh | LOCKED, fh)) {
2403 f.hash = fh;
2404 synchronized (f) { f.notifyAll(); };
2405 }
2406 }
2407 }
2408 }
2409 if (delta != 0)
2410 counter.add(delta);
2411 }
2412
2413 /* ----------------Table Traversal -------------- */
2414
2415 /**
2416 * Encapsulates traversal for methods such as containsValue; also
2417 * serves as a base class for other iterators and bulk tasks.
2418 *
2419 * At each step, the iterator snapshots the key ("nextKey") and
2420 * value ("nextVal") of a valid node (i.e., one that, at point of
2421 * snapshot, has a non-null user value). Because val fields can
2422 * change (including to null, indicating deletion), field nextVal
2423 * might not be accurate at point of use, but still maintains the
2424 * weak consistency property of holding a value that was once
2425 * valid. To support iterator.remove, the nextKey field is not
2426 * updated (nulled out) when the iterator cannot advance.
2427 *
2428 * Internal traversals directly access these fields, as in:
2429 * {@code while (it.advance() != null) { process(it.nextKey); }}
2430 *
2431 * Exported iterators must track whether the iterator has advanced
2432 * (in hasNext vs next) (by setting/checking/nulling field
2433 * nextVal), and then extract key, value, or key-value pairs as
2434 * return values of next().
2435 *
2436 * The iterator visits once each still-valid node that was
2437 * reachable upon iterator construction. It might miss some that
2438 * were added to a bin after the bin was visited, which is OK wrt
2439 * consistency guarantees. Maintaining this property in the face
2440 * of possible ongoing resizes requires a fair amount of
2441 * bookkeeping state that is difficult to optimize away amidst
2442 * volatile accesses. Even so, traversal maintains reasonable
2443 * throughput.
2444 *
2445 * Normally, iteration proceeds bin-by-bin traversing lists.
2446 * However, if the table has been resized, then all future steps
2447 * must traverse both the bin at the current index as well as at
2448 * (index + baseSize); and so on for further resizings. To
2449 * paranoically cope with potential sharing by users of iterators
2450 * across threads, iteration terminates if a bounds check fails
2451 * for a table read.
2452 *
2453 * This class extends ForkJoinTask to streamline parallel
2454 * iteration in bulk operations (see BulkTask). This adds only an
2455 * int of space overhead, which, in cases where it is not needed, is
2456 * close enough to negligible not to worry about. Because
2457 * ForkJoinTask is Serializable, but iterators need not be, we
2458 * need to add warning suppressions.
2459 */
2460 @SuppressWarnings("serial") static class Traverser<K,V,R> extends ForkJoinTask<R> {
2461 final ConcurrentHashMapV8<K, V> map;
2462 Node next; // the next entry to use
2463 Object nextKey; // cached key field of next
2464 Object nextVal; // cached val field of next
2465 Node[] tab; // current table; updated if resized
2466 int index; // index of bin to use next
2467 int baseIndex; // current index of initial table
2468 int baseLimit; // index bound for initial table
2469 int baseSize; // initial table size
2470
2471 /** Creates iterator for all entries in the table. */
2472 Traverser(ConcurrentHashMapV8<K, V> map) {
2473 this.map = map;
2474 }
2475
2476 /** Creates iterator for split() methods */
2477 Traverser(Traverser<K,V,?> it) {
2478 ConcurrentHashMapV8<K, V> m; Node[] t;
2479 if ((m = this.map = it.map) == null)
2480 t = null;
2481 else if ((t = it.tab) == null && // force parent tab initialization
2482 (t = it.tab = m.table) != null)
2483 it.baseLimit = it.baseSize = t.length;
2484 this.tab = t;
2485 this.baseSize = it.baseSize;
2486 it.baseLimit = this.index = this.baseIndex =
2487 ((this.baseLimit = it.baseLimit) + it.baseIndex + 1) >>> 1;
2488 }
2489
2490 /**
2491 * Advances next; returns nextVal or null if terminated.
2492 * See above for explanation.
2493 */
2494 final Object advance() {
2495 Node e = next;
2496 Object ev = null;
2497 outer: do {
2498 if (e != null) // advance past used/skipped node
2499 e = e.next;
2500 while (e == null) { // get to next non-null bin
2501 ConcurrentHashMapV8<K, V> m;
2502 Node[] t; int b, i, n; Object ek; // checks must use locals
2503 if ((t = tab) != null)
2504 n = t.length;
2505 else if ((m = map) != null && (t = tab = m.table) != null)
2506 n = baseLimit = baseSize = t.length;
2507 else
2508 break outer;
2509 if ((b = baseIndex) >= baseLimit ||
2510 (i = index) < 0 || i >= n)
2511 break outer;
2512 if ((e = tabAt(t, i)) != null && e.hash == MOVED) {
2513 if ((ek = e.key) instanceof TreeBin)
2514 e = ((TreeBin)ek).first;
2515 else {
2516 tab = (Node[])ek;
2517 continue; // restarts due to null val
2518 }
2519 } // visit upper slots if present
2520 index = (i += baseSize) < n ? i : (baseIndex = b + 1);
2521 }
2522 nextKey = e.key;
2523 } while ((ev = e.val) == null); // skip deleted or special nodes
2524 next = e;
2525 return nextVal = ev;
2526 }
2527
2528 public final void remove() {
2529 Object k = nextKey;
2530 if (k == null && (advance() == null || (k = nextKey) == null))
2531 throw new IllegalStateException();
2532 map.internalReplace(k, null, null);
2533 }
2534
2535 public final boolean hasNext() {
2536 return nextVal != null || advance() != null;
2537 }
2538
2539 public final boolean hasMoreElements() { return hasNext(); }
2540 public final void setRawResult(Object x) { }
2541 public R getRawResult() { return null; }
2542 public boolean exec() { return true; }
2543 }
2544
2545 /* ---------------- Public operations -------------- */
2546
2547 /**
2548 * Creates a new, empty map with the default initial table size (16).
2549 */
2550 public ConcurrentHashMapV8() {
2551 this.counter = new LongAdder();
2552 }
2553
2554 /**
2555 * Creates a new, empty map with an initial table size
2556 * accommodating the specified number of elements without the need
2557 * to dynamically resize.
2558 *
2559 * @param initialCapacity The implementation performs internal
2560 * sizing to accommodate this many elements.
2561 * @throws IllegalArgumentException if the initial capacity of
2562 * elements is negative
2563 */
2564 public ConcurrentHashMapV8(int initialCapacity) {
2565 if (initialCapacity < 0)
2566 throw new IllegalArgumentException();
2567 int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
2568 MAXIMUM_CAPACITY :
2569 tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
2570 this.counter = new LongAdder();
2571 this.sizeCtl = cap;
2572 }
2573
2574 /**
2575 * Creates a new map with the same mappings as the given map.
2576 *
2577 * @param m the map
2578 */
2579 public ConcurrentHashMapV8(Map<? extends K, ? extends V> m) {
2580 this.counter = new LongAdder();
2581 this.sizeCtl = DEFAULT_CAPACITY;
2582 internalPutAll(m);
2583 }
2584
2585 /**
2586 * Creates a new, empty map with an initial table size based on
2587 * the given number of elements ({@code initialCapacity}) and
2588 * initial table density ({@code loadFactor}).
2589 *
2590 * @param initialCapacity the initial capacity. The implementation
2591 * performs internal sizing to accommodate this many elements,
2592 * given the specified load factor.
2593 * @param loadFactor the load factor (table density) for
2594 * establishing the initial table size
2595 * @throws IllegalArgumentException if the initial capacity of
2596 * elements is negative or the load factor is nonpositive
2597 *
2598 * @since 1.6
2599 */
2600 public ConcurrentHashMapV8(int initialCapacity, float loadFactor) {
2601 this(initialCapacity, loadFactor, 1);
2602 }
2603
2604 /**
2605 * Creates a new, empty map with an initial table size based on
2606 * the given number of elements ({@code initialCapacity}), table
2607 * density ({@code loadFactor}), and number of concurrently
2608 * updating threads ({@code concurrencyLevel}).
2609 *
2610 * @param initialCapacity the initial capacity. The implementation
2611 * performs internal sizing to accommodate this many elements,
2612 * given the specified load factor.
2613 * @param loadFactor the load factor (table density) for
2614 * establishing the initial table size
2615 * @param concurrencyLevel the estimated number of concurrently
2616 * updating threads. The implementation may use this value as
2617 * a sizing hint.
2618 * @throws IllegalArgumentException if the initial capacity is
2619 * negative or the load factor or concurrencyLevel are
2620 * nonpositive
2621 */
2622 public ConcurrentHashMapV8(int initialCapacity,
2623 float loadFactor, int concurrencyLevel) {
2624 if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
2625 throw new IllegalArgumentException();
2626 if (initialCapacity < concurrencyLevel) // Use at least as many bins
2627 initialCapacity = concurrencyLevel; // as estimated threads
2628 long size = (long)(1.0 + (long)initialCapacity / loadFactor);
2629 int cap = (size >= (long)MAXIMUM_CAPACITY) ?
2630 MAXIMUM_CAPACITY : tableSizeFor((int)size);
2631 this.counter = new LongAdder();
2632 this.sizeCtl = cap;
2633 }
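    // A construction and basic-usage sketch (any non-null key and value
    // types work; the names here are illustrative only):
    //
    //   ConcurrentHashMapV8<String, Integer> map =
    //       new ConcurrentHashMapV8<String, Integer>(64);
    //   map.put("a", 1);
    //   Integer one = map.get("a");     // 1
    //   map.putIfAbsent("a", 2);        // returns 1; mapping unchanged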
2634
2635 /**
2636 * Creates a new {@link Set} backed by a ConcurrentHashMapV8
2637 * from the given type to {@code Boolean.TRUE}.
2638 *
2639 * @return the new set
2640 */
2641 public static <K> KeySetView<K,Boolean> newKeySet() {
2642 return new KeySetView<K,Boolean>(new ConcurrentHashMapV8<K,Boolean>(),
2643 Boolean.TRUE);
2644 }
2645
2646 /**
2647 * Creates a new {@link Set} backed by a ConcurrentHashMapV8
2648 * from the given type to {@code Boolean.TRUE}.
2649 *
2650 * @param initialCapacity The implementation performs internal
2651 * sizing to accommodate this many elements.
2652 * @throws IllegalArgumentException if the initial capacity of
2653 * elements is negative
2654 * @return the new set
2655 */
2656 public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
2657 return new KeySetView<K,Boolean>(new ConcurrentHashMapV8<K,Boolean>(initialCapacity),
2658 Boolean.TRUE);
2659 }
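    // A usage sketch for the factory methods above: the returned view
    // acts as a concurrent Set whose elements are the keys of a backing
    // map, each mapped to Boolean.TRUE (names are illustrative only).
    //
    //   Set<String> seen = ConcurrentHashMapV8.<String>newKeySet();
    //   boolean fresh = seen.add("first");  // true only on first insertion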
2660
2661 /**
2662 * {@inheritDoc}
2663 */
2664 public boolean isEmpty() {
2665 return counter.sum() <= 0L; // ignore transient negative values
2666 }
2667
2668 /**
2669 * {@inheritDoc}
2670 */
2671 public int size() {
2672 long n = counter.sum();
2673 return ((n < 0L) ? 0 :
2674 (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
2675 (int)n);
2676 }
2677
2678 /**
2679 * Returns the number of mappings. This method should be used
2680 * instead of {@link #size} because a ConcurrentHashMapV8 may
2681 * contain more mappings than can be represented as an int. The
2682 * value returned is a snapshot; the actual count may differ if
2683 * there are ongoing concurrent insertions or removals.
2684 *
2685 * @return the number of mappings
2686 */
2687 public long mappingCount() {
2688 long n = counter.sum();
2689 return (n < 0L) ? 0L : n; // ignore transient negative values
2690 }
2691
2692 /**
2693 * Returns the value to which the specified key is mapped,
2694 * or {@code null} if this map contains no mapping for the key.
2695 *
2696 * <p>More formally, if this map contains a mapping from a key
2697 * {@code k} to a value {@code v} such that {@code key.equals(k)},
2698 * then this method returns {@code v}; otherwise it returns
2699 * {@code null}. (There can be at most one such mapping.)
2700 *
2701 * @throws NullPointerException if the specified key is null
2702 */
2703 @SuppressWarnings("unchecked") public V get(Object key) {
2704 if (key == null)
2705 throw new NullPointerException();
2706 return (V)internalGet(key);
2707 }
2708
2709 /**
2710 * Returns the value to which the specified key is mapped,
2711 * or the given defaultValue if this map contains no mapping for the key.
2712 *
2713 * @param key the key
2714 * @param defaultValue the value to return if this map contains
2715 * no mapping for the given key
2716 * @return the mapping for the key, if present; else the defaultValue
2717 * @throws NullPointerException if the specified key is null
2718 */
2719 @SuppressWarnings("unchecked") public V getValueOrDefault(Object key, V defaultValue) {
2720 if (key == null)
2721 throw new NullPointerException();
2722 V v = (V) internalGet(key);
2723 return v == null ? defaultValue : v;
2724 }
2725
2726 /**
2727 * Tests if the specified object is a key in this table.
2728 *
2729 * @param key possible key
2730 * @return {@code true} if and only if the specified object
2731 * is a key in this table, as determined by the
2732 * {@code equals} method; {@code false} otherwise
2733 * @throws NullPointerException if the specified key is null
2734 */
2735 public boolean containsKey(Object key) {
2736 if (key == null)
2737 throw new NullPointerException();
2738 return internalGet(key) != null;
2739 }
2740
2741 /**
2742 * Returns {@code true} if this map maps one or more keys to the
2743 * specified value. Note: This method may require a full traversal
2744 * of the map, and is much slower than method {@code containsKey}.
2745 *
2746 * @param value value whose presence in this map is to be tested
2747 * @return {@code true} if this map maps one or more keys to the
2748 * specified value
2749 * @throws NullPointerException if the specified value is null
2750 */
2751 public boolean containsValue(Object value) {
2752 if (value == null)
2753 throw new NullPointerException();
2754 Object v;
2755 Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
2756 while ((v = it.advance()) != null) {
2757 if (v == value || value.equals(v))
2758 return true;
2759 }
2760 return false;
2761 }
2762
2763 /**
2764 * Legacy method testing if some key maps into the specified value
2765 * in this table. This method is identical in functionality to
2766 * {@link #containsValue}, and exists solely to ensure
2767 * full compatibility with class {@link java.util.Hashtable},
2768 * which supported this method prior to introduction of the
2769 * Java Collections framework.
2770 *
2771 * @param value a value to search for
2772 * @return {@code true} if and only if some key maps to the
2773 * {@code value} argument in this table as
2774 * determined by the {@code equals} method;
2775 * {@code false} otherwise
2776 * @throws NullPointerException if the specified value is null
2777 */
2778 public boolean contains(Object value) {
2779 return containsValue(value);
2780 }
2781
2782 /**
2783 * Maps the specified key to the specified value in this table.
2784 * Neither the key nor the value can be null.
2785 *
2786 * <p> The value can be retrieved by calling the {@code get} method
2787 * with a key that is equal to the original key.
2788 *
2789 * @param key key with which the specified value is to be associated
2790 * @param value value to be associated with the specified key
2791 * @return the previous value associated with {@code key}, or
2792 * {@code null} if there was no mapping for {@code key}
2793 * @throws NullPointerException if the specified key or value is null
2794 */
2795 @SuppressWarnings("unchecked") public V put(K key, V value) {
2796 if (key == null || value == null)
2797 throw new NullPointerException();
2798 return (V)internalPut(key, value);
2799 }
2800
2801 /**
2802 * {@inheritDoc}
2803 *
2804 * @return the previous value associated with the specified key,
2805 * or {@code null} if there was no mapping for the key
2806 * @throws NullPointerException if the specified key or value is null
2807 */
2808 @SuppressWarnings("unchecked") public V putIfAbsent(K key, V value) {
2809 if (key == null || value == null)
2810 throw new NullPointerException();
2811 return (V)internalPutIfAbsent(key, value);
2812 }
2813
2814 /**
2815 * Copies all of the mappings from the specified map to this one.
2816 * These mappings replace any mappings that this map had for any of the
2817 * keys currently in the specified map.
2818 *
2819 * @param m mappings to be stored in this map
2820 */
2821 public void putAll(Map<? extends K, ? extends V> m) {
2822 internalPutAll(m);
2823 }
2824
2825 /**
2826 * If the specified key is not already associated with a value,
2827 * computes its value using the given mappingFunction and enters
2828 * it into the map unless null. This is equivalent to
2829 * <pre> {@code
2830 * if (map.containsKey(key))
2831 * return map.get(key);
2832 * value = mappingFunction.apply(key);
2833 * if (value != null)
2834 * map.put(key, value);
2835 * return value;}</pre>
2836 *
2837 * except that the action is performed atomically. If the
2838 * function returns {@code null} no mapping is recorded. If the
2839 * function itself throws an (unchecked) exception, the exception
2840 * is rethrown to its caller, and no mapping is recorded. Some
2841 * attempted update operations on this map by other threads may be
2842 * blocked while computation is in progress, so the computation
2843 * should be short and simple, and must not attempt to update any
2844 * other mappings of this Map. The most appropriate usage is to
2845 * construct a new object serving as an initial mapped value, or
2846 * memoized result, as in:
2847 *
2848 * <pre> {@code
2849 * map.computeIfAbsent(key, new Fun<K, V>() {
2850 * public V map(K k) { return new Value(f(k)); }});}</pre>
2851 *
2852 * @param key key with which the specified value is to be associated
2853 * @param mappingFunction the function to compute a value
2854 * @return the current (existing or computed) value associated with
2855 * the specified key, or null if the computed value is null
2856 * @throws NullPointerException if the specified key or mappingFunction
2857 * is null
2858 * @throws IllegalStateException if the computation detectably
2859 * attempts a recursive update to this map that would
2860 * otherwise never complete
2861 * @throws RuntimeException or Error if the mappingFunction does so,
2862 * in which case the mapping is left unestablished
2863 */
2864 @SuppressWarnings("unchecked") public V computeIfAbsent
2865 (K key, Fun<? super K, ? extends V> mappingFunction) {
2866 if (key == null || mappingFunction == null)
2867 throw new NullPointerException();
2868 return (V)internalComputeIfAbsent(key, mappingFunction);
2869 }
2870
2871 /**
2872 * If the given key is present, computes a new mapping value given a key and
2873 * its current mapped value. This is equivalent to
2874 * <pre> {@code
2875 * if (map.containsKey(key)) {
2876 * value = remappingFunction.apply(key, map.get(key));
2877 * if (value != null)
2878 * map.put(key, value);
2879 * else
2880 * map.remove(key);
2881 * }
2882 * }</pre>
2883 *
2884 * except that the action is performed atomically. If the
2885 * function returns {@code null}, the mapping is removed. If the
2886 * function itself throws an (unchecked) exception, the exception
2887 * is rethrown to its caller, and the current mapping is left
2888 * unchanged. Some attempted update operations on this map by
2889 * other threads may be blocked while computation is in progress,
2890 * so the computation should be short and simple, and must not
2891 * attempt to update any other mappings of this Map. (For an example
2892 * use, see the sketch following this method.)
2893 *
2894 * @param key key with which the specified value is to be associated
2895 * @param remappingFunction the function to compute a value
2896 * @return the new value associated with the specified key, or null if none
2897 * @throws NullPointerException if the specified key or remappingFunction
2898 * is null
2899 * @throws IllegalStateException if the computation detectably
2900 * attempts a recursive update to this map that would
2901 * otherwise never complete
2902 * @throws RuntimeException or Error if the remappingFunction does so,
2903 * in which case the mapping is unchanged
2904 */
2905 @SuppressWarnings("unchecked") public V computeIfPresent
2906 (K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
2907 if (key == null || remappingFunction == null)
2908 throw new NullPointerException();
2909 return (V)internalCompute(key, true, remappingFunction);
2910 }
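    // A usage sketch for computeIfPresent (LIMIT and msg are illustrative
    // names): append to an existing value, removing the mapping when the
    // remapping function returns null.
    //
    //   map.computeIfPresent(key, new BiFun<Key, String, String>() {
    //     public String apply(Key k, String v) {
    //       return (v.length() < LIMIT) ? v + msg : null; }});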
2911
2912 /**
2913 * Computes a new mapping value given a key and
2914 * its current mapped value (or {@code null} if there is no current
2915 * mapping). This is equivalent to
2916 * <pre> {@code
2917 * value = remappingFunction.apply(key, map.get(key));
2918 * if (value != null)
2919 * map.put(key, value);
2920 * else
2921 * map.remove(key);
2922 * }</pre>
2923 *
2924 * except that the action is performed atomically. If the
2925 * function returns {@code null}, the mapping is removed. If the
2926 * function itself throws an (unchecked) exception, the exception
2927 * is rethrown to its caller, and the current mapping is left
2928 * unchanged. Some attempted update operations on this map by
2929 * other threads may be blocked while computation is in progress,
2930 * so the computation should be short and simple, and must not
2931 * attempt to update any other mappings of this Map. For example,
2932 * to either create or append new messages to a value mapping:
2933 *
2934 * <pre> {@code
2935 * Map<Key, String> map = ...;
2936 * final String msg = ...;
2937 * map.compute(key, new BiFun<Key, String, String>() {
2938 * public String apply(Key k, String v) {
2939 * return (v == null) ? msg : v + msg; }});}</pre>
2940 *
2941 * @param key key with which the specified value is to be associated
2942 * @param remappingFunction the function to compute a value
2943 * @return the new value associated with the specified key, or null if none
2944 * @throws NullPointerException if the specified key or remappingFunction
2945 * is null
2946 * @throws IllegalStateException if the computation detectably
2947 * attempts a recursive update to this map that would
2948 * otherwise never complete
2949 * @throws RuntimeException or Error if the remappingFunction does so,
2950 * in which case the mapping is unchanged
2951 */
2952 @SuppressWarnings("unchecked") public V compute
2953 (K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
2954 if (key == null || remappingFunction == null)
2955 throw new NullPointerException();
2956 return (V)internalCompute(key, false, remappingFunction);
2957 }
2958
2959 /**
2960 * If the specified key is not already associated
2961 * with a value, associate it with the given value.
2962 * Otherwise, replace the value with the results of
2963 * the given remapping function. This is equivalent to:
2964 * <pre> {@code
2965 * if (!map.containsKey(key))
2966 * map.put(key, value);
2967 * else {
2968 * newValue = remappingFunction.apply(map.get(key), value);
2969 * if (newValue != null)
2970 * map.put(key, newValue);
2971 * else
2972 * map.remove(key);
2973 * }
2974 * }</pre>
2975 * except that the action is performed atomically. If the
2976 * function returns {@code null}, the mapping is removed. If the
2977 * function itself throws an (unchecked) exception, the exception
2978 * is rethrown to its caller, and the current mapping is left
2979 * unchanged. Some attempted update operations on this map by
2980 * other threads may be blocked while computation is in progress,
2981 * so the computation should be short and simple, and must not
2982 * attempt to update any other mappings of this Map.
2983 */
2984 @SuppressWarnings("unchecked") public V merge
2985 (K key, V value, BiFun<? super V, ? super V, ? extends V> remappingFunction) {
2986 if (key == null || value == null || remappingFunction == null)
2987 throw new NullPointerException();
2988 return (V)internalMerge(key, value, remappingFunction);
2989 }
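    // A usage sketch for merge: maintaining concurrent word counts
    // (counts and word are illustrative names).
    //
    //   ConcurrentHashMapV8<String, Integer> counts = ...;
    //   counts.merge(word, 1, new BiFun<Integer, Integer, Integer>() {
    //     public Integer apply(Integer old, Integer one) {
    //       return old + one; }});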
2990
2991 /**
2992 * Removes the key (and its corresponding value) from this map.
2993 * This method does nothing if the key is not in the map.
2994 *
2995 * @param key the key that needs to be removed
2996 * @return the previous value associated with {@code key}, or
2997 * {@code null} if there was no mapping for {@code key}
2998 * @throws NullPointerException if the specified key is null
2999 */
3000 @SuppressWarnings("unchecked") public V remove(Object key) {
3001 if (key == null)
3002 throw new NullPointerException();
3003 return (V)internalReplace(key, null, null);
3004 }
3005
3006 /**
3007 * {@inheritDoc}
3008 *
3009 * @throws NullPointerException if the specified key is null
3010 */
3011 public boolean remove(Object key, Object value) {
3012 if (key == null)
3013 throw new NullPointerException();
3014 if (value == null)
3015 return false;
3016 return internalReplace(key, null, value) != null;
3017 }
3018
3019 /**
3020 * {@inheritDoc}
3021 *
3022 * @throws NullPointerException if any of the arguments are null
3023 */
3024 public boolean replace(K key, V oldValue, V newValue) {
3025 if (key == null || oldValue == null || newValue == null)
3026 throw new NullPointerException();
3027 return internalReplace(key, newValue, oldValue) != null;
3028 }
3029
3030 /**
3031 * {@inheritDoc}
3032 *
3033 * @return the previous value associated with the specified key,
3034 * or {@code null} if there was no mapping for the key
3035 * @throws NullPointerException if the specified key or value is null
3036 */
3037 @SuppressWarnings("unchecked") public V replace(K key, V value) {
3038 if (key == null || value == null)
3039 throw new NullPointerException();
3040 return (V)internalReplace(key, value, null);
3041 }
3042
3043 /**
3044 * Removes all of the mappings from this map.
3045 */
3046 public void clear() {
3047 internalClear();
3048 }
3049
3050 /**
3051 * Returns a {@link Set} view of the keys contained in this map.
3052 * The set is backed by the map, so changes to the map are
3053 * reflected in the set, and vice-versa.
3054 *
3055 * @return the set view
3056 */
3057 public KeySetView<K,V> keySet() {
3058 KeySetView<K,V> ks = keySet;
3059 return (ks != null) ? ks : (keySet = new KeySetView<K,V>(this, null));
3060 }
3061
3062 /**
3063 * Returns a {@link Set} view of the keys in this map, using the
3064 * given common mapped value for any additions (i.e., {@link
3065 * Collection#add} and {@link Collection#addAll}). This is of
3066 * course only appropriate if it is acceptable to use the same
3067 * value for all additions from this view.
3068 *
3069 * @param mappedValue the mapped value to use for any
3070 * additions.
3071 * @return the set view
3072 * @throws NullPointerException if the mappedValue is null
3073 */
3074 public KeySetView<K,V> keySet(V mappedValue) {
3075 if (mappedValue == null)
3076 throw new NullPointerException();
3077 return new KeySetView<K,V>(this, mappedValue);
3078 }
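    // A usage sketch for the view above (illustrative names): a set of
    // keys whose additions record the supplied default value.
    //
    //   ConcurrentHashMapV8<String, Long> jobs = ...;
    //   Set<String> pending = jobs.keySet(Long.valueOf(0L));
    //   pending.add("job-42");   // associates "job-42" with 0L if absent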
3079
3080 /**
3081 * Returns a {@link Collection} view of the values contained in this map.
3082 * The collection is backed by the map, so changes to the map are
3083 * reflected in the collection, and vice-versa. The collection
3084 * supports element removal, which removes the corresponding
3085 * mapping from this map, via the {@code Iterator.remove},
3086 * {@code Collection.remove}, {@code removeAll},
3087 * {@code retainAll}, and {@code clear} operations. It does not
3088 * support the {@code add} or {@code addAll} operations.
3089 *
3090 * <p>The view's {@code iterator} is a "weakly consistent" iterator
3091 * that will never throw {@link ConcurrentModificationException},
3092 * and guarantees to traverse elements as they existed upon
3093 * construction of the iterator, and may (but is not guaranteed to)
3094 * reflect any modifications subsequent to construction.
3095 */
3096 public Collection<V> values() {
3097 Values<K,V> vs = values;
3098 return (vs != null) ? vs : (values = new Values<K,V>(this));
3099 }
3100
3101 /**
3102 * Returns a {@link Set} view of the mappings contained in this map.
3103 * The set is backed by the map, so changes to the map are
3104 * reflected in the set, and vice-versa. The set supports element
3105 * removal, which removes the corresponding mapping from the map,
3106 * via the {@code Iterator.remove}, {@code Set.remove},
3107 * {@code removeAll}, {@code retainAll}, and {@code clear}
3108 * operations. It does not support the {@code add} or
3109 * {@code addAll} operations.
3110 *
3111 * <p>The view's {@code iterator} is a "weakly consistent" iterator
3112 * that will never throw {@link ConcurrentModificationException},
3113 * and guarantees to traverse elements as they existed upon
3114 * construction of the iterator, and may (but is not guaranteed to)
3115 * reflect any modifications subsequent to construction.
3116 */
3117 public Set<Map.Entry<K,V>> entrySet() {
3118 EntrySet<K,V> es = entrySet;
3119 return (es != null) ? es : (entrySet = new EntrySet<K,V>(this));
3120 }
3121
3122 /**
3123 * Returns an enumeration of the keys in this table.
3124 *
3125 * @return an enumeration of the keys in this table
3126 * @see #keySet()
3127 */
3128 public Enumeration<K> keys() {
3129 return new KeyIterator<K,V>(this);
3130 }
3131
3132 /**
3133 * Returns an enumeration of the values in this table.
3134 *
3135 * @return an enumeration of the values in this table
3136 * @see #values()
3137 */
3138 public Enumeration<V> elements() {
3139 return new ValueIterator<K,V>(this);
3140 }
3141
3142 /**
3143 * Returns a partitionable iterator of the keys in this map.
3144 *
3145 * @return a partitionable iterator of the keys in this map
3146 */
3147 public Spliterator<K> keySpliterator() {
3148 return new KeyIterator<K,V>(this);
3149 }
3150
3151 /**
3152 * Returns a partitionable iterator of the values in this map.
3153 *
3154 * @return a partitionable iterator of the values in this map
3155 */
3156 public Spliterator<V> valueSpliterator() {
3157 return new ValueIterator<K,V>(this);
3158 }
3159
3160 /**
3161 * Returns a partitionable iterator of the entries in this map.
3162 *
3163 * @return a partitionable iterator of the entries in this map
3164 */
3165 public Spliterator<Map.Entry<K,V>> entrySpliterator() {
3166 return new EntryIterator<K,V>(this);
3167 }
3168
3169 /**
3170 * Returns the hash code value for this {@link Map}, i.e.,
3171 * the sum of, for each key-value pair in the map,
3172 * {@code key.hashCode() ^ value.hashCode()}.
3173 *
3174 * @return the hash code value for this map
3175 */
3176 public int hashCode() {
3177 int h = 0;
3178 Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3179 Object v;
3180 while ((v = it.advance()) != null) {
3181 h += it.nextKey.hashCode() ^ v.hashCode();
3182 }
3183 return h;
3184 }
3185
3186 /**
3187 * Returns a string representation of this map. The string
3188 * representation consists of a list of key-value mappings (in no
3189 * particular order) enclosed in braces ("{@code {}}"). Adjacent
3190 * mappings are separated by the characters {@code ", "} (comma
3191 * and space). Each key-value mapping is rendered as the key
3192 * followed by an equals sign ("{@code =}") followed by the
3193 * associated value.
3194 *
3195 * @return a string representation of this map
3196 */
3197 public String toString() {
3198 Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3199 StringBuilder sb = new StringBuilder();
3200 sb.append('{');
3201 Object v;
3202 if ((v = it.advance()) != null) {
3203 for (;;) {
3204 Object k = it.nextKey;
3205 sb.append(k == this ? "(this Map)" : k);
3206 sb.append('=');
3207 sb.append(v == this ? "(this Map)" : v);
3208 if ((v = it.advance()) == null)
3209 break;
3210 sb.append(',').append(' ');
3211 }
3212 }
3213 return sb.append('}').toString();
3214 }
3215
3216 /**
3217 * Compares the specified object with this map for equality.
3218 * Returns {@code true} if the given object is a map with the same
3219 * mappings as this map. This operation may return misleading
3220 * results if either map is concurrently modified during execution
3221 * of this method.
3222 *
3223 * @param o object to be compared for equality with this map
3224 * @return {@code true} if the specified object is equal to this map
3225 */
3226 public boolean equals(Object o) {
3227 if (o != this) {
3228 if (!(o instanceof Map))
3229 return false;
3230 Map<?,?> m = (Map<?,?>) o;
3231 Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3232 Object val;
3233 while ((val = it.advance()) != null) {
3234 Object v = m.get(it.nextKey);
3235 if (v == null || (v != val && !v.equals(val)))
3236 return false;
3237 }
3238 for (Map.Entry<?,?> e : m.entrySet()) {
3239 Object mk, mv, v;
3240 if ((mk = e.getKey()) == null ||
3241 (mv = e.getValue()) == null ||
3242 (v = internalGet(mk)) == null ||
3243 (mv != v && !mv.equals(v)))
3244 return false;
3245 }
3246 }
3247 return true;
3248 }
3249
3250 /* ----------------Iterators -------------- */
3251
3252 @SuppressWarnings("serial") static final class KeyIterator<K,V> extends Traverser<K,V,Object>
3253 implements Spliterator<K>, Enumeration<K> {
3254 KeyIterator(ConcurrentHashMapV8<K, V> map) { super(map); }
3255 KeyIterator(Traverser<K,V,Object> it) {
3256 super(it);
3257 }
3258 public KeyIterator<K,V> split() {
3259 if (nextKey != null)
3260 throw new IllegalStateException();
3261 return new KeyIterator<K,V>(this);
3262 }
3263 @SuppressWarnings("unchecked") public final K next() {
3264 if (nextVal == null && advance() == null)
3265 throw new NoSuchElementException();
3266 Object k = nextKey;
3267 nextVal = null;
3268 return (K) k;
3269 }
3270
3271 public final K nextElement() { return next(); }
3272 }
3273
3274 @SuppressWarnings("serial") static final class ValueIterator<K,V> extends Traverser<K,V,Object>
3275 implements Spliterator<V>, Enumeration<V> {
3276 ValueIterator(ConcurrentHashMapV8<K, V> map) { super(map); }
3277 ValueIterator(Traverser<K,V,Object> it) {
3278 super(it);
3279 }
3280 public ValueIterator<K,V> split() {
3281 if (nextKey != null)
3282 throw new IllegalStateException();
3283 return new ValueIterator<K,V>(this);
3284 }
3285
3286 @SuppressWarnings("unchecked") public final V next() {
3287 Object v;
3288 if ((v = nextVal) == null && (v = advance()) == null)
3289 throw new NoSuchElementException();
3290 nextVal = null;
3291 return (V) v;
3292 }
3293
3294 public final V nextElement() { return next(); }
3295 }
3296
3297 @SuppressWarnings("serial") static final class EntryIterator<K,V> extends Traverser<K,V,Object>
3298 implements Spliterator<Map.Entry<K,V>> {
3299 EntryIterator(ConcurrentHashMapV8<K, V> map) { super(map); }
3300 EntryIterator(Traverser<K,V,Object> it) {
3301 super(it);
3302 }
3303 public EntryIterator<K,V> split() {
3304 if (nextKey != null)
3305 throw new IllegalStateException();
3306 return new EntryIterator<K,V>(this);
3307 }
3308
3309 @SuppressWarnings("unchecked") public final Map.Entry<K,V> next() {
3310 Object v;
3311 if ((v = nextVal) == null && (v = advance()) == null)
3312 throw new NoSuchElementException();
3313 Object k = nextKey;
3314 nextVal = null;
3315 return new MapEntry<K,V>((K)k, (V)v, map);
3316 }
3317 }
3318
3319 /**
3320 * Exported Entry for iterators
3321 */
3322 static final class MapEntry<K,V> implements Map.Entry<K, V> {
3323 final K key; // non-null
3324 V val; // non-null
3325 final ConcurrentHashMapV8<K, V> map;
3326 MapEntry(K key, V val, ConcurrentHashMapV8<K, V> map) {
3327 this.key = key;
3328 this.val = val;
3329 this.map = map;
3330 }
3331 public final K getKey() { return key; }
3332 public final V getValue() { return val; }
3333 public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
3334 public final String toString(){ return key + "=" + val; }
3335
3336 public final boolean equals(Object o) {
3337 Object k, v; Map.Entry<?,?> e;
3338 return ((o instanceof Map.Entry) &&
3339 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3340 (v = e.getValue()) != null &&
3341 (k == key || k.equals(key)) &&
3342 (v == val || v.equals(val)));
3343 }
3344
3345 /**
3346 * Sets our entry's value and writes through to the map. The
3347 * value to return is somewhat arbitrary here. Since we do not
3348 * necessarily track asynchronous changes, the most recent
3349 * "previous" value could be different from what we return (or
3350 * could even have been removed in which case the put will
3351 * re-establish). We do not and cannot guarantee more.
3352 */
3353 public final V setValue(V value) {
3354 if (value == null) throw new NullPointerException();
3355 V v = val;
3356 val = value;
3357 map.put(key, value);
3358 return v;
3359 }
3360 }
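    /*
     * Illustrative usage sketch (not part of the original source): entries
     * obtained from the entry-set iterator write through to the map via
     * setValue, though the value setValue returns may be stale under
     * concurrent updates, as noted above. Assumes a
     * ConcurrentHashMapV8<String,Integer> named "map".
     *
     *   for (Map.Entry<String,Integer> e : map.entrySet())
     *       e.setValue(e.getValue() + 1);   // each update is written through by map.put
     */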
3361
3362 /* ----------------Views -------------- */
3363
3364 /**
3365 * Base class for views.
3366 */
3367 static abstract class CHMView<K, V> {
3368 final ConcurrentHashMapV8<K, V> map;
3369 CHMView(ConcurrentHashMapV8<K, V> map) { this.map = map; }
3370 public final int size() { return map.size(); }
3371 public final boolean isEmpty() { return map.isEmpty(); }
3372 public final void clear() { map.clear(); }
3373
3374 // implementations below rely on concrete classes supplying these
3375 abstract public Iterator<?> iterator();
3376 abstract public boolean contains(Object o);
3377 abstract public boolean remove(Object o);
3378
3379 private static final String oomeMsg = "Required array size too large";
3380
3381 public final Object[] toArray() {
3382 long sz = map.mappingCount();
3383 if (sz > (long)(MAX_ARRAY_SIZE))
3384 throw new OutOfMemoryError(oomeMsg);
3385 int n = (int)sz;
3386 Object[] r = new Object[n];
3387 int i = 0;
3388 Iterator<?> it = iterator();
3389 while (it.hasNext()) {
3390 if (i == n) {
3391 if (n >= MAX_ARRAY_SIZE)
3392 throw new OutOfMemoryError(oomeMsg);
3393 if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
3394 n = MAX_ARRAY_SIZE;
3395 else
3396 n += (n >>> 1) + 1;
3397 r = Arrays.copyOf(r, n);
3398 }
3399 r[i++] = it.next();
3400 }
3401 return (i == n) ? r : Arrays.copyOf(r, i);
3402 }
3403
3404 @SuppressWarnings("unchecked") public final <T> T[] toArray(T[] a) {
3405 long sz = map.mappingCount();
3406 if (sz > (long)(MAX_ARRAY_SIZE))
3407 throw new OutOfMemoryError(oomeMsg);
3408 int m = (int)sz;
3409 T[] r = (a.length >= m) ? a :
3410 (T[])java.lang.reflect.Array
3411 .newInstance(a.getClass().getComponentType(), m);
3412 int n = r.length;
3413 int i = 0;
3414 Iterator<?> it = iterator();
3415 while (it.hasNext()) {
3416 if (i == n) {
3417 if (n >= MAX_ARRAY_SIZE)
3418 throw new OutOfMemoryError(oomeMsg);
3419 if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
3420 n = MAX_ARRAY_SIZE;
3421 else
3422 n += (n >>> 1) + 1;
3423 r = Arrays.copyOf(r, n);
3424 }
3425 r[i++] = (T)it.next();
3426 }
3427 if (a == r && i < n) {
3428 r[i] = null; // null-terminate
3429 return r;
3430 }
3431 return (i == n) ? r : Arrays.copyOf(r, i);
3432 }
3433
3434 public final int hashCode() {
3435 int h = 0;
3436 for (Iterator<?> it = iterator(); it.hasNext();)
3437 h += it.next().hashCode();
3438 return h;
3439 }
3440
3441 public final String toString() {
3442 StringBuilder sb = new StringBuilder();
3443 sb.append('[');
3444 Iterator<?> it = iterator();
3445 if (it.hasNext()) {
3446 for (;;) {
3447 Object e = it.next();
3448 sb.append(e == this ? "(this Collection)" : e);
3449 if (!it.hasNext())
3450 break;
3451 sb.append(',').append(' ');
3452 }
3453 }
3454 return sb.append(']').toString();
3455 }
3456
3457 public final boolean containsAll(Collection<?> c) {
3458 if (c != this) {
3459 for (Iterator<?> it = c.iterator(); it.hasNext();) {
3460 Object e = it.next();
3461 if (e == null || !contains(e))
3462 return false;
3463 }
3464 }
3465 return true;
3466 }
3467
3468 public final boolean removeAll(Collection<?> c) {
3469 boolean modified = false;
3470 for (Iterator<?> it = iterator(); it.hasNext();) {
3471 if (c.contains(it.next())) {
3472 it.remove();
3473 modified = true;
3474 }
3475 }
3476 return modified;
3477 }
3478
3479 public final boolean retainAll(Collection<?> c) {
3480 boolean modified = false;
3481 for (Iterator<?> it = iterator(); it.hasNext();) {
3482 if (!c.contains(it.next())) {
3483 it.remove();
3484 modified = true;
3485 }
3486 }
3487 return modified;
3488 }
3489
3490 }
3491
3492 static final class Values<K,V> extends CHMView<K,V>
3493 implements Collection<V> {
3494 Values(ConcurrentHashMapV8<K, V> map) { super(map); }
3495 public final boolean contains(Object o) { return map.containsValue(o); }
3496 public final boolean remove(Object o) {
3497 if (o != null) {
3498 Iterator<V> it = new ValueIterator<K,V>(map);
3499 while (it.hasNext()) {
3500 if (o.equals(it.next())) {
3501 it.remove();
3502 return true;
3503 }
3504 }
3505 }
3506 return false;
3507 }
3508 public final Iterator<V> iterator() {
3509 return new ValueIterator<K,V>(map);
3510 }
3511 public final boolean add(V e) {
3512 throw new UnsupportedOperationException();
3513 }
3514 public final boolean addAll(Collection<? extends V> c) {
3515 throw new UnsupportedOperationException();
3516 }
3517
3518 }
3519
3520 static final class EntrySet<K,V> extends CHMView<K,V>
3521 implements Set<Map.Entry<K,V>> {
3522 EntrySet(ConcurrentHashMapV8<K, V> map) { super(map); }
3523 public final boolean contains(Object o) {
3524 Object k, v, r; Map.Entry<?,?> e;
3525 return ((o instanceof Map.Entry) &&
3526 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3527 (r = map.get(k)) != null &&
3528 (v = e.getValue()) != null &&
3529 (v == r || v.equals(r)));
3530 }
3531 public final boolean remove(Object o) {
3532 Object k, v; Map.Entry<?,?> e;
3533 return ((o instanceof Map.Entry) &&
3534 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3535 (v = e.getValue()) != null &&
3536 map.remove(k, v));
3537 }
3538 public final Iterator<Map.Entry<K,V>> iterator() {
3539 return new EntryIterator<K,V>(map);
3540 }
3541 public final boolean add(Entry<K,V> e) {
3542 throw new UnsupportedOperationException();
3543 }
3544 public final boolean addAll(Collection<? extends Entry<K,V>> c) {
3545 throw new UnsupportedOperationException();
3546 }
3547 public boolean equals(Object o) {
3548 Set<?> c;
3549 return ((o instanceof Set) &&
3550 ((c = (Set<?>)o) == this ||
3551 (containsAll(c) && c.containsAll(this))));
3552 }
3553 }
3554
3555 /* ---------------- Serialization Support -------------- */
3556
3557 /**
3558 * Stripped-down version of helper class used in previous version,
3559 * declared for the sake of serialization compatibility
3560 */
3561 static class Segment<K,V> implements Serializable {
3562 private static final long serialVersionUID = 2249069246763182397L;
3563 final float loadFactor;
3564 Segment(float lf) { this.loadFactor = lf; }
3565 }
3566
3567 /**
3568 * Saves the state of the {@code ConcurrentHashMapV8} instance to a
3569 * stream (i.e., serializes it).
3570 * @param s the stream
3571 * @serialData
3572 * the key (Object) and value (Object)
3573 * for each key-value mapping, followed by a null pair.
3574 * The key-value mappings are emitted in no particular order.
3575 */
3576 @SuppressWarnings("unchecked") private void writeObject(java.io.ObjectOutputStream s)
3577 throws java.io.IOException {
3578 if (segments == null) { // for serialization compatibility
3579 segments = (Segment<K,V>[])
3580 new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
3581 for (int i = 0; i < segments.length; ++i)
3582 segments[i] = new Segment<K,V>(LOAD_FACTOR);
3583 }
3584 s.defaultWriteObject();
3585 Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3586 Object v;
3587 while ((v = it.advance()) != null) {
3588 s.writeObject(it.nextKey);
3589 s.writeObject(v);
3590 }
3591 s.writeObject(null);
3592 s.writeObject(null);
3593 segments = null; // throw away
3594 }
3595
3596 /**
3597 * Reconstitutes the instance from a stream (that is, deserializes it).
3598 * @param s the stream
3599 */
3600 @SuppressWarnings("unchecked") private void readObject(java.io.ObjectInputStream s)
3601 throws java.io.IOException, ClassNotFoundException {
3602 s.defaultReadObject();
3603 this.segments = null; // unneeded
3604 // initialize transient final field
3605 UNSAFE.putObjectVolatile(this, counterOffset, new LongAdder());
3606
3607 // Create all nodes, then place in table once size is known
3608 long size = 0L;
3609 Node p = null;
3610 for (;;) {
3611 K k = (K) s.readObject();
3612 V v = (V) s.readObject();
3613 if (k != null && v != null) {
3614 int h = spread(k.hashCode());
3615 p = new Node(h, k, v, p);
3616 ++size;
3617 }
3618 else
3619 break;
3620 }
3621 if (p != null) {
3622 boolean init = false;
3623 int n;
3624 if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
3625 n = MAXIMUM_CAPACITY;
3626 else {
3627 int sz = (int)size;
3628 n = tableSizeFor(sz + (sz >>> 1) + 1);
3629 }
3630 int sc = sizeCtl;
3631 boolean collide = false;
3632 if (n > sc &&
3633 UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
3634 try {
3635 if (table == null) {
3636 init = true;
3637 Node[] tab = new Node[n];
3638 int mask = n - 1;
3639 while (p != null) {
3640 int j = p.hash & mask;
3641 Node next = p.next;
3642 Node q = p.next = tabAt(tab, j);
3643 setTabAt(tab, j, p);
3644 if (!collide && q != null && q.hash == p.hash)
3645 collide = true;
3646 p = next;
3647 }
3648 table = tab;
3649 counter.add(size);
3650 sc = n - (n >>> 2);
3651 }
3652 } finally {
3653 sizeCtl = sc;
3654 }
3655 if (collide) { // rescan and convert to TreeBins
3656 Node[] tab = table;
3657 for (int i = 0; i < tab.length; ++i) {
3658 int c = 0;
3659 for (Node e = tabAt(tab, i); e != null; e = e.next) {
3660 if (++c > TREE_THRESHOLD &&
3661 (e.key instanceof Comparable)) {
3662 replaceWithTreeBin(tab, i, e.key);
3663 break;
3664 }
3665 }
3666 }
3667 }
3668 }
3669 if (!init) { // Can only happen if unsafely published.
3670 while (p != null) {
3671 internalPut(p.key, p.val);
3672 p = p.next;
3673 }
3674 }
3675 }
3676 }
3677
3678
3679 // -------------------------------------------------------
3680
3681 // Sams
3682 /** Interface describing a void action of one argument */
3683 public interface Action<A> { void apply(A a); }
3684 /** Interface describing a void action of two arguments */
3685 public interface BiAction<A,B> { void apply(A a, B b); }
3686 /** Interface describing a function of one argument */
3687 public interface Fun<A,T> { T apply(A a); }
3688 /** Interface describing a function of two arguments */
3689 public interface BiFun<A,B,T> { T apply(A a, B b); }
3690 /** Interface describing a function of no arguments */
3691 public interface Generator<T> { T apply(); }
3692 /** Interface describing a function mapping its argument to a double */
3693 public interface ObjectToDouble<A> { double apply(A a); }
3694 /** Interface describing a function mapping its argument to a long */
3695 public interface ObjectToLong<A> { long apply(A a); }
3696 /** Interface describing a function mapping its argument to an int */
3697 public interface ObjectToInt<A> {int apply(A a); }
3698 /** Interface describing a function mapping two arguments to a double */
3699 public interface ObjectByObjectToDouble<A,B> { double apply(A a, B b); }
3700 /** Interface describing a function mapping two arguments to a long */
3701 public interface ObjectByObjectToLong<A,B> { long apply(A a, B b); }
3702 /** Interface describing a function mapping two arguments to an int */
3703 public interface ObjectByObjectToInt<A,B> {int apply(A a, B b); }
3704 /** Interface describing a function mapping a double to a double */
3705 public interface DoubleToDouble { double apply(double a); }
3706 /** Interface describing a function mapping a long to a long */
3707 public interface LongToLong { long apply(long a); }
3708 /** Interface describing a function mapping an int to an int */
3709 public interface IntToInt { int apply(int a); }
3710 /** Interface describing a function mapping two doubles to a double */
3711 public interface DoubleByDoubleToDouble { double apply(double a, double b); }
3712 /** Interface describing a function mapping two longs to a long */
3713 public interface LongByLongToLong { long apply(long a, long b); }
3714 /** Interface describing a function mapping two ints to an int */
3715 public interface IntByIntToInt { int apply(int a, int b); }
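    /*
     * Illustrative sketch (not part of the original source): these
     * single-abstract-method interfaces are typically implemented with
     * anonymous classes when passed to the bulk methods below. For example,
     * a Fun that maps a String to its length:
     *
     *   Fun<String, Integer> length = new Fun<String, Integer>() {
     *       public Integer apply(String s) { return s.length(); }
     *   };
     */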
3716
3717
3718 // -------------------------------------------------------
3719
3720 /**
3721 * Performs the given action for each (key, value).
3722 *
3723 * @param action the action
3724 */
3725 public void forEach(BiAction<K,V> action) {
3726 ForkJoinTasks.forEach
3727 (this, action).invoke();
3728 }
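    // Illustrative usage sketch (not part of the original source); assumes a
    // ConcurrentHashMapV8<String,Long> named "map":
    //
    //   map.forEach(new BiAction<String,Long>() {
    //       public void apply(String key, Long value) {
    //           System.out.println(key + "=" + value);
    //       }
    //   });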
3729
3730 /**
3731 * Performs the given action for each non-null transformation
3732 * of each (key, value).
3733 *
3734 * @param transformer a function returning the transformation
3735 * for an element, or null if there is no transformation (in
3736 * which case the action is not applied).
3737 * @param action the action
3738 */
3739 public <U> void forEach(BiFun<? super K, ? super V, ? extends U> transformer,
3740 Action<U> action) {
3741 ForkJoinTasks.forEach
3742 (this, transformer, action).invoke();
3743 }
3744
3745 /**
3746 * Returns a non-null result from applying the given search
3747 * function on each (key, value), or null if none. Upon
3748 * success, further element processing is suppressed and the
3749 * results of any other parallel invocations of the search
3750 * function are ignored.
3751 *
3752 * @param searchFunction a function returning a non-null
3753 * result on success, else null
3754 * @return a non-null result from applying the given search
3755 * function on each (key, value), or null if none
3756 */
3757 public <U> U search(BiFun<? super K, ? super V, ? extends U> searchFunction) {
3758 return ForkJoinTasks.search
3759 (this, searchFunction).invoke();
3760 }
3761
3762 /**
3763 * Returns the result of accumulating the given transformation
3764 * of all (key, value) pairs using the given reducer to
3765 * combine values, or null if none.
3766 *
3767 * @param transformer a function returning the transformation
3768 * for an element, or null if there is no transformation (in
3769 * which case it is not combined).
3770 * @param reducer a commutative associative combining function
3771 * @return the result of accumulating the given transformation
3772 * of all (key, value) pairs
3773 */
3774 public <U> U reduce(BiFun<? super K, ? super V, ? extends U> transformer,
3775 BiFun<? super U, ? super U, ? extends U> reducer) {
3776 return ForkJoinTasks.reduce
3777 (this, transformer, reducer).invoke();
3778 }
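    // Illustrative usage sketch (not part of the original source): find the
    // longest "key=value" rendering, or null if the map is empty. Assumes a
    // ConcurrentHashMapV8<String,String> named "map".
    //
    //   String longest = map.reduce(
    //       new BiFun<String,String,String>() {
    //           public String apply(String k, String v) { return k + "=" + v; }
    //       },
    //       new BiFun<String,String,String>() {
    //           public String apply(String a, String b) {
    //               return a.length() >= b.length() ? a : b;
    //           }
    //       });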
3779
3780 /**
3781 * Returns the result of accumulating the given transformation
3782 * of all (key, value) pairs using the given reducer to
3783 * combine values, and the given basis as an identity value.
3784 *
3785 * @param transformer a function returning the transformation
3786 * for an element
3787 * @param basis the identity (initial default value) for the reduction
3788 * @param reducer a commutative associative combining function
3789 * @return the result of accumulating the given transformation
3790 * of all (key, value) pairs
3791 */
3792 public double reduceToDouble(ObjectByObjectToDouble<? super K, ? super V> transformer,
3793 double basis,
3794 DoubleByDoubleToDouble reducer) {
3795 return ForkJoinTasks.reduceToDouble
3796 (this, transformer, basis, reducer).invoke();
3797 }
3798
3799 /**
3800 * Returns the result of accumulating the given transformation
3801 * of all (key, value) pairs using the given reducer to
3802 * combine values, and the given basis as an identity value.
3803 *
3804 * @param transformer a function returning the transformation
3805 * for an element
3806 * @param basis the identity (initial default value) for the reduction
3807 * @param reducer a commutative associative combining function
3808 * @return the result of accumulating the given transformation
3809 * of all (key, value) pairs
3810 */
3811 public long reduceToLong(ObjectByObjectToLong<? super K, ? super V> transformer,
3812 long basis,
3813 LongByLongToLong reducer) {
3814 return ForkJoinTasks.reduceToLong
3815 (this, transformer, basis, reducer).invoke();
3816 }
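    // Illustrative usage sketch (not part of the original source): sum the
    // lengths of all values, with 0 as the identity. Assumes a
    // ConcurrentHashMapV8<String,String> named "map".
    //
    //   long totalChars = map.reduceToLong(
    //       new ObjectByObjectToLong<String,String>() {
    //           public long apply(String k, String v) { return v.length(); }
    //       },
    //       0L,
    //       new LongByLongToLong() {
    //           public long apply(long a, long b) { return a + b; }
    //       });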
3817
3818 /**
3819 * Returns the result of accumulating the given transformation
3820 * of all (key, value) pairs using the given reducer to
3821 * combine values, and the given basis as an identity value.
3822 *
3823 * @param transformer a function returning the transformation
3824 * for an element
3825 * @param basis the identity (initial default value) for the reduction
3826 * @param reducer a commutative associative combining function
3827 * @return the result of accumulating the given transformation
3828 * of all (key, value) pairs
3829 */
3830 public int reduceToInt(ObjectByObjectToInt<? super K, ? super V> transformer,
3831 int basis,
3832 IntByIntToInt reducer) {
3833 return ForkJoinTasks.reduceToInt
3834 (this, transformer, basis, reducer).invoke();
3835 }
3836
3837 /**
3838 * Performs the given action for each key.
3839 *
3840 * @param action the action
3841 */
3842 public void forEachKey(Action<K> action) {
3843 ForkJoinTasks.forEachKey
3844 (this, action).invoke();
3845 }
3846
3847 /**
3848 * Performs the given action for each non-null transformation
3849 * of each key.
3850 *
3851 * @param transformer a function returning the transformation
3852 * for an element, or null if there is no transformation (in
3853 * which case the action is not applied).
3854 * @param action the action
3855 */
3856 public <U> void forEachKey(Fun<? super K, ? extends U> transformer,
3857 Action<U> action) {
3858 ForkJoinTasks.forEachKey
3859 (this, transformer, action).invoke();
3860 }
3861
3862 /**
3863 * Returns the result of accumulating all keys using the given
3864 * reducer to combine values, or null if none.
3865 *
3866 * @param reducer a commutative associative combining function
3867 * @return the result of accumulating all keys using the given
3868 * reducer to combine values, or null if none
3869 */
3870 public K reduceKeys(BiFun<? super K, ? super K, ? extends K> reducer) {
3871 return ForkJoinTasks.reduceKeys
3872 (this, reducer).invoke();
3873 }
3874
3875 /**
3876 * Returns the result of accumulating the given transformation
3877 * of all keys using the given reducer to combine values, or
3878 * null if none.
3879 *
3880 * @param transformer a function returning the transformation
3881 * for an element, or null if there is no transformation (in
3882 * which case it is not combined).
3883 * @param reducer a commutative associative combining function
3884 * @return the result of accumulating the given transformation
3885 * of all keys
3886 */
3887 public <U> U reduceKeys(Fun<? super K, ? extends U> transformer,
3888 BiFun<? super U, ? super U, ? extends U> reducer) {
3889 return ForkJoinTasks.reduceKeys
3890 (this, transformer, reducer).invoke();
3891 }
3892
3893 /**
3894 * Returns the result of accumulating the given transformation
3895 * of all keys using the given reducer to combine values, and
3896 * the given basis as an identity value.
3897 *
3898 * @param transformer a function returning the transformation
3899 * for an element
3900 * @param basis the identity (initial default value) for the reduction
3901 * @param reducer a commutative associative combining function
3902 * @return the result of accumulating the given transformation
3903 * of all keys
3904 */
3905 public double reduceKeysToDouble(ObjectToDouble<? super K> transformer,
3906 double basis,
3907 DoubleByDoubleToDouble reducer) {
3908 return ForkJoinTasks.reduceKeysToDouble
3909 (this, transformer, basis, reducer).invoke();
3910 }
3911
3912 /**
3913 * Returns the result of accumulating the given transformation
3914 * of all keys using the given reducer to combine values, and
3915 * the given basis as an identity value.
3916 *
3917 * @param transformer a function returning the transformation
3918 * for an element
3919 * @param basis the identity (initial default value) for the reduction
3920 * @param reducer a commutative associative combining function
3921 * @return the result of accumulating the given transformation
3922 * of all keys
3923 */
3924 public long reduceKeysToLong(ObjectToLong<? super K> transformer,
3925 long basis,
3926 LongByLongToLong reducer) {
3927 return ForkJoinTasks.reduceKeysToLong
3928 (this, transformer, basis, reducer).invoke();
3929 }
3930
3931 /**
3932 * Returns the result of accumulating the given transformation
3933 * of all keys using the given reducer to combine values, and
3934 * the given basis as an identity value.
3935 *
3936 * @param transformer a function returning the transformation
3937 * for an element
3938 * @param basis the identity (initial default value) for the reduction
3939 * @param reducer a commutative associative combining function
3940 * @return the result of accumulating the given transformation
3941 * of all keys
3942 */
3943 public int reduceKeysToInt(ObjectToInt<? super K> transformer,
3944 int basis,
3945 IntByIntToInt reducer) {
3946 return ForkJoinTasks.reduceKeysToInt
3947 (this, transformer, basis, reducer).invoke();
3948 }
3949
3950 /**
3951 * Performs the given action for each value.
3952 *
3953 * @param action the action
3954 */
3955 public void forEachValue(Action<V> action) {
3956 ForkJoinTasks.forEachValue
3957 (this, action).invoke();
3958 }
3959
3960 /**
3961 * Performs the given action for each non-null transformation
3962 * of each value.
3963 *
3964 * @param transformer a function returning the transformation
3965 * for an element, or null if there is no transformation (in
3966 * which case the action is not applied).
 * @param action the action
3967 */
3968 public <U> void forEachValue(Fun<? super V, ? extends U> transformer,
3969 Action<U> action) {
3970 ForkJoinTasks.forEachValue
3971 (this, transformer, action).invoke();
3972 }
3973
3974 /**
3975 * Returns a non-null result from applying the given search
3976 * function on each value, or null if none. Upon success,
3977 * further element processing is suppressed and the results of
3978 * any other parallel invocations of the search function are
3979 * ignored.
3980 *
3981 * @param searchFunction a function returning a non-null
3982 * result on success, else null
3983 * @return a non-null result from applying the given search
3984 * function on each value, or null if none
3985 *
3986 */
3987 public <U> U searchValues(Fun<? super V, ? extends U> searchFunction) {
3988 return ForkJoinTasks.searchValues
3989 (this, searchFunction).invoke();
3990 }
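    // Illustrative usage sketch (not part of the original source): return the
    // first value encountered whose length exceeds a threshold, or null if
    // none. Assumes a ConcurrentHashMapV8<String,String> named "map".
    //
    //   String big = map.searchValues(new Fun<String,String>() {
    //       public String apply(String v) { return v.length() > 100 ? v : null; }
    //   });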
3991
3992 /**
3993 * Returns the result of accumulating all values using the
3994 * given reducer to combine values, or null if none.
3995 *
3996 * @param reducer a commutative associative combining function
3997 * @return the result of accumulating all values
3998 */
3999 public V reduceValues(BiFun<? super V, ? super V, ? extends V> reducer) {
4000 return ForkJoinTasks.reduceValues
4001 (this, reducer).invoke();
4002 }
4003
4004 /**
4005 * Returns the result of accumulating the given transformation
4006 * of all values using the given reducer to combine values, or
4007 * null if none.
4008 *
4009 * @param transformer a function returning the transformation
4010 * for an element, or null if there is no transformation (in
4011 * which case it is not combined).
4012 * @param reducer a commutative associative combining function
4013 * @return the result of accumulating the given transformation
4014 * of all values
4015 */
4016 public <U> U reduceValues(Fun<? super V, ? extends U> transformer,
4017 BiFun<? super U, ? super U, ? extends U> reducer) {
4018 return ForkJoinTasks.reduceValues
4019 (this, transformer, reducer).invoke();
4020 }
4021
4022 /**
4023 * Returns the result of accumulating the given transformation
4024 * of all values using the given reducer to combine values,
4025 * and the given basis as an identity value.
4026 *
4027 * @param transformer a function returning the transformation
4028 * for an element
4029 * @param basis the identity (initial default value) for the reduction
4030 * @param reducer a commutative associative combining function
4031 * @return the result of accumulating the given transformation
4032 * of all values
4033 */
4034 public double reduceValuesToDouble(ObjectToDouble<? super V> transformer,
4035 double basis,
4036 DoubleByDoubleToDouble reducer) {
4037 return ForkJoinTasks.reduceValuesToDouble
4038 (this, transformer, basis, reducer).invoke();
4039 }
4040
4041 /**
4042 * Returns the result of accumulating the given transformation
4043 * of all values using the given reducer to combine values,
4044 * and the given basis as an identity value.
4045 *
4046 * @param transformer a function returning the transformation
4047 * for an element
4048 * @param basis the identity (initial default value) for the reduction
4049 * @param reducer a commutative associative combining function
4050 * @return the result of accumulating the given transformation
4051 * of all values
4052 */
4053 public long reduceValuesToLong(ObjectToLong<? super V> transformer,
4054 long basis,
4055 LongByLongToLong reducer) {
4056 return ForkJoinTasks.reduceValuesToLong
4057 (this, transformer, basis, reducer).invoke();
4058 }
4059
4060 /**
4061 * Returns the result of accumulating the given transformation
4062 * of all values using the given reducer to combine values,
4063 * and the given basis as an identity value.
4064 *
4065 * @param transformer a function returning the transformation
4066 * for an element
4067 * @param basis the identity (initial default value) for the reduction
4068 * @param reducer a commutative associative combining function
4069 * @return the result of accumulating the given transformation
4070 * of all values
4071 */
4072 public int reduceValuesToInt(ObjectToInt<? super V> transformer,
4073 int basis,
4074 IntByIntToInt reducer) {
4075 return ForkJoinTasks.reduceValuesToInt
4076 (this, transformer, basis, reducer).invoke();
4077 }
4078
4079 /**
4080 * Performs the given action for each entry.
4081 *
4082 * @param action the action
4083 */
4084 public void forEachEntry(Action<Map.Entry<K,V>> action) {
4085 ForkJoinTasks.forEachEntry
4086 (this, action).invoke();
4087 }
4088
4089 /**
4090 * Performs the given action for each non-null transformation
4091 * of each entry.
4092 *
4093 * @param transformer a function returning the transformation
4094 * for an element, or null if there is no transformation (in
4095 * which case the action is not applied).
4096 * @param action the action
4097 */
4098 public <U> void forEachEntry(Fun<Map.Entry<K,V>, ? extends U> transformer,
4099 Action<U> action) {
4100 ForkJoinTasks.forEachEntry
4101 (this, transformer, action).invoke();
4102 }
4103
4104 /**
4105 * Returns a non-null result from applying the given search
4106 * function on each entry, or null if none. Upon success,
4107 * further element processing is suppressed and the results of
4108 * any other parallel invocations of the search function are
4109 * ignored.
4110 *
4111 * @param searchFunction a function returning a non-null
4112 * result on success, else null
4113 * @return a non-null result from applying the given search
4114 * function on each entry, or null if none
4115 */
4116 public <U> U searchEntries(Fun<Map.Entry<K,V>, ? extends U> searchFunction) {
4117 return ForkJoinTasks.searchEntries
4118 (this, searchFunction).invoke();
4119 }
4120
4121 /**
4122 * Returns the result of accumulating all entries using the
4123 * given reducer to combine values, or null if none.
4124 *
4125 * @param reducer a commutative associative combining function
4126 * @return the result of accumulating all entries
4127 */
4128 public Map.Entry<K,V> reduceEntries(BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4129 return ForkJoinTasks.reduceEntries
4130 (this, reducer).invoke();
4131 }
4132
4133 /**
4134 * Returns the result of accumulating the given transformation
4135 * of all entries using the given reducer to combine values,
4136 * or null if none.
4137 *
4138 * @param transformer a function returning the transformation
4139 * for an element, or null if there is no transformation (in
4140 * which case it is not combined).
4141 * @param reducer a commutative associative combining function
4142 * @return the result of accumulating the given transformation
4143 * of all entries
4144 */
4145 public <U> U reduceEntries(Fun<Map.Entry<K,V>, ? extends U> transformer,
4146 BiFun<? super U, ? super U, ? extends U> reducer) {
4147 return ForkJoinTasks.reduceEntries
4148 (this, transformer, reducer).invoke();
4149 }
4150
4151 /**
4152 * Returns the result of accumulating the given transformation
4153 * of all entries using the given reducer to combine values,
4154 * and the given basis as an identity value.
4155 *
4156 * @param transformer a function returning the transformation
4157 * for an element
4158 * @param basis the identity (initial default value) for the reduction
4159 * @param reducer a commutative associative combining function
4160 * @return the result of accumulating the given transformation
4161 * of all entries
4162 */
4163 public double reduceEntriesToDouble(ObjectToDouble<Map.Entry<K,V>> transformer,
4164 double basis,
4165 DoubleByDoubleToDouble reducer) {
4166 return ForkJoinTasks.reduceEntriesToDouble
4167 (this, transformer, basis, reducer).invoke();
4168 }
4169
4170 /**
4171 * Returns the result of accumulating the given transformation
4172 * of all entries using the given reducer to combine values,
4173 * and the given basis as an identity value.
4174 *
4175 * @param transformer a function returning the transformation
4176 * for an element
4177 * @param basis the identity (initial default value) for the reduction
4178 * @param reducer a commutative associative combining function
4179 * @return the result of accumulating the given transformation
4180 * of all entries
4181 */
4182 public long reduceEntriesToLong(ObjectToLong<Map.Entry<K,V>> transformer,
4183 long basis,
4184 LongByLongToLong reducer) {
4185 return ForkJoinTasks.reduceEntriesToLong
4186 (this, transformer, basis, reducer).invoke();
4187 }
4188
4189 /**
4190 * Returns the result of accumulating the given transformation
4191 * of all entries using the given reducer to combine values,
4192 * and the given basis as an identity value.
4193 *
4194 * @param transformer a function returning the transformation
4195 * for an element
4196 * @param basis the identity (initial default value) for the reduction
4197 * @param reducer a commutative associative combining function
4198 * @return the result of accumulating the given transformation
4199 * of all entries
4200 */
4201 public int reduceEntriesToInt(ObjectToInt<Map.Entry<K,V>> transformer,
4202 int basis,
4203 IntByIntToInt reducer) {
4204 return ForkJoinTasks.reduceEntriesToInt
4205 (this, transformer, basis, reducer).invoke();
4206 }
4207
4208 // ---------------------------------------------------------------------
4209
4210 /**
4211 * Predefined tasks for performing bulk parallel operations on
4212 * ConcurrentHashMapV8s. These tasks follow the forms and rules used
4213 * for bulk operations. Each method has the same name, but returns
4214 * a task rather than invoking it. These methods may be useful in
4215 * custom applications such as submitting a task without waiting
4216 * for completion, using a custom pool, or combining with other
4217 * tasks.
4218 */
4219 public static class ForkJoinTasks {
4220 private ForkJoinTasks() {}
4221
4222 /**
4223 * Returns a task that when invoked, performs the given
4224 * action for each (key, value)
4225 *
4226 * @param map the map
4227 * @param action the action
4228 * @return the task
4229 */
4230 public static <K,V> ForkJoinTask<Void> forEach
4231 (ConcurrentHashMapV8<K,V> map,
4232 BiAction<K,V> action) {
4233 if (action == null) throw new NullPointerException();
4234 return new ForEachMappingTask<K,V>(map, null, -1, null, action);
4235 }
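        // Illustrative usage sketch (not part of the original source): run the
        // returned task in a caller-supplied pool rather than invoking it in
        // the calling thread. Assumes a ConcurrentHashMapV8<String,Long> "map",
        // a jsr166e.ForkJoinPool "pool", and a BiAction<String,Long> "action".
        //
        //   ForkJoinTask<Void> task = ForkJoinTasks.forEach(map, action);
        //   pool.invoke(task);    // or pool.submit(task) to proceed without waiting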
4236
4237 /**
4238 * Returns a task that when invoked, performs the given
4239 * action for each non-null transformation of each (key, value)
4240 *
4241 * @param map the map
4242 * @param transformer a function returning the transformation
4243 * for an element, or null if there is no transformation (in
4244 * which case the action is not applied)
4245 * @param action the action
4246 * @return the task
4247 */
4248 public static <K,V,U> ForkJoinTask<Void> forEach
4249 (ConcurrentHashMapV8<K,V> map,
4250 BiFun<? super K, ? super V, ? extends U> transformer,
4251 Action<U> action) {
4252 if (transformer == null || action == null)
4253 throw new NullPointerException();
4254 return new ForEachTransformedMappingTask<K,V,U>
4255 (map, null, -1, null, transformer, action);
4256 }
4257
4258 /**
4259 * Returns a task that when invoked, returns a non-null result
4260 * from applying the given search function on each (key,
4261 * value), or null if none. Upon success, further element
4262 * processing is suppressed and the results of any other
4263 * parallel invocations of the search function are ignored.
4264 *
4265 * @param map the map
4266 * @param searchFunction a function returning a non-null
4267 * result on success, else null
4268 * @return the task
4269 */
4270 public static <K,V,U> ForkJoinTask<U> search
4271 (ConcurrentHashMapV8<K,V> map,
4272 BiFun<? super K, ? super V, ? extends U> searchFunction) {
4273 if (searchFunction == null) throw new NullPointerException();
4274 return new SearchMappingsTask<K,V,U>
4275 (map, null, -1, null, searchFunction,
4276 new AtomicReference<U>());
4277 }
4278
4279 /**
4280 * Returns a task that when invoked, returns the result of
4281 * accumulating the given transformation of all (key, value) pairs
4282 * using the given reducer to combine values, or null if none.
4283 *
4284 * @param map the map
4285 * @param transformer a function returning the transformation
4286 * for an element, or null if there is no transformation (in
4287 * which case it is not combined).
4288 * @param reducer a commutative associative combining function
4289 * @return the task
4290 */
4291 public static <K,V,U> ForkJoinTask<U> reduce
4292 (ConcurrentHashMapV8<K,V> map,
4293 BiFun<? super K, ? super V, ? extends U> transformer,
4294 BiFun<? super U, ? super U, ? extends U> reducer) {
4295 if (transformer == null || reducer == null)
4296 throw new NullPointerException();
4297 return new MapReduceMappingsTask<K,V,U>
4298 (map, null, -1, null, transformer, reducer);
4299 }
4300
4301 /**
4302 * Returns a task that when invoked, returns the result of
4303 * accumulating the given transformation of all (key, value) pairs
4304 * using the given reducer to combine values, and the given
4305 * basis as an identity value.
4306 *
4307 * @param map the map
4308 * @param transformer a function returning the transformation
4309 * for an element
4310 * @param basis the identity (initial default value) for the reduction
4311 * @param reducer a commutative associative combining function
4312 * @return the task
4313 */
4314 public static <K,V> ForkJoinTask<Double> reduceToDouble
4315 (ConcurrentHashMapV8<K,V> map,
4316 ObjectByObjectToDouble<? super K, ? super V> transformer,
4317 double basis,
4318 DoubleByDoubleToDouble reducer) {
4319 if (transformer == null || reducer == null)
4320 throw new NullPointerException();
4321 return new MapReduceMappingsToDoubleTask<K,V>
4322 (map, null, -1, null, transformer, basis, reducer);
4323 }
4324
4325 /**
4326 * Returns a task that when invoked, returns the result of
4327 * accumulating the given transformation of all (key, value) pairs
4328 * using the given reducer to combine values, and the given
4329 * basis as an identity value.
4330 *
4331 * @param map the map
4332 * @param transformer a function returning the transformation
4333 * for an element
4334 * @param basis the identity (initial default value) for the reduction
4335 * @param reducer a commutative associative combining function
4336 * @return the task
4337 */
4338 public static <K,V> ForkJoinTask<Long> reduceToLong
4339 (ConcurrentHashMapV8<K,V> map,
4340 ObjectByObjectToLong<? super K, ? super V> transformer,
4341 long basis,
4342 LongByLongToLong reducer) {
4343 if (transformer == null || reducer == null)
4344 throw new NullPointerException();
4345 return new MapReduceMappingsToLongTask<K,V>
4346 (map, null, -1, null, transformer, basis, reducer);
4347 }
4348
4349 /**
4350 * Returns a task that when invoked, returns the result of
4351 * accumulating the given transformation of all (key, value) pairs
4352 * using the given reducer to combine values, and the given
4353 * basis as an identity value.
4354 *
4355 * @param transformer a function returning the transformation
4356 * for an element
4357 * @param basis the identity (initial default value) for the reduction
4358 * @param reducer a commutative associative combining function
4359 * @return the task
4360 */
4361 public static <K,V> ForkJoinTask<Integer> reduceToInt
4362 (ConcurrentHashMapV8<K,V> map,
4363 ObjectByObjectToInt<? super K, ? super V> transformer,
4364 int basis,
4365 IntByIntToInt reducer) {
4366 if (transformer == null || reducer == null)
4367 throw new NullPointerException();
4368 return new MapReduceMappingsToIntTask<K,V>
4369 (map, null, -1, null, transformer, basis, reducer);
4370 }
4371
4372 /**
4373 * Returns a task that when invoked, performs the given action
4374 * for each key.
4375 *
4376 * @param map the map
4377 * @param action the action
4378 * @return the task
4379 */
4380 public static <K,V> ForkJoinTask<Void> forEachKey
4381 (ConcurrentHashMapV8<K,V> map,
4382 Action<K> action) {
4383 if (action == null) throw new NullPointerException();
4384 return new ForEachKeyTask<K,V>(map, null, -1, null, action);
4385 }
4386
4387 /**
4388 * Returns a task that when invoked, performs the given action
4389 * for each non-null transformation of each key.
4390 *
4391 * @param map the map
4392 * @param transformer a function returning the transformation
4393 * for an element, or null if there is no transformation (in
4394 * which case the action is not applied)
4395 * @param action the action
4396 * @return the task
4397 */
4398 public static <K,V,U> ForkJoinTask<Void> forEachKey
4399 (ConcurrentHashMapV8<K,V> map,
4400 Fun<? super K, ? extends U> transformer,
4401 Action<U> action) {
4402 if (transformer == null || action == null)
4403 throw new NullPointerException();
4404 return new ForEachTransformedKeyTask<K,V,U>
4405 (map, null, -1, null, transformer, action);
4406 }
4407
4408 /**
4409 * Returns a task that when invoked, returns a non-null result
4410 * from applying the given search function on each key, or
4411 * null if none. Upon success, further element processing is
4412 * suppressed and the results of any other parallel
4413 * invocations of the search function are ignored.
4414 *
4415 * @param map the map
4416 * @param searchFunction a function returning a non-null
4417 * result on success, else null
4418 * @return the task
4419 */
4420 public static <K,V,U> ForkJoinTask<U> searchKeys
4421 (ConcurrentHashMapV8<K,V> map,
4422 Fun<? super K, ? extends U> searchFunction) {
4423 if (searchFunction == null) throw new NullPointerException();
4424 return new SearchKeysTask<K,V,U>
4425 (map, null, -1, null, searchFunction,
4426 new AtomicReference<U>());
4427 }
4428
4429 /**
4430 * Returns a task that when invoked, returns the result of
4431 * accumulating all keys using the given reducer to combine
4432 * values, or null if none.
4433 *
4434 * @param map the map
4435 * @param reducer a commutative associative combining function
4436 * @return the task
4437 */
4438 public static <K,V> ForkJoinTask<K> reduceKeys
4439 (ConcurrentHashMapV8<K,V> map,
4440 BiFun<? super K, ? super K, ? extends K> reducer) {
4441 if (reducer == null) throw new NullPointerException();
4442 return new ReduceKeysTask<K,V>
4443 (map, null, -1, null, reducer);
4444 }
4445
4446 /**
4447 * Returns a task that when invoked, returns the result of
4448 * accumulating the given transformation of all keys using the given
4449 * reducer to combine values, or null if none.
4450 *
4451 * @param map the map
4452 * @param transformer a function returning the transformation
4453 * for an element, or null if there is no transformation (in
4454 * which case it is not combined).
4455 * @param reducer a commutative associative combining function
4456 * @return the task
4457 */
4458 public static <K,V,U> ForkJoinTask<U> reduceKeys
4459 (ConcurrentHashMapV8<K,V> map,
4460 Fun<? super K, ? extends U> transformer,
4461 BiFun<? super U, ? super U, ? extends U> reducer) {
4462 if (transformer == null || reducer == null)
4463 throw new NullPointerException();
4464 return new MapReduceKeysTask<K,V,U>
4465 (map, null, -1, null, transformer, reducer);
4466 }
4467
4468 /**
4469 * Returns a task that when invoked, returns the result of
4470 * accumulating the given transformation of all keys using the given
4471 * reducer to combine values, and the given basis as an
4472 * identity value.
4473 *
4474 * @param map the map
4475 * @param transformer a function returning the transformation
4476 * for an element
4477 * @param basis the identity (initial default value) for the reduction
4478 * @param reducer a commutative associative combining function
4479 * @return the task
4480 */
4481 public static <K,V> ForkJoinTask<Double> reduceKeysToDouble
4482 (ConcurrentHashMapV8<K,V> map,
4483 ObjectToDouble<? super K> transformer,
4484 double basis,
4485 DoubleByDoubleToDouble reducer) {
4486 if (transformer == null || reducer == null)
4487 throw new NullPointerException();
4488 return new MapReduceKeysToDoubleTask<K,V>
4489 (map, null, -1, null, transformer, basis, reducer);
4490 }
4491
4492 /**
4493 * Returns a task that when invoked, returns the result of
4494 * accumulating the given transformation of all keys using the given
4495 * reducer to combine values, and the given basis as an
4496 * identity value.
4497 *
4498 * @param map the map
4499 * @param transformer a function returning the transformation
4500 * for an element
4501 * @param basis the identity (initial default value) for the reduction
4502 * @param reducer a commutative associative combining function
4503 * @return the task
4504 */
4505 public static <K,V> ForkJoinTask<Long> reduceKeysToLong
4506 (ConcurrentHashMapV8<K,V> map,
4507 ObjectToLong<? super K> transformer,
4508 long basis,
4509 LongByLongToLong reducer) {
4510 if (transformer == null || reducer == null)
4511 throw new NullPointerException();
4512 return new MapReduceKeysToLongTask<K,V>
4513 (map, null, -1, null, transformer, basis, reducer);
4514 }
4515
4516 /**
4517 * Returns a task that when invoked, returns the result of
4518 * accumulating the given transformation of all keys using the given
4519 * reducer to combine values, and the given basis as an
4520 * identity value.
4521 *
4522 * @param map the map
4523 * @param transformer a function returning the transformation
4524 * for an element
4525 * @param basis the identity (initial default value) for the reduction
4526 * @param reducer a commutative associative combining function
4527 * @return the task
4528 */
4529 public static <K,V> ForkJoinTask<Integer> reduceKeysToInt
4530 (ConcurrentHashMapV8<K,V> map,
4531 ObjectToInt<? super K> transformer,
4532 int basis,
4533 IntByIntToInt reducer) {
4534 if (transformer == null || reducer == null)
4535 throw new NullPointerException();
4536 return new MapReduceKeysToIntTask<K,V>
4537 (map, null, -1, null, transformer, basis, reducer);
4538 }
4539
4540 /**
4541 * Returns a task that when invoked, performs the given action
4542 * for each value.
4543 *
4544 * @param map the map
4545 * @param action the action
 * @return the task
4546 */
4547 public static <K,V> ForkJoinTask<Void> forEachValue
4548 (ConcurrentHashMapV8<K,V> map,
4549 Action<V> action) {
4550 if (action == null) throw new NullPointerException();
4551 return new ForEachValueTask<K,V>(map, null, -1, null, action);
4552 }
4553
4554 /**
4555 * Returns a task that when invoked, performs the given action
4556 * for each non-null transformation of each value.
4557 *
4558 * @param map the map
4559 * @param transformer a function returning the transformation
4560 * for an element, or null if there is no transformation (in
4561 * which case the action is not applied)
4562 * @param action the action
 * @return the task
4563 */
4564 public static <K,V,U> ForkJoinTask<Void> forEachValue
4565 (ConcurrentHashMapV8<K,V> map,
4566 Fun<? super V, ? extends U> transformer,
4567 Action<U> action) {
4568 if (transformer == null || action == null)
4569 throw new NullPointerException();
4570 return new ForEachTransformedValueTask<K,V,U>
4571 (map, null, -1, null, transformer, action);
4572 }
4573
4574 /**
4575 * Returns a task that when invoked, returns a non-null result
4576 * from applying the given search function on each value, or
4577 * null if none. Upon success, further element processing is
4578 * suppressed and the results of any other parallel
4579 * invocations of the search function are ignored.
4580 *
4581 * @param map the map
4582 * @param searchFunction a function returning a non-null
4583 * result on success, else null
4584 * @return the task
4585 */
4586 public static <K,V,U> ForkJoinTask<U> searchValues
4587 (ConcurrentHashMapV8<K,V> map,
4588 Fun<? super V, ? extends U> searchFunction) {
4589 if (searchFunction == null) throw new NullPointerException();
4590 return new SearchValuesTask<K,V,U>
4591 (map, null, -1, null, searchFunction,
4592 new AtomicReference<U>());
4593 }
4594
4595 /**
4596 * Returns a task that when invoked, returns the result of
4597 * accumulating all values using the given reducer to combine
4598 * values, or null if none.
4599 *
4600 * @param map the map
4601 * @param reducer a commutative associative combining function
4602 * @return the task
4603 */
4604 public static <K,V> ForkJoinTask<V> reduceValues
4605 (ConcurrentHashMapV8<K,V> map,
4606 BiFun<? super V, ? super V, ? extends V> reducer) {
4607 if (reducer == null) throw new NullPointerException();
4608 return new ReduceValuesTask<K,V>
4609 (map, null, -1, null, reducer);
4610 }
4611
4612 /**
4613 * Returns a task that when invoked, returns the result of
4614 * accumulating the given transformation of all values using the
4615 * given reducer to combine values, or null if none.
4616 *
4617 * @param map the map
4618 * @param transformer a function returning the transformation
4619 * for an element, or null if there is no transformation (in
4620 * which case it is not combined).
4621 * @param reducer a commutative associative combining function
4622 * @return the task
4623 */
4624 public static <K,V,U> ForkJoinTask<U> reduceValues
4625 (ConcurrentHashMapV8<K,V> map,
4626 Fun<? super V, ? extends U> transformer,
4627 BiFun<? super U, ? super U, ? extends U> reducer) {
4628 if (transformer == null || reducer == null)
4629 throw new NullPointerException();
4630 return new MapReduceValuesTask<K,V,U>
4631 (map, null, -1, null, transformer, reducer);
4632 }
4633
4634 /**
4635 * Returns a task that when invoked, returns the result of
4636 * accumulating the given transformation of all values using the
4637 * given reducer to combine values, and the given basis as an
4638 * identity value.
4639 *
4640 * @param map the map
4641 * @param transformer a function returning the transformation
4642 * for an element
4643 * @param basis the identity (initial default value) for the reduction
4644 * @param reducer a commutative associative combining function
4645 * @return the task
4646 */
4647 public static <K,V> ForkJoinTask<Double> reduceValuesToDouble
4648 (ConcurrentHashMapV8<K,V> map,
4649 ObjectToDouble<? super V> transformer,
4650 double basis,
4651 DoubleByDoubleToDouble reducer) {
4652 if (transformer == null || reducer == null)
4653 throw new NullPointerException();
4654 return new MapReduceValuesToDoubleTask<K,V>
4655 (map, null, -1, null, transformer, basis, reducer);
4656 }
4657
4658 /**
4659 * Returns a task that when invoked, returns the result of
4660 * accumulating the given transformation of all values using the
4661 * given reducer to combine values, and the given basis as an
4662 * identity value.
4663 *
4664 * @param map the map
4665 * @param transformer a function returning the transformation
4666 * for an element
4667 * @param basis the identity (initial default value) for the reduction
4668 * @param reducer a commutative associative combining function
4669 * @return the task
4670 */
4671 public static <K,V> ForkJoinTask<Long> reduceValuesToLong
4672 (ConcurrentHashMapV8<K,V> map,
4673 ObjectToLong<? super V> transformer,
4674 long basis,
4675 LongByLongToLong reducer) {
4676 if (transformer == null || reducer == null)
4677 throw new NullPointerException();
4678 return new MapReduceValuesToLongTask<K,V>
4679 (map, null, -1, null, transformer, basis, reducer);
4680 }
4681
4682 /**
4683 * Returns a task that when invoked, returns the result of
4684 * accumulating the given transformation of all values using the
4685 * given reducer to combine values, and the given basis as an
4686 * identity value.
4687 *
4688 * @param map the map
4689 * @param transformer a function returning the transformation
4690 * for an element
4691 * @param basis the identity (initial default value) for the reduction
4692 * @param reducer a commutative associative combining function
4693 * @return the task
4694 */
4695 public static <K,V> ForkJoinTask<Integer> reduceValuesToInt
4696 (ConcurrentHashMapV8<K,V> map,
4697 ObjectToInt<? super V> transformer,
4698 int basis,
4699 IntByIntToInt reducer) {
4700 if (transformer == null || reducer == null)
4701 throw new NullPointerException();
4702 return new MapReduceValuesToIntTask<K,V>
4703 (map, null, -1, null, transformer, basis, reducer);
4704 }
4705
4706 /**
4707 * Returns a task that when invoked, performs the given action
4708 * for each entry.
4709 *
4710 * @param map the map
4711 * @param action the action
 * @return the task
4712 */
4713 public static <K,V> ForkJoinTask<Void> forEachEntry
4714 (ConcurrentHashMapV8<K,V> map,
4715 Action<Map.Entry<K,V>> action) {
4716 if (action == null) throw new NullPointerException();
4717 return new ForEachEntryTask<K,V>(map, null, -1, null, action);
4718 }
4719
4720 /**
4721 * Returns a task that when invoked, performs the given action
4722 * for each non-null transformation of each entry.
4723 *
4724 * @param map the map
4725 * @param transformer a function returning the transformation
4726 * for an element, or null if there is no transformation (in
4727 * which case the action is not applied)
4728 * @param action the action
 * @return the task
4729 */
4730 public static <K,V,U> ForkJoinTask<Void> forEachEntry
4731 (ConcurrentHashMapV8<K,V> map,
4732 Fun<Map.Entry<K,V>, ? extends U> transformer,
4733 Action<U> action) {
4734 if (transformer == null || action == null)
4735 throw new NullPointerException();
4736 return new ForEachTransformedEntryTask<K,V,U>
4737 (map, null, -1, null, transformer, action);
4738 }
4739
4740 /**
4741 * Returns a task that when invoked, returns a non-null result
4742 * from applying the given search function on each entry, or
4743 * null if none. Upon success, further element processing is
4744 * suppressed and the results of any other parallel
4745 * invocations of the search function are ignored.
4746 *
4747 * @param map the map
4748 * @param searchFunction a function returning a non-null
4749 * result on success, else null
4750 * @return the task
4751 */
4752 public static <K,V,U> ForkJoinTask<U> searchEntries
4753 (ConcurrentHashMapV8<K,V> map,
4754 Fun<Map.Entry<K,V>, ? extends U> searchFunction) {
4755 if (searchFunction == null) throw new NullPointerException();
4756 return new SearchEntriesTask<K,V,U>
4757 (map, null, -1, null, searchFunction,
4758 new AtomicReference<U>());
4759 }
4760
4761 /**
4762 * Returns a task that when invoked, returns the result of
4763 * accumulating all entries using the given reducer to combine
4764 * values, or null if none.
4765 *
4766 * @param map the map
4767 * @param reducer a commutative associative combining function
4768 * @return the task
4769 */
4770 public static <K,V> ForkJoinTask<Map.Entry<K,V>> reduceEntries
4771 (ConcurrentHashMapV8<K,V> map,
4772 BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4773 if (reducer == null) throw new NullPointerException();
4774 return new ReduceEntriesTask<K,V>
4775 (map, null, -1, null, reducer);
4776 }
4777
4778 /**
4779 * Returns a task that when invoked, returns the result of
4780 * accumulating the given transformation of all entries using the
4781 * given reducer to combine values, or null if none.
4782 *
4783 * @param map the map
4784 * @param transformer a function returning the transformation
4785 * for an element, or null if there is no transformation (in
4786 * which case it is not combined).
4787 * @param reducer a commutative associative combining function
4788 * @return the task
4789 */
4790 public static <K,V,U> ForkJoinTask<U> reduceEntries
4791 (ConcurrentHashMapV8<K,V> map,
4792 Fun<Map.Entry<K,V>, ? extends U> transformer,
4793 BiFun<? super U, ? super U, ? extends U> reducer) {
4794 if (transformer == null || reducer == null)
4795 throw new NullPointerException();
4796 return new MapReduceEntriesTask<K,V,U>
4797 (map, null, -1, null, transformer, reducer);
4798 }
4799
4800 /**
4801 * Returns a task that when invoked, returns the result of
4802 * accumulating the given transformation of all entries using the
4803 * given reducer to combine values, and the given basis as an
4804 * identity value.
4805 *
4806 * @param map the map
4807 * @param transformer a function returning the transformation
4808 * for an element
4809 * @param basis the identity (initial default value) for the reduction
4810 * @param reducer a commutative associative combining function
4811 * @return the task
4812 */
4813 public static <K,V> ForkJoinTask<Double> reduceEntriesToDouble
4814 (ConcurrentHashMapV8<K,V> map,
4815 ObjectToDouble<Map.Entry<K,V>> transformer,
4816 double basis,
4817 DoubleByDoubleToDouble reducer) {
4818 if (transformer == null || reducer == null)
4819 throw new NullPointerException();
4820 return new MapReduceEntriesToDoubleTask<K,V>
4821 (map, null, -1, null, transformer, basis, reducer);
4822 }
4823
4824 /**
4825 * Returns a task that when invoked, returns the result of
4826 * accumulating the given transformation of all entries using the
4827 * given reducer to combine values, and the given basis as an
4828 * identity value.
4829 *
4830 * @param map the map
4831 * @param transformer a function returning the transformation
4832 * for an element
4833 * @param basis the identity (initial default value) for the reduction
4834 * @param reducer a commutative associative combining function
4835 * @return the task
4836 */
4837 public static <K,V> ForkJoinTask<Long> reduceEntriesToLong
4838 (ConcurrentHashMapV8<K,V> map,
4839 ObjectToLong<Map.Entry<K,V>> transformer,
4840 long basis,
4841 LongByLongToLong reducer) {
4842 if (transformer == null || reducer == null)
4843 throw new NullPointerException();
4844 return new MapReduceEntriesToLongTask<K,V>
4845 (map, null, -1, null, transformer, basis, reducer);
4846 }
4847
4848 /**
4849 * Returns a task that when invoked, returns the result of
4850 * accumulating the given transformation of all entries using the
4851 * given reducer to combine values, and the given basis as an
4852 * identity value.
4853 *
4854 * @param map the map
4855 * @param transformer a function returning the transformation
4856 * for an element
4857 * @param basis the identity (initial default value) for the reduction
4858 * @param reducer a commutative associative combining function
4859 * @return the task
4860 */
4861 public static <K,V> ForkJoinTask<Integer> reduceEntriesToInt
4862 (ConcurrentHashMapV8<K,V> map,
4863 ObjectToInt<Map.Entry<K,V>> transformer,
4864 int basis,
4865 IntByIntToInt reducer) {
4866 if (transformer == null || reducer == null)
4867 throw new NullPointerException();
4868 return new MapReduceEntriesToIntTask<K,V>
4869 (map, null, -1, null, transformer, basis, reducer);
4870 }
4871 }
4872
4873 // -------------------------------------------------------
4874
4875 /**
4876 * Base for FJ tasks for bulk operations. This adds a variant of
4877 * CountedCompleters and some split and merge bookkeeping to
4878 * iterator functionality. The forEach and reduce methods are
4879 * similar to those illustrated in CountedCompleter documentation,
4880 * except that bottom-up reduction completions perform them within
4881 * their compute methods. The search methods are like forEach
4882 * except they continually poll for success and exit early. Also,
4883 * exceptions are handled in a simpler manner, by just trying to
4884 * complete the root task exceptionally.
4885 */
4886 @SuppressWarnings("serial") static abstract class BulkTask<K,V,R> extends Traverser<K,V,R> {
4887 final BulkTask<K,V,?> parent; // completion target
4888 int batch; // split control; -1 for unknown
4889 int pending; // completion control
4890
4891 BulkTask(ConcurrentHashMapV8<K,V> map, BulkTask<K,V,?> parent,
4892 int batch) {
4893 super(map);
4894 this.parent = parent;
4895 this.batch = batch;
4896 if (parent != null && map != null) { // split parent
4897 Node[] t;
4898 if ((t = parent.tab) == null &&
4899 (t = parent.tab = map.table) != null)
4900 parent.baseLimit = parent.baseSize = t.length;
4901 this.tab = t;
4902 this.baseSize = parent.baseSize;
4903 int hi = this.baseLimit = parent.baseLimit;
4904 parent.baseLimit = this.index = this.baseIndex =
4905 (hi + parent.baseIndex + 1) >>> 1;
4906 }
4907 }
4908
4909 // FJ methods
4910
4911 /**
4912 * Propagates completion. Note that all reduce actions
4913 * bypass this method to combine while completing.
4914 */
4915 final void tryComplete() {
4916 BulkTask<K,V,?> a = this, s = a;
4917 for (int c;;) {
4918 if ((c = a.pending) == 0) {
4919 if ((a = (s = a).parent) == null) {
4920 s.quietlyComplete();
4921 break;
4922 }
4923 }
4924 else if (U.compareAndSwapInt(a, PENDING, c, c - 1))
4925 break;
4926 }
4927 }
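        // Illustrative summary of the protocol above: each fork first
        // increments the forking task's pending count (see the exec()
        // methods below), and every task calls tryComplete when its own
        // portion is done.  A call arriving at a node with a nonzero
        // pending count just CASes the count down by one and returns;
        // a call that finds zero climbs to the parent and repeats, so
        // the last task to finish walks up the chain and quietly
        // completes the root.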
4928
4929 /**
4930 * Forces the root task to complete.
4931 * @param ex if null, complete normally, else exceptionally
4932 * @return false to simplify use
4933 */
4934 final boolean tryCompleteComputation(Throwable ex) {
4935 for (BulkTask<K,V,?> a = this;;) {
4936 BulkTask<K,V,?> p = a.parent;
4937 if (p == null) {
4938 if (ex != null)
4939 a.completeExceptionally(ex);
4940 else
4941 a.quietlyComplete();
4942 return false;
4943 }
4944 a = p;
4945 }
4946 }
4947
4948 /**
4949 * A version of tryCompleteComputation used for function screening checks.
4950 */
4951 final boolean abortOnNullFunction() {
4952 return tryCompleteComputation(new Error("Unexpected null function"));
4953 }
4954
4955 // utilities
4956
4957 /** CompareAndSets the pending count. */
4958 final boolean casPending(int cmp, int val) {
4959 return U.compareAndSwapInt(this, PENDING, cmp, val);
4960 }
4961
4962 /**
4963 * Returns the approximate exponent-of-two of the number of times
4964 * (minus one) to split the task in two before executing the leaf
4965 * action. This value is faster to compute and more convenient to
4966 * use as a guide to splitting than the depth, since it is used
4967 * while dividing by two anyway.
4968 */
4969 final int batch() {
4970 ConcurrentHashMapV8<K, V> m; int b; Node[] t; ForkJoinPool pool;
4971 if ((b = batch) < 0 && (m = map) != null) { // force initialization
4972 if ((t = tab) == null && (t = tab = m.table) != null)
4973 baseLimit = baseSize = t.length;
4974 if (t != null) {
4975 long n = m.counter.sum();
4976 int par = (pool = getPool()) == null ?
4977 ForkJoinPool.getCommonPoolParallelism() :
4978 pool.getParallelism();
4979 int sp = par << 3; // slack of 8
4980 b = batch = (n <= 0L) ? 0 : (n < (long)sp) ? (int)n : sp;
4981 }
4982 }
4983 return b;
4984 }
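        // Worked example (illustrative, assuming common-pool parallelism
        // of 8): sp = 8 << 3 = 64.  An estimated 1000 elements gives
        // batch = 64, 10 elements gives batch = 10, and an empty map
        // gives 0.  Because each fork in exec() halves b, a batch of 64
        // lets the root fork right-hand subtasks with batch values
        // 32, 16, 8, 4, 2 and 1 before running its own leaf portion.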
4985
4986 /**
4987 * Returns an exportable snapshot entry.
4988 */
4989 static <K,V> AbstractMap.SimpleEntry<K,V> entryFor(K k, V v) {
4990 return new AbstractMap.SimpleEntry<K,V>(k, v);
4991 }
4992
4993 // Unsafe mechanics
4994 private static final sun.misc.Unsafe U;
4995 private static final long PENDING;
4996 static {
4997 try {
4998 U = getUnsafe();
4999 PENDING = U.objectFieldOffset
5000 (BulkTask.class.getDeclaredField("pending"));
5001 } catch (Exception e) {
5002 throw new Error(e);
5003 }
5004 }
5005 }
5006
5007 /*
5008 * Task classes. Coded in a regular but ugly format/style to
5009 * simplify checks that each variant differs in the right way from
5010 * others.
5011 */
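
    /*
     * Common shape (an illustrative summary, not part of the original
     * commentary): each exec() below first screens its functions for
     * null, then repeatedly halves its batch value, forking a "right"
     * subtask for the upper half of its traversal range and linking it
     * through nextRight while bumping the pending count.  It then
     * processes its own remaining elements via advance(), completes
     * (via tryComplete, or by merging results bottom-up in the *Reduce*
     * variants), and finally helps by tryUnfork()ing and exec()ing any
     * of its forked rights that have not yet been stolen.
     */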
5012
5013 @SuppressWarnings("serial") static final class ForEachKeyTask<K,V>
5014 extends BulkTask<K,V,Void> {
5015 final Action<K> action;
5016 ForEachKeyTask<K,V> nextRight;
5017 ForEachKeyTask
5018 (ConcurrentHashMapV8<K,V> m, BulkTask<K,V,?> p, int b,
5019 ForEachKeyTask<K,V> nextRight,
5020 Action<K> action) {
5021 super(m, p, b);
5022 this.nextRight = nextRight;
5023 this.action = action;
5024 }
5025 @SuppressWarnings("unchecked") public final boolean exec() {
5026 final Action<K> action = this.action;
5027 if (action == null)
5028 return abortOnNullFunction();
5029 ForEachKeyTask<K,V> rights = null;
5030 try {
5031 int b = batch(), c;
5032 while (b > 1 && baseIndex != baseLimit) {
5033 do {} while (!casPending(c = pending, c+1));
5034 (rights = new ForEachKeyTask<K,V>
5035 (map, this, b >>>= 1, rights, action)).fork();
5036 }
5037 while (advance() != null)
5038 action.apply((K)nextKey);
5039 tryComplete();
5040 } catch (Throwable ex) {
5041 return tryCompleteComputation(ex);
5042 }
5043 while (rights != null && rights.tryUnfork()) {
5044 rights.exec();
5045 rights = rights.nextRight;
5046 }
5047 return false;
5048 }
5049 }
5050
5051 @SuppressWarnings("serial") static final class ForEachValueTask<K,V>
5052 extends BulkTask<K,V,Void> {
5053 ForEachValueTask<K,V> nextRight;
5054 final Action<V> action;
5055 ForEachValueTask
5056 (ConcurrentHashMapV8<K,V> m, BulkTask<K,V,?> p, int b,
5057 ForEachValueTask<K,V> nextRight,
5058 Action<V> action) {
5059 super(m, p, b);
5060 this.nextRight = nextRight;
5061 this.action = action;
5062 }
5063 @SuppressWarnings("unchecked") public final boolean exec() {
5064 final Action<V> action = this.action;
5065 if (action == null)
5066 return abortOnNullFunction();
5067 ForEachValueTask<K,V> rights = null;
5068 try {
5069 int b = batch(), c;
5070 while (b > 1 && baseIndex != baseLimit) {
5071 do {} while (!casPending(c = pending, c+1));
5072 (rights = new ForEachValueTask<K,V>
5073 (map, this, b >>>= 1, rights, action)).fork();
5074 }
5075 Object v;
5076 while ((v = advance()) != null)
5077 action.apply((V)v);
5078 tryComplete();
5079 } catch (Throwable ex) {
5080 return tryCompleteComputation(ex);
5081 }
5082 while (rights != null && rights.tryUnfork()) {
5083 rights.exec();
5084 rights = rights.nextRight;
5085 }
5086 return false;
5087 }
5088 }
5089
5090 @SuppressWarnings("serial") static final class ForEachEntryTask<K,V>
5091 extends BulkTask<K,V,Void> {
5092 ForEachEntryTask<K,V> nextRight;
5093 final Action<Entry<K,V>> action;
5094 ForEachEntryTask
5095 (ConcurrentHashMapV8<K,V> m, BulkTask<K,V,?> p, int b,
5096 ForEachEntryTask<K,V> nextRight,
5097 Action<Entry<K,V>> action) {
5098 super(m, p, b);
5099 this.nextRight = nextRight;
5100 this.action = action;
5101 }
5102 @SuppressWarnings("unchecked") public final boolean exec() {
5103 final Action<Entry<K,V>> action = this.action;
5104 if (action == null)
5105 return abortOnNullFunction();
5106 ForEachEntryTask<K,V> rights = null;
5107 try {
5108 int b = batch(), c;
5109 while (b > 1 && baseIndex != baseLimit) {
5110 do {} while (!casPending(c = pending, c+1));
5111 (rights = new ForEachEntryTask<K,V>
5112 (map, this, b >>>= 1, rights, action)).fork();
5113 }
5114 Object v;
5115 while ((v = advance()) != null)
5116 action.apply(entryFor((K)nextKey, (V)v));
5117 tryComplete();
5118 } catch (Throwable ex) {
5119 return tryCompleteComputation(ex);
5120 }
5121 while (rights != null && rights.tryUnfork()) {
5122 rights.exec();
5123 rights = rights.nextRight;
5124 }
5125 return false;
5126 }
5127 }
5128
5129 @SuppressWarnings("serial") static final class ForEachMappingTask<K,V>
5130 extends BulkTask<K,V,Void> {
5131 ForEachMappingTask<K,V> nextRight;
5132 final BiAction<K,V> action;
5133 ForEachMappingTask
5134 (ConcurrentHashMapV8<K,V> m, BulkTask<K,V,?> p, int b,
5135 ForEachMappingTask<K,V> nextRight,
5136 BiAction<K,V> action) {
5137 super(m, p, b);
5138 this.nextRight = nextRight;
5139 this.action = action;
5140 }
5141 @SuppressWarnings("unchecked") public final boolean exec() {
5142 final BiAction<K,V> action = this.action;
5143 if (action == null)
5144 return abortOnNullFunction();
5145 ForEachMappingTask<K,V> rights = null;
5146 try {
5147 int b = batch(), c;
5148 while (b > 1 && baseIndex != baseLimit) {
5149 do {} while (!casPending(c = pending, c+1));
5150 (rights = new ForEachMappingTask<K,V>
5151 (map, this, b >>>= 1, rights, action)).fork();
5152 }
5153 Object v;
5154 while ((v = advance()) != null)
5155 action.apply((K)nextKey, (V)v);
5156 tryComplete();
5157 } catch (Throwable ex) {
5158 return tryCompleteComputation(ex);
5159 }
5160 while (rights != null && rights.tryUnfork()) {
5161 rights.exec();
5162 rights = rights.nextRight;
5163 }
5164 return false;
5165 }
5166 }
5167
5168 @SuppressWarnings("serial") static final class ForEachTransformedKeyTask<K,V,U>
5169 extends BulkTask<K,V,Void> {
5170 ForEachTransformedKeyTask<K,V,U> nextRight;
5171 final Fun<? super K, ? extends U> transformer;
5172 final Action<U> action;
5173 ForEachTransformedKeyTask
5174 (ConcurrentHashMapV8<K,V> m, BulkTask<K,V,?> p, int b,
5175 ForEachTransformedKeyTask<K,V,U> nextRight,
5176 Fun<? super K, ? extends U> transformer,
5177 Action<U> action) {
5178 super(m, p, b);
5179 this.nextRight = nextRight;
5180 this.transformer = transformer;
5181 this.action = action;
5182
5183 }
5184 @SuppressWarnings("unchecked") public final boolean exec() {
5185 final Fun<? super K, ? extends U> transformer =
5186 this.transformer;
5187 final Action<U> action = this.action;
5188 if (transformer == null || action == null)
5189 return abortOnNullFunction();
5190 ForEachTransformedKeyTask<K,V,U> rights = null;
5191 try {
5192 int b = batch(), c;
5193 while (b > 1 && baseIndex != baseLimit) {
5194 do {} while (!casPending(c = pending, c+1));
5195 (rights = new ForEachTransformedKeyTask<K,V,U>
5196 (map, this, b >>>= 1, rights, transformer, action)).fork();
5197 }
5198 U u;
5199 while (advance() != null) {
5200 if ((u = transformer.apply((K)nextKey)) != null)
5201 action.apply(u);
5202 }
5203 tryComplete();
5204 } catch (Throwable ex) {
5205 return tryCompleteComputation(ex);
5206 }
5207 while (rights != null && rights.tryUnfork()) {
5208 rights.exec();
5209 rights = rights.nextRight;
5210 }
5211 return false;
5212 }
5213 }
5214
5215 @SuppressWarnings("serial") static final class ForEachTransformedValueTask<K,V,U>
5216 extends BulkTask<K,V,Void> {
5217 ForEachTransformedValueTask<K,V,U> nextRight;
5218 final Fun<? super V, ? extends U> transformer;
5219 final Action<U> action;
5220 ForEachTransformedValueTask
5221 (ConcurrentHashMapV8<K,V> m, BulkTask<K,V,?> p, int b,
5222 ForEachTransformedValueTask<K,V,U> nextRight,
5223 Fun<? super V, ? extends U> transformer,
5224 Action<U> action) {
5225 super(m, p, b);
5226 this.nextRight = nextRight;
5227 this.transformer = transformer;
5228 this.action = action;
5229
5230 }
5231 @SuppressWarnings("unchecked") public final boolean exec() {
5232 final Fun<? super V, ? extends U> transformer =
5233 this.transformer;
5234 final Action<U> action = this.action;
5235 if (transformer == null || action == null)
5236 return abortOnNullFunction();
5237 ForEachTransformedValueTask<K,V,U> rights = null;
5238 try {
5239 int b = batch(), c;
5240 while (b > 1 && baseIndex != baseLimit) {
5241 do {} while (!casPending(c = pending, c+1));
5242 (rights = new ForEachTransformedValueTask<K,V,U>
5243 (map, this, b >>>= 1, rights, transformer, action)).fork();
5244 }
5245 Object v; U u;
5246 while ((v = advance()) != null) {
5247 if ((u = transformer.apply((V)v)) != null)
5248 action.apply(u);
5249 }
5250 tryComplete();
5251 } catch (Throwable ex) {
5252 return tryCompleteComputation(ex);
5253 }
5254 while (rights != null && rights.tryUnfork()) {
5255 rights.exec();
5256 rights = rights.nextRight;
5257 }
5258 return false;
5259 }
5260 }
5261
5262 @SuppressWarnings("serial") static final class ForEachTransformedEntryTask<K,V,U>
5263 extends BulkTask<K,V,Void> {
5264 ForEachTransformedEntryTask<K,V,U> nextRight;
5265 final Fun<Map.Entry<K,V>, ? extends U> transformer;
5266 final Action<U> action;
5267 ForEachTransformedEntryTask
5268 (ConcurrentHashMapV8<K,V> m, BulkTask<K,V,?> p, int b,
5269 ForEachTransformedEntryTask<K,V,U> nextRight,
5270 Fun<Map.Entry<K,V>, ? extends U> transformer,
5271 Action<U> action) {
5272 super(m, p, b);
5273 this.nextRight = nextRight;
5274 this.transformer = transformer;
5275 this.action = action;
5276
5277 }
5278 @SuppressWarnings("unchecked") public final boolean exec() {
5279 final Fun<Map.Entry<K,V>, ? extends U> transformer =
5280 this.transformer;
5281 final Action<U> action = this.action;
5282 if (transformer == null || action == null)
5283 return abortOnNullFunction();
5284 ForEachTransformedEntryTask<K,V,U> rights = null;
5285 try {
5286 int b = batch(), c;
5287 while (b > 1 && baseIndex != baseLimit) {
5288 do {} while (!casPending(c = pending, c+1));
5289 (rights = new ForEachTransformedEntryTask<K,V,U>
5290 (map, this, b >>>= 1, rights, transformer, action)).fork();
5291 }
5292 Object v; U u;
5293 while ((v = advance()) != null) {
5294 if ((u = transformer.apply(entryFor((K)nextKey, (V)v))) != null)
5295 action.apply(u);
5296 }
5297 tryComplete();
5298 } catch (Throwable ex) {
5299 return tryCompleteComputation(ex);
5300 }
5301 while (rights != null && rights.tryUnfork()) {
5302 rights.exec();
5303 rights = rights.nextRight;
5304 }
5305 return false;
5306 }
5307 }
5308
5309 @SuppressWarnings("serial") static final class ForEachTransformedMappingTask<K,V,U>
5310 extends BulkTask<K,V,Void> {
5311 ForEachTransformedMappingTask<K,V,U> nextRight;
5312 final BiFun<? super K, ? super V, ? extends U> transformer;
5313 final Action<U> action;
5314 ForEachTransformedMappingTask
5315 (ConcurrentHashMapV8<K,V> m, BulkTask<K,V,?> p, int b,
5316 ForEachTransformedMappingTask<K,V,U> nextRight,
5317 BiFun<? super K, ? super V, ? extends U> transformer,
5318 Action<U> action) {
5319 super(m, p, b);
5320 this.nextRight = nextRight;
5321 this.transformer = transformer;
5322 this.action = action;
5323
5324 }
5325 @SuppressWarnings("unchecked") public final boolean exec() {
5326 final BiFun<? super K, ? super V, ? extends U> transformer =
5327 this.transformer;
5328 final Action<U> action = this.action;
5329 if (transformer == null || action == null)
5330 return abortOnNullFunction();
5331 ForEachTransformedMappingTask<K,V,U> rights = null;
5332 try {
5333 int b = batch(), c;
5334 while (b > 1 && baseIndex != baseLimit) {
5335 do {} while (!casPending(c = pending, c+1));
5336 (rights = new ForEachTransformedMappingTask<K,V,U>
5337 (map, this, b >>>= 1, rights, transformer, action)).fork();
5338 }
5339 Object v; U u;
5340 while ((v = advance()) != null) {
5341 if ((u = transformer.apply((K)nextKey, (V)v)) != null)
5342 action.apply(u);
5343 }
5344 tryComplete();
5345 } catch (Throwable ex) {
5346 return tryCompleteComputation(ex);
5347 }
5348 while (rights != null && rights.tryUnfork()) {
5349 rights.exec();
5350 rights = rights.nextRight;
5351 }
5352 return false;
5353 }
5354 }
5355
5356 @SuppressWarnings("serial") static final class SearchKeysTask<K,V,U>
5357 extends BulkTask<K,V,U> {
5358 SearchKeysTask<K,V,U> nextRight;
5359 final Fun<? super K, ? extends U> searchFunction;
5360 final AtomicReference<U> result;
5361 SearchKeysTask
5362 (ConcurrentHashMapV8<K,V> m, BulkTask<K,V,?> p, int b,
5363 SearchKeysTask<K,V,U> nextRight,
5364 Fun<? super K, ? extends U> searchFunction,
5365 AtomicReference<U> result) {
5366 super(m, p, b);
5367 this.nextRight = nextRight;
5368 this.searchFunction = searchFunction; this.result = result;
5369 }
5370 @SuppressWarnings("unchecked") public final boolean exec() {
5371 AtomicReference<U> result = this.result;
5372 final Fun<? super K, ? extends U> searchFunction =
5373 this.searchFunction;
5374 if (searchFunction == null || result == null)
5375 return abortOnNullFunction();
5376 SearchKeysTask<K,V,U> rights = null;
5377 try {
5378 int b = batch(), c;
5379 while (b > 1 && baseIndex != baseLimit && result.get() == null) {
5380 do {} while (!casPending(c = pending, c+1));
5381 (rights = new SearchKeysTask<K,V,U>
5382 (map, this, b >>>= 1, rights, searchFunction, result)).fork();
5383 }
5384 U u;
5385 while (result.get() == null && advance() != null) {
5386 if ((u = searchFunction.apply((K)nextKey)) != null) {
5387 if (result.compareAndSet(null, u))
5388 tryCompleteComputation(null);
5389 break;
5390 }
5391 }
5392 tryComplete();
5393 } catch (Throwable ex) {
5394 return tryCompleteComputation(ex);
5395 }
5396 while (rights != null && result.get() == null && rights.tryUnfork()) {
5397 rights.exec();
5398 rights = rights.nextRight;
5399 }
5400 return false;
5401 }
5402 public final U getRawResult() { return result.get(); }
5403 }
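        // Note (illustrative): the Search* tasks differ from the ForEach*
        // tasks only in their early-exit behavior.  The shared
        // AtomicReference<U> result acts as the "found" flag: the first
        // task whose search function returns non-null installs it with
        // result.compareAndSet(null, u) and then force-completes the root
        // via tryCompleteComputation(null); every other task polls
        // result.get() before splitting, before each element, and before
        // helping rights, so the whole computation winds down quickly
        // once a match is found.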
5404
5405 @SuppressWarnings("serial") static final class SearchValuesTask<K,V,U>
5406 extends BulkTask<K,V,U> {
5407 SearchValuesTask<K,V,U> nextRight;
5408 final Fun<? super V, ? extends U> searchFunction;
5409 final AtomicReference<U> result;
5410 SearchValuesTask
5411 (ConcurrentHashMapV8<K,V> m, BulkTask<K,V,?> p, int b,
5412 SearchValuesTask<K,V,U> nextRight,
5413 Fun<? super V, ? extends U> searchFunction,
5414 AtomicReference<U> result) {
5415 super(m, p, b);
5416 this.nextRight = nextRight;
5417 this.searchFunction = searchFunction; this.result = result;
5418 }
5419 @SuppressWarnings("unchecked") public final boolean exec() {
5420 AtomicReference<U> result = this.result;
5421 final Fun<? super V, ? extends U> searchFunction =
5422 this.searchFunction;
5423 if (searchFunction == null || result == null)
5424 return abortOnNullFunction();
5425 SearchValuesTask<K,V,U> rights = null;
5426 try {
5427 int b = batch(), c;
5428 while (b > 1 && baseIndex != baseLimit && result.get() == null) {
5429 do {} while (!casPending(c = pending, c+1));
5430 (rights = new SearchValuesTask<K,V,U>
5431 (map, this, b >>>= 1, rights, searchFunction, result)).fork();
5432 }
5433 Object v; U u;
5434 while (result.get() == null && (v = advance()) != null) {
5435 if ((u = searchFunction.apply((V)v)) != null) {
5436 if (result.compareAndSet(null, u))
5437 tryCompleteComputation(null);
5438 break;
5439 }
5440 }
5441 tryComplete();
5442 } catch (Throwable ex) {
5443 return tryCompleteComputation(ex);
5444 }
5445 while (rights != null && result.get() == null && rights.tryUnfork()) {
5446 rights.exec();
5447 rights = rights.nextRight;
5448 }
5449 return false;
5450 }
5451 public final U getRawResult() { return result.get(); }
5452 }
5453
5454 @SuppressWarnings("serial") static final class SearchEntriesTask<K,V,U>
5455 extends BulkTask<K,V,U> {
5456 SearchEntriesTask<K,V,U> nextRight;
5457 final Fun<Entry<K,V>, ? extends U> searchFunction;
5458 final AtomicReference<U> result;
5459 SearchEntriesTask
5460 (ConcurrentHashMapV8<K,V> m, BulkTask<K,V,?> p, int b,
5461 SearchEntriesTask<K,V,U> nextRight,
5462 Fun<Entry<K,V>, ? extends U> searchFunction,
5463 AtomicReference<U> result) {
5464 super(m, p, b);
5465 this.nextRight = nextRight;
5466 this.searchFunction = searchFunction; this.result = result;
5467 }
5468 @SuppressWarnings("unchecked") public final boolean exec() {
5469 AtomicReference<U> result = this.result;
5470 final Fun<Entry<K,V>, ? extends U> searchFunction =
5471 this.searchFunction;
5472 if (searchFunction == null || result == null)
5473 return abortOnNullFunction();
5474 SearchEntriesTask<K,V,U> rights = null;
5475 try {
5476 int b = batch(), c;
5477 while (b > 1 && baseIndex != baseLimit && result.get() == null) {
5478 do {} while (!casPending(c = pending, c+1));
5479 (rights = new SearchEntriesTask<K,V,U>
5480 (map, this, b >>>= 1, rights, searchFunction, result)).fork();
5481 }
5482 Object v; U u;
5483 while (result.get() == null && (v = advance()) != null) {
5484 if ((u = searchFunction.apply(entryFor((K)nextKey, (V)v))) != null) {
5485 if (result.compareAndSet(null, u))
5486 tryCompleteComputation(null);
5487 break;
5488 }
5489 }
5490 tryComplete();
5491 } catch (Throwable ex) {
5492 return tryCompleteComputation(ex);
5493 }
5494 while (rights != null && result.get() == null && rights.tryUnfork()) {
5495 rights.exec();
5496 rights = rights.nextRight;
5497 }
5498 return false;
5499 }
5500 public final U getRawResult() { return result.get(); }
5501 }
5502
5503 @SuppressWarnings("serial") static final class SearchMappingsTask<K,V,U>
5504 extends BulkTask<K,V,U> {
5505 SearchMappingsTask<K,V,U> nextRight;
5506 final BiFun<? super K, ? super V, ? extends U> searchFunction;
5507 final AtomicReference<U> result;
5508 SearchMappingsTask
5509 (ConcurrentHashMapV8<K,V> m, BulkTask<K,V,?> p, int b,
5510 SearchMappingsTask<K,V,U> nextRight,
5511 BiFun<? super K, ? super V, ? extends U> searchFunction,
5512 AtomicReference<U> result) {
5513 super(m, p, b);
5514 this.nextRight = nextRight;
5515 this.searchFunction = searchFunction; this.result = result;
5516 }
5517 @SuppressWarnings("unchecked") public final boolean exec() {
5518 AtomicReference<U> result = this.result;
5519 final BiFun<? super K, ? super V, ? extends U> searchFunction =
5520 this.searchFunction;
5521 if (searchFunction == null || result == null)
5522 return abortOnNullFunction();
5523 SearchMappingsTask<K,V,U> rights = null;
5524 try {
5525 int b = batch(), c;
5526 while (b > 1 && baseIndex != baseLimit && result.get() == null) {
5527 do {} while (!casPending(c = pending, c+1));
5528 (rights = new SearchMappingsTask<K,V,U>
5529 (map, this, b >>>= 1, rights, searchFunction, result)).fork();
5530 }
5531 Object v; U u;
5532 while (result.get() == null && (v = advance()) != null) {
5533 if ((u = searchFunction.apply((K)nextKey, (V)v)) != null) {
5534 if (result.compareAndSet(null, u))
5535 tryCompleteComputation(null);
5536 break;
5537 }
5538 }
5539 tryComplete();
5540 } catch (Throwable ex) {
5541 return tryCompleteComputation(ex);
5542 }
5543 while (rights != null && result.get() == null && rights.tryUnfork()) {
5544 rights.exec();
5545 rights = rights.nextRight;
5546 }
5547 return false;
5548 }
5549 public final U getRawResult() { return result.get(); }
5550 }
5551
5552 @SuppressWarnings("serial") static final class ReduceKeysTask<K,V>
5553 extends BulkTask<K,V,K> {
5554 final BiFun<? super K, ? super K, ? extends K> reducer;
5555 K result;
5556 ReduceKeysTask<K,V> rights, nextRight;
5557 ReduceKeysTask
5558 (ConcurrentHashMapV8<K,V> m, BulkTask<K,V,?> p, int b,
5559 ReduceKeysTask<K,V> nextRight,
5560 BiFun<? super K, ? super K, ? extends K> reducer) {
5561 super(m, p, b); this.nextRight = nextRight;
5562 this.reducer = reducer;
5563 }
5564 @SuppressWarnings("unchecked") public final boolean exec() {
5565 final BiFun<? super K, ? super K, ? extends K> reducer =
5566 this.reducer;
5567 if (reducer == null)
5568 return abortOnNullFunction();
5569 try {
5570 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
5571 do {} while (!casPending(c = pending, c+1));
5572 (rights = new ReduceKeysTask<K,V>
5573 (map, this, b >>>= 1, rights, reducer)).fork();
5574 }
5575 K r = null;
5576 while (advance() != null) {
5577 K u = (K)nextKey;
5578 r = (r == null) ? u : reducer.apply(r, u);
5579 }
5580 result = r;
5581 for (ReduceKeysTask<K,V> t = this, s;;) {
5582 int c; BulkTask<K,V,?> par; K tr, sr;
5583 if ((c = t.pending) == 0) {
5584 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
5585 if ((sr = s.result) != null)
5586 t.result = ((tr = t.result) == null) ? sr : reducer.apply(tr, sr);
5587 }
5588 if ((par = t.parent) == null ||
5589 !(par instanceof ReduceKeysTask)) {
5590 t.quietlyComplete();
5591 break;
5592 }
5593 t = (ReduceKeysTask<K,V>)par;
5594 }
5595 else if (t.casPending(c, c - 1))
5596 break;
5597 }
5598 } catch (Throwable ex) {
5599 return tryCompleteComputation(ex);
5600 }
5601 for (ReduceKeysTask<K,V> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
5602 s.exec();
5603 return false;
5604 }
5605 public final K getRawResult() { return result; }
5606 }
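        // Note (illustrative): unlike the ForEach* tasks, the *Reduce*
        // tasks do not call tryComplete; instead the completion loop
        // above merges results while walking up.  When a task's pending
        // count reaches zero it folds the results of its forked rights
        // into its own result with the reducer (skipping nulls for
        // object reductions), then repeats the check at its parent, so
        // the root's result is fully combined by the time it quietly
        // completes.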
5607
5608 @SuppressWarnings("serial") static final class ReduceValuesTask<K,V>
5609 extends BulkTask<K,V,V> {
5610 final BiFun<? super V, ? super V, ? extends V> reducer;
5611 V result;
5612 ReduceValuesTask<K,V> rights, nextRight;
5613 ReduceValuesTask
5614 (ConcurrentHashMapV8<K,V> m, BulkTask<K,V,?> p, int b,
5615 ReduceValuesTask<K,V> nextRight,
5616 BiFun<? super V, ? super V, ? extends V> reducer) {
5617 super(m, p, b); this.nextRight = nextRight;
5618 this.reducer = reducer;
5619 }
5620 @SuppressWarnings("unchecked") public final boolean exec() {
5621 final BiFun<? super V, ? super V, ? extends V> reducer =
5622 this.reducer;
5623 if (reducer == null)
5624 return abortOnNullFunction();
5625 try {
5626 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
5627 do {} while (!casPending(c = pending, c+1));
5628 (rights = new ReduceValuesTask<K,V>
5629 (map, this, b >>>= 1, rights, reducer)).fork();
5630 }
5631 V r = null;
5632 Object v;
5633 while ((v = advance()) != null) {
5634 V u = (V)v;
5635 r = (r == null) ? u : reducer.apply(r, u);
5636 }
5637 result = r;
5638 for (ReduceValuesTask<K,V> t = this, s;;) {
5639 int c; BulkTask<K,V,?> par; V tr, sr;
5640 if ((c = t.pending) == 0) {
5641 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
5642 if ((sr = s.result) != null)
5643 t.result = ((tr = t.result) == null) ? sr : reducer.apply(tr, sr);
5644 }
5645 if ((par = t.parent) == null ||
5646 !(par instanceof ReduceValuesTask)) {
5647 t.quietlyComplete();
5648 break;
5649 }
5650 t = (ReduceValuesTask<K,V>)par;
5651 }
5652 else if (t.casPending(c, c - 1))
5653 break;
5654 }
5655 } catch (Throwable ex) {
5656 return tryCompleteComputation(ex);
5657 }
5658 for (ReduceValuesTask<K,V> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
5659 s.exec();
5660 return false;
5661 }
5662 public final V getRawResult() { return result; }
5663 }
5664
5665 @SuppressWarnings("serial") static final class ReduceEntriesTask<K,V>
5666 extends BulkTask<K,V,Map.Entry<K,V>> {
5667 final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
5668 Map.Entry<K,V> result;
5669 ReduceEntriesTask<K,V> rights, nextRight;
5670 ReduceEntriesTask
5671 (ConcurrentHashMapV8<K,V> m, BulkTask<K,V,?> p, int b,
5672 ReduceEntriesTask<K,V> nextRight,
5673 BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
5674 super(m, p, b); this.nextRight = nextRight;
5675 this.reducer = reducer;
5676 }
5677 @SuppressWarnings("unchecked") public final boolean exec() {
5678 final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer =
5679 this.reducer;
5680 if (reducer == null)
5681 return abortOnNullFunction();
5682 try {
5683 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
5684 do {} while (!casPending(c = pending, c+1));
5685 (rights = new ReduceEntriesTask<K,V>
5686 (map, this, b >>>= 1, rights, reducer)).fork();
5687 }
5688 Map.Entry<K,V> r = null;
5689 Object v;
5690 while ((v = advance()) != null) {
5691 Map.Entry<K,V> u = entryFor((K)nextKey, (V)v);
5692 r = (r == null) ? u : reducer.apply(r, u);
5693 }
5694 result = r;
5695 for (ReduceEntriesTask<K,V> t = this, s;;) {
5696 int c; BulkTask<K,V,?> par; Map.Entry<K,V> tr, sr;
5697 if ((c = t.pending) == 0) {
5698 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
5699 if ((sr = s.result) != null)
5700 t.result = ((tr = t.result) == null) ? sr : reducer.apply(tr, sr);
5701 }
5702 if ((par = t.parent) == null ||
5703 !(par instanceof ReduceEntriesTask)) {
5704 t.quietlyComplete();
5705 break;
5706 }
5707 t = (ReduceEntriesTask<K,V>)par;
5708 }
5709 else if (t.casPending(c, c - 1))
5710 break;
5711 }
5712 } catch (Throwable ex) {
5713 return tryCompleteComputation(ex);
5714 }
5715 for (ReduceEntriesTask<K,V> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
5716 s.exec();
5717 return false;
5718 }
5719 public final Map.Entry<K,V> getRawResult() { return result; }
5720 }
5721
5722 @SuppressWarnings("serial") static final class MapReduceKeysTask<K,V,U>
5723 extends BulkTask<K,V,U> {
5724 final Fun<? super K, ? extends U> transformer;
5725 final BiFun<? super U, ? super U, ? extends U> reducer;
5726 U result;
5727 MapReduceKeysTask<K,V,U> rights, nextRight;
5728 MapReduceKeysTask
5729 (ConcurrentHashMapV8<K,V> m, BulkTask<K,V,?> p, int b,
5730 MapReduceKeysTask<K,V,U> nextRight,
5731 Fun<? super K, ? extends U> transformer,
5732 BiFun<? super U, ? super U, ? extends U> reducer) {
5733 super(m, p, b); this.nextRight = nextRight;
5734 this.transformer = transformer;
5735 this.reducer = reducer;
5736 }
5737 @SuppressWarnings("unchecked") public final boolean exec() {
5738 final Fun<? super K, ? extends U> transformer =
5739 this.transformer;
5740 final BiFun<? super U, ? super U, ? extends U> reducer =
5741 this.reducer;
5742 if (transformer == null || reducer == null)
5743 return abortOnNullFunction();
5744 try {
5745 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
5746 do {} while (!casPending(c = pending, c+1));
5747 (rights = new MapReduceKeysTask<K,V,U>
5748 (map, this, b >>>= 1, rights, transformer, reducer)).fork();
5749 }
5750 U r = null, u;
5751 while (advance() != null) {
5752 if ((u = transformer.apply((K)nextKey)) != null)
5753 r = (r == null) ? u : reducer.apply(r, u);
5754 }
5755 result = r;
5756 for (MapReduceKeysTask<K,V,U> t = this, s;;) {
5757 int c; BulkTask<K,V,?> par; U tr, sr;
5758 if ((c = t.pending) == 0) {
5759 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
5760 if ((sr = s.result) != null)
5761 t.result = ((tr = t.result) == null) ? sr : reducer.apply(tr, sr);
5762 }
5763 if ((par = t.parent) == null ||
5764 !(par instanceof MapReduceKeysTask)) {
5765 t.quietlyComplete();
5766 break;
5767 }
5768 t = (MapReduceKeysTask<K,V,U>)par;
5769 }
5770 else if (t.casPending(c, c - 1))
5771 break;
5772 }
5773 } catch (Throwable ex) {
5774 return tryCompleteComputation(ex);
5775 }
5776 for (MapReduceKeysTask<K,V,U> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
5777 s.exec();
5778 return false;
5779 }
5780 public final U getRawResult() { return result; }
5781 }
5782
5783 @SuppressWarnings("serial") static final class MapReduceValuesTask<K,V,U>
5784 extends BulkTask<K,V,U> {
5785 final Fun<? super V, ? extends U> transformer;
5786 final BiFun<? super U, ? super U, ? extends U> reducer;
5787 U result;
5788 MapReduceValuesTask<K,V,U> rights, nextRight;
5789 MapReduceValuesTask
5790 (ConcurrentHashMapV8<K,V> m, BulkTask<K,V,?> p, int b,
5791 MapReduceValuesTask<K,V,U> nextRight,
5792 Fun<? super V, ? extends U> transformer,
5793 BiFun<? super U, ? super U, ? extends U> reducer) {
5794 super(m, p, b); this.nextRight = nextRight;
5795 this.transformer = transformer;
5796 this.reducer = reducer;
5797 }
5798 @SuppressWarnings("unchecked") public final boolean exec() {
5799 final Fun<? super V, ? extends U> transformer =
5800 this.transformer;
5801 final BiFun<? super U, ? super U, ? extends U> reducer =
5802 this.reducer;
5803 if (transformer == null || reducer == null)
5804 return abortOnNullFunction();
5805 try {
5806 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
5807 do {} while (!casPending(c = pending, c+1));
5808 (rights = new MapReduceValuesTask<K,V,U>
5809 (map, this, b >>>= 1, rights, transformer, reducer)).fork();
5810 }
5811 U r = null, u;
5812 Object v;
5813 while ((v = advance()) != null) {
5814 if ((u = transformer.apply((V)v)) != null)
5815 r = (r == null) ? u : reducer.apply(r, u);
5816 }
5817 result = r;
5818 for (MapReduceValuesTask<K,V,U> t = this, s;;) {
5819 int c; BulkTask<K,V,?> par; U tr, sr;
5820 if ((c = t.pending) == 0) {
5821 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
5822 if ((sr = s.result) != null)
5823 t.result = ((tr = t.result) == null) ? sr : reducer.apply(tr, sr);
5824 }
5825 if ((par = t.parent) == null ||
5826 !(par instanceof MapReduceValuesTask)) {
5827 t.quietlyComplete();
5828 break;
5829 }
5830 t = (MapReduceValuesTask<K,V,U>)par;
5831 }
5832 else if (t.casPending(c, c - 1))
5833 break;
5834 }
5835 } catch (Throwable ex) {
5836 return tryCompleteComputation(ex);
5837 }
5838 for (MapReduceValuesTask<K,V,U> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
5839 s.exec();
5840 return false;
5841 }
5842 public final U getRawResult() { return result; }
5843 }
5844
5845 @SuppressWarnings("serial") static final class MapReduceEntriesTask<K,V,U>
5846 extends BulkTask<K,V,U> {
5847 final Fun<Map.Entry<K,V>, ? extends U> transformer;
5848 final BiFun<? super U, ? super U, ? extends U> reducer;
5849 U result;
5850 MapReduceEntriesTask<K,V,U> rights, nextRight;
5851 MapReduceEntriesTask
5852 (ConcurrentHashMapV8<K,V> m, BulkTask<K,V,?> p, int b,
5853 MapReduceEntriesTask<K,V,U> nextRight,
5854 Fun<Map.Entry<K,V>, ? extends U> transformer,
5855 BiFun<? super U, ? super U, ? extends U> reducer) {
5856 super(m, p, b); this.nextRight = nextRight;
5857 this.transformer = transformer;
5858 this.reducer = reducer;
5859 }
5860 @SuppressWarnings("unchecked") public final boolean exec() {
5861 final Fun<Map.Entry<K,V>, ? extends U> transformer =
5862 this.transformer;
5863 final BiFun<? super U, ? super U, ? extends U> reducer =
5864 this.reducer;
5865 if (transformer == null || reducer == null)
5866 return abortOnNullFunction();
5867 try {
5868 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
5869 do {} while (!casPending(c = pending, c+1));
5870 (rights = new MapReduceEntriesTask<K,V,U>
5871 (map, this, b >>>= 1, rights, transformer, reducer)).fork();
5872 }
5873 U r = null, u;
5874 Object v;
5875 while ((v = advance()) != null) {
5876 if ((u = transformer.apply(entryFor((K)nextKey, (V)v))) != null)
5877 r = (r == null) ? u : reducer.apply(r, u);
5878 }
5879 result = r;
5880 for (MapReduceEntriesTask<K,V,U> t = this, s;;) {
5881 int c; BulkTask<K,V,?> par; U tr, sr;
5882 if ((c = t.pending) == 0) {
5883 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
5884 if ((sr = s.result) != null)
5885 t.result = ((tr = t.result) == null) ? sr : reducer.apply(tr, sr);
5886 }
5887 if ((par = t.parent) == null ||
5888 !(par instanceof MapReduceEntriesTask)) {
5889 t.quietlyComplete();
5890 break;
5891 }
5892 t = (MapReduceEntriesTask<K,V,U>)par;
5893 }
5894 else if (t.casPending(c, c - 1))
5895 break;
5896 }
5897 } catch (Throwable ex) {
5898 return tryCompleteComputation(ex);
5899 }
5900 for (MapReduceEntriesTask<K,V,U> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
5901 s.exec();
5902 return false;
5903 }
5904 public final U getRawResult() { return result; }
5905 }
5906
5907 @SuppressWarnings("serial") static final class MapReduceMappingsTask<K,V,U>
5908 extends BulkTask<K,V,U> {
5909 final BiFun<? super K, ? super V, ? extends U> transformer;
5910 final BiFun<? super U, ? super U, ? extends U> reducer;
5911 U result;
5912 MapReduceMappingsTask<K,V,U> rights, nextRight;
5913 MapReduceMappingsTask
5914 (ConcurrentHashMapV8<K,V> m, BulkTask<K,V,?> p, int b,
5915 MapReduceMappingsTask<K,V,U> nextRight,
5916 BiFun<? super K, ? super V, ? extends U> transformer,
5917 BiFun<? super U, ? super U, ? extends U> reducer) {
5918 super(m, p, b); this.nextRight = nextRight;
5919 this.transformer = transformer;
5920 this.reducer = reducer;
5921 }
5922 @SuppressWarnings("unchecked") public final boolean exec() {
5923 final BiFun<? super K, ? super V, ? extends U> transformer =
5924 this.transformer;
5925 final BiFun<? super U, ? super U, ? extends U> reducer =
5926 this.reducer;
5927 if (transformer == null || reducer == null)
5928 return abortOnNullFunction();
5929 try {
5930 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
5931 do {} while (!casPending(c = pending, c+1));
5932 (rights = new MapReduceMappingsTask<K,V,U>
5933 (map, this, b >>>= 1, rights, transformer, reducer)).fork();
5934 }
5935 U r = null, u;
5936 Object v;
5937 while ((v = advance()) != null) {
5938 if ((u = transformer.apply((K)nextKey, (V)v)) != null)
5939 r = (r == null) ? u : reducer.apply(r, u);
5940 }
5941 result = r;
5942 for (MapReduceMappingsTask<K,V,U> t = this, s;;) {
5943 int c; BulkTask<K,V,?> par; U tr, sr;
5944 if ((c = t.pending) == 0) {
5945 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
5946 if ((sr = s.result) != null)
5947 t.result = ((tr = t.result) == null) ? sr : reducer.apply(tr, sr);
5948 }
5949 if ((par = t.parent) == null ||
5950 !(par instanceof MapReduceMappingsTask)) {
5951 t.quietlyComplete();
5952 break;
5953 }
5954 t = (MapReduceMappingsTask<K,V,U>)par;
5955 }
5956 else if (t.casPending(c, c - 1))
5957 break;
5958 }
5959 } catch (Throwable ex) {
5960 return tryCompleteComputation(ex);
5961 }
5962 for (MapReduceMappingsTask<K,V,U> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
5963 s.exec();
5964 return false;
5965 }
5966 public final U getRawResult() { return result; }
5967 }
5968
5969 @SuppressWarnings("serial") static final class MapReduceKeysToDoubleTask<K,V>
5970 extends BulkTask<K,V,Double> {
5971 final ObjectToDouble<? super K> transformer;
5972 final DoubleByDoubleToDouble reducer;
5973 final double basis;
5974 double result;
5975 MapReduceKeysToDoubleTask<K,V> rights, nextRight;
5976 MapReduceKeysToDoubleTask
5977 (ConcurrentHashMapV8<K,V> m, BulkTask<K,V,?> p, int b,
5978 MapReduceKeysToDoubleTask<K,V> nextRight,
5979 ObjectToDouble<? super K> transformer,
5980 double basis,
5981 DoubleByDoubleToDouble reducer) {
5982 super(m, p, b); this.nextRight = nextRight;
5983 this.transformer = transformer;
5984 this.basis = basis; this.reducer = reducer;
5985 }
5986 @SuppressWarnings("unchecked") public final boolean exec() {
5987 final ObjectToDouble<? super K> transformer =
5988 this.transformer;
5989 final DoubleByDoubleToDouble reducer = this.reducer;
5990 if (transformer == null || reducer == null)
5991 return abortOnNullFunction();
5992 try {
5993 final double id = this.basis;
5994 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
5995 do {} while (!casPending(c = pending, c+1));
5996 (rights = new MapReduceKeysToDoubleTask<K,V>
5997 (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
5998 }
5999 double r = id;
6000 while (advance() != null)
6001 r = reducer.apply(r, transformer.apply((K)nextKey));
6002 result = r;
6003 for (MapReduceKeysToDoubleTask<K,V> t = this, s;;) {
6004 int c; BulkTask<K,V,?> par;
6005 if ((c = t.pending) == 0) {
6006 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6007 t.result = reducer.apply(t.result, s.result);
6008 }
6009 if ((par = t.parent) == null ||
6010 !(par instanceof MapReduceKeysToDoubleTask)) {
6011 t.quietlyComplete();
6012 break;
6013 }
6014 t = (MapReduceKeysToDoubleTask<K,V>)par;
6015 }
6016 else if (t.casPending(c, c - 1))
6017 break;
6018 }
6019 } catch (Throwable ex) {
6020 return tryCompleteComputation(ex);
6021 }
6022 for (MapReduceKeysToDoubleTask<K,V> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
6023 s.exec();
6024 return false;
6025 }
6026 public final Double getRawResult() { return result; }
6027 }
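        // Note (illustrative): the primitive-specialized variants
        // (ToDouble, ToLong, ToInt) cannot use null to mean "no result",
        // so they thread the caller-supplied basis through instead.  The
        // basis should be an identity for the reducer -- e.g. 0.0 when the
        // reducer adds, or Double.MAX_VALUE when it takes a minimum --
        // otherwise it is folded into the answer once per leaf task and
        // the result depends on how the computation happened to split.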
6028
6029 @SuppressWarnings("serial") static final class MapReduceValuesToDoubleTask<K,V>
6030 extends BulkTask<K,V,Double> {
6031 final ObjectToDouble<? super V> transformer;
6032 final DoubleByDoubleToDouble reducer;
6033 final double basis;
6034 double result;
6035 MapReduceValuesToDoubleTask<K,V> rights, nextRight;
6036 MapReduceValuesToDoubleTask
6037 (ConcurrentHashMapV8<K,V> m, BulkTask<K,V,?> p, int b,
6038 MapReduceValuesToDoubleTask<K,V> nextRight,
6039 ObjectToDouble<? super V> transformer,
6040 double basis,
6041 DoubleByDoubleToDouble reducer) {
6042 super(m, p, b); this.nextRight = nextRight;
6043 this.transformer = transformer;
6044 this.basis = basis; this.reducer = reducer;
6045 }
6046 @SuppressWarnings("unchecked") public final boolean exec() {
6047 final ObjectToDouble<? super V> transformer =
6048 this.transformer;
6049 final DoubleByDoubleToDouble reducer = this.reducer;
6050 if (transformer == null || reducer == null)
6051 return abortOnNullFunction();
6052 try {
6053 final double id = this.basis;
6054 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6055 do {} while (!casPending(c = pending, c+1));
6056 (rights = new MapReduceValuesToDoubleTask<K,V>
6057 (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
6058 }
6059 double r = id;
6060 Object v;
6061 while ((v = advance()) != null)
6062 r = reducer.apply(r, transformer.apply((V)v));
6063 result = r;
6064 for (MapReduceValuesToDoubleTask<K,V> t = this, s;;) {
6065 int c; BulkTask<K,V,?> par;
6066 if ((c = t.pending) == 0) {
6067 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6068 t.result = reducer.apply(t.result, s.result);
6069 }
6070 if ((par = t.parent) == null ||
6071 !(par instanceof MapReduceValuesToDoubleTask)) {
6072 t.quietlyComplete();
6073 break;
6074 }
6075 t = (MapReduceValuesToDoubleTask<K,V>)par;
6076 }
6077 else if (t.casPending(c, c - 1))
6078 break;
6079 }
6080 } catch (Throwable ex) {
6081 return tryCompleteComputation(ex);
6082 }
6083 for (MapReduceValuesToDoubleTask<K,V> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
6084 s.exec();
6085 return false;
6086 }
6087 public final Double getRawResult() { return result; }
6088 }
6089
6090 @SuppressWarnings("serial") static final class MapReduceEntriesToDoubleTask<K,V>
6091 extends BulkTask<K,V,Double> {
6092 final ObjectToDouble<Map.Entry<K,V>> transformer;
6093 final DoubleByDoubleToDouble reducer;
6094 final double basis;
6095 double result;
6096 MapReduceEntriesToDoubleTask<K,V> rights, nextRight;
6097 MapReduceEntriesToDoubleTask
6098 (ConcurrentHashMapV8<K,V> m, BulkTask<K,V,?> p, int b,
6099 MapReduceEntriesToDoubleTask<K,V> nextRight,
6100 ObjectToDouble<Map.Entry<K,V>> transformer,
6101 double basis,
6102 DoubleByDoubleToDouble reducer) {
6103 super(m, p, b); this.nextRight = nextRight;
6104 this.transformer = transformer;
6105 this.basis = basis; this.reducer = reducer;
6106 }
6107 @SuppressWarnings("unchecked") public final boolean exec() {
6108 final ObjectToDouble<Map.Entry<K,V>> transformer =
6109 this.transformer;
6110 final DoubleByDoubleToDouble reducer = this.reducer;
6111 if (transformer == null || reducer == null)
6112 return abortOnNullFunction();
6113 try {
6114 final double id = this.basis;
6115 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6116 do {} while (!casPending(c = pending, c+1));
6117 (rights = new MapReduceEntriesToDoubleTask<K,V>
6118 (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
6119 }
6120 double r = id;
6121 Object v;
6122 while ((v = advance()) != null)
6123 r = reducer.apply(r, transformer.apply(entryFor((K)nextKey, (V)v)));
6124 result = r;
6125 for (MapReduceEntriesToDoubleTask<K,V> t = this, s;;) {
6126 int c; BulkTask<K,V,?> par;
6127 if ((c = t.pending) == 0) {
6128 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6129 t.result = reducer.apply(t.result, s.result);
6130 }
6131 if ((par = t.parent) == null ||
6132 !(par instanceof MapReduceEntriesToDoubleTask)) {
6133 t.quietlyComplete();
6134 break;
6135 }
6136 t = (MapReduceEntriesToDoubleTask<K,V>)par;
6137 }
6138 else if (t.casPending(c, c - 1))
6139 break;
6140 }
6141 } catch (Throwable ex) {
6142 return tryCompleteComputation(ex);
6143 }
6144 for (MapReduceEntriesToDoubleTask<K,V> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
6145 s.exec();
6146 return false;
6147 }
6148 public final Double getRawResult() { return result; }
6149 }
6150
6151 @SuppressWarnings("serial") static final class MapReduceMappingsToDoubleTask<K,V>
6152 extends BulkTask<K,V,Double> {
6153 final ObjectByObjectToDouble<? super K, ? super V> transformer;
6154 final DoubleByDoubleToDouble reducer;
6155 final double basis;
6156 double result;
6157 MapReduceMappingsToDoubleTask<K,V> rights, nextRight;
6158 MapReduceMappingsToDoubleTask
6159 (ConcurrentHashMapV8<K,V> m, BulkTask<K,V,?> p, int b,
6160 MapReduceMappingsToDoubleTask<K,V> nextRight,
6161 ObjectByObjectToDouble<? super K, ? super V> transformer,
6162 double basis,
6163 DoubleByDoubleToDouble reducer) {
6164 super(m, p, b); this.nextRight = nextRight;
6165 this.transformer = transformer;
6166 this.basis = basis; this.reducer = reducer;
6167 }
6168 @SuppressWarnings("unchecked") public final boolean exec() {
6169 final ObjectByObjectToDouble<? super K, ? super V> transformer =
6170 this.transformer;
6171 final DoubleByDoubleToDouble reducer = this.reducer;
6172 if (transformer == null || reducer == null)
6173 return abortOnNullFunction();
6174 try {
6175 final double id = this.basis;
6176 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6177 do {} while (!casPending(c = pending, c+1));
6178 (rights = new MapReduceMappingsToDoubleTask<K,V>
6179 (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
6180 }
6181 double r = id;
6182 Object v;
6183 while ((v = advance()) != null)
6184 r = reducer.apply(r, transformer.apply((K)nextKey, (V)v));
6185 result = r;
6186 for (MapReduceMappingsToDoubleTask<K,V> t = this, s;;) {
6187 int c; BulkTask<K,V,?> par;
6188 if ((c = t.pending) == 0) {
6189 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6190 t.result = reducer.apply(t.result, s.result);
6191 }
6192 if ((par = t.parent) == null ||
6193 !(par instanceof MapReduceMappingsToDoubleTask)) {
6194 t.quietlyComplete();
6195 break;
6196 }
6197 t = (MapReduceMappingsToDoubleTask<K,V>)par;
6198 }
6199 else if (t.casPending(c, c - 1))
6200 break;
6201 }
6202 } catch (Throwable ex) {
6203 return tryCompleteComputation(ex);
6204 }
6205 for (MapReduceMappingsToDoubleTask<K,V> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
6206 s.exec();
6207 return false;
6208 }
6209 public final Double getRawResult() { return result; }
6210 }
6211
6212 @SuppressWarnings("serial") static final class MapReduceKeysToLongTask<K,V>
6213 extends BulkTask<K,V,Long> {
6214 final ObjectToLong<? super K> transformer;
6215 final LongByLongToLong reducer;
6216 final long basis;
6217 long result;
6218 MapReduceKeysToLongTask<K,V> rights, nextRight;
6219 MapReduceKeysToLongTask
6220 (ConcurrentHashMapV8<K,V> m, BulkTask<K,V,?> p, int b,
6221 MapReduceKeysToLongTask<K,V> nextRight,
6222 ObjectToLong<? super K> transformer,
6223 long basis,
6224 LongByLongToLong reducer) {
6225 super(m, p, b); this.nextRight = nextRight;
6226 this.transformer = transformer;
6227 this.basis = basis; this.reducer = reducer;
6228 }
6229 @SuppressWarnings("unchecked") public final boolean exec() {
6230 final ObjectToLong<? super K> transformer =
6231 this.transformer;
6232 final LongByLongToLong reducer = this.reducer;
6233 if (transformer == null || reducer == null)
6234 return abortOnNullFunction();
6235 try {
6236 final long id = this.basis;
6237 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6238 do {} while (!casPending(c = pending, c+1));
6239 (rights = new MapReduceKeysToLongTask<K,V>
6240 (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
6241 }
6242 long r = id;
6243 while (advance() != null)
6244 r = reducer.apply(r, transformer.apply((K)nextKey));
6245 result = r;
6246 for (MapReduceKeysToLongTask<K,V> t = this, s;;) {
6247 int c; BulkTask<K,V,?> par;
6248 if ((c = t.pending) == 0) {
6249 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6250 t.result = reducer.apply(t.result, s.result);
6251 }
6252 if ((par = t.parent) == null ||
6253 !(par instanceof MapReduceKeysToLongTask)) {
6254 t.quietlyComplete();
6255 break;
6256 }
6257 t = (MapReduceKeysToLongTask<K,V>)par;
6258 }
6259 else if (t.casPending(c, c - 1))
6260 break;
6261 }
6262 } catch (Throwable ex) {
6263 return tryCompleteComputation(ex);
6264 }
6265 for (MapReduceKeysToLongTask<K,V> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
6266 s.exec();
6267 return false;
6268 }
6269 public final Long getRawResult() { return result; }
6270 }
6271
6272 @SuppressWarnings("serial") static final class MapReduceValuesToLongTask<K,V>
6273 extends BulkTask<K,V,Long> {
6274 final ObjectToLong<? super V> transformer;
6275 final LongByLongToLong reducer;
6276 final long basis;
6277 long result;
6278 MapReduceValuesToLongTask<K,V> rights, nextRight;
6279 MapReduceValuesToLongTask
6280 (ConcurrentHashMapV8<K,V> m, BulkTask<K,V,?> p, int b,
6281 MapReduceValuesToLongTask<K,V> nextRight,
6282 ObjectToLong<? super V> transformer,
6283 long basis,
6284 LongByLongToLong reducer) {
6285 super(m, p, b); this.nextRight = nextRight;
6286 this.transformer = transformer;
6287 this.basis = basis; this.reducer = reducer;
6288 }
6289 @SuppressWarnings("unchecked") public final boolean exec() {
6290 final ObjectToLong<? super V> transformer =
6291 this.transformer;
6292 final LongByLongToLong reducer = this.reducer;
6293 if (transformer == null || reducer == null)
6294 return abortOnNullFunction();
6295 try {
6296 final long id = this.basis;
6297 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6298 do {} while (!casPending(c = pending, c+1));
6299 (rights = new MapReduceValuesToLongTask<K,V>
6300 (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
6301 }
6302 long r = id;
6303 Object v;
6304 while ((v = advance()) != null)
6305 r = reducer.apply(r, transformer.apply((V)v));
6306 result = r;
6307 for (MapReduceValuesToLongTask<K,V> t = this, s;;) {
6308 int c; BulkTask<K,V,?> par;
6309 if ((c = t.pending) == 0) {
6310 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6311 t.result = reducer.apply(t.result, s.result);
6312 }
6313 if ((par = t.parent) == null ||
6314 !(par instanceof MapReduceValuesToLongTask)) {
6315 t.quietlyComplete();
6316 break;
6317 }
6318 t = (MapReduceValuesToLongTask<K,V>)par;
6319 }
6320 else if (t.casPending(c, c - 1))
6321 break;
6322 }
6323 } catch (Throwable ex) {
6324 return tryCompleteComputation(ex);
6325 }
6326 for (MapReduceValuesToLongTask<K,V> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
6327 s.exec();
6328 return false;
6329 }
6330 public final Long getRawResult() { return result; }
6331 }
6332
6333 @SuppressWarnings("serial") static final class MapReduceEntriesToLongTask<K,V>
6334 extends BulkTask<K,V,Long> {
6335 final ObjectToLong<Map.Entry<K,V>> transformer;
6336 final LongByLongToLong reducer;
6337 final long basis;
6338 long result;
6339 MapReduceEntriesToLongTask<K,V> rights, nextRight;
6340 MapReduceEntriesToLongTask
6341 (ConcurrentHashMapV8<K,V> m, BulkTask<K,V,?> p, int b,
6342 MapReduceEntriesToLongTask<K,V> nextRight,
6343 ObjectToLong<Map.Entry<K,V>> transformer,
6344 long basis,
6345 LongByLongToLong reducer) {
6346 super(m, p, b); this.nextRight = nextRight;
6347 this.transformer = transformer;
6348 this.basis = basis; this.reducer = reducer;
6349 }
6350 @SuppressWarnings("unchecked") public final boolean exec() {
6351 final ObjectToLong<Map.Entry<K,V>> transformer =
6352 this.transformer;
6353 final LongByLongToLong reducer = this.reducer;
6354 if (transformer == null || reducer == null)
6355 return abortOnNullFunction();
6356 try {
6357 final long id = this.basis;
6358 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6359 do {} while (!casPending(c = pending, c+1));
6360 (rights = new MapReduceEntriesToLongTask<K,V>
6361 (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
6362 }
6363 long r = id;
6364 Object v;
6365 while ((v = advance()) != null)
6366 r = reducer.apply(r, transformer.apply(entryFor((K)nextKey, (V)v)));
6367 result = r;
6368 for (MapReduceEntriesToLongTask<K,V> t = this, s;;) {
6369 int c; BulkTask<K,V,?> par;
6370 if ((c = t.pending) == 0) {
6371 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6372 t.result = reducer.apply(t.result, s.result);
6373 }
6374 if ((par = t.parent) == null ||
6375 !(par instanceof MapReduceEntriesToLongTask)) {
6376 t.quietlyComplete();
6377 break;
6378 }
6379 t = (MapReduceEntriesToLongTask<K,V>)par;
6380 }
6381 else if (t.casPending(c, c - 1))
6382 break;
6383 }
6384 } catch (Throwable ex) {
6385 return tryCompleteComputation(ex);
6386 }
6387 for (MapReduceEntriesToLongTask<K,V> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
6388 s.exec();
6389 return false;
6390 }
6391 public final Long getRawResult() { return result; }
6392 }
6393
6394 @SuppressWarnings("serial") static final class MapReduceMappingsToLongTask<K,V>
6395 extends BulkTask<K,V,Long> {
6396 final ObjectByObjectToLong<? super K, ? super V> transformer;
6397 final LongByLongToLong reducer;
6398 final long basis;
6399 long result;
6400 MapReduceMappingsToLongTask<K,V> rights, nextRight;
6401 MapReduceMappingsToLongTask
6402 (ConcurrentHashMapV8<K,V> m, BulkTask<K,V,?> p, int b,
6403 MapReduceMappingsToLongTask<K,V> nextRight,
6404 ObjectByObjectToLong<? super K, ? super V> transformer,
6405 long basis,
6406 LongByLongToLong reducer) {
6407 super(m, p, b); this.nextRight = nextRight;
6408 this.transformer = transformer;
6409 this.basis = basis; this.reducer = reducer;
6410 }
6411 @SuppressWarnings("unchecked") public final boolean exec() {
6412 final ObjectByObjectToLong<? super K, ? super V> transformer =
6413 this.transformer;
6414 final LongByLongToLong reducer = this.reducer;
6415 if (transformer == null || reducer == null)
6416 return abortOnNullFunction();
6417 try {
6418 final long id = this.basis;
6419 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6420 do {} while (!casPending(c = pending, c+1));
6421 (rights = new MapReduceMappingsToLongTask<K,V>
6422 (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
6423 }
6424 long r = id;
6425 Object v;
6426 while ((v = advance()) != null)
6427 r = reducer.apply(r, transformer.apply((K)nextKey, (V)v));
6428 result = r;
6429 for (MapReduceMappingsToLongTask<K,V> t = this, s;;) {
6430 int c; BulkTask<K,V,?> par;
6431 if ((c = t.pending) == 0) {
6432 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6433 t.result = reducer.apply(t.result, s.result);
6434 }
6435 if ((par = t.parent) == null ||
6436 !(par instanceof MapReduceMappingsToLongTask)) {
6437 t.quietlyComplete();
6438 break;
6439 }
6440 t = (MapReduceMappingsToLongTask<K,V>)par;
6441 }
6442 else if (t.casPending(c, c - 1))
6443 break;
6444 }
6445 } catch (Throwable ex) {
6446 return tryCompleteComputation(ex);
6447 }
6448 for (MapReduceMappingsToLongTask<K,V> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
6449 s.exec();
6450 return false;
6451 }
6452 public final Long getRawResult() { return result; }
6453 }
6454
6455 @SuppressWarnings("serial") static final class MapReduceKeysToIntTask<K,V>
6456 extends BulkTask<K,V,Integer> {
6457 final ObjectToInt<? super K> transformer;
6458 final IntByIntToInt reducer;
6459 final int basis;
6460 int result;
6461 MapReduceKeysToIntTask<K,V> rights, nextRight;
6462 MapReduceKeysToIntTask
6463 (ConcurrentHashMapV8<K,V> m, BulkTask<K,V,?> p, int b,
6464 MapReduceKeysToIntTask<K,V> nextRight,
6465 ObjectToInt<? super K> transformer,
6466 int basis,
6467 IntByIntToInt reducer) {
6468 super(m, p, b); this.nextRight = nextRight;
6469 this.transformer = transformer;
6470 this.basis = basis; this.reducer = reducer;
6471 }
6472 @SuppressWarnings("unchecked") public final boolean exec() {
6473 final ObjectToInt<? super K> transformer =
6474 this.transformer;
6475 final IntByIntToInt reducer = this.reducer;
6476 if (transformer == null || reducer == null)
6477 return abortOnNullFunction();
6478 try {
6479 final int id = this.basis;
6480 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6481 do {} while (!casPending(c = pending, c+1));
6482 (rights = new MapReduceKeysToIntTask<K,V>
6483 (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
6484 }
6485 int r = id;
6486 while (advance() != null)
6487 r = reducer.apply(r, transformer.apply((K)nextKey));
6488 result = r;
6489 for (MapReduceKeysToIntTask<K,V> t = this, s;;) {
6490 int c; BulkTask<K,V,?> par;
6491 if ((c = t.pending) == 0) {
6492 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6493 t.result = reducer.apply(t.result, s.result);
6494 }
6495 if ((par = t.parent) == null ||
6496 !(par instanceof MapReduceKeysToIntTask)) {
6497 t.quietlyComplete();
6498 break;
6499 }
6500 t = (MapReduceKeysToIntTask<K,V>)par;
6501 }
6502 else if (t.casPending(c, c - 1))
6503 break;
6504 }
6505 } catch (Throwable ex) {
6506 return tryCompleteComputation(ex);
6507 }
6508 for (MapReduceKeysToIntTask<K,V> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
6509 s.exec();
6510 return false;
6511 }
6512 public final Integer getRawResult() { return result; }
6513 }
6514
6515 @SuppressWarnings("serial") static final class MapReduceValuesToIntTask<K,V>
6516 extends BulkTask<K,V,Integer> {
6517 final ObjectToInt<? super V> transformer;
6518 final IntByIntToInt reducer;
6519 final int basis;
6520 int result;
6521 MapReduceValuesToIntTask<K,V> rights, nextRight;
6522 MapReduceValuesToIntTask
6523 (ConcurrentHashMapV8<K,V> m, BulkTask<K,V,?> p, int b,
6524 MapReduceValuesToIntTask<K,V> nextRight,
6525 ObjectToInt<? super V> transformer,
6526 int basis,
6527 IntByIntToInt reducer) {
6528 super(m, p, b); this.nextRight = nextRight;
6529 this.transformer = transformer;
6530 this.basis = basis; this.reducer = reducer;
6531 }
6532 @SuppressWarnings("unchecked") public final boolean exec() {
6533 final ObjectToInt<? super V> transformer =
6534 this.transformer;
6535 final IntByIntToInt reducer = this.reducer;
6536 if (transformer == null || reducer == null)
6537 return abortOnNullFunction();
6538 try {
6539 final int id = this.basis;
6540 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6541 do {} while (!casPending(c = pending, c+1));
6542 (rights = new MapReduceValuesToIntTask<K,V>
6543 (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
6544 }
6545 int r = id;
6546 Object v;
6547 while ((v = advance()) != null)
6548 r = reducer.apply(r, transformer.apply((V)v));
6549 result = r;
6550 for (MapReduceValuesToIntTask<K,V> t = this, s;;) {
6551 int c; BulkTask<K,V,?> par;
6552 if ((c = t.pending) == 0) {
6553 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6554 t.result = reducer.apply(t.result, s.result);
6555 }
6556 if ((par = t.parent) == null ||
6557 !(par instanceof MapReduceValuesToIntTask)) {
6558 t.quietlyComplete();
6559 break;
6560 }
6561 t = (MapReduceValuesToIntTask<K,V>)par;
6562 }
6563 else if (t.casPending(c, c - 1))
6564 break;
6565 }
6566 } catch (Throwable ex) {
6567 return tryCompleteComputation(ex);
6568 }
6569 for (MapReduceValuesToIntTask<K,V> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
6570 s.exec();
6571 return false;
6572 }
6573 public final Integer getRawResult() { return result; }
6574 }
6575
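    /* Same structure as above, but the transformer maps Map.Entry objects to int. */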
6576 @SuppressWarnings("serial") static final class MapReduceEntriesToIntTask<K,V>
6577 extends BulkTask<K,V,Integer> {
6578 final ObjectToInt<Map.Entry<K,V>> transformer;
6579 final IntByIntToInt reducer;
6580 final int basis;
6581 int result;
6582 MapReduceEntriesToIntTask<K,V> rights, nextRight;
6583 MapReduceEntriesToIntTask
6584 (ConcurrentHashMapV8<K,V> m, BulkTask<K,V,?> p, int b,
6585 MapReduceEntriesToIntTask<K,V> nextRight,
6586 ObjectToInt<Map.Entry<K,V>> transformer,
6587 int basis,
6588 IntByIntToInt reducer) {
6589 super(m, p, b); this.nextRight = nextRight;
6590 this.transformer = transformer;
6591 this.basis = basis; this.reducer = reducer;
6592 }
6593 @SuppressWarnings("unchecked") public final boolean exec() {
6594 final ObjectToInt<Map.Entry<K,V>> transformer =
6595 this.transformer;
6596 final IntByIntToInt reducer = this.reducer;
6597 if (transformer == null || reducer == null)
6598 return abortOnNullFunction();
6599 try {
6600 final int id = this.basis;
6601 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6602 do {} while (!casPending(c = pending, c+1));
6603 (rights = new MapReduceEntriesToIntTask<K,V>
6604 (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
6605 }
6606 int r = id;
6607 Object v;
6608 while ((v = advance()) != null)
6609 r = reducer.apply(r, transformer.apply(entryFor((K)nextKey, (V)v)));
6610 result = r;
6611 for (MapReduceEntriesToIntTask<K,V> t = this, s;;) {
6612 int c; BulkTask<K,V,?> par;
6613 if ((c = t.pending) == 0) {
6614 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6615 t.result = reducer.apply(t.result, s.result);
6616 }
6617 if ((par = t.parent) == null ||
6618 !(par instanceof MapReduceEntriesToIntTask)) {
6619 t.quietlyComplete();
6620 break;
6621 }
6622 t = (MapReduceEntriesToIntTask<K,V>)par;
6623 }
6624 else if (t.casPending(c, c - 1))
6625 break;
6626 }
6627 } catch (Throwable ex) {
6628 return tryCompleteComputation(ex);
6629 }
6630 for (MapReduceEntriesToIntTask<K,V> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
6631 s.exec();
6632 return false;
6633 }
6634 public final Integer getRawResult() { return result; }
6635 }
6636
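    /* Same structure as above, but the transformer maps (key, value) pairs to int. */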
6637 @SuppressWarnings("serial") static final class MapReduceMappingsToIntTask<K,V>
6638 extends BulkTask<K,V,Integer> {
6639 final ObjectByObjectToInt<? super K, ? super V> transformer;
6640 final IntByIntToInt reducer;
6641 final int basis;
6642 int result;
6643 MapReduceMappingsToIntTask<K,V> rights, nextRight;
6644 MapReduceMappingsToIntTask
6645 (ConcurrentHashMapV8<K,V> m, BulkTask<K,V,?> p, int b,
6646 MapReduceMappingsToIntTask<K,V> nextRight,
6647 ObjectByObjectToInt<? super K, ? super V> transformer,
6648 int basis,
6649 IntByIntToInt reducer) {
6650 super(m, p, b); this.nextRight = nextRight;
6651 this.transformer = transformer;
6652 this.basis = basis; this.reducer = reducer;
6653 }
6654 @SuppressWarnings("unchecked") public final boolean exec() {
6655 final ObjectByObjectToInt<? super K, ? super V> transformer =
6656 this.transformer;
6657 final IntByIntToInt reducer = this.reducer;
6658 if (transformer == null || reducer == null)
6659 return abortOnNullFunction();
6660 try {
6661 final int id = this.basis;
6662 for (int c, b = batch(); b > 1 && baseIndex != baseLimit;) {
6663 do {} while (!casPending(c = pending, c+1));
6664 (rights = new MapReduceMappingsToIntTask<K,V>
6665 (map, this, b >>>= 1, rights, transformer, id, reducer)).fork();
6666 }
6667 int r = id;
6668 Object v;
6669 while ((v = advance()) != null)
6670 r = reducer.apply(r, transformer.apply((K)nextKey, (V)v));
6671 result = r;
6672 for (MapReduceMappingsToIntTask<K,V> t = this, s;;) {
6673 int c; BulkTask<K,V,?> par;
6674 if ((c = t.pending) == 0) {
6675 for (s = t.rights; s != null; s = t.rights = s.nextRight) {
6676 t.result = reducer.apply(t.result, s.result);
6677 }
6678 if ((par = t.parent) == null ||
6679 !(par instanceof MapReduceMappingsToIntTask)) {
6680 t.quietlyComplete();
6681 break;
6682 }
6683 t = (MapReduceMappingsToIntTask<K,V>)par;
6684 }
6685 else if (t.casPending(c, c - 1))
6686 break;
6687 }
6688 } catch (Throwable ex) {
6689 return tryCompleteComputation(ex);
6690 }
6691 for (MapReduceMappingsToIntTask<K,V> s = rights; s != null && s.tryUnfork(); s = s.nextRight)
6692 s.exec();
6693 return false;
6694 }
6695 public final Integer getRawResult() { return result; }
6696 }
6697
6698 // Unsafe mechanics
6699 private static final sun.misc.Unsafe UNSAFE;
6700 private static final long counterOffset;
6701 private static final long sizeCtlOffset;
6702 private static final long ABASE;
6703 private static final int ASHIFT;
6704
6705 static {
6706 int ss;
6707 try {
6708 UNSAFE = getUnsafe();
6709 Class<?> k = ConcurrentHashMapV8.class;
6710 counterOffset = UNSAFE.objectFieldOffset
6711 (k.getDeclaredField("counter"));
6712 sizeCtlOffset = UNSAFE.objectFieldOffset
6713 (k.getDeclaredField("sizeCtl"));
6714 Class<?> sc = Node[].class;
6715 ABASE = UNSAFE.arrayBaseOffset(sc);
6716 ss = UNSAFE.arrayIndexScale(sc);
6717 } catch (Exception e) {
6718 throw new Error(e);
6719 }
6720 if ((ss & (ss-1)) != 0)
6721 throw new Error("data type scale not a power of two");
6722 ASHIFT = 31 - Integer.numberOfLeadingZeros(ss);
6723 }
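    /*
     * Illustrative sketch, not part of this file: because the element
     * scale ss of a Node[] is checked to be a power of two, ASHIFT is
     * its log2, so the raw offset of slot i is ((long)i << ASHIFT) + ABASE.
     * Slot access elsewhere in this class is presumed to follow this form,
     * e.g. a volatile read of slot i of a table tab would look like:
     *
     *   Node n = (Node) UNSAFE.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
     */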
6724
6725 /**
6726 * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
6727 * Replace with a simple call to Unsafe.getUnsafe when integrating
6728 * into a jdk.
6729 *
6730 * @return a sun.misc.Unsafe
6731 */
6732 private static sun.misc.Unsafe getUnsafe() {
6733 try {
6734 return sun.misc.Unsafe.getUnsafe();
6735 } catch (SecurityException se) {
6736 try {
6737 return java.security.AccessController.doPrivileged
6738 (new java.security
6739 .PrivilegedExceptionAction<sun.misc.Unsafe>() {
6740 public sun.misc.Unsafe run() throws Exception {
6741 java.lang.reflect.Field f = sun.misc
6742 .Unsafe.class.getDeclaredField("theUnsafe");
6743 f.setAccessible(true);
6744 return (sun.misc.Unsafe) f.get(null);
6745 }});
6746 } catch (java.security.PrivilegedActionException e) {
6747 throw new RuntimeException("Could not initialize intrinsics",
6748 e.getCause());
6749 }
6750 }
6751 }
6752 }