root/jsr166/jsr166/src/main/java/util/concurrent/ConcurrentHashMap.java
Revision: 1.149
Committed: Thu Dec 13 20:34:05 2012 UTC by dl
Branch: MAIN
Changes since 1.148: +1316 -1454 lines
Log Message:
Cooperative resizing, plus other fixes and improvements

1 /*
2 * Written by Doug Lea with assistance from members of JCP JSR-166
3 * Expert Group and released to the public domain, as explained at
4 * http://creativecommons.org/publicdomain/zero/1.0/
5 */
6
7 package java.util.concurrent;
8 import java.util.concurrent.ForkJoinPool;
9 import java.util.concurrent.CountedCompleter;
10
11 import java.util.Comparator;
12 import java.util.Arrays;
13 import java.util.Map;
14 import java.util.Set;
15 import java.util.Collection;
16 import java.util.AbstractMap;
17 import java.util.AbstractSet;
18 import java.util.AbstractCollection;
19 import java.util.Hashtable;
20 import java.util.HashMap;
21 import java.util.Iterator;
22 import java.util.Enumeration;
23 import java.util.ConcurrentModificationException;
24 import java.util.NoSuchElementException;
25 import java.util.concurrent.ConcurrentMap;
26 import java.util.concurrent.locks.AbstractQueuedSynchronizer;
27 import java.util.concurrent.atomic.AtomicInteger;
28 import java.util.concurrent.atomic.AtomicReference;
29 import java.io.Serializable;
30
31 /**
32 * A hash table supporting full concurrency of retrievals and
33 * high expected concurrency for updates. This class obeys the
34 * same functional specification as {@link java.util.Hashtable}, and
35 * includes versions of methods corresponding to each method of
36 * {@code Hashtable}. However, even though all operations are
37 * thread-safe, retrieval operations do <em>not</em> entail locking,
38 * and there is <em>not</em> any support for locking the entire table
39 * in a way that prevents all access. This class is fully
40 * interoperable with {@code Hashtable} in programs that rely on its
41 * thread safety but not on its synchronization details.
42 *
43 * <p>Retrieval operations (including {@code get}) generally do not
44 * block, so may overlap with update operations (including {@code put}
45 * and {@code remove}). Retrievals reflect the results of the most
46 * recently <em>completed</em> update operations holding upon their
47 * onset. (More formally, an update operation for a given key bears a
48 * <em>happens-before</em> relation with any (non-null) retrieval for
49 * that key reporting the updated value.) For aggregate operations
50 * such as {@code putAll} and {@code clear}, concurrent retrievals may
51 * reflect insertion or removal of only some entries. Similarly,
52 * Iterators and Enumerations return elements reflecting the state of
53 * the hash table at some point at or since the creation of the
54 * iterator/enumeration. They do <em>not</em> throw {@link
55 * ConcurrentModificationException}. However, iterators are designed
56 * to be used by only one thread at a time. Bear in mind that the
57 * results of aggregate status methods including {@code size}, {@code
58 * isEmpty}, and {@code containsValue} are typically useful only when
59 * a map is not undergoing concurrent updates in other threads.
60 * Otherwise the results of these methods reflect transient states
61 * that may be adequate for monitoring or estimation purposes, but not
62 * for program control.
63 *
64 * <p>The table is dynamically expanded when there are too many
65 * collisions (i.e., keys that have distinct hash codes but fall into
66 * the same slot modulo the table size), with the expected average
67 * effect of maintaining roughly two bins per mapping (corresponding
68 * to a 0.75 load factor threshold for resizing). There may be much
69 * variance around this average as mappings are added and removed, but
70 * overall, this maintains a commonly accepted time/space tradeoff for
71 * hash tables. However, resizing this or any other kind of hash
72 * table may be a relatively slow operation. When possible, it is a
73 * good idea to provide a size estimate as an optional {@code
74 * initialCapacity} constructor argument. An additional optional
75 * {@code loadFactor} constructor argument provides a further means of
76 * customizing initial table capacity by specifying the table density
77 * to be used in calculating the amount of space to allocate for the
78 * given number of elements. Also, for compatibility with previous
79 * versions of this class, constructors may optionally specify an
80 * expected {@code concurrencyLevel} as an additional hint for
81 * internal sizing. Note that using many keys with exactly the same
82 * {@code hashCode()} is a sure way to slow down performance of any
83 * hash table.
84 *
85 * <p>A {@link Set} projection of a ConcurrentHashMap may be created
86 * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
87 * (using {@link #keySet(Object)}) when only keys are of interest, and the
88 * mapped values are (perhaps transiently) not used or all take the
89 * same mapping value.
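 *
 * <p>For example, a minimal sketch of using such a projection as a
 * concurrent {@code Set} (illustrative only, not an exhaustive usage):
 *
 * <pre> {@code
 * Set<String> seen = ConcurrentHashMap.newKeySet();
 * seen.add("alpha");                 // backed by a ConcurrentHashMap
 * boolean added = seen.add("alpha"); // false; element already present
 * }</pre>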
90 *
91 * <p>A ConcurrentHashMap can be used as a scalable frequency map (a
92 * form of histogram or multiset) by using {@link LongAdder} values
93 * and initializing via {@link #computeIfAbsent}. For example, to add
94 * a count to a {@code ConcurrentHashMap<String,LongAdder> freqs}, you
95 * can use {@code freqs.computeIfAbsent(k, k -> new
96 * LongAdder()).increment();}
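 *
 * <p>A slightly fuller sketch of this idiom (illustrative only; assumes
 * a {@code LongAdder} with {@code increment} and {@code sum} methods and
 * some iterable {@code words} source):
 *
 * <pre> {@code
 * ConcurrentHashMap<String,LongAdder> freqs =
 *   new ConcurrentHashMap<String,LongAdder>();
 * for (String word : words)
 *   freqs.computeIfAbsent(word, k -> new LongAdder()).increment();
 * long total = 0L;
 * for (LongAdder a : freqs.values())
 *   total += a.sum(); // may reflect a transient state under concurrent updates
 * }</pre>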
97 *
98 * <p>This class and its views and iterators implement all of the
99 * <em>optional</em> methods of the {@link Map} and {@link Iterator}
100 * interfaces.
101 *
102 * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
103 * does <em>not</em> allow {@code null} to be used as a key or value.
104 *
105 * <p>ConcurrentHashMaps support parallel operations using the {@link
106 * ForkJoinPool#commonPool}. (Tasks that may be used in other contexts
107 * are available in class {@link ForkJoinTasks}). These operations are
108 * designed to be safely, and often sensibly, applied even with maps
109 * that are being concurrently updated by other threads; for example,
110 * when computing a snapshot summary of the values in a shared
111 * registry. There are three kinds of operation, each with four
112 * forms, accepting functions with Keys, Values, Entries, and (Key,
113 * Value) arguments and/or return values. (The first three forms are
114 * also available via the {@link #keySet()}, {@link #values()} and
115 * {@link #entrySet()} views). Because the elements of a
116 * ConcurrentHashMap are not ordered in any particular way, and may be
117 * processed in different orders in different parallel executions, the
118 * correctness of supplied functions should not depend on any
119 * ordering, or on any other objects or values that may transiently
120 * change while computation is in progress; and except for forEach
121 * actions, should ideally be side-effect-free.
122 *
123 * <ul>
124 * <li> forEach: Perform a given action on each element.
125 * A variant form applies a given transformation on each element
126 * before performing the action.</li>
127 *
128 * <li> search: Return the first available non-null result of
129 * applying a given function on each element; skipping further
130 * search when a result is found.</li>
131 *
132 * <li> reduce: Accumulate each element. The supplied reduction
133 * function cannot rely on ordering (more formally, it should be
134 * both associative and commutative). There are five variants:
135 *
136 * <ul>
137 *
138 * <li> Plain reductions. (There is not a form of this method for
139 * (key, value) function arguments since there is no corresponding
140 * return type.)</li>
141 *
142 * <li> Mapped reductions that accumulate the results of a given
143 * function applied to each element.</li>
144 *
145 * <li> Reductions to scalar doubles, longs, and ints, using a
146 * given basis value.</li>
147 *
148 * </ul>
149 * </li>
150 * </ul>
151 *
152 * <p>The concurrency properties of bulk operations follow
153 * from those of ConcurrentHashMap: Any non-null result returned
154 * from {@code get(key)} and related access methods bears a
155 * happens-before relation with the associated insertion or
156 * update. The result of any bulk operation reflects the
157 * composition of these per-element relations (but is not
158 * necessarily atomic with respect to the map as a whole unless it
159 * is somehow known to be quiescent). Conversely, because keys
160 * and values in the map are never null, null serves as a reliable
161 * atomic indicator of the current lack of any result. To
162 * maintain this property, null serves as an implicit basis for
163 * all non-scalar reduction operations. For the double, long, and
164 * int versions, the basis should be one that, when combined with
165 * any other value, returns that other value (more formally, it
166 * should be the identity element for the reduction). Most common
167 * reductions have these properties; for example, computing a sum
168 * with basis 0 or a minimum with basis MAX_VALUE.
169 *
170 * <p>Search and transformation functions provided as arguments
171 * should similarly return null to indicate the lack of any result
172 * (in which case it is not used). In the case of mapped
173 * reductions, this also enables transformations to serve as
174 * filters, returning null (or, in the case of primitive
175 * specializations, the identity basis) if the element should not
176 * be combined. You can create compound transformations and
177 * filterings by composing them yourself under this "null means
178 * there is nothing there now" rule before using them in search or
179 * reduce operations.
180 *
181 * <p>Methods accepting and/or returning Entry arguments maintain
182 * key-value associations. They may be useful for example when
183 * finding the key for the greatest value. Note that "plain" Entry
184 * arguments can be supplied using {@code new
185 * AbstractMap.SimpleEntry(k,v)}.
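 *
 * <p>For instance (a minimal illustrative sketch):
 *
 * <pre> {@code
 * Map.Entry<String,Long> e =
 *   new AbstractMap.SimpleEntry<String,Long>("key", 1L);
 * }</pre>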
186 *
187 * <p>Bulk operations may complete abruptly, throwing an
188 * exception encountered in the application of a supplied
189 * function. Bear in mind when handling such exceptions that other
190 * concurrently executing functions could also have thrown
191 * exceptions, or would have done so if the first exception had
192 * not occurred.
193 *
194 * <p>Parallel speedups for bulk operations compared to sequential
195 * processing are common but not guaranteed. Operations involving
196 * brief functions on small maps may execute more slowly than
197 * sequential loops if the underlying work to parallelize the
198 * computation is more expensive than the computation itself.
199 * Similarly, parallelization may not lead to much actual parallelism
200 * if all processors are busy performing unrelated tasks.
201 *
202 * <p>All arguments to all task methods must be non-null.
203 *
204 * <p><em>jsr166e note: During transition, this class
205 * uses nested functional interfaces with different names but the
206 * same forms as those expected for JDK8.</em>
207 *
208 * <p>This class is a member of the
209 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
210 * Java Collections Framework</a>.
211 *
212 * @since 1.5
213 * @author Doug Lea
214 * @param <K> the type of keys maintained by this map
215 * @param <V> the type of mapped values
216 */
217 public class ConcurrentHashMap<K, V>
218 implements ConcurrentMap<K, V>, Serializable {
219 private static final long serialVersionUID = 7249069246763182397L;
220
221 /**
222 * A partitionable iterator. A Spliterator can be traversed
223 * directly, but can also be partitioned (before traversal) by
224 * creating another Spliterator that covers a non-overlapping
225 * portion of the elements, and so may be amenable to parallel
226 * execution.
227 *
228 * <p>This interface exports a subset of expected JDK8
229 * functionality.
230 *
231 * <p>Sample usage: Here is one (of the several) ways to compute
232 * the sum of the values held in a map using the ForkJoin
233 * framework. As illustrated here, Spliterators are well suited to
234 * designs in which a task repeatedly splits off half its work
235 * into forked subtasks until small enough to process directly,
236 * and then joins these subtasks. Variants of this style can also
237 * be used in completion-based designs.
238 *
239 * <pre>
240 * {@code ConcurrentHashMap<String, Long> m = ...
241 * // split as if there were 8 * parallelism, for load balance
242 * int n = m.size();
243 * int p = aForkJoinPool.getParallelism() * 8;
244 * int split = (n < p)? n : p;
245 * long sum = aForkJoinPool.invoke(new SumValues(m.valueSpliterator(), split, null));
246 * // ...
247 * static class SumValues extends RecursiveTask<Long> {
248 * final Spliterator<Long> s;
249 * final int split; // split while > 1
250 * final SumValues nextJoin; // records forked subtasks to join
251 * SumValues(Spliterator<Long> s, int split, SumValues nextJoin) {
252 * this.s = s; this.split = split; this.nextJoin = nextJoin;
253 * }
254 * public Long compute() {
255 * long sum = 0;
256 * SumValues subtasks = null; // fork subtasks
257 * for (int h = split >>> 1; h > 0; h >>>= 1)
258 * (subtasks = new SumValues(s.split(), h, subtasks)).fork();
259 * while (s.hasNext()) // directly process remaining elements
260 * sum += s.next();
261 * for (SumValues t = subtasks; t != null; t = t.nextJoin)
262 * sum += t.join(); // collect subtask results
263 * return sum;
264 * }
265 * }
266 * }</pre>
267 */
268 public static interface Spliterator<T> extends Iterator<T> {
269 /**
270 * Returns a Spliterator covering approximately half of the
271 * elements, guaranteed not to overlap with those subsequently
272 * returned by this Spliterator. After invoking this method,
273 * the current Spliterator will <em>not</em> produce any of
274 * the elements of the returned Spliterator, but the two
275 * Spliterators together will produce all of the elements that
276 * would have been produced by this Spliterator had this
277 * method not been called. The exact number of elements
278 * produced by the returned Spliterator is not guaranteed, and
279 * may be zero (i.e., with {@code hasNext()} reporting {@code
280 * false}) if this Spliterator cannot be further split.
281 *
282 * @return a Spliterator covering approximately half of the
283 * elements
284 * @throws IllegalStateException if this Spliterator has
285 * already commenced traversing elements
286 */
287 Spliterator<T> split();
288 }
289
290 /*
291 * Overview:
292 *
293 * The primary design goal of this hash table is to maintain
294 * concurrent readability (typically method get(), but also
295 * iterators and related methods) while minimizing update
296 * contention. Secondary goals are to keep space consumption about
297 * the same or better than java.util.HashMap, and to support high
298 * initial insertion rates on an empty table by many threads.
299 *
300 * Each key-value mapping is held in a Node. Because Node fields
301 * can contain special values, they are defined using plain Object
302 * types. Similarly in turn, all internal methods that use them
303 * work off Object types. And similarly, so do the internal
304 * methods of auxiliary iterator and view classes. This also
305 * allows many of the public methods to be factored into a smaller
306 * number of internal methods (although sadly not so for the five
307 * variants of put-related operations). The validation-based
308 * approach explained below leads to a lot of code sprawl because
309 * retry-control precludes factoring into smaller methods.
310 *
311 * The table is lazily initialized to a power-of-two size upon the
312 * first insertion. Each bin in the table normally contains a
313 * list of Nodes (most often, the list has only zero or one Node).
314 * Table accesses require volatile/atomic reads, writes, and
315 * CASes. Because there is no other way to arrange this without
316 * adding further indirections, we use intrinsics
317 * (sun.misc.Unsafe) operations. The lists of nodes within bins
318 * are always accurately traversable under volatile reads, so long
319 * as lookups check hash code and non-nullness of value before
320 * checking key equality.
321 *
322 * We use the top (sign) bit of Node hash fields for control
323 * purposes -- it is available anyway because of addressing
324 * constraints. Nodes with negative hash fields are forwarding
325 * nodes to either TreeBins or resized tables. The lower 31 bits
326 * of each normal Node's hash field contain a transformation of
327 * the key's hash code.
328 *
329 * Insertion (via put or its variants) of the first node in an
330 * empty bin is performed by just CASing it to the bin. This is
331 * by far the most common case for put operations under most
332 * key/hash distributions. Other update operations (insert,
333 * delete, and replace) require locks. We do not want to waste
334 * the space required to associate a distinct lock object with
335 * each bin, so instead use the first node of a bin list itself as
336 * a lock. Locking support for these locks relies on builtin
337 * "synchronized" monitors.
338 *
339 * Using the first node of a list as a lock does not by itself
340 * suffice though: When a node is locked, any update must first
341 * validate that it is still the first node after locking it, and
342 * retry if not. Because new nodes are always appended to lists,
343 * once a node is first in a bin, it remains first until deleted
344 * or the bin becomes invalidated (upon resizing). However,
345 * operations that only conditionally update may inspect nodes
346 * until the point of update. This is a converse of sorts to the
347 * lazy locking technique described by Herlihy & Shavit.
348 *
349 * The main disadvantage of per-bin locks is that other update
350 * operations on other nodes in a bin list protected by the same
351 * lock can stall, for example when user equals() or mapping
352 * functions take a long time. However, statistically, under
353 * random hash codes, this is not a common problem. Ideally, the
354 * frequency of nodes in bins follows a Poisson distribution
355 * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
356 * parameter of about 0.5 on average, given the resizing threshold
357 * of 0.75, although with a large variance because of resizing
358 * granularity. Ignoring variance, the expected occurrences of
359 * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
360 * first values are:
361 *
362 * 0: 0.60653066
363 * 1: 0.30326533
364 * 2: 0.07581633
365 * 3: 0.01263606
366 * 4: 0.00157952
367 * 5: 0.00015795
368 * 6: 0.00001316
369 * 7: 0.00000094
370 * 8: 0.00000006
371 * more: less than 1 in ten million
372 *
373 * Lock contention probability for two threads accessing distinct
374 * elements is roughly 1 / (8 * #elements) under random hashes.
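 *
 * A quick way to reproduce the figures above (an illustrative sketch
 * only, not part of the implementation):
 *
 *   double p = Math.exp(-0.5);            // k = 0 term
 *   for (int k = 0; k <= 8; ++k) {
 *     System.out.printf("%d: %.8f%n", k, p);
 *     p = p * 0.5 / (k + 1);              // next Poisson term
 *   }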
375 *
376 * Actual hash code distributions encountered in practice
377 * sometimes deviate significantly from uniform randomness. This
378 * includes the case when N > (1<<30), so some keys MUST collide.
379 * Similarly for dumb or hostile usages in which multiple keys are
380 * designed to have identical hash codes. Also, although we guard
381 * against the worst effects of this (see method spread), sets of
382 * hashes may differ only in bits that do not impact their bin
383 * index for a given power-of-two mask. So we use a secondary
384 * strategy that applies when the number of nodes in a bin exceeds
385 * a threshold, and at least one of the keys implements
386 * Comparable. These TreeBins use a balanced tree to hold nodes
387 * (a specialized form of red-black trees), bounding search time
388 * to O(log N). Each search step in a TreeBin is around twice as
389 * slow as in a regular list, but given that N cannot exceed
390 * (1<<64) (before running out of addresses) this bounds search
391 * steps, lock hold times, etc, to reasonable constants (roughly
392 * 100 nodes inspected per operation worst case) so long as keys
393 * are Comparable (which is very common -- String, Long, etc).
394 * TreeBin nodes (TreeNodes) also maintain the same "next"
395 * traversal pointers as regular nodes, so can be traversed in
396 * iterators in the same way.
397 *
398 * The table is resized when occupancy exceeds a percentage
399 * threshold (nominally, 0.75, but see below). Any thread
400 * noticing an overfull bin may assist in resizing after the
401 * initiating thread allocates and sets up the replacement
402 * array. However, rather than stalling, these other threads may
403 * proceed with insertions etc. The use of TreeBins shields us
404 * from the worst case effects of overfilling while resizes are in
405 * progress. Resizing proceeds by transferring bins, one by one,
406 * from the table to the next table. To enable concurrency, the
407 * next table must be (incrementally) prefilled with place-holders
408 * serving as reverse forwarders to the old table. Because we are
409 * using power-of-two expansion, the elements from each bin must
410 * either stay at same index, or move with a power of two
411 * offset. We eliminate unnecessary node creation by catching
412 * cases where old nodes can be reused because their next fields
413 * won't change. On average, only about one-sixth of them need
414 * cloning when a table doubles. The nodes they replace will be
415 * garbage collectable as soon as they are no longer referenced by
416 * any reader thread that may be in the midst of concurrently
417 * traversing the table. Upon transfer, the old table bin contains
418 * only a special forwarding node (with hash field "MOVED") that
419 * contains the next table as its key. On encountering a
420 * forwarding node, access and update operations restart, using
421 * the new table.
422 *
423 * Each bin transfer requires its bin lock, which can stall
424 * waiting for locks while resizing. However, because other
425 * threads can join in and help resize rather than contend for
426 * locks, average aggregate waits become shorter as resizing
427 * progresses. The transfer operation must also ensure that all
428 * accessible bins in both the old and new table are usable by any
429 * traversal. This is arranged by proceeding from the last bin
430 * (table.length - 1) up towards the first. Upon seeing a
431 * forwarding node, traversals (see class Traverser) arrange to
432 * move to the new table without revisiting nodes. However, to
433 * ensure that no intervening nodes are skipped, bin splitting can
434 * only begin after the associated reverse-forwarders are in
435 * place.
436 *
437 * The traversal scheme also applies to partial traversals of
438 * ranges of bins (via an alternate Traverser constructor)
439 * to support partitioned aggregate operations. Also, read-only
440 * operations give up if ever forwarded to a null table, which
441 * provides support for shutdown-style clearing, which is also not
442 * currently implemented.
443 *
444 * Lazy table initialization minimizes footprint until first use,
445 * and also avoids resizings when the first operation is from a
446 * putAll, constructor with map argument, or deserialization.
447 * These cases attempt to override the initial capacity settings,
448 * but harmlessly fail to take effect in cases of races.
449 *
450 * The element count is maintained using a specialization of
451 * LongAdder. We need to incorporate a specialization rather than
452 * just use a LongAdder in order to access implicit
453 * contention-sensing that leads to creation of multiple
454 * CounterCells. The counter mechanics avoid contention on
455 * updates but can encounter cache thrashing if read too
456 * frequently during concurrent access. To avoid reading so often,
457 * resizing under contention is attempted only upon adding to a
458 * bin already holding two or more nodes. Under uniform hash
459 * distributions, the probability of this occurring at threshold
460 * is around 13%, meaning that only about 1 in 8 puts check
461 * threshold (and after resizing, many fewer do so). The bulk
462 * putAll operation further reduces contention by only committing
463 * count updates upon these size checks.
464 *
465 * Maintaining API and serialization compatibility with previous
466 * versions of this class introduces several oddities. Mainly: We
467 * leave untouched but unused constructor arguments referring to
468 * concurrencyLevel. We accept a loadFactor constructor argument,
469 * but apply it only to initial table capacity (which is the only
470 * time that we can guarantee to honor it.) We also declare an
471 * unused "Segment" class that is instantiated in minimal form
472 * only when serializing.
473 */
474
475 /* ---------------- Constants -------------- */
476
477 /**
478 * The largest possible table capacity. This value must be
479 * exactly 1<<30 to stay within Java array allocation and indexing
480 * bounds for power of two table sizes, and is further required
481 * because the top two bits of 32bit hash fields are used for
482 * control purposes.
483 */
484 private static final int MAXIMUM_CAPACITY = 1 << 30;
485
486 /**
487 * The default initial table capacity. Must be a power of 2
488 * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
489 */
490 private static final int DEFAULT_CAPACITY = 16;
491
492 /**
493 * The largest possible (non-power of two) array size.
494 * Needed by toArray and related methods.
495 */
496 static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
497
498 /**
499 * The default concurrency level for this table. Unused but
500 * defined for compatibility with previous versions of this class.
501 */
502 private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
503
504 /**
505 * The load factor for this table. Overrides of this value in
506 * constructors affect only the initial table capacity. The
507 * actual floating point value isn't normally used -- it is
508 * simpler to use expressions such as {@code n - (n >>> 2)} for
509 * the associated resizing threshold.
510 */
511 private static final float LOAD_FACTOR = 0.75f;
512
513 /**
514 * The bin count threshold for using a tree rather than list for a
515 * bin. The value reflects the approximate break-even point for
516 * using tree-based operations.
517 */
518 private static final int TREE_THRESHOLD = 8;
519
520 /**
521 * Minimum number of rebinnings per transfer step. Ranges are
522 * subdivided to allow multiple resizer threads. This value
523 * serves as a lower bound to avoid resizers encountering
524 * excessive memory contention. The value should be at least
525 * DEFAULT_CAPACITY.
526 */
527 private static final int MIN_TRANSFER_STRIDE = 16;
528
529 /*
530 * Encodings for Node hash fields. See above for explanation.
531 */
532 static final int MOVED = 0x80000000; // hash field for forwarding nodes
533 static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
534
535 /** Number of CPUs, to place bounds on some sizings */
536 static final int NCPU = Runtime.getRuntime().availableProcessors();
537
538 /* ---------------- Counters -------------- */
539
540 // Adapted from LongAdder and Striped64.
541 // See their internal docs for explanation.
542
543 // A padded cell for distributing counts
544 static final class CounterCell {
545 volatile long p0, p1, p2, p3, p4, p5, p6;
546 volatile long value;
547 volatile long q0, q1, q2, q3, q4, q5, q6;
548 CounterCell(long x) { value = x; }
549 }
550
551 /**
552 * Holder for the thread-local hash code determining which
553 * CounterCell to use. The code is initialized via the
554 * counterHashCodeGenerator, but may be moved upon collisions.
555 */
556 static final class CounterHashCode {
557 int code;
558 }
559
560 /**
561 * Generates initial value for per-thread CounterHashCodes
562 */
563 static final AtomicInteger counterHashCodeGenerator = new AtomicInteger();
564
565 /**
566 * Increment for counterHashCodeGenerator. See class ThreadLocal
567 * for explanation.
568 */
569 static final int SEED_INCREMENT = 0x61c88647;
570
571 /**
572 * Per-thread counter hash codes. Shared across all instances
573 */
574 static final ThreadLocal<CounterHashCode> threadCounterHashCode =
575 new ThreadLocal<CounterHashCode>();
576
577 /* ---------------- Fields -------------- */
578
579 /**
580 * The array of bins. Lazily initialized upon first insertion.
581 * Size is always a power of two. Accessed directly by iterators.
582 */
583 transient volatile Node[] table;
584
585 /**
586 * The next table to use; non-null only while resizing.
587 */
588 private transient volatile Node[] nextTable;
589
590 /**
591 * Base counter value, used mainly when there is no contention,
592 * but also as a fallback during table initialization
593 * races. Updated via CAS.
594 */
595 private transient volatile long baseCount;
596
597 /**
598 * Table initialization and resizing control. When negative, the
599 * table is being initialized or resized: -1 for initialization,
600 * else -(1 + the number of active resizing threads). Otherwise,
601 * when table is null, holds the initial table size to use upon
602 * creation, or 0 for default. After initialization, holds the
603 * next element count value upon which to resize the table.
604 */
605 private transient volatile int sizeCtl;
606
607 /**
608 * The next table index (plus one) to split while resizing.
609 */
610 private transient volatile int transferIndex;
611
612 /**
613 * The least available table index to split while resizing.
614 */
615 private transient volatile int transferOrigin;
616
617 /**
618 * Spinlock (locked via CAS) used when resizing and/or creating Cells.
619 */
620 private transient volatile int counterBusy;
621
622 /**
623 * Table of counter cells. When non-null, size is a power of 2.
624 */
625 private transient volatile CounterCell[] counterCells;
626
627 // views
628 private transient KeySetView<K,V> keySet;
629 private transient ValuesView<K,V> values;
630 private transient EntrySetView<K,V> entrySet;
631
632 /** For serialization compatibility. Null unless serialized; see below */
633 private Segment<K,V>[] segments;
634
635 /* ---------------- Table element access -------------- */
636
637 /*
638 * Volatile access methods are used for table elements as well as
639 * elements of in-progress next table while resizing. Uses are
640 * null checked by callers, and implicitly bounds-checked, relying
641 * on the invariants that tab arrays have non-zero size, and all
642 * indices are masked with (tab.length - 1) which is never
643 * negative and always less than length. Note that, to be correct
644 * wrt arbitrary concurrency errors by users, bounds checks must
645 * operate on local variables, which accounts for some odd-looking
646 * inline assignments below.
647 */
648
649 static final Node tabAt(Node[] tab, int i) { // used by Traverser
650 return (Node)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
651 }
652
653 private static final boolean casTabAt(Node[] tab, int i, Node c, Node v) {
654 return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
655 }
656
657 private static final void setTabAt(Node[] tab, int i, Node v) {
658 U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v);
659 }
660
661 /* ---------------- Nodes -------------- */
662
663 /**
664 * Key-value entry. Note that this is never exported out as a
665 * user-visible Map.Entry (see MapEntry below). Nodes with a hash
666 * field of MOVED are special, and do not contain user keys or
667 * values. Otherwise, keys are never null, and null val fields
668 * indicate that a node is in the process of being deleted or
669 * created. For purposes of read-only access, a key may be read
670 * before a val, but can only be used after checking val to be
671 * non-null.
672 */
673 static class Node {
674 final int hash;
675 final Object key;
676 volatile Object val;
677 volatile Node next;
678
679 Node(int hash, Object key, Object val, Node next) {
680 this.hash = hash;
681 this.key = key;
682 this.val = val;
683 this.next = next;
684 }
685 }
686
687 /* ---------------- TreeBins -------------- */
688
689 /**
690 * Nodes for use in TreeBins
691 */
692 static final class TreeNode extends Node {
693 TreeNode parent; // red-black tree links
694 TreeNode left;
695 TreeNode right;
696 TreeNode prev; // needed to unlink next upon deletion
697 boolean red;
698
699 TreeNode(int hash, Object key, Object val, Node next, TreeNode parent) {
700 super(hash, key, val, next);
701 this.parent = parent;
702 }
703 }
704
705 /**
706 * A specialized form of red-black tree for use in bins
707 * whose size exceeds a threshold.
708 *
709 * TreeBins use a special form of comparison for search and
710 * related operations (which is the main reason we cannot use
711 * existing collections such as TreeMaps). TreeBins contain
712 * Comparable elements, but may contain others, as well as
713 * elements that are Comparable but not necessarily Comparable<T>
714 * for the same T, so we cannot invoke compareTo among them. To
715 * handle this, the tree is ordered primarily by hash value, then
716 * by getClass().getName() order, and then by Comparator order
717 * among elements of the same class. On lookup at a node, if
718 * elements are not comparable or compare as 0, both left and
719 * right children may need to be searched in the case of tied hash
720 * values. (This corresponds to the full list search that would be
721 * necessary if all elements were non-Comparable and had tied
722 * hashes.) The red-black balancing code is updated from
723 * pre-jdk-collections
724 * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
725 * based in turn on Cormen, Leiserson, and Rivest "Introduction to
726 * Algorithms" (CLR).
727 *
728 * TreeBins also use a locking discipline different from that of
729 * regular bins. Because they are forwarded via special MOVED
730 * nodes at bin heads (which can never change once established),
731 * we cannot use those nodes as locks. Instead, TreeBin
732 * extends AbstractQueuedSynchronizer to support a simple form of
733 * read-write lock. For update operations and table validation,
734 * the exclusive form of lock behaves in the same way as bin-head
735 * locks. However, lookups use shared read-lock mechanics to allow
736 * multiple readers in the absence of writers. Additionally,
737 * these lookups do not ever block: While the lock is not
738 * available, they proceed along the slow traversal path (via
739 * next-pointers) until the lock becomes available or the list is
740 * exhausted, whichever comes first. (These cases are not fast,
741 * but maximize aggregate expected throughput.) The AQS mechanics
742 * for doing this are straightforward. The lock state is held as
743 * AQS getState(). Read counts are negative; the write count (1)
744 * is positive. There are no signalling preferences among readers
745 * and writers. Since we don't need to export full Lock API, we
746 * just override the minimal AQS methods and use them directly.
747 */
748 static final class TreeBin extends AbstractQueuedSynchronizer {
749 private static final long serialVersionUID = 2249069246763182397L;
750 transient TreeNode root; // root of tree
751 transient TreeNode first; // head of next-pointer list
752
753 /* AQS overrides */
754 public final boolean isHeldExclusively() { return getState() > 0; }
755 public final boolean tryAcquire(int ignore) {
756 if (compareAndSetState(0, 1)) {
757 setExclusiveOwnerThread(Thread.currentThread());
758 return true;
759 }
760 return false;
761 }
762 public final boolean tryRelease(int ignore) {
763 setExclusiveOwnerThread(null);
764 setState(0);
765 return true;
766 }
767 public final int tryAcquireShared(int ignore) {
768 for (int c;;) {
769 if ((c = getState()) > 0)
770 return -1;
771 if (compareAndSetState(c, c - 1))
772 return 1;
773 }
774 }
775 public final boolean tryReleaseShared(int ignore) {
776 int c;
777 do {} while (!compareAndSetState(c = getState(), c + 1));
778 return c == -1;
779 }
780
781 /** From CLR */
782 private void rotateLeft(TreeNode p) {
783 if (p != null) {
784 TreeNode r = p.right, pp, rl;
785 if ((rl = p.right = r.left) != null)
786 rl.parent = p;
787 if ((pp = r.parent = p.parent) == null)
788 root = r;
789 else if (pp.left == p)
790 pp.left = r;
791 else
792 pp.right = r;
793 r.left = p;
794 p.parent = r;
795 }
796 }
797
798 /** From CLR */
799 private void rotateRight(TreeNode p) {
800 if (p != null) {
801 TreeNode l = p.left, pp, lr;
802 if ((lr = p.left = l.right) != null)
803 lr.parent = p;
804 if ((pp = l.parent = p.parent) == null)
805 root = l;
806 else if (pp.right == p)
807 pp.right = l;
808 else
809 pp.left = l;
810 l.right = p;
811 p.parent = l;
812 }
813 }
814
815 /**
816 * Returns the TreeNode (or null if not found) for the given key
817 * starting at given root.
818 */
819 @SuppressWarnings("unchecked") final TreeNode getTreeNode
820 (int h, Object k, TreeNode p) {
821 Class<?> c = k.getClass();
822 while (p != null) {
823 int dir, ph; Object pk; Class<?> pc;
824 if ((ph = p.hash) == h) {
825 if ((pk = p.key) == k || k.equals(pk))
826 return p;
827 if (c != (pc = pk.getClass()) ||
828 !(k instanceof Comparable) ||
829 (dir = ((Comparable)k).compareTo((Comparable)pk)) == 0) {
830 if ((dir = (c == pc) ? 0 :
831 c.getName().compareTo(pc.getName())) == 0) {
832 TreeNode r = null, pl, pr; // check both sides
833 if ((pr = p.right) != null && h >= pr.hash &&
834 (r = getTreeNode(h, k, pr)) != null)
835 return r;
836 else if ((pl = p.left) != null && h <= pl.hash)
837 dir = -1;
838 else // nothing there
839 return null;
840 }
841 }
842 }
843 else
844 dir = (h < ph) ? -1 : 1;
845 p = (dir > 0) ? p.right : p.left;
846 }
847 return null;
848 }
849
850 /**
851 * Wrapper for getTreeNode used by CHM.get. Tries to obtain
852 * read-lock to call getTreeNode, but if the lock is not
853 * available, searches along next links.
854 */
855 final Object getValue(int h, Object k) {
856 Node r = null;
857 int c = getState(); // Must read lock state first
858 for (Node e = first; e != null; e = e.next) {
859 if (c <= 0 && compareAndSetState(c, c - 1)) {
860 try {
861 r = getTreeNode(h, k, root);
862 } finally {
863 releaseShared(0);
864 }
865 break;
866 }
867 else if (e.hash == h && k.equals(e.key)) {
868 r = e;
869 break;
870 }
871 else
872 c = getState();
873 }
874 return r == null ? null : r.val;
875 }
876
877 /**
878 * Finds or adds a node.
879 * @return null if added
880 */
881 @SuppressWarnings("unchecked") final TreeNode putTreeNode
882 (int h, Object k, Object v) {
883 Class<?> c = k.getClass();
884 TreeNode pp = root, p = null;
885 int dir = 0;
886 while (pp != null) { // find existing node or leaf to insert at
887 int ph; Object pk; Class<?> pc;
888 p = pp;
889 if ((ph = p.hash) == h) {
890 if ((pk = p.key) == k || k.equals(pk))
891 return p;
892 if (c != (pc = pk.getClass()) ||
893 !(k instanceof Comparable) ||
894 (dir = ((Comparable)k).compareTo((Comparable)pk)) == 0) {
895 TreeNode s = null, r = null, pr;
896 if ((dir = (c == pc) ? 0 :
897 c.getName().compareTo(pc.getName())) == 0) {
898 if ((pr = p.right) != null && h >= pr.hash &&
899 (r = getTreeNode(h, k, pr)) != null)
900 return r;
901 else // continue left
902 dir = -1;
903 }
904 else if ((pr = p.right) != null && h >= pr.hash)
905 s = pr;
906 if (s != null && (r = getTreeNode(h, k, s)) != null)
907 return r;
908 }
909 }
910 else
911 dir = (h < ph) ? -1 : 1;
912 pp = (dir > 0) ? p.right : p.left;
913 }
914
915 TreeNode f = first;
916 TreeNode x = first = new TreeNode(h, k, v, f, p);
917 if (p == null)
918 root = x;
919 else { // attach and rebalance; adapted from CLR
920 TreeNode xp, xpp;
921 if (f != null)
922 f.prev = x;
923 if (dir <= 0)
924 p.left = x;
925 else
926 p.right = x;
927 x.red = true;
928 while (x != null && (xp = x.parent) != null && xp.red &&
929 (xpp = xp.parent) != null) {
930 TreeNode xppl = xpp.left;
931 if (xp == xppl) {
932 TreeNode y = xpp.right;
933 if (y != null && y.red) {
934 y.red = false;
935 xp.red = false;
936 xpp.red = true;
937 x = xpp;
938 }
939 else {
940 if (x == xp.right) {
941 rotateLeft(x = xp);
942 xpp = (xp = x.parent) == null ? null : xp.parent;
943 }
944 if (xp != null) {
945 xp.red = false;
946 if (xpp != null) {
947 xpp.red = true;
948 rotateRight(xpp);
949 }
950 }
951 }
952 }
953 else {
954 TreeNode y = xppl;
955 if (y != null && y.red) {
956 y.red = false;
957 xp.red = false;
958 xpp.red = true;
959 x = xpp;
960 }
961 else {
962 if (x == xp.left) {
963 rotateRight(x = xp);
964 xpp = (xp = x.parent) == null ? null : xp.parent;
965 }
966 if (xp != null) {
967 xp.red = false;
968 if (xpp != null) {
969 xpp.red = true;
970 rotateLeft(xpp);
971 }
972 }
973 }
974 }
975 }
976 TreeNode r = root;
977 if (r != null && r.red)
978 r.red = false;
979 }
980 return null;
981 }
982
983 /**
984 * Removes the given node, which must be present before this
985 * call. This is messier than typical red-black deletion code
986 * because we cannot swap the contents of an interior node
987 * with a leaf successor that is pinned by "next" pointers
988 * that are accessible independently of lock. So instead we
989 * swap the tree linkages.
990 */
991 final void deleteTreeNode(TreeNode p) {
992 TreeNode next = (TreeNode)p.next; // unlink traversal pointers
993 TreeNode pred = p.prev;
994 if (pred == null)
995 first = next;
996 else
997 pred.next = next;
998 if (next != null)
999 next.prev = pred;
1000 TreeNode replacement;
1001 TreeNode pl = p.left;
1002 TreeNode pr = p.right;
1003 if (pl != null && pr != null) {
1004 TreeNode s = pr, sl;
1005 while ((sl = s.left) != null) // find successor
1006 s = sl;
1007 boolean c = s.red; s.red = p.red; p.red = c; // swap colors
1008 TreeNode sr = s.right;
1009 TreeNode pp = p.parent;
1010 if (s == pr) { // p was s's direct parent
1011 p.parent = s;
1012 s.right = p;
1013 }
1014 else {
1015 TreeNode sp = s.parent;
1016 if ((p.parent = sp) != null) {
1017 if (s == sp.left)
1018 sp.left = p;
1019 else
1020 sp.right = p;
1021 }
1022 if ((s.right = pr) != null)
1023 pr.parent = s;
1024 }
1025 p.left = null;
1026 if ((p.right = sr) != null)
1027 sr.parent = p;
1028 if ((s.left = pl) != null)
1029 pl.parent = s;
1030 if ((s.parent = pp) == null)
1031 root = s;
1032 else if (p == pp.left)
1033 pp.left = s;
1034 else
1035 pp.right = s;
1036 replacement = sr;
1037 }
1038 else
1039 replacement = (pl != null) ? pl : pr;
1040 TreeNode pp = p.parent;
1041 if (replacement == null) {
1042 if (pp == null) {
1043 root = null;
1044 return;
1045 }
1046 replacement = p;
1047 }
1048 else {
1049 replacement.parent = pp;
1050 if (pp == null)
1051 root = replacement;
1052 else if (p == pp.left)
1053 pp.left = replacement;
1054 else
1055 pp.right = replacement;
1056 p.left = p.right = p.parent = null;
1057 }
1058 if (!p.red) { // rebalance, from CLR
1059 TreeNode x = replacement;
1060 while (x != null) {
1061 TreeNode xp, xpl;
1062 if (x.red || (xp = x.parent) == null) {
1063 x.red = false;
1064 break;
1065 }
1066 if (x == (xpl = xp.left)) {
1067 TreeNode sib = xp.right;
1068 if (sib != null && sib.red) {
1069 sib.red = false;
1070 xp.red = true;
1071 rotateLeft(xp);
1072 sib = (xp = x.parent) == null ? null : xp.right;
1073 }
1074 if (sib == null)
1075 x = xp;
1076 else {
1077 TreeNode sl = sib.left, sr = sib.right;
1078 if ((sr == null || !sr.red) &&
1079 (sl == null || !sl.red)) {
1080 sib.red = true;
1081 x = xp;
1082 }
1083 else {
1084 if (sr == null || !sr.red) {
1085 if (sl != null)
1086 sl.red = false;
1087 sib.red = true;
1088 rotateRight(sib);
1089 sib = (xp = x.parent) == null ?
1090 null : xp.right;
1091 }
1092 if (sib != null) {
1093 sib.red = (xp == null) ? false : xp.red;
1094 if ((sr = sib.right) != null)
1095 sr.red = false;
1096 }
1097 if (xp != null) {
1098 xp.red = false;
1099 rotateLeft(xp);
1100 }
1101 x = root;
1102 }
1103 }
1104 }
1105 else { // symmetric
1106 TreeNode sib = xpl;
1107 if (sib != null && sib.red) {
1108 sib.red = false;
1109 xp.red = true;
1110 rotateRight(xp);
1111 sib = (xp = x.parent) == null ? null : xp.left;
1112 }
1113 if (sib == null)
1114 x = xp;
1115 else {
1116 TreeNode sl = sib.left, sr = sib.right;
1117 if ((sl == null || !sl.red) &&
1118 (sr == null || !sr.red)) {
1119 sib.red = true;
1120 x = xp;
1121 }
1122 else {
1123 if (sl == null || !sl.red) {
1124 if (sr != null)
1125 sr.red = false;
1126 sib.red = true;
1127 rotateLeft(sib);
1128 sib = (xp = x.parent) == null ?
1129 null : xp.left;
1130 }
1131 if (sib != null) {
1132 sib.red = (xp == null) ? false : xp.red;
1133 if ((sl = sib.left) != null)
1134 sl.red = false;
1135 }
1136 if (xp != null) {
1137 xp.red = false;
1138 rotateRight(xp);
1139 }
1140 x = root;
1141 }
1142 }
1143 }
1144 }
1145 }
1146 if (p == replacement && (pp = p.parent) != null) {
1147 if (p == pp.left) // detach pointers
1148 pp.left = null;
1149 else if (p == pp.right)
1150 pp.right = null;
1151 p.parent = null;
1152 }
1153 }
1154 }
1155
1156 /* ---------------- Collision reduction methods -------------- */
1157
1158 /**
1159 * Spreads higher bits to lower, and also forces top bit to 0.
1160 * Because the table uses power-of-two masking, sets of hashes
1161 * that vary only in bits above the current mask will always
1162 * collide. (Among known examples are sets of Float keys holding
1163 * consecutive whole numbers in small tables.) To counter this,
1164 * we apply a transform that spreads the impact of higher bits
1165 * downward. There is a tradeoff between speed, utility, and
1166 * quality of bit-spreading. Because many common sets of hashes
1167 * are already reasonably distributed across bits (so don't benefit
1168 * from spreading), and because we use trees to handle large sets
1169 * of collisions in bins, we don't need excessively high quality.
1170 */
1171 private static final int spread(int h) {
1172 h ^= (h >>> 18) ^ (h >>> 12);
1173 return (h ^ (h >>> 10)) & HASH_BITS;
1174 }
1175
1176 /**
1177 * Replaces a list bin with a tree bin if key is comparable. Call
1178 * only when locked.
1179 */
1180 private final void replaceWithTreeBin(Node[] tab, int index, Object key) {
1181 if (key instanceof Comparable) {
1182 TreeBin t = new TreeBin();
1183 for (Node e = tabAt(tab, index); e != null; e = e.next)
1184 t.putTreeNode(e.hash, e.key, e.val);
1185 setTabAt(tab, index, new Node(MOVED, t, null, null));
1186 }
1187 }
1188
1189 /* ---------------- Internal access and update methods -------------- */
1190
1191 /** Implementation for get and containsKey */
1192 @SuppressWarnings("unchecked") private final V internalGet(Object k) {
1193 int h = spread(k.hashCode());
1194 retry: for (Node[] tab = table; tab != null;) {
1195 Node e; Object ek, ev; int eh; // locals to read fields once
1196 for (e = tabAt(tab, (tab.length - 1) & h); e != null; e = e.next) {
1197 if ((eh = e.hash) < 0) {
1198 if ((ek = e.key) instanceof TreeBin) // search TreeBin
1199 return (V)((TreeBin)ek).getValue(h, k);
1200 else { // restart with new table
1201 tab = (Node[])ek;
1202 continue retry;
1203 }
1204 }
1205 else if (eh == h && (ev = e.val) != null &&
1206 ((ek = e.key) == k || k.equals(ek)))
1207 return (V)ev;
1208 }
1209 break;
1210 }
1211 return null;
1212 }
1213
1214 /**
1215 * Implementation for the four public remove/replace methods:
1216 * Replaces node value with v, conditional upon match of cv if
1217 * non-null. If resulting value is null, delete.
1218 */
1219 @SuppressWarnings("unchecked") private final V internalReplace
1220 (Object k, V v, Object cv) {
1221 int h = spread(k.hashCode());
1222 Object oldVal = null;
1223 for (Node[] tab = table;;) {
1224 Node f; int i, fh; Object fk;
1225 if (tab == null ||
1226 (f = tabAt(tab, i = (tab.length - 1) & h)) == null)
1227 break;
1228 else if ((fh = f.hash) < 0) {
1229 if ((fk = f.key) instanceof TreeBin) {
1230 TreeBin t = (TreeBin)fk;
1231 boolean validated = false;
1232 boolean deleted = false;
1233 t.acquire(0);
1234 try {
1235 if (tabAt(tab, i) == f) {
1236 validated = true;
1237 TreeNode p = t.getTreeNode(h, k, t.root);
1238 if (p != null) {
1239 Object pv = p.val;
1240 if (cv == null || cv == pv || cv.equals(pv)) {
1241 oldVal = pv;
1242 if ((p.val = v) == null) {
1243 deleted = true;
1244 t.deleteTreeNode(p);
1245 }
1246 }
1247 }
1248 }
1249 } finally {
1250 t.release(0);
1251 }
1252 if (validated) {
1253 if (deleted)
1254 addCount(-1L, -1);
1255 break;
1256 }
1257 }
1258 else
1259 tab = (Node[])fk;
1260 }
1261 else if (fh != h && f.next == null) // precheck
1262 break; // rules out possible existence
1263 else {
1264 boolean validated = false;
1265 boolean deleted = false;
1266 synchronized(f) {
1267 if (tabAt(tab, i) == f) {
1268 validated = true;
1269 for (Node e = f, pred = null;;) {
1270 Object ek, ev;
1271 if (e.hash == h &&
1272 ((ev = e.val) != null) &&
1273 ((ek = e.key) == k || k.equals(ek))) {
1274 if (cv == null || cv == ev || cv.equals(ev)) {
1275 oldVal = ev;
1276 if ((e.val = v) == null) {
1277 deleted = true;
1278 Node en = e.next;
1279 if (pred != null)
1280 pred.next = en;
1281 else
1282 setTabAt(tab, i, en);
1283 }
1284 }
1285 break;
1286 }
1287 pred = e;
1288 if ((e = e.next) == null)
1289 break;
1290 }
1291 }
1292 }
1293 if (validated) {
1294 if (deleted)
1295 addCount(-1L, -1);
1296 break;
1297 }
1298 }
1299 }
1300 return (V)oldVal;
1301 }
1302
1303 /*
1304 * Internal versions of insertion methods
1305 * All have the same basic structure as the first (internalPut):
1306 * 1. If table uninitialized, create
1307 * 2. If bin empty, try to CAS new node
1308 * 3. If bin stale, use new table
1309 * 4. If bin converted to TreeBin, validate and relay to TreeBin methods
1310 * 5. Lock and validate; if valid, scan and add or update
1311 *
1312 * The putAll method differs mainly in attempting to pre-allocate
1313 * enough table space, and also more lazily performs count updates
1314 * and checks.
1315 *
1316 * Most of the function-accepting methods can't be factored nicely
1317 * because they require different functional forms, so instead
1318 * sprawl out similar mechanics.
1319 */
1320
1321 /** Implementation for put and putIfAbsent */
1322 @SuppressWarnings("unchecked") private final V internalPut
1323 (K k, V v, boolean onlyIfAbsent) {
1324 if (k == null || v == null) throw new NullPointerException();
1325 int h = spread(k.hashCode());
1326 int len = 0;
1327 for (Node[] tab = table;;) {
1328 int i, fh; Node f; Object fk, fv;
1329 if (tab == null)
1330 tab = initTable();
1331 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1332 if (casTabAt(tab, i, null, new Node(h, k, v, null)))
1333 break; // no lock when adding to empty bin
1334 }
1335 else if ((fh = f.hash) < 0) {
1336 if ((fk = f.key) instanceof TreeBin) {
1337 TreeBin t = (TreeBin)fk;
1338 Object oldVal = null;
1339 t.acquire(0);
1340 try {
1341 if (tabAt(tab, i) == f) {
1342 len = 2;
1343 TreeNode p = t.putTreeNode(h, k, v);
1344 if (p != null) {
1345 oldVal = p.val;
1346 if (!onlyIfAbsent)
1347 p.val = v;
1348 }
1349 }
1350 } finally {
1351 t.release(0);
1352 }
1353 if (len != 0) {
1354 if (oldVal != null)
1355 return (V)oldVal;
1356 break;
1357 }
1358 }
1359 else
1360 tab = (Node[])fk;
1361 }
1362 else if (onlyIfAbsent && fh == h && (fv = f.val) != null &&
1363 ((fk = f.key) == k || k.equals(fk))) // peek while nearby
1364 return (V)fv;
1365 else {
1366 Object oldVal = null;
1367 synchronized(f) {
1368 if (tabAt(tab, i) == f) {
1369 len = 1;
1370 for (Node e = f;; ++len) {
1371 Object ek, ev;
1372 if (e.hash == h &&
1373 (ev = e.val) != null &&
1374 ((ek = e.key) == k || k.equals(ek))) {
1375 oldVal = ev;
1376 if (!onlyIfAbsent)
1377 e.val = v;
1378 break;
1379 }
1380 Node last = e;
1381 if ((e = e.next) == null) {
1382 last.next = new Node(h, k, v, null);
1383 if (len >= TREE_THRESHOLD)
1384 replaceWithTreeBin(tab, i, k);
1385 break;
1386 }
1387 }
1388 }
1389 }
1390 if (len != 0) {
1391 if (oldVal != null)
1392 return (V)oldVal;
1393 break;
1394 }
1395 }
1396 }
1397 addCount(1L, len);
1398 return null;
1399 }
1400
1401 /** Implementation for computeIfAbsent */
1402 @SuppressWarnings("unchecked") private final V internalComputeIfAbsent
1403 (K k, Fun<? super K, ?> mf) {
1404 if (k == null || mf == null)
1405 throw new NullPointerException();
1406 int h = spread(k.hashCode());
1407 Object val = null;
1408 int len = 0;
1409 for (Node[] tab = table;;) {
1410 Node f; int i; Object fk;
1411 if (tab == null)
1412 tab = initTable();
1413 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1414 Node node = new Node(h, k, null, null);
1415 synchronized(node) {
1416 if (casTabAt(tab, i, null, node)) {
1417 len = 1;
1418 try {
1419 if ((val = mf.apply(k)) != null)
1420 node.val = val;
1421 } finally {
1422 if (val == null)
1423 setTabAt(tab, i, null);
1424 }
1425 }
1426 }
1427 if (len != 0)
1428 break;
1429 }
1430 else if (f.hash < 0) {
1431 if ((fk = f.key) instanceof TreeBin) {
1432 TreeBin t = (TreeBin)fk;
1433 boolean added = false;
1434 t.acquire(0);
1435 try {
1436 if (tabAt(tab, i) == f) {
1437 len = 1;
1438 TreeNode p = t.getTreeNode(h, k, t.root);
1439 if (p != null)
1440 val = p.val;
1441 else if ((val = mf.apply(k)) != null) {
1442 added = true;
1443 len = 2;
1444 t.putTreeNode(h, k, val);
1445 }
1446 }
1447 } finally {
1448 t.release(0);
1449 }
1450 if (len != 0) {
1451 if (!added)
1452 return (V)val;
1453 break;
1454 }
1455 }
1456 else
1457 tab = (Node[])fk;
1458 }
1459 else {
1460 for (Node e = f; e != null; e = e.next) { // prescan
1461 Object ek, ev;
1462 if (e.hash == h && (ev = e.val) != null &&
1463 ((ek = e.key) == k || k.equals(ek)))
1464 return (V)ev;
1465 }
1466 boolean added = false;
1467 synchronized(f) {
1468 if (tabAt(tab, i) == f) {
1469 len = 1;
1470 for (Node e = f;; ++len) {
1471 Object ek, ev;
1472 if (e.hash == h &&
1473 (ev = e.val) != null &&
1474 ((ek = e.key) == k || k.equals(ek))) {
1475 val = ev;
1476 break;
1477 }
1478 Node last = e;
1479 if ((e = e.next) == null) {
1480 if ((val = mf.apply(k)) != null) {
1481 added = true;
1482 last.next = new Node(h, k, val, null);
1483 if (len >= TREE_THRESHOLD)
1484 replaceWithTreeBin(tab, i, k);
1485 }
1486 break;
1487 }
1488 }
1489 }
1490 }
1491 if (len != 0) {
1492 if (!added)
1493 return (V)val;
1494 break;
1495 }
1496 }
1497 }
1498 if (val != null)
1499 addCount(1L, len);
1500 return (V)val;
1501 }
1502
1503 /** Implementation for compute */
1504 @SuppressWarnings("unchecked") private final V internalCompute
1505 (K k, boolean onlyIfPresent,
1506 BiFun<? super K, ? super V, ? extends V> mf) {
1507 if (k == null || mf == null)
1508 throw new NullPointerException();
1509 int h = spread(k.hashCode());
1510 Object val = null;
1511 int delta = 0;
1512 int len = 0;
1513 for (Node[] tab = table;;) {
1514 Node f; int i, fh; Object fk;
1515 if (tab == null)
1516 tab = initTable();
1517 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1518 if (onlyIfPresent)
1519 break;
1520 Node node = new Node(h, k, null, null);
1521 synchronized(node) {
1522 if (casTabAt(tab, i, null, node)) {
1523 try {
1524 len = 1;
1525 if ((val = mf.apply(k, null)) != null) {
1526 node.val = val;
1527 delta = 1;
1528 }
1529 } finally {
1530 if (delta == 0)
1531 setTabAt(tab, i, null);
1532 }
1533 }
1534 }
1535 if (len != 0)
1536 break;
1537 }
1538 else if ((fh = f.hash) < 0) {
1539 if ((fk = f.key) instanceof TreeBin) {
1540 TreeBin t = (TreeBin)fk;
1541 t.acquire(0);
1542 try {
1543 if (tabAt(tab, i) == f) {
1544 len = 1;
1545 TreeNode p = t.getTreeNode(h, k, t.root);
1546 if (p == null && onlyIfPresent)
1547 break;
1548 Object pv = (p == null) ? null : p.val;
1549 if ((val = mf.apply(k, (V)pv)) != null) {
1550 if (p != null)
1551 p.val = val;
1552 else {
1553 len = 2;
1554 delta = 1;
1555 t.putTreeNode(h, k, val);
1556 }
1557 }
1558 else if (p != null) {
1559 delta = -1;
1560 t.deleteTreeNode(p);
1561 }
1562 }
1563 } finally {
1564 t.release(0);
1565 }
1566 if (len != 0)
1567 break;
1568 }
1569 else
1570 tab = (Node[])fk;
1571 }
1572 else {
1573 synchronized(f) {
1574 if (tabAt(tab, i) == f) {
1575 len = 1;
1576 for (Node e = f, pred = null;; ++len) {
1577 Object ek, ev;
1578 if (e.hash == h &&
1579 (ev = e.val) != null &&
1580 ((ek = e.key) == k || k.equals(ek))) {
1581 val = mf.apply(k, (V)ev);
1582 if (val != null)
1583 e.val = val;
1584 else {
1585 delta = -1;
1586 Node en = e.next;
1587 if (pred != null)
1588 pred.next = en;
1589 else
1590 setTabAt(tab, i, en);
1591 }
1592 break;
1593 }
1594 pred = e;
1595 if ((e = e.next) == null) {
1596 if (!onlyIfPresent &&
1597 (val = mf.apply(k, null)) != null) {
1598 pred.next = new Node(h, k, val, null);
1599 delta = 1;
1600 if (len >= TREE_THRESHOLD)
1601 replaceWithTreeBin(tab, i, k);
1602 }
1603 break;
1604 }
1605 }
1606 }
1607 }
1608 if (len != 0)
1609 break;
1610 }
1611 }
1612 if (delta != 0)
1613 addCount((long)delta, len);
1614 return (V)val;
1615 }
1616
1617 /** Implementation for merge */
1618 @SuppressWarnings("unchecked") private final V internalMerge
1619 (K k, V v, BiFun<? super V, ? super V, ? extends V> mf) {
1620 if (k == null || v == null || mf == null)
1621 throw new NullPointerException();
1622 int h = spread(k.hashCode());
1623 Object val = null;
1624 int delta = 0;
1625 int len = 0;
1626 for (Node[] tab = table;;) {
1627 int i; Node f; Object fk, fv;
1628 if (tab == null)
1629 tab = initTable();
1630 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
1631 if (casTabAt(tab, i, null, new Node(h, k, v, null))) {
1632 delta = 1;
1633 val = v;
1634 break;
1635 }
1636 }
1637 else if (f.hash < 0) {
1638 if ((fk = f.key) instanceof TreeBin) {
1639 TreeBin t = (TreeBin)fk;
1640 t.acquire(0);
1641 try {
1642 if (tabAt(tab, i) == f) {
1643 len = 1;
1644 TreeNode p = t.getTreeNode(h, k, t.root);
1645 val = (p == null) ? v : mf.apply((V)p.val, v);
1646 if (val != null) {
1647 if (p != null)
1648 p.val = val;
1649 else {
1650 len = 2;
1651 delta = 1;
1652 t.putTreeNode(h, k, val);
1653 }
1654 }
1655 else if (p != null) {
1656 delta = -1;
1657 t.deleteTreeNode(p);
1658 }
1659 }
1660 } finally {
1661 t.release(0);
1662 }
1663 if (len != 0)
1664 break;
1665 }
1666 else
1667 tab = (Node[])fk;
1668 }
1669 else {
1670 synchronized(f) {
1671 if (tabAt(tab, i) == f) {
1672 len = 1;
1673 for (Node e = f, pred = null;; ++len) {
1674 Object ek, ev;
1675 if (e.hash == h &&
1676 (ev = e.val) != null &&
1677 ((ek = e.key) == k || k.equals(ek))) {
1678 val = mf.apply((V)ev, v);
1679 if (val != null)
1680 e.val = val;
1681 else {
1682 delta = -1;
1683 Node en = e.next;
1684 if (pred != null)
1685 pred.next = en;
1686 else
1687 setTabAt(tab, i, en);
1688 }
1689 break;
1690 }
1691 pred = e;
1692 if ((e = e.next) == null) {
1693 val = v;
1694 pred.next = new Node(h, k, val, null);
1695 delta = 1;
1696 if (len >= TREE_THRESHOLD)
1697 replaceWithTreeBin(tab, i, k);
1698 break;
1699 }
1700 }
1701 }
1702 }
1703 if (len != 0)
1704 break;
1705 }
1706 }
1707 if (delta != 0)
1708 addCount((long)delta, len);
1709 return (V)val;
1710 }
1711
1712 /** Implementation for putAll */
1713 private final void internalPutAll(Map<?, ?> m) {
1714 tryPresize(m.size());
1715 long delta = 0L; // number of uncommitted additions
1716 boolean npe = false; // to throw exception on exit for nulls
1717 try { // to clean up counts on other exceptions
1718 for (Map.Entry<?, ?> entry : m.entrySet()) {
1719 Object k, v;
1720 if (entry == null || (k = entry.getKey()) == null ||
1721 (v = entry.getValue()) == null) {
1722 npe = true;
1723 break;
1724 }
1725 int h = spread(k.hashCode());
1726 for (Node[] tab = table;;) {
1727 int i; Node f; int fh; Object fk;
1728 if (tab == null)
1729 tab = initTable();
1730 else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null){
1731 if (casTabAt(tab, i, null, new Node(h, k, v, null))) {
1732 ++delta;
1733 break;
1734 }
1735 }
1736 else if ((fh = f.hash) < 0) {
1737 if ((fk = f.key) instanceof TreeBin) {
1738 TreeBin t = (TreeBin)fk;
1739 boolean validated = false;
1740 t.acquire(0);
1741 try {
1742 if (tabAt(tab, i) == f) {
1743 validated = true;
1744 TreeNode p = t.getTreeNode(h, k, t.root);
1745 if (p != null)
1746 p.val = v;
1747 else {
1748 t.putTreeNode(h, k, v);
1749 ++delta;
1750 }
1751 }
1752 } finally {
1753 t.release(0);
1754 }
1755 if (validated)
1756 break;
1757 }
1758 else
1759 tab = (Node[])fk;
1760 }
1761 else {
1762 int len = 0;
1763 synchronized(f) {
1764 if (tabAt(tab, i) == f) {
1765 len = 1;
1766 for (Node e = f;; ++len) {
1767 Object ek, ev;
1768 if (e.hash == h &&
1769 (ev = e.val) != null &&
1770 ((ek = e.key) == k || k.equals(ek))) {
1771 e.val = v;
1772 break;
1773 }
1774 Node last = e;
1775 if ((e = e.next) == null) {
1776 ++delta;
1777 last.next = new Node(h, k, v, null);
1778 if (len >= TREE_THRESHOLD)
1779 replaceWithTreeBin(tab, i, k);
1780 break;
1781 }
1782 }
1783 }
1784 }
1785 if (len != 0) {
1786 if (len > 1)
1787 addCount(delta, len);
1788 break;
1789 }
1790 }
1791 }
1792 }
1793 } finally {
1794 if (delta != 0L)
1795 addCount(delta, 2);
1796 }
1797 if (npe)
1798 throw new NullPointerException();
1799 }
1800
1801 /**
1802 * Implementation for clear. Steps through each bin, removing all
1803 * nodes.
1804 */
1805 private final void internalClear() {
1806 long delta = 0L; // negative number of deletions
1807 int i = 0;
1808 Node[] tab = table;
1809 while (tab != null && i < tab.length) {
1810 Node f = tabAt(tab, i);
1811 if (f == null)
1812 ++i;
1813 else if (f.hash < 0) {
1814 Object fk;
1815 if ((fk = f.key) instanceof TreeBin) {
1816 TreeBin t = (TreeBin)fk;
1817 t.acquire(0);
1818 try {
1819 if (tabAt(tab, i) == f) {
1820 for (Node p = t.first; p != null; p = p.next) {
1821 if (p.val != null) { // (currently always true)
1822 p.val = null;
1823 --delta;
1824 }
1825 }
1826 t.first = null;
1827 t.root = null;
1828 ++i;
1829 }
1830 } finally {
1831 t.release(0);
1832 }
1833 }
1834 else
1835 tab = (Node[])fk;
1836 }
1837 else {
1838 synchronized(f) {
1839 if (tabAt(tab, i) == f) {
1840 for (Node e = f; e != null; e = e.next) {
1841 if (e.val != null) { // (currently always true)
1842 e.val = null;
1843 --delta;
1844 }
1845 }
1846 setTabAt(tab, i, null);
1847 ++i;
1848 }
1849 }
1850 }
1851 }
1852 if (delta != 0L)
1853 addCount(delta, -1);
1854 }
1855
1856 /* ---------------- Table Initialization and Resizing -------------- */
1857
1858 /**
1859 * Returns a power of two table size for the given desired capacity.
1860 * See Hacker's Delight, sec 3.2
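 *
 * <p>Illustrative values (a quick sanity check, following directly from
 * the bit smearing below):
 * <pre> {@code
 * tableSizeFor(1)   == 1
 * tableSizeFor(16)  == 16
 * tableSizeFor(17)  == 32
 * tableSizeFor(100) == 128}</pre>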
1861 */
1862 private static final int tableSizeFor(int c) {
1863 int n = c - 1;
1864 n |= n >>> 1;
1865 n |= n >>> 2;
1866 n |= n >>> 4;
1867 n |= n >>> 8;
1868 n |= n >>> 16;
1869 return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
1870 }
1871
1872 /**
1873 * Initializes table, using the size recorded in sizeCtl.
1874 */
1875 private final Node[] initTable() {
1876 Node[] tab; int sc;
1877 while ((tab = table) == null) {
1878 if ((sc = sizeCtl) < 0)
1879 Thread.yield(); // lost initialization race; just spin
1880 else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
1881 try {
1882 if ((tab = table) == null) {
1883 int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
1884 tab = table = new Node[n];
1885 sc = n - (n >>> 2);
1886 }
1887 } finally {
1888 sizeCtl = sc;
1889 }
1890 break;
1891 }
1892 }
1893 return tab;
1894 }
1895
1896 /**
1897 * Adds to count, and if table is too small and not already
1898 * resizing, initiates transfer. If already resizing, helps
1899 * perform transfer if work is available. Rechecks occupancy
1900 * after a transfer to see if another resize is already needed
1901 * because resizings are lagging additions.
1902 *
1903 * @param x the count to add
1904 * @param check if <0, don't check resize, if <= 1 only check if uncontended
1905 */
1906 private final void addCount(long x, int check) {
1907 CounterCell[] as; long b, s;
1908 if ((as = counterCells) != null ||
1909 !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
1910 CounterHashCode hc; CounterCell a; long v; int m;
1911 boolean uncontended = true;
1912 if ((hc = threadCounterHashCode.get()) == null ||
1913 as == null || (m = as.length - 1) < 0 ||
1914 (a = as[m & hc.code]) == null ||
1915 !(uncontended =
1916 U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
1917 fullAddCount(x, hc, uncontended);
1918 return;
1919 }
1920 if (check <= 1)
1921 return;
1922 s = sumCount();
1923 }
1924 if (check >= 0) {
1925 Node[] tab, nt; int sc;
1926 while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
1927 tab.length < MAXIMUM_CAPACITY) {
1928 if (sc < 0) {
1929 if (sc == -1 || transferIndex <= transferOrigin ||
1930 (nt = nextTable) == null)
1931 break;
1932 if (U.compareAndSwapInt(this, SIZECTL, sc, sc - 1))
1933 transfer(tab, nt);
1934 }
1935 else if (U.compareAndSwapInt(this, SIZECTL, sc, -2))
1936 transfer(tab, null);
1937 s = sumCount();
1938 }
1939 }
1940 }
1941
1942 /**
1943 * Tries to presize table to accommodate the given number of elements.
1944 *
1945 * @param size number of elements (doesn't need to be perfectly accurate)
1946 */
1947 private final void tryPresize(int size) {
1948 int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
1949 tableSizeFor(size + (size >>> 1) + 1);
1950 int sc;
1951 while ((sc = sizeCtl) >= 0) {
1952 Node[] tab = table; int n;
1953 if (tab == null || (n = tab.length) == 0) {
1954 n = (sc > c) ? sc : c;
1955 if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
1956 try {
1957 if (table == tab) {
1958 table = new Node[n];
1959 sc = n - (n >>> 2);
1960 }
1961 } finally {
1962 sizeCtl = sc;
1963 }
1964 }
1965 }
1966 else if (c <= sc || n >= MAXIMUM_CAPACITY)
1967 break;
1968 else if (tab == table &&
1969 U.compareAndSwapInt(this, SIZECTL, sc, -2))
1970 transfer(tab, null);
1971 }
1972 }
1973
1974 /*
1975 * Moves and/or copies the nodes in each bin to new table. See
1976 * above for explanation.
1977 */
1978 private final void transfer(Node[] tab, Node[] nextTab) {
1979 int n = tab.length, stride;
1980 if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
1981 stride = MIN_TRANSFER_STRIDE; // subdivide range
1982 if (nextTab == null) { // initiating
1983 try {
1984 nextTab = new Node[n << 1];
1985 } catch(Throwable ex) { // try to cope with OOME
1986 sizeCtl = Integer.MAX_VALUE;
1987 return;
1988 }
1989 nextTable = nextTab;
1990 transferOrigin = n;
1991 transferIndex = n;
1992 Node rev = new Node(MOVED, tab, null, null);
1993 for (int k = n; k > 0;) { // progressively reveal ready slots
1994 int nextk = k > stride? k - stride : 0;
1995 for (int m = nextk; m < k; ++m)
1996 nextTab[m] = rev;
1997 for (int m = n + nextk; m < n + k; ++m)
1998 nextTab[m] = rev;
1999 U.putOrderedInt(this, TRANSFERORIGIN, k = nextk);
2000 }
2001 }
2002 int nextn = nextTab.length;
2003 Node fwd = new Node(MOVED, nextTab, null, null);
2004 boolean advance = true;
2005 for (int i = 0, bound = 0;;) {
2006 int nextIndex, nextBound; Node f; Object fk;
2007 while (advance) {
2008 if (--i >= bound)
2009 advance = false;
2010 else if ((nextIndex = transferIndex) <= transferOrigin) {
2011 i = -1;
2012 advance = false;
2013 }
2014 else if (U.compareAndSwapInt
2015 (this, TRANSFERINDEX, nextIndex,
2016 nextBound = (nextIndex > stride?
2017 nextIndex - stride : 0))) {
2018 bound = nextBound;
2019 i = nextIndex - 1;
2020 advance = false;
2021 }
2022 }
2023 if (i < 0 || i >= n || i + n >= nextn) {
2024 for (int sc;;) {
2025 if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, ++sc)) {
2026 if (sc == -1) {
2027 nextTable = null;
2028 table = nextTab;
2029 sizeCtl = (n << 1) - (n >>> 1);
2030 }
2031 return;
2032 }
2033 }
2034 }
2035 else if ((f = tabAt(tab, i)) == null) {
2036 if (casTabAt(tab, i, null, fwd)) {
2037 setTabAt(nextTab, i, null);
2038 setTabAt(nextTab, i + n, null);
2039 advance = true;
2040 }
2041 }
2042 else if (f.hash >= 0) {
2043 synchronized(f) {
2044 if (tabAt(tab, i) == f) {
2045 int runBit = f.hash & n;
2046 Node lastRun = f, lo = null, hi = null;
2047 for (Node p = f.next; p != null; p = p.next) {
2048 int b = p.hash & n;
2049 if (b != runBit) {
2050 runBit = b;
2051 lastRun = p;
2052 }
2053 }
2054 if (runBit == 0)
2055 lo = lastRun;
2056 else
2057 hi = lastRun;
2058 for (Node p = f; p != lastRun; p = p.next) {
2059 int ph = p.hash;
2060 Object pk = p.key, pv = p.val;
2061 if ((ph & n) == 0)
2062 lo = new Node(ph, pk, pv, lo);
2063 else
2064 hi = new Node(ph, pk, pv, hi);
2065 }
2066 setTabAt(nextTab, i, lo);
2067 setTabAt(nextTab, i + n, hi);
2068 setTabAt(tab, i, fwd);
2069 advance = true;
2070 }
2071 }
2072 }
2073 else if ((fk = f.key) instanceof TreeBin) {
2074 TreeBin t = (TreeBin)fk;
2075 t.acquire(0);
2076 try {
2077 if (tabAt(tab, i) == f) {
2078 TreeBin lt = new TreeBin();
2079 TreeBin ht = new TreeBin();
2080 int lc = 0, hc = 0;
2081 for (Node e = t.first; e != null; e = e.next) {
2082 int h = e.hash;
2083 Object k = e.key, v = e.val;
2084 if ((h & n) == 0) {
2085 ++lc;
2086 lt.putTreeNode(h, k, v);
2087 }
2088 else {
2089 ++hc;
2090 ht.putTreeNode(h, k, v);
2091 }
2092 }
2093 Node ln, hn; // throw away trees if too small
2094 if (lc < TREE_THRESHOLD) {
2095 ln = null;
2096 for (Node p = lt.first; p != null; p = p.next)
2097 ln = new Node(p.hash, p.key, p.val, ln);
2098 }
2099 else
2100 ln = new Node(MOVED, lt, null, null);
2101 setTabAt(nextTab, i, ln);
2102 if (hc < TREE_THRESHOLD) {
2103 hn = null;
2104 for (Node p = ht.first; p != null; p = p.next)
2105 hn = new Node(p.hash, p.key, p.val, hn);
2106 }
2107 else
2108 hn = new Node(MOVED, ht, null, null);
2109 setTabAt(nextTab, i + n, hn);
2110 setTabAt(tab, i, fwd);
2111 advance = true;
2112 }
2113 } finally {
2114 t.release(0);
2115 }
2116 }
2117 else
2118 advance = true; // already processed
2119 }
2120 }
2121
2122 /* ---------------- Counter support -------------- */
2123
2124 final long sumCount() {
2125 CounterCell[] as = counterCells; CounterCell a;
2126 long sum = baseCount;
2127 if (as != null) {
2128 for (int i = 0; i < as.length; ++i) {
2129 if ((a = as[i]) != null)
2130 sum += a.value;
2131 }
2132 }
2133 return sum;
2134 }
2135
2136 // See LongAdder version for explanation
2137 private final void fullAddCount(long x, CounterHashCode hc,
2138 boolean wasUncontended) {
2139 int h;
2140 if (hc == null) {
2141 hc = new CounterHashCode();
2142 int s = counterHashCodeGenerator.addAndGet(SEED_INCREMENT);
2143 h = hc.code = (s == 0) ? 1 : s; // Avoid zero
2144 threadCounterHashCode.set(hc);
2145 }
2146 else
2147 h = hc.code;
2148 boolean collide = false; // True if last slot nonempty
2149 for (;;) {
2150 CounterCell[] as; CounterCell a; int n; long v;
2151 if ((as = counterCells) != null && (n = as.length) > 0) {
2152 if ((a = as[(n - 1) & h]) == null) {
2153 if (counterBusy == 0) { // Try to attach new Cell
2154 CounterCell r = new CounterCell(x); // Optimistic create
2155 if (counterBusy == 0 &&
2156 U.compareAndSwapInt(this, COUNTERBUSY, 0, 1)) {
2157 boolean created = false;
2158 try { // Recheck under lock
2159 CounterCell[] rs; int m, j;
2160 if ((rs = counterCells) != null &&
2161 (m = rs.length) > 0 &&
2162 rs[j = (m - 1) & h] == null) {
2163 rs[j] = r;
2164 created = true;
2165 }
2166 } finally {
2167 counterBusy = 0;
2168 }
2169 if (created)
2170 break;
2171 continue; // Slot is now non-empty
2172 }
2173 }
2174 collide = false;
2175 }
2176 else if (!wasUncontended) // CAS already known to fail
2177 wasUncontended = true; // Continue after rehash
2178 else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))
2179 break;
2180 else if (counterCells != as || n >= NCPU)
2181 collide = false; // At max size or stale
2182 else if (!collide)
2183 collide = true;
2184 else if (counterBusy == 0 &&
2185 U.compareAndSwapInt(this, COUNTERBUSY, 0, 1)) {
2186 try {
2187 if (counterCells == as) {// Expand table unless stale
2188 CounterCell[] rs = new CounterCell[n << 1];
2189 for (int i = 0; i < n; ++i)
2190 rs[i] = as[i];
2191 counterCells = rs;
2192 }
2193 } finally {
2194 counterBusy = 0;
2195 }
2196 collide = false;
2197 continue; // Retry with expanded table
2198 }
2199 h ^= h << 13; // Rehash
2200 h ^= h >>> 17;
2201 h ^= h << 5;
2202 }
2203 else if (counterBusy == 0 && counterCells == as &&
2204 U.compareAndSwapInt(this, COUNTERBUSY, 0, 1)) {
2205 boolean init = false;
2206 try { // Initialize table
2207 if (counterCells == as) {
2208 CounterCell[] rs = new CounterCell[2];
2209 rs[h & 1] = new CounterCell(x);
2210 counterCells = rs;
2211 init = true;
2212 }
2213 } finally {
2214 counterBusy = 0;
2215 }
2216 if (init)
2217 break;
2218 }
2219 else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x))
2220 break; // Fall back on using base
2221 }
2222 hc.code = h; // Record index for next time
2223 }
2224
2225 /* ---------------- Table Traversal -------------- */
2226
2227 /**
2228 * Encapsulates traversal for methods such as containsValue; also
2229 * serves as a base class for other iterators and bulk tasks.
2230 *
2231 * At each step, the iterator snapshots the key ("nextKey") and
2232 * value ("nextVal") of a valid node (i.e., one that, at point of
2233 * snapshot, has a non-null user value). Because val fields can
2234 * change (including to null, indicating deletion), field nextVal
2235 * might not be accurate at point of use, but still maintains the
2236 * weak consistency property of holding a value that was once
2237 * valid. To support iterator.remove, the nextKey field is not
2238 * updated (nulled out) when the iterator cannot advance.
2239 *
2240 * Internal traversals directly access these fields, as in:
2241 * {@code while (it.advance() != null) { process(it.nextKey); }}
2242 *
2243 * Exported iterators must track whether the iterator has advanced
2244 * (in hasNext vs next) (by setting/checking/nulling field
2245 * nextVal), and then extract key, value, or key-value pairs as
2246 * return values of next().
2247 *
2248 * The iterator visits once each still-valid node that was
2249 * reachable upon iterator construction. It might miss some that
2250 * were added to a bin after the bin was visited, which is OK wrt
2251 * consistency guarantees. Maintaining this property in the face
2252 * of possible ongoing resizes requires a fair amount of
2253 * bookkeeping state that is difficult to optimize away amidst
2254 * volatile accesses. Even so, traversal maintains reasonable
2255 * throughput.
2256 *
2257 * Normally, iteration proceeds bin-by-bin traversing lists.
2258 * However, if the table has been resized, then all future steps
2259 * must traverse both the bin at the current index and the bin at
2260 * (index + baseSize); and so on for further resizings. To
2261 * paranoically cope with potential sharing by users of iterators
2262 * across threads, iteration terminates if a bounds check fails
2263 * for a table read.
2264 *
2265 * This class extends CountedCompleter to streamline parallel
2266 * iteration in bulk operations. This adds only a few fields of
2267 * space overhead, which is small enough not to matter in cases
2268 * where it is not needed. Because CountedCompleter is
2269 * Serializable, but iterators need not be, we need to add warning
2270 * suppressions.
2271 */
2272 @SuppressWarnings("serial") static class Traverser<K,V,R>
2273 extends CountedCompleter<R> {
2274 final ConcurrentHashMap<K, V> map;
2275 Node next; // the next entry to use
2276 Object nextKey; // cached key field of next
2277 Object nextVal; // cached val field of next
2278 Node[] tab; // current table; updated if resized
2279 int index; // index of bin to use next
2280 int baseIndex; // current index of initial table
2281 int baseLimit; // index bound for initial table
2282 int baseSize; // initial table size
2283 int batch; // split control
2284
2285 /** Creates iterator for all entries in the table. */
2286 Traverser(ConcurrentHashMap<K, V> map) {
2287 this.map = map;
2288 }
2289
2290 /** Creates iterator for split() methods and task constructors */
2291 Traverser(ConcurrentHashMap<K,V> map, Traverser<K,V,?> it, int batch) {
2292 super(it);
2293 this.batch = batch;
2294 if ((this.map = map) != null && it != null) { // split parent
2295 Node[] t;
2296 if ((t = it.tab) == null &&
2297 (t = it.tab = map.table) != null)
2298 it.baseLimit = it.baseSize = t.length;
2299 this.tab = t;
2300 this.baseSize = it.baseSize;
2301 int hi = this.baseLimit = it.baseLimit;
2302 it.baseLimit = this.index = this.baseIndex =
2303 (hi + it.baseIndex + 1) >>> 1;
2304 }
2305 }
2306
2307 /**
2308 * Advances next; returns nextVal or null if terminated.
2309 * See above for explanation.
2310 */
2311 final Object advance() {
2312 Node e = next;
2313 Object ev = null;
2314 outer: do {
2315 if (e != null) // advance past used/skipped node
2316 e = e.next;
2317 while (e == null) { // get to next non-null bin
2318 ConcurrentHashMap<K, V> m;
2319 Node[] t; int b, i, n; Object ek; // checks must use locals
2320 if ((t = tab) != null)
2321 n = t.length;
2322 else if ((m = map) != null && (t = tab = m.table) != null)
2323 n = baseLimit = baseSize = t.length;
2324 else
2325 break outer;
2326 if ((b = baseIndex) >= baseLimit ||
2327 (i = index) < 0 || i >= n)
2328 break outer;
2329 if ((e = tabAt(t, i)) != null && e.hash < 0) {
2330 if ((ek = e.key) instanceof TreeBin)
2331 e = ((TreeBin)ek).first;
2332 else {
2333 tab = (Node[])ek;
2334 continue; // restarts due to null val
2335 }
2336 } // visit upper slots if present
2337 index = (i += baseSize) < n ? i : (baseIndex = b + 1);
2338 }
2339 nextKey = e.key;
2340 } while ((ev = e.val) == null); // skip deleted or special nodes
2341 next = e;
2342 return nextVal = ev;
2343 }
2344
2345 public final void remove() {
2346 Object k = nextKey;
2347 if (k == null && (advance() == null || (k = nextKey) == null))
2348 throw new IllegalStateException();
2349 map.internalReplace(k, null, null);
2350 }
2351
2352 public final boolean hasNext() {
2353 return nextVal != null || advance() != null;
2354 }
2355
2356 public final boolean hasMoreElements() { return hasNext(); }
2357
2358 public void compute() { } // default no-op CountedCompleter body
2359
2360 /**
2361 * Returns a batch value > 0 if this task should (and must) be
2362 * split; if so, adding to pending count, and in any case
2363 * updating batch value. The initial batch value is approx
2364 * exp2 of the number of times (minus one) to split task by
2365 * two before executing leaf action. This value is faster to
2366 * compute and more convenient to use as a guide to splitting
2367 * than is the depth, since it is used while dividing by two
2368 * anyway.
2369 */
2370 final int preSplit() {
2371 ConcurrentHashMap<K, V> m; int b; Node[] t; ForkJoinPool pool;
2372 if ((b = batch) < 0 && (m = map) != null) { // force initialization
2373 if ((t = tab) == null && (t = tab = m.table) != null)
2374 baseLimit = baseSize = t.length;
2375 if (t != null) {
2376 long n = m.sumCount();
2377 int par = ((pool = getPool()) == null) ?
2378 ForkJoinPool.getCommonPoolParallelism() :
2379 pool.getParallelism();
2380 int sp = par << 3; // slack of 8
2381 b = (n <= 0L) ? 0 : (n < (long)sp) ? (int)n : sp;
2382 }
2383 }
2384 b = (b <= 1 || baseIndex == baseLimit) ? 0 : (b >>> 1);
2385 if ((batch = b) > 0)
2386 addToPendingCount(1);
2387 return b;
2388 }
2389
2390 }
2391
2392 /* ---------------- Public operations -------------- */
2393
2394 /**
2395 * Creates a new, empty map with the default initial table size (16).
2396 */
2397 public ConcurrentHashMap() {
2398 }
2399
2400 /**
2401 * Creates a new, empty map with an initial table size
2402 * accommodating the specified number of elements without the need
2403 * to dynamically resize.
2404 *
2405 * @param initialCapacity The implementation performs internal
2406 * sizing to accommodate this many elements.
2407 * @throws IllegalArgumentException if the initial capacity of
2408 * elements is negative
2409 */
2410 public ConcurrentHashMap(int initialCapacity) {
2411 if (initialCapacity < 0)
2412 throw new IllegalArgumentException();
2413 int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
2414 MAXIMUM_CAPACITY :
2415 tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
2416 this.sizeCtl = cap;
2417 }
2418
2419 /**
2420 * Creates a new map with the same mappings as the given map.
2421 *
2422 * @param m the map
2423 */
2424 public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
2425 this.sizeCtl = DEFAULT_CAPACITY;
2426 internalPutAll(m);
2427 }
2428
2429 /**
2430 * Creates a new, empty map with an initial table size based on
2431 * the given number of elements ({@code initialCapacity}) and
2432 * initial table density ({@code loadFactor}).
2433 *
2434 * @param initialCapacity the initial capacity. The implementation
2435 * performs internal sizing to accommodate this many elements,
2436 * given the specified load factor.
2437 * @param loadFactor the load factor (table density) for
2438 * establishing the initial table size
2439 * @throws IllegalArgumentException if the initial capacity of
2440 * elements is negative or the load factor is nonpositive
2441 *
2442 * @since 1.6
2443 */
2444 public ConcurrentHashMap(int initialCapacity, float loadFactor) {
2445 this(initialCapacity, loadFactor, 1);
2446 }
2447
2448 /**
2449 * Creates a new, empty map with an initial table size based on
2450 * the given number of elements ({@code initialCapacity}), table
2451 * density ({@code loadFactor}), and number of concurrently
2452 * updating threads ({@code concurrencyLevel}).
2453 *
2454 * @param initialCapacity the initial capacity. The implementation
2455 * performs internal sizing to accommodate this many elements,
2456 * given the specified load factor.
2457 * @param loadFactor the load factor (table density) for
2458 * establishing the initial table size
2459 * @param concurrencyLevel the estimated number of concurrently
2460 * updating threads. The implementation may use this value as
2461 * a sizing hint.
2462 * @throws IllegalArgumentException if the initial capacity is
2463 * negative or the load factor or concurrencyLevel are
2464 * nonpositive
2465 */
2466 public ConcurrentHashMap(int initialCapacity,
2467 float loadFactor, int concurrencyLevel) {
2468 if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
2469 throw new IllegalArgumentException();
2470 if (initialCapacity < concurrencyLevel) // Use at least as many bins
2471 initialCapacity = concurrencyLevel; // as estimated threads
2472 long size = (long)(1.0 + (long)initialCapacity / loadFactor);
2473 int cap = (size >= (long)MAXIMUM_CAPACITY) ?
2474 MAXIMUM_CAPACITY : tableSizeFor((int)size);
2475 this.sizeCtl = cap;
2476 }
2477
2478 /**
2479 * Creates a new {@link Set} backed by a ConcurrentHashMap
2480 * from the given type to {@code Boolean.TRUE}.
2481 *
2482 * @return the new set
2483 */
2484 public static <K> KeySetView<K,Boolean> newKeySet() {
2485 return new KeySetView<K,Boolean>(new ConcurrentHashMap<K,Boolean>(),
2486 Boolean.TRUE);
2487 }
2488
2489 /**
2490 * Creates a new {@link Set} backed by a ConcurrentHashMap
2491 * from the given type to {@code Boolean.TRUE}.
2492 *
2493 * @param initialCapacity The implementation performs internal
2494 * sizing to accommodate this many elements.
2495 * @throws IllegalArgumentException if the initial capacity of
2496 * elements is negative
2497 * @return the new set
2498 */
2499 public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
2500 return new KeySetView<K,Boolean>
2501 (new ConcurrentHashMap<K,Boolean>(initialCapacity), Boolean.TRUE);
2502 }
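
// Illustrative usage sketch ("seen" is a placeholder name): a KeySetView
// obtained from newKeySet behaves as a concurrent Set whose additions are
// backed by mappings to Boolean.TRUE.
//
//   KeySetView<String,Boolean> seen = ConcurrentHashMap.newKeySet();
//   seen.add("alpha");                        // backed by ("alpha", Boolean.TRUE)
//   boolean present = seen.contains("alpha"); // true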
2503
2504 /**
2505 * {@inheritDoc}
2506 */
2507 public boolean isEmpty() {
2508 return sumCount() <= 0L; // ignore transient negative values
2509 }
2510
2511 /**
2512 * {@inheritDoc}
2513 */
2514 public int size() {
2515 long n = sumCount();
2516 return ((n < 0L) ? 0 :
2517 (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
2518 (int)n);
2519 }
2520
2521 /**
2522 * Returns the number of mappings. This method should be used
2523 * instead of {@link #size} because a ConcurrentHashMap may
2524 * contain more mappings than can be represented as an int. The
2525 * value returned is an estimate; the actual count may differ if
2526 * there are concurrent insertions or removals.
2527 *
2528 * @return the number of mappings
2529 */
2530 public long mappingCount() {
2531 long n = sumCount();
2532 return (n < 0L) ? 0L : n; // ignore transient negative values
2533 }
2534
2535 /**
2536 * Returns the value to which the specified key is mapped,
2537 * or {@code null} if this map contains no mapping for the key.
2538 *
2539 * <p>More formally, if this map contains a mapping from a key
2540 * {@code k} to a value {@code v} such that {@code key.equals(k)},
2541 * then this method returns {@code v}; otherwise it returns
2542 * {@code null}. (There can be at most one such mapping.)
2543 *
2544 * @throws NullPointerException if the specified key is null
2545 */
2546 public V get(Object key) {
2547 return internalGet(key);
2548 }
2549
2550 /**
2551 * Returns the value to which the specified key is mapped,
2552 * or the given defaultValue if this map contains no mapping for the key.
2553 *
2554 * @param key the key
2555 * @param defaultValue the value to return if this map contains
2556 * no mapping for the given key
2557 * @return the mapping for the key, if present; else the defaultValue
2558 * @throws NullPointerException if the specified key is null
2559 */
2560 public V getValueOrDefault(Object key, V defaultValue) {
2561 V v;
2562 return (v = internalGet(key)) == null ? defaultValue : v;
2563 }
2564
2565 /**
2566 * Tests if the specified object is a key in this table.
2567 *
2568 * @param key possible key
2569 * @return {@code true} if and only if the specified object
2570 * is a key in this table, as determined by the
2571 * {@code equals} method; {@code false} otherwise
2572 * @throws NullPointerException if the specified key is null
2573 */
2574 public boolean containsKey(Object key) {
2575 return internalGet(key) != null;
2576 }
2577
2578 /**
2579 * Returns {@code true} if this map maps one or more keys to the
2580 * specified value. Note: This method may require a full traversal
2581 * of the map, and is much slower than method {@code containsKey}.
2582 *
2583 * @param value value whose presence in this map is to be tested
2584 * @return {@code true} if this map maps one or more keys to the
2585 * specified value
2586 * @throws NullPointerException if the specified value is null
2587 */
2588 public boolean containsValue(Object value) {
2589 if (value == null)
2590 throw new NullPointerException();
2591 Object v;
2592 Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
2593 while ((v = it.advance()) != null) {
2594 if (v == value || value.equals(v))
2595 return true;
2596 }
2597 return false;
2598 }
2599
2600 /**
2601 * Legacy method testing if some key maps into the specified value
2602 * in this table. This method is identical in functionality to
2603 * {@link #containsValue}, and exists solely to ensure
2604 * full compatibility with class {@link java.util.Hashtable},
2605 * which supported this method prior to introduction of the
2606 * Java Collections framework.
2607 *
2608 * @param value a value to search for
2609 * @return {@code true} if and only if some key maps to the
2610 * {@code value} argument in this table as
2611 * determined by the {@code equals} method;
2612 * {@code false} otherwise
2613 * @throws NullPointerException if the specified value is null
2614 */
2615 public boolean contains(Object value) {
2616 return containsValue(value);
2617 }
2618
2619 /**
2620 * Maps the specified key to the specified value in this table.
2621 * Neither the key nor the value can be null.
2622 *
2623 * <p>The value can be retrieved by calling the {@code get} method
2624 * with a key that is equal to the original key.
2625 *
2626 * @param key key with which the specified value is to be associated
2627 * @param value value to be associated with the specified key
2628 * @return the previous value associated with {@code key}, or
2629 * {@code null} if there was no mapping for {@code key}
2630 * @throws NullPointerException if the specified key or value is null
2631 */
2632 public V put(K key, V value) {
2633 return internalPut(key, value, false);
2634 }
2635
2636 /**
2637 * {@inheritDoc}
2638 *
2639 * @return the previous value associated with the specified key,
2640 * or {@code null} if there was no mapping for the key
2641 * @throws NullPointerException if the specified key or value is null
2642 */
2643 public V putIfAbsent(K key, V value) {
2644 return internalPut(key, value, true);
2645 }
2646
2647 /**
2648 * Copies all of the mappings from the specified map to this one.
2649 * These mappings replace any mappings that this map had for any of the
2650 * keys currently in the specified map.
2651 *
2652 * @param m mappings to be stored in this map
2653 */
2654 public void putAll(Map<? extends K, ? extends V> m) {
2655 internalPutAll(m);
2656 }
2657
2658 /**
2659 * If the specified key is not already associated with a value,
2660 * computes its value using the given mappingFunction and enters
2661 * it into the map unless null. This is equivalent to
2662 * <pre> {@code
2663 * if (map.containsKey(key))
2664 * return map.get(key);
2665 * value = mappingFunction.apply(key);
2666 * if (value != null)
2667 * map.put(key, value);
2668 * return value;}</pre>
2669 *
2670 * except that the action is performed atomically. If the
2671 * function returns {@code null} no mapping is recorded. If the
2672 * function itself throws an (unchecked) exception, the exception
2673 * is rethrown to its caller, and no mapping is recorded. Some
2674 * attempted update operations on this map by other threads may be
2675 * blocked while computation is in progress, so the computation
2676 * should be short and simple, and must not attempt to update any
2677 * other mappings of this Map. The most appropriate usage is to
2678 * construct a new object serving as an initial mapped value, or
2679 * memoized result, as in:
2680 *
2681 * <pre> {@code
2682 * map.computeIfAbsent(key, new Fun<K, V>() {
2683 * public V map(K k) { return new Value(f(k)); }});}</pre>
2684 *
2685 * @param key key with which the specified value is to be associated
2686 * @param mappingFunction the function to compute a value
2687 * @return the current (existing or computed) value associated with
2688 * the specified key, or null if the computed value is null
2689 * @throws NullPointerException if the specified key or mappingFunction
2690 * is null
2691 * @throws IllegalStateException if the computation detectably
2692 * attempts a recursive update to this map that would
2693 * otherwise never complete
2694 * @throws RuntimeException or Error if the mappingFunction does so,
2695 * in which case the mapping is left unestablished
2696 */
2697 public V computeIfAbsent
2698 (K key, Fun<? super K, ? extends V> mappingFunction) {
2699 return internalComputeIfAbsent(key, mappingFunction);
2700 }
2701
2702 /**
2703 * If the given key is present, computes a new mapping value given a key and
2704 * its current mapped value. This is equivalent to
2705 * <pre> {@code
2706 * if (map.containsKey(key)) {
2707 * value = remappingFunction.apply(key, map.get(key));
2708 * if (value != null)
2709 * map.put(key, value);
2710 * else
2711 * map.remove(key);
2712 * }
2713 * }</pre>
2714 *
2715 * except that the action is performed atomically. If the
2716 * function returns {@code null}, the mapping is removed. If the
2717 * function itself throws an (unchecked) exception, the exception
2718 * is rethrown to its caller, and the current mapping is left
2719 * unchanged. Some attempted update operations on this map by
2720 * other threads may be blocked while computation is in progress,
2721 * so the computation should be short and simple, and must not
2722 * attempt to update any other mappings of this Map. For example,
2723 * to append new messages to an existing value mapping:
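 *
 * (a minimal sketch; {@code Key} and {@code msg} are placeholder names,
 * as in the {@code compute} example below)
 * <pre> {@code
 * ConcurrentHashMap<Key, String> map = ...;
 * final String msg = ...;
 * map.computeIfPresent(key, new BiFun<Key, String, String>() {
 *   public String apply(Key k, String v) {
 *     return v + msg; }});}</pre>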
2724 *
2725 * @param key key with which the specified value is to be associated
2726 * @param remappingFunction the function to compute a value
2727 * @return the new value associated with the specified key, or null if none
2728 * @throws NullPointerException if the specified key or remappingFunction
2729 * is null
2730 * @throws IllegalStateException if the computation detectably
2731 * attempts a recursive update to this map that would
2732 * otherwise never complete
2733 * @throws RuntimeException or Error if the remappingFunction does so,
2734 * in which case the mapping is unchanged
2735 */
2736 public V computeIfPresent
2737 (K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
2738 return internalCompute(key, true, remappingFunction);
2739 }
2740
2741 /**
2742 * Computes a new mapping value given a key and
2743 * its current mapped value (or {@code null} if there is no current
2744 * mapping). This is equivalent to
2745 * <pre> {@code
2746 * value = remappingFunction.apply(key, map.get(key));
2747 * if (value != null)
2748 * map.put(key, value);
2749 * else
2750 * map.remove(key);
2751 * }</pre>
2752 *
2753 * except that the action is performed atomically. If the
2754 * function returns {@code null}, the mapping is removed. If the
2755 * function itself throws an (unchecked) exception, the exception
2756 * is rethrown to its caller, and the current mapping is left
2757 * unchanged. Some attempted update operations on this map by
2758 * other threads may be blocked while computation is in progress,
2759 * so the computation should be short and simple, and must not
2760 * attempt to update any other mappings of this Map. For example,
2761 * to either create or append new messages to a value mapping:
2762 *
2763 * <pre> {@code
2764 * ConcurrentHashMap<Key, String> map = ...;
2765 * final String msg = ...;
2766 * map.compute(key, new BiFun<Key, String, String>() {
2767 * public String apply(Key k, String v) {
2768 * return (v == null) ? msg : v + msg; }});}</pre>
2769 *
2770 * @param key key with which the specified value is to be associated
2771 * @param remappingFunction the function to compute a value
2772 * @return the new value associated with the specified key, or null if none
2773 * @throws NullPointerException if the specified key or remappingFunction
2774 * is null
2775 * @throws IllegalStateException if the computation detectably
2776 * attempts a recursive update to this map that would
2777 * otherwise never complete
2778 * @throws RuntimeException or Error if the remappingFunction does so,
2779 * in which case the mapping is unchanged
2780 */
2781 public V compute
2782 (K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
2783 return internalCompute(key, false, remappingFunction);
2784 }
2785
2786 /**
2787 * If the specified key is not already associated
2788 * with a value, associates it with the given value.
2789 * Otherwise, replaces the value with the results of
2790 * the given remapping function. This is equivalent to:
2791 * <pre> {@code
2792 * if (!map.containsKey(key))
2793 * map.put(key, value);
2794 * else {
2795 * newValue = remappingFunction.apply(map.get(key), value);
2796 * if (newValue != null)
2797 * map.put(key, newValue);
2798 * else
2799 * map.remove(key);
2800 * }
2801 * }</pre>
2802 * except that the action is performed atomically. If the
2803 * function returns {@code null}, the mapping is removed. If the
2804 * function itself throws an (unchecked) exception, the exception
2805 * is rethrown to its caller, and the current mapping is left
2806 * unchanged. Some attempted update operations on this map by
2807 * other threads may be blocked while computation is in progress,
2808 * so the computation should be short and simple, and must not
2809 * attempt to update any other mappings of this Map.
2810 */
2811 public V merge
2812 (K key, V value,
2813 BiFun<? super V, ? super V, ? extends V> remappingFunction) {
2814 return internalMerge(key, value, remappingFunction);
2815 }
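
// Illustrative usage sketch ("counts" and "word" are placeholder names):
// maintain a per-key occurrence count by merging increments.
//
//   ConcurrentHashMap<String,Integer> counts = new ConcurrentHashMap<String,Integer>();
//   counts.merge(word, Integer.valueOf(1), new BiFun<Integer,Integer,Integer>() {
//       public Integer apply(Integer a, Integer b) {
//           return Integer.valueOf(a.intValue() + b.intValue()); }});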
2816
2817 /**
2818 * Removes the key (and its corresponding value) from this map.
2819 * This method does nothing if the key is not in the map.
2820 *
2821 * @param key the key that needs to be removed
2822 * @return the previous value associated with {@code key}, or
2823 * {@code null} if there was no mapping for {@code key}
2824 * @throws NullPointerException if the specified key is null
2825 */
2826 public V remove(Object key) {
2827 return internalReplace(key, null, null);
2828 }
2829
2830 /**
2831 * {@inheritDoc}
2832 *
2833 * @throws NullPointerException if the specified key is null
2834 */
2835 public boolean remove(Object key, Object value) {
2836 return value != null && internalReplace(key, null, value) != null;
2837 }
2838
2839 /**
2840 * {@inheritDoc}
2841 *
2842 * @throws NullPointerException if any of the arguments are null
2843 */
2844 public boolean replace(K key, V oldValue, V newValue) {
2845 if (key == null || oldValue == null || newValue == null)
2846 throw new NullPointerException();
2847 return internalReplace(key, newValue, oldValue) != null;
2848 }
2849
2850 /**
2851 * {@inheritDoc}
2852 *
2853 * @return the previous value associated with the specified key,
2854 * or {@code null} if there was no mapping for the key
2855 * @throws NullPointerException if the specified key or value is null
2856 */
2857 public V replace(K key, V value) {
2858 if (key == null || value == null)
2859 throw new NullPointerException();
2860 return internalReplace(key, value, null);
2861 }
2862
2863 /**
2864 * Removes all of the mappings from this map.
2865 */
2866 public void clear() {
2867 internalClear();
2868 }
2869
2870 /**
2871 * Returns a {@link Set} view of the keys contained in this map.
2872 * The set is backed by the map, so changes to the map are
2873 * reflected in the set, and vice-versa.
2874 *
2875 * @return the set view
2876 */
2877 public KeySetView<K,V> keySet() {
2878 KeySetView<K,V> ks = keySet;
2879 return (ks != null) ? ks : (keySet = new KeySetView<K,V>(this, null));
2880 }
2881
2882 /**
2883 * Returns a {@link Set} view of the keys in this map, using the
2884 * given common mapped value for any additions (i.e., {@link
2885 * Collection#add} and {@link Collection#addAll}). This is of
2886 * course only appropriate if it is acceptable to use the same
2887 * value for all additions from this view.
2888 *
2889 * @param mappedValue the mapped value to use for any
2890 * additions.
2891 * @return the set view
2892 * @throws NullPointerException if the mappedValue is null
2893 */
2894 public KeySetView<K,V> keySet(V mappedValue) {
2895 if (mappedValue == null)
2896 throw new NullPointerException();
2897 return new KeySetView<K,V>(this, mappedValue);
2898 }
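
// Illustrative sketch ("m" and "ks" are placeholder names): use the keyed
// view to record keys under one shared marker value.
//
//   ConcurrentHashMap<String,Boolean> m = new ConcurrentHashMap<String,Boolean>();
//   KeySetView<String,Boolean> ks = m.keySet(Boolean.TRUE);
//   ks.add("alpha");   // records the key with the common value Boolean.TRUE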
2899
2900 /**
2901 * Returns a {@link Collection} view of the values contained in this map.
2902 * The collection is backed by the map, so changes to the map are
2903 * reflected in the collection, and vice-versa.
2904 */
2905 public ValuesView<K,V> values() {
2906 ValuesView<K,V> vs = values;
2907 return (vs != null) ? vs : (values = new ValuesView<K,V>(this));
2908 }
2909
2910 /**
2911 * Returns a {@link Set} view of the mappings contained in this map.
2912 * The set is backed by the map, so changes to the map are
2913 * reflected in the set, and vice-versa. The set supports element
2914 * removal, which removes the corresponding mapping from the map,
2915 * via the {@code Iterator.remove}, {@code Set.remove},
2916 * {@code removeAll}, {@code retainAll}, and {@code clear}
2917 * operations. It does not support the {@code add} or
2918 * {@code addAll} operations.
2919 *
2920 * <p>The view's {@code iterator} is a "weakly consistent" iterator
2921 * that will never throw {@link ConcurrentModificationException},
2922 * and guarantees to traverse elements as they existed upon
2923 * construction of the iterator, and may (but is not guaranteed to)
2924 * reflect any modifications subsequent to construction.
2925 */
2926 public Set<Map.Entry<K,V>> entrySet() {
2927 EntrySetView<K,V> es = entrySet;
2928 return (es != null) ? es : (entrySet = new EntrySetView<K,V>(this));
2929 }
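
// Illustrative sketch ("map" and "process" are placeholder names): the
// view's iterator is weakly consistent, so the loop below never throws
// ConcurrentModificationException even under concurrent updates.
//
//   for (Map.Entry<String,Long> e : map.entrySet())
//       process(e.getKey(), e.getValue());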
2930
2931 /**
2932 * Returns an enumeration of the keys in this table.
2933 *
2934 * @return an enumeration of the keys in this table
2935 * @see #keySet()
2936 */
2937 public Enumeration<K> keys() {
2938 return new KeyIterator<K,V>(this);
2939 }
2940
2941 /**
2942 * Returns an enumeration of the values in this table.
2943 *
2944 * @return an enumeration of the values in this table
2945 * @see #values()
2946 */
2947 public Enumeration<V> elements() {
2948 return new ValueIterator<K,V>(this);
2949 }
2950
2951 /**
2952 * Returns a partitionable iterator of the keys in this map.
2953 *
2954 * @return a partitionable iterator of the keys in this map
2955 */
2956 public Spliterator<K> keySpliterator() {
2957 return new KeyIterator<K,V>(this);
2958 }
2959
2960 /**
2961 * Returns a partitionable iterator of the values in this map.
2962 *
2963 * @return a partitionable iterator of the values in this map
2964 */
2965 public Spliterator<V> valueSpliterator() {
2966 return new ValueIterator<K,V>(this);
2967 }
2968
2969 /**
2970 * Returns a partitionable iterator of the entries in this map.
2971 *
2972 * @return a partitionable iterator of the entries in this map
2973 */
2974 public Spliterator<Map.Entry<K,V>> entrySpliterator() {
2975 return new EntryIterator<K,V>(this);
2976 }
2977
2978 /**
2979 * Returns the hash code value for this {@link Map}, i.e.,
2980 * the sum of, for each key-value pair in the map,
2981 * {@code key.hashCode() ^ value.hashCode()}.
2982 *
2983 * @return the hash code value for this map
2984 */
2985 public int hashCode() {
2986 int h = 0;
2987 Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
2988 Object v;
2989 while ((v = it.advance()) != null) {
2990 h += it.nextKey.hashCode() ^ v.hashCode();
2991 }
2992 return h;
2993 }
2994
2995 /**
2996 * Returns a string representation of this map. The string
2997 * representation consists of a list of key-value mappings (in no
2998 * particular order) enclosed in braces ("{@code {}}"). Adjacent
2999 * mappings are separated by the characters {@code ", "} (comma
3000 * and space). Each key-value mapping is rendered as the key
3001 * followed by an equals sign ("{@code =}") followed by the
3002 * associated value.
3003 *
3004 * @return a string representation of this map
3005 */
3006 public String toString() {
3007 Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3008 StringBuilder sb = new StringBuilder();
3009 sb.append('{');
3010 Object v;
3011 if ((v = it.advance()) != null) {
3012 for (;;) {
3013 Object k = it.nextKey;
3014 sb.append(k == this ? "(this Map)" : k);
3015 sb.append('=');
3016 sb.append(v == this ? "(this Map)" : v);
3017 if ((v = it.advance()) == null)
3018 break;
3019 sb.append(',').append(' ');
3020 }
3021 }
3022 return sb.append('}').toString();
3023 }
3024
3025 /**
3026 * Compares the specified object with this map for equality.
3027 * Returns {@code true} if the given object is a map with the same
3028 * mappings as this map. This operation may return misleading
3029 * results if either map is concurrently modified during execution
3030 * of this method.
3031 *
3032 * @param o object to be compared for equality with this map
3033 * @return {@code true} if the specified object is equal to this map
3034 */
3035 public boolean equals(Object o) {
3036 if (o != this) {
3037 if (!(o instanceof Map))
3038 return false;
3039 Map<?,?> m = (Map<?,?>) o;
3040 Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3041 Object val;
3042 while ((val = it.advance()) != null) {
3043 Object v = m.get(it.nextKey);
3044 if (v == null || (v != val && !v.equals(val)))
3045 return false;
3046 }
3047 for (Map.Entry<?,?> e : m.entrySet()) {
3048 Object mk, mv, v;
3049 if ((mk = e.getKey()) == null ||
3050 (mv = e.getValue()) == null ||
3051 (v = internalGet(mk)) == null ||
3052 (mv != v && !mv.equals(v)))
3053 return false;
3054 }
3055 }
3056 return true;
3057 }
3058
3059 /* ---------------- Iterators -------------- */
3060
3061 @SuppressWarnings("serial") static final class KeyIterator<K,V>
3062 extends Traverser<K,V,Object>
3063 implements Spliterator<K>, Enumeration<K> {
3064 KeyIterator(ConcurrentHashMap<K, V> map) { super(map); }
3065 KeyIterator(ConcurrentHashMap<K, V> map, Traverser<K,V,Object> it) {
3066 super(map, it, -1);
3067 }
3068 public KeyIterator<K,V> split() {
3069 if (nextKey != null)
3070 throw new IllegalStateException();
3071 return new KeyIterator<K,V>(map, this);
3072 }
3073 @SuppressWarnings("unchecked") public final K next() {
3074 if (nextVal == null && advance() == null)
3075 throw new NoSuchElementException();
3076 Object k = nextKey;
3077 nextVal = null;
3078 return (K) k;
3079 }
3080
3081 public final K nextElement() { return next(); }
3082 }
3083
3084 @SuppressWarnings("serial") static final class ValueIterator<K,V>
3085 extends Traverser<K,V,Object>
3086 implements Spliterator<V>, Enumeration<V> {
3087 ValueIterator(ConcurrentHashMap<K, V> map) { super(map); }
3088 ValueIterator(ConcurrentHashMap<K, V> map, Traverser<K,V,Object> it) {
3089 super(map, it, -1);
3090 }
3091 public ValueIterator<K,V> split() {
3092 if (nextKey != null)
3093 throw new IllegalStateException();
3094 return new ValueIterator<K,V>(map, this);
3095 }
3096
3097 @SuppressWarnings("unchecked") public final V next() {
3098 Object v;
3099 if ((v = nextVal) == null && (v = advance()) == null)
3100 throw new NoSuchElementException();
3101 nextVal = null;
3102 return (V) v;
3103 }
3104
3105 public final V nextElement() { return next(); }
3106 }
3107
3108 @SuppressWarnings("serial") static final class EntryIterator<K,V>
3109 extends Traverser<K,V,Object>
3110 implements Spliterator<Map.Entry<K,V>> {
3111 EntryIterator(ConcurrentHashMap<K, V> map) { super(map); }
3112 EntryIterator(ConcurrentHashMap<K, V> map, Traverser<K,V,Object> it) {
3113 super(map, it, -1);
3114 }
3115 public EntryIterator<K,V> split() {
3116 if (nextKey != null)
3117 throw new IllegalStateException();
3118 return new EntryIterator<K,V>(map, this);
3119 }
3120
3121 @SuppressWarnings("unchecked") public final Map.Entry<K,V> next() {
3122 Object v;
3123 if ((v = nextVal) == null && (v = advance()) == null)
3124 throw new NoSuchElementException();
3125 Object k = nextKey;
3126 nextVal = null;
3127 return new MapEntry<K,V>((K)k, (V)v, map);
3128 }
3129 }
3130
3131 /**
3132 * Exported Entry for iterators
3133 */
3134 static final class MapEntry<K,V> implements Map.Entry<K, V> {
3135 final K key; // non-null
3136 V val; // non-null
3137 final ConcurrentHashMap<K, V> map;
3138 MapEntry(K key, V val, ConcurrentHashMap<K, V> map) {
3139 this.key = key;
3140 this.val = val;
3141 this.map = map;
3142 }
3143 public final K getKey() { return key; }
3144 public final V getValue() { return val; }
3145 public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
3146 public final String toString(){ return key + "=" + val; }
3147
3148 public final boolean equals(Object o) {
3149 Object k, v; Map.Entry<?,?> e;
3150 return ((o instanceof Map.Entry) &&
3151 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3152 (v = e.getValue()) != null &&
3153 (k == key || k.equals(key)) &&
3154 (v == val || v.equals(val)));
3155 }
3156
3157 /**
3158 * Sets our entry's value and writes through to the map. The
3159 * value to return is somewhat arbitrary here. Since we do not
3160 * necessarily track asynchronous changes, the most recent
3161 * "previous" value could be different from what we return (or
3162 * could even have been removed in which case the put will
3163 * re-establish). We do not and cannot guarantee more.
3164 */
3165 public final V setValue(V value) {
3166 if (value == null) throw new NullPointerException();
3167 V v = val;
3168 val = value;
3169 map.put(key, value);
3170 return v;
3171 }
3172 }
3173
3174 /**
3175 * Returns exportable snapshot entry for the given key and value
3176 * when write-through can't or shouldn't be used.
3177 */
3178 static <K,V> AbstractMap.SimpleEntry<K,V> entryFor(K k, V v) {
3179 return new AbstractMap.SimpleEntry<K,V>(k, v);
3180 }
3181
3182 /* ---------------- Serialization Support -------------- */
3183
3184 /**
3185 * Stripped-down version of helper class used in previous version,
3186 * declared for the sake of serialization compatibility
3187 */
3188 static class Segment<K,V> implements Serializable {
3189 private static final long serialVersionUID = 2249069246763182397L;
3190 final float loadFactor;
3191 Segment(float lf) { this.loadFactor = lf; }
3192 }
3193
3194 /**
3195 * Saves the state of the {@code ConcurrentHashMap} instance to a
3196 * stream (i.e., serializes it).
3197 * @param s the stream
3198 * @serialData
3199 * the key (Object) and value (Object)
3200 * for each key-value mapping, followed by a null pair.
3201 * The key-value mappings are emitted in no particular order.
3202 */
3203 @SuppressWarnings("unchecked") private void writeObject
3204 (java.io.ObjectOutputStream s)
3205 throws java.io.IOException {
3206 if (segments == null) { // for serialization compatibility
3207 segments = (Segment<K,V>[])
3208 new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
3209 for (int i = 0; i < segments.length; ++i)
3210 segments[i] = new Segment<K,V>(LOAD_FACTOR);
3211 }
3212 s.defaultWriteObject();
3213 Traverser<K,V,Object> it = new Traverser<K,V,Object>(this);
3214 Object v;
3215 while ((v = it.advance()) != null) {
3216 s.writeObject(it.nextKey);
3217 s.writeObject(v);
3218 }
3219 s.writeObject(null);
3220 s.writeObject(null);
3221 segments = null; // throw away
3222 }
3223
3224 /**
3225 * Reconstitutes the instance from a stream (that is, deserializes it).
3226 * @param s the stream
3227 */
3228 @SuppressWarnings("unchecked") private void readObject
3229 (java.io.ObjectInputStream s)
3230 throws java.io.IOException, ClassNotFoundException {
3231 s.defaultReadObject();
3232 this.segments = null; // unneeded
3233
3234 // Create all nodes, then place in table once size is known
3235 long size = 0L;
3236 Node p = null;
3237 for (;;) {
3238 K k = (K) s.readObject();
3239 V v = (V) s.readObject();
3240 if (k != null && v != null) {
3241 int h = spread(k.hashCode());
3242 p = new Node(h, k, v, p);
3243 ++size;
3244 }
3245 else
3246 break;
3247 }
3248 if (p != null) {
3249 boolean init = false;
3250 int n;
3251 if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
3252 n = MAXIMUM_CAPACITY;
3253 else {
3254 int sz = (int)size;
3255 n = tableSizeFor(sz + (sz >>> 1) + 1);
3256 }
3257 int sc = sizeCtl;
3258 boolean collide = false;
3259 if (n > sc &&
3260 U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
3261 try {
3262 if (table == null) {
3263 init = true;
3264 Node[] tab = new Node[n];
3265 int mask = n - 1;
3266 while (p != null) {
3267 int j = p.hash & mask;
3268 Node next = p.next;
3269 Node q = p.next = tabAt(tab, j);
3270 setTabAt(tab, j, p);
3271 if (!collide && q != null && q.hash == p.hash)
3272 collide = true;
3273 p = next;
3274 }
3275 table = tab;
3276 addCount(size, -1);
3277 sc = n - (n >>> 2);
3278 }
3279 } finally {
3280 sizeCtl = sc;
3281 }
3282 if (collide) { // rescan and convert to TreeBins
3283 Node[] tab = table;
3284 for (int i = 0; i < tab.length; ++i) {
3285 int c = 0;
3286 for (Node e = tabAt(tab, i); e != null; e = e.next) {
3287 if (++c > TREE_THRESHOLD &&
3288 (e.key instanceof Comparable)) {
3289 replaceWithTreeBin(tab, i, e.key);
3290 break;
3291 }
3292 }
3293 }
3294 }
3295 }
3296 if (!init) { // Can only happen if unsafely published.
3297 while (p != null) {
3298 internalPut((K)p.key, (V)p.val, false);
3299 p = p.next;
3300 }
3301 }
3302 }
3303 }
3304
3305 // -------------------------------------------------------
3306
3307 // Sams
3308 /** Interface describing a void action of one argument */
3309 public interface Action<A> { void apply(A a); }
3310 /** Interface describing a void action of two arguments */
3311 public interface BiAction<A,B> { void apply(A a, B b); }
3312 /** Interface describing a function of one argument */
3313 public interface Fun<A,T> { T apply(A a); }
3314 /** Interface describing a function of two arguments */
3315 public interface BiFun<A,B,T> { T apply(A a, B b); }
3316 /** Interface describing a function of no arguments */
3317 public interface Generator<T> { T apply(); }
3318 /** Interface describing a function mapping its argument to a double */
3319 public interface ObjectToDouble<A> { double apply(A a); }
3320 /** Interface describing a function mapping its argument to a long */
3321 public interface ObjectToLong<A> { long apply(A a); }
3322 /** Interface describing a function mapping its argument to an int */
3323 public interface ObjectToInt<A> {int apply(A a); }
3324 /** Interface describing a function mapping two arguments to a double */
3325 public interface ObjectByObjectToDouble<A,B> { double apply(A a, B b); }
3326 /** Interface describing a function mapping two arguments to a long */
3327 public interface ObjectByObjectToLong<A,B> { long apply(A a, B b); }
3328 /** Interface describing a function mapping two arguments to an int */
3329 public interface ObjectByObjectToInt<A,B> {int apply(A a, B b); }
3330 /** Interface describing a function mapping a double to a double */
3331 public interface DoubleToDouble { double apply(double a); }
3332 /** Interface describing a function mapping a long to a long */
3333 public interface LongToLong { long apply(long a); }
3334 /** Interface describing a function mapping an int to an int */
3335 public interface IntToInt { int apply(int a); }
3336 /** Interface describing a function mapping two doubles to a double */
3337 public interface DoubleByDoubleToDouble { double apply(double a, double b); }
3338 /** Interface describing a function mapping two longs to a long */
3339 public interface LongByLongToLong { long apply(long a, long b); }
3340 /** Interface describing a function mapping two ints to an int */
3341 public interface IntByIntToInt { int apply(int a, int b); }
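    /*
     * Illustrative sketch (not part of the API surface of this file):
     * on pre-lambda JDKs these nested single-abstract-method interfaces
     * are implemented with anonymous classes. For example, a
     * Fun<String,Integer> mapping a key to its length might be written:
     *
     *   ConcurrentHashMap.Fun<String, Integer> length =
     *       new ConcurrentHashMap.Fun<String, Integer>() {
     *           public Integer apply(String s) { return s.length(); }
     *       };
     */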
3342
3343
3344 // -------------------------------------------------------
3345
3346 /**
3347 * Performs the given action for each (key, value).
3348 *
3349 * @param action the action
3350 */
3351 public void forEach(BiAction<K,V> action) {
3352 ForkJoinTasks.forEach
3353 (this, action).invoke();
3354 }
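    /*
     * Usage sketch (illustrative; assumes a hypothetical map m of type
     * ConcurrentHashMap<String,Long> populated elsewhere):
     *
     *   m.forEach(new ConcurrentHashMap.BiAction<String, Long>() {
     *       public void apply(String k, Long v) {
     *           System.out.println(k + "=" + v);
     *       }
     *   });
     */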
3355
3356 /**
3357 * Performs the given action for each non-null transformation
3358 * of each (key, value).
3359 *
3360 * @param transformer a function returning the transformation
3361 * for an element, or null if there is no transformation (in
3362 * which case the action is not applied).
3363 * @param action the action
3364 */
3365 public <U> void forEach(BiFun<? super K, ? super V, ? extends U> transformer,
3366 Action<U> action) {
3367 ForkJoinTasks.forEach
3368 (this, transformer, action).invoke();
3369 }
3370
3371 /**
3372 * Returns a non-null result from applying the given search
3373 * function on each (key, value), or null if none. Upon
3374 * success, further element processing is suppressed and the
3375 * results of any other parallel invocations of the search
3376 * function are ignored.
3377 *
3378 * @param searchFunction a function returning a non-null
3379 * result on success, else null
3380 * @return a non-null result from applying the given search
3381 * function on each (key, value), or null if none
3382 */
3383 public <U> U search(BiFun<? super K, ? super V, ? extends U> searchFunction) {
3384 return ForkJoinTasks.search
3385 (this, searchFunction).invoke();
3386 }
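    /*
     * Usage sketch (illustrative; m is a hypothetical
     * ConcurrentHashMap<String,Long>): returns some key whose value
     * exceeds 100, or null if none; processing stops early on success.
     *
     *   String hit = m.search(
     *       new ConcurrentHashMap.BiFun<String, Long, String>() {
     *           public String apply(String k, Long v) {
     *               return v.longValue() > 100L ? k : null;
     *           }
     *       });
     */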
3387
3388 /**
3389 * Returns the result of accumulating the given transformation
3390 * of all (key, value) pairs using the given reducer to
3391 * combine values, or null if none.
3392 *
3393 * @param transformer a function returning the transformation
3394 * for an element, or null if there is no transformation (in
3395 * which case it is not combined).
3396 * @param reducer a commutative associative combining function
3397 * @return the result of accumulating the given transformation
3398 * of all (key, value) pairs
3399 */
3400 public <U> U reduce(BiFun<? super K, ? super V, ? extends U> transformer,
3401 BiFun<? super U, ? super U, ? extends U> reducer) {
3402 return ForkJoinTasks.reduce
3403 (this, transformer, reducer).invoke();
3404 }
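    /*
     * Usage sketch (illustrative; m is a hypothetical
     * ConcurrentHashMap<String,Long>): computes the maximum value, or
     * null if the map is empty. The reducer (max) is commutative and
     * associative, as required.
     *
     *   Long maxValue = m.reduce(
     *       new ConcurrentHashMap.BiFun<String, Long, Long>() {
     *           public Long apply(String k, Long v) { return v; }
     *       },
     *       new ConcurrentHashMap.BiFun<Long, Long, Long>() {
     *           public Long apply(Long a, Long b) {
     *               return a.longValue() >= b.longValue() ? a : b;
     *           }
     *       });
     */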
3405
3406 /**
3407 * Returns the result of accumulating the given transformation
3408 * of all (key, value) pairs using the given reducer to
3409 * combine values, and the given basis as an identity value.
3410 *
3411 * @param transformer a function returning the transformation
3412 * for an element
3413 * @param basis the identity (initial default value) for the reduction
3414 * @param reducer a commutative associative combining function
3415 * @return the result of accumulating the given transformation
3416 * of all (key, value) pairs
3417 */
3418 public double reduceToDouble(ObjectByObjectToDouble<? super K, ? super V> transformer,
3419 double basis,
3420 DoubleByDoubleToDouble reducer) {
3421 return ForkJoinTasks.reduceToDouble
3422 (this, transformer, basis, reducer).invoke();
3423 }
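    /*
     * Usage sketch (illustrative; m is a hypothetical
     * ConcurrentHashMap<String,Long>): sums all values as doubles,
     * starting from the 0.0 basis.
     *
     *   double sum = m.reduceToDouble(
     *       new ConcurrentHashMap.ObjectByObjectToDouble<String, Long>() {
     *           public double apply(String k, Long v) { return v.doubleValue(); }
     *       },
     *       0.0,
     *       new ConcurrentHashMap.DoubleByDoubleToDouble() {
     *           public double apply(double a, double b) { return a + b; }
     *       });
     */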
3424
3425 /**
3426 * Returns the result of accumulating the given transformation
3427 * of all (key, value) pairs using the given reducer to
3428 * combine values, and the given basis as an identity value.
3429 *
3430 * @param transformer a function returning the transformation
3431 * for an element
3432 * @param basis the identity (initial default value) for the reduction
3433 * @param reducer a commutative associative combining function
3434 * @return the result of accumulating the given transformation
3435 * of all (key, value) pairs
3436 */
3437 public long reduceToLong(ObjectByObjectToLong<? super K, ? super V> transformer,
3438 long basis,
3439 LongByLongToLong reducer) {
3440 return ForkJoinTasks.reduceToLong
3441 (this, transformer, basis, reducer).invoke();
3442 }
3443
3444 /**
3445 * Returns the result of accumulating the given transformation
3446 * of all (key, value) pairs using the given reducer to
3447 * combine values, and the given basis as an identity value.
3448 *
3449 * @param transformer a function returning the transformation
3450 * for an element
3451 * @param basis the identity (initial default value) for the reduction
3452 * @param reducer a commutative associative combining function
3453 * @return the result of accumulating the given transformation
3454 * of all (key, value) pairs
3455 */
3456 public int reduceToInt(ObjectByObjectToInt<? super K, ? super V> transformer,
3457 int basis,
3458 IntByIntToInt reducer) {
3459 return ForkJoinTasks.reduceToInt
3460 (this, transformer, basis, reducer).invoke();
3461 }
3462
3463 /**
3464 * Performs the given action for each key.
3465 *
3466 * @param action the action
3467 */
3468 public void forEachKey(Action<K> action) {
3469 ForkJoinTasks.forEachKey
3470 (this, action).invoke();
3471 }
3472
3473 /**
3474 * Performs the given action for each non-null transformation
3475 * of each key.
3476 *
3477 * @param transformer a function returning the transformation
3478 * for an element, or null if there is no transformation (in
3479 * which case the action is not applied).
3480 * @param action the action
3481 */
3482 public <U> void forEachKey(Fun<? super K, ? extends U> transformer,
3483 Action<U> action) {
3484 ForkJoinTasks.forEachKey
3485 (this, transformer, action).invoke();
3486 }
3487
3488 /**
3489 * Returns a non-null result from applying the given search
3490 * function on each key, or null if none. Upon success,
3491 * further element processing is suppressed and the results of
3492 * any other parallel invocations of the search function are
3493 * ignored.
3494 *
3495 * @param searchFunction a function returning a non-null
3496 * result on success, else null
3497 * @return a non-null result from applying the given search
3498 * function on each key, or null if none
3499 */
3500 public <U> U searchKeys(Fun<? super K, ? extends U> searchFunction) {
3501 return ForkJoinTasks.searchKeys
3502 (this, searchFunction).invoke();
3503 }
3504
3505 /**
3506 * Returns the result of accumulating all keys using the given
3507 * reducer to combine values, or null if none.
3508 *
3509 * @param reducer a commutative associative combining function
3510 * @return the result of accumulating all keys using the given
3511 * reducer to combine values, or null if none
3512 */
3513 public K reduceKeys(BiFun<? super K, ? super K, ? extends K> reducer) {
3514 return ForkJoinTasks.reduceKeys
3515 (this, reducer).invoke();
3516 }
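    /*
     * Usage sketch (illustrative; m is a hypothetical
     * ConcurrentHashMap<String,Long>): returns the lexicographically
     * smallest key, or null if the map is empty.
     *
     *   String minKey = m.reduceKeys(
     *       new ConcurrentHashMap.BiFun<String, String, String>() {
     *           public String apply(String a, String b) {
     *               return a.compareTo(b) <= 0 ? a : b;
     *           }
     *       });
     */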
3517
3518 /**
3519 * Returns the result of accumulating the given transformation
3520 * of all keys using the given reducer to combine values, or
3521 * null if none.
3522 *
3523 * @param transformer a function returning the transformation
3524 * for an element, or null if there is no transformation (in
3525 * which case it is not combined).
3526 * @param reducer a commutative associative combining function
3527 * @return the result of accumulating the given transformation
3528 * of all keys
3529 */
3530 public <U> U reduceKeys(Fun<? super K, ? extends U> transformer,
3531 BiFun<? super U, ? super U, ? extends U> reducer) {
3532 return ForkJoinTasks.reduceKeys
3533 (this, transformer, reducer).invoke();
3534 }
3535
3536 /**
3537 * Returns the result of accumulating the given transformation
3538 * of all keys using the given reducer to combine values, and
3539 * the given basis as an identity value.
3540 *
3541 * @param transformer a function returning the transformation
3542 * for an element
3543 * @param basis the identity (initial default value) for the reduction
3544 * @param reducer a commutative associative combining function
3545 * @return the result of accumulating the given transformation
3546 * of all keys
3547 */
3548 public double reduceKeysToDouble(ObjectToDouble<? super K> transformer,
3549 double basis,
3550 DoubleByDoubleToDouble reducer) {
3551 return ForkJoinTasks.reduceKeysToDouble
3552 (this, transformer, basis, reducer).invoke();
3553 }
3554
3555 /**
3556 * Returns the result of accumulating the given transformation
3557 * of all keys using the given reducer to combine values, and
3558 * the given basis as an identity value.
3559 *
3560 * @param transformer a function returning the transformation
3561 * for an element
3562 * @param basis the identity (initial default value) for the reduction
3563 * @param reducer a commutative associative combining function
3564 * @return the result of accumulating the given transformation
3565 * of all keys
3566 */
3567 public long reduceKeysToLong(ObjectToLong<? super K> transformer,
3568 long basis,
3569 LongByLongToLong reducer) {
3570 return ForkJoinTasks.reduceKeysToLong
3571 (this, transformer, basis, reducer).invoke();
3572 }
3573
3574 /**
3575 * Returns the result of accumulating the given transformation
3576 * of all keys using the given reducer to combine values, and
3577 * the given basis as an identity value.
3578 *
3579 * @param transformer a function returning the transformation
3580 * for an element
3581 * @param basis the identity (initial default value) for the reduction
3582 * @param reducer a commutative associative combining function
3583 * @return the result of accumulating the given transformation
3584 * of all keys
3585 */
3586 public int reduceKeysToInt(ObjectToInt<? super K> transformer,
3587 int basis,
3588 IntByIntToInt reducer) {
3589 return ForkJoinTasks.reduceKeysToInt
3590 (this, transformer, basis, reducer).invoke();
3591 }
3592
3593 /**
3594 * Performs the given action for each value.
3595 *
3596 * @param action the action
3597 */
3598 public void forEachValue(Action<V> action) {
3599 ForkJoinTasks.forEachValue
3600 (this, action).invoke();
3601 }
3602
3603 /**
3604 * Performs the given action for each non-null transformation
3605 * of each value.
3606 *
3607 * @param transformer a function returning the transformation
3608 * for an element, or null if there is no transformation (in
3609 * which case the action is not applied).
     * @param action the action
3610 */
3611 public <U> void forEachValue(Fun<? super V, ? extends U> transformer,
3612 Action<U> action) {
3613 ForkJoinTasks.forEachValue
3614 (this, transformer, action).invoke();
3615 }
3616
3617 /**
3618 * Returns a non-null result from applying the given search
3619 * function on each value, or null if none. Upon success,
3620 * further element processing is suppressed and the results of
3621 * any other parallel invocations of the search function are
3622 * ignored.
3623 *
3624 * @param searchFunction a function returning a non-null
3625 * result on success, else null
3626 * @return a non-null result from applying the given search
3627 * function on each value, or null if none
3628 *
3629 */
3630 public <U> U searchValues(Fun<? super V, ? extends U> searchFunction) {
3631 return ForkJoinTasks.searchValues
3632 (this, searchFunction).invoke();
3633 }
3634
3635 /**
3636 * Returns the result of accumulating all values using the
3637 * given reducer to combine values, or null if none.
3638 *
3639 * @param reducer a commutative associative combining function
3640 * @return the result of accumulating all values
3641 */
3642 public V reduceValues(BiFun<? super V, ? super V, ? extends V> reducer) {
3643 return ForkJoinTasks.reduceValues
3644 (this, reducer).invoke();
3645 }
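    /*
     * Usage sketch (illustrative; m is a hypothetical
     * ConcurrentHashMap<String,Long>): sums the values, or returns
     * null if the map is empty.
     *
     *   Long total = m.reduceValues(
     *       new ConcurrentHashMap.BiFun<Long, Long, Long>() {
     *           public Long apply(Long a, Long b) {
     *               return Long.valueOf(a.longValue() + b.longValue());
     *           }
     *       });
     */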
3646
3647 /**
3648 * Returns the result of accumulating the given transformation
3649 * of all values using the given reducer to combine values, or
3650 * null if none.
3651 *
3652 * @param transformer a function returning the transformation
3653 * for an element, or null if there is no transformation (in
3654 * which case it is not combined).
3655 * @param reducer a commutative associative combining function
3656 * @return the result of accumulating the given transformation
3657 * of all values
3658 */
3659 public <U> U reduceValues(Fun<? super V, ? extends U> transformer,
3660 BiFun<? super U, ? super U, ? extends U> reducer) {
3661 return ForkJoinTasks.reduceValues
3662 (this, transformer, reducer).invoke();
3663 }
3664
3665 /**
3666 * Returns the result of accumulating the given transformation
3667 * of all values using the given reducer to combine values,
3668 * and the given basis as an identity value.
3669 *
3670 * @param transformer a function returning the transformation
3671 * for an element
3672 * @param basis the identity (initial default value) for the reduction
3673 * @param reducer a commutative associative combining function
3674 * @return the result of accumulating the given transformation
3675 * of all values
3676 */
3677 public double reduceValuesToDouble(ObjectToDouble<? super V> transformer,
3678 double basis,
3679 DoubleByDoubleToDouble reducer) {
3680 return ForkJoinTasks.reduceValuesToDouble
3681 (this, transformer, basis, reducer).invoke();
3682 }
3683
3684 /**
3685 * Returns the result of accumulating the given transformation
3686 * of all values using the given reducer to combine values,
3687 * and the given basis as an identity value.
3688 *
3689 * @param transformer a function returning the transformation
3690 * for an element
3691 * @param basis the identity (initial default value) for the reduction
3692 * @param reducer a commutative associative combining function
3693 * @return the result of accumulating the given transformation
3694 * of all values
3695 */
3696 public long reduceValuesToLong(ObjectToLong<? super V> transformer,
3697 long basis,
3698 LongByLongToLong reducer) {
3699 return ForkJoinTasks.reduceValuesToLong
3700 (this, transformer, basis, reducer).invoke();
3701 }
3702
3703 /**
3704 * Returns the result of accumulating the given transformation
3705 * of all values using the given reducer to combine values,
3706 * and the given basis as an identity value.
3707 *
3708 * @param transformer a function returning the transformation
3709 * for an element
3710 * @param basis the identity (initial default value) for the reduction
3711 * @param reducer a commutative associative combining function
3712 * @return the result of accumulating the given transformation
3713 * of all values
3714 */
3715 public int reduceValuesToInt(ObjectToInt<? super V> transformer,
3716 int basis,
3717 IntByIntToInt reducer) {
3718 return ForkJoinTasks.reduceValuesToInt
3719 (this, transformer, basis, reducer).invoke();
3720 }
3721
3722 /**
3723 * Performs the given action for each entry.
3724 *
3725 * @param action the action
3726 */
3727 public void forEachEntry(Action<Map.Entry<K,V>> action) {
3728 ForkJoinTasks.forEachEntry
3729 (this, action).invoke();
3730 }
3731
3732 /**
3733 * Performs the given action for each non-null transformation
3734 * of each entry.
3735 *
3736 * @param transformer a function returning the transformation
3737 * for an element, or null if there is no transformation (in
3738 * which case the action is not applied).
3739 * @param action the action
3740 */
3741 public <U> void forEachEntry(Fun<Map.Entry<K,V>, ? extends U> transformer,
3742 Action<U> action) {
3743 ForkJoinTasks.forEachEntry
3744 (this, transformer, action).invoke();
3745 }
3746
3747 /**
3748 * Returns a non-null result from applying the given search
3749 * function on each entry, or null if none. Upon success,
3750 * further element processing is suppressed and the results of
3751 * any other parallel invocations of the search function are
3752 * ignored.
3753 *
3754 * @param searchFunction a function returning a non-null
3755 * result on success, else null
3756 * @return a non-null result from applying the given search
3757 * function on each entry, or null if none
3758 */
3759 public <U> U searchEntries(Fun<Map.Entry<K,V>, ? extends U> searchFunction) {
3760 return ForkJoinTasks.searchEntries
3761 (this, searchFunction).invoke();
3762 }
3763
3764 /**
3765 * Returns the result of accumulating all entries using the
3766 * given reducer to combine values, or null if none.
3767 *
3768 * @param reducer a commutative associative combining function
3769 * @return the result of accumulating all entries
3770 */
3771 public Map.Entry<K,V> reduceEntries(BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
3772 return ForkJoinTasks.reduceEntries
3773 (this, reducer).invoke();
3774 }
3775
3776 /**
3777 * Returns the result of accumulating the given transformation
3778 * of all entries using the given reducer to combine values,
3779 * or null if none.
3780 *
3781 * @param transformer a function returning the transformation
3782 * for an element, or null if there is no transformation (in
3783 * which case it is not combined).
3784 * @param reducer a commutative associative combining function
3785 * @return the result of accumulating the given transformation
3786 * of all entries
3787 */
3788 public <U> U reduceEntries(Fun<Map.Entry<K,V>, ? extends U> transformer,
3789 BiFun<? super U, ? super U, ? extends U> reducer) {
3790 return ForkJoinTasks.reduceEntries
3791 (this, transformer, reducer).invoke();
3792 }
3793
3794 /**
3795 * Returns the result of accumulating the given transformation
3796 * of all entries using the given reducer to combine values,
3797 * and the given basis as an identity value.
3798 *
3799 * @param transformer a function returning the transformation
3800 * for an element
3801 * @param basis the identity (initial default value) for the reduction
3802 * @param reducer a commutative associative combining function
3803 * @return the result of accumulating the given transformation
3804 * of all entries
3805 */
3806 public double reduceEntriesToDouble(ObjectToDouble<Map.Entry<K,V>> transformer,
3807 double basis,
3808 DoubleByDoubleToDouble reducer) {
3809 return ForkJoinTasks.reduceEntriesToDouble
3810 (this, transformer, basis, reducer).invoke();
3811 }
3812
3813 /**
3814 * Returns the result of accumulating the given transformation
3815 * of all entries using the given reducer to combine values,
3816 * and the given basis as an identity value.
3817 *
3818 * @param transformer a function returning the transformation
3819 * for an element
3820 * @param basis the identity (initial default value) for the reduction
3821 * @param reducer a commutative associative combining function
3822 * @return the result of accumulating the given transformation
3823 * of all entries
3824 */
3825 public long reduceEntriesToLong(ObjectToLong<Map.Entry<K,V>> transformer,
3826 long basis,
3827 LongByLongToLong reducer) {
3828 return ForkJoinTasks.reduceEntriesToLong
3829 (this, transformer, basis, reducer).invoke();
3830 }
3831
3832 /**
3833 * Returns the result of accumulating the given transformation
3834 * of all entries using the given reducer to combine values,
3835 * and the given basis as an identity value.
3836 *
3837 * @param transformer a function returning the transformation
3838 * for an element
3839 * @param basis the identity (initial default value) for the reduction
3840 * @param reducer a commutative associative combining function
3841 * @return the result of accumulating the given transformation
3842 * of all entries
3843 */
3844 public int reduceEntriesToInt(ObjectToInt<Map.Entry<K,V>> transformer,
3845 int basis,
3846 IntByIntToInt reducer) {
3847 return ForkJoinTasks.reduceEntriesToInt
3848 (this, transformer, basis, reducer).invoke();
3849 }
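    /*
     * Usage sketch (illustrative; m is a hypothetical
     * ConcurrentHashMap<String,Long>): counts entries whose value
     * exceeds 100 by mapping each entry to 0 or 1 and summing.
     *
     *   int over = m.reduceEntriesToInt(
     *       new ConcurrentHashMap.ObjectToInt<Map.Entry<String, Long>>() {
     *           public int apply(Map.Entry<String, Long> e) {
     *               return e.getValue().longValue() > 100L ? 1 : 0;
     *           }
     *       },
     *       0,
     *       new ConcurrentHashMap.IntByIntToInt() {
     *           public int apply(int a, int b) { return a + b; }
     *       });
     */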
3850
3851 /* ----------------Views -------------- */
3852
3853 /**
3854 * Base class for views.
3855 */
3856 abstract static class CHMView<K, V> {
3857 final ConcurrentHashMap<K, V> map;
3858 CHMView(ConcurrentHashMap<K, V> map) { this.map = map; }
3859
3860 /**
3861 * Returns the map backing this view.
3862 *
3863 * @return the map backing this view
3864 */
3865 public ConcurrentHashMap<K,V> getMap() { return map; }
3866
3867 public final int size() { return map.size(); }
3868 public final boolean isEmpty() { return map.isEmpty(); }
3869 public final void clear() { map.clear(); }
3870
3871 // implementations below rely on concrete classes supplying these
3872 public abstract Iterator<?> iterator();
3873 public abstract boolean contains(Object o);
3874 public abstract boolean remove(Object o);
3875
3876 private static final String oomeMsg = "Required array size too large";
3877
3878 public final Object[] toArray() {
3879 long sz = map.mappingCount();
3880 if (sz > (long)(MAX_ARRAY_SIZE))
3881 throw new OutOfMemoryError(oomeMsg);
3882 int n = (int)sz;
3883 Object[] r = new Object[n];
3884 int i = 0;
3885 Iterator<?> it = iterator();
3886 while (it.hasNext()) {
3887 if (i == n) {
3888 if (n >= MAX_ARRAY_SIZE)
3889 throw new OutOfMemoryError(oomeMsg);
3890 if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
3891 n = MAX_ARRAY_SIZE;
3892 else
3893 n += (n >>> 1) + 1;
3894 r = Arrays.copyOf(r, n);
3895 }
3896 r[i++] = it.next();
3897 }
3898 return (i == n) ? r : Arrays.copyOf(r, i);
3899 }
3900
3901 @SuppressWarnings("unchecked") public final <T> T[] toArray(T[] a) {
3902 long sz = map.mappingCount();
3903 if (sz > (long)(MAX_ARRAY_SIZE))
3904 throw new OutOfMemoryError(oomeMsg);
3905 int m = (int)sz;
3906 T[] r = (a.length >= m) ? a :
3907 (T[])java.lang.reflect.Array
3908 .newInstance(a.getClass().getComponentType(), m);
3909 int n = r.length;
3910 int i = 0;
3911 Iterator<?> it = iterator();
3912 while (it.hasNext()) {
3913 if (i == n) {
3914 if (n >= MAX_ARRAY_SIZE)
3915 throw new OutOfMemoryError(oomeMsg);
3916 if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
3917 n = MAX_ARRAY_SIZE;
3918 else
3919 n += (n >>> 1) + 1;
3920 r = Arrays.copyOf(r, n);
3921 }
3922 r[i++] = (T)it.next();
3923 }
3924 if (a == r && i < n) {
3925 r[i] = null; // null-terminate
3926 return r;
3927 }
3928 return (i == n) ? r : Arrays.copyOf(r, i);
3929 }
3930
3931 public final int hashCode() {
3932 int h = 0;
3933 for (Iterator<?> it = iterator(); it.hasNext();)
3934 h += it.next().hashCode();
3935 return h;
3936 }
3937
3938 public final String toString() {
3939 StringBuilder sb = new StringBuilder();
3940 sb.append('[');
3941 Iterator<?> it = iterator();
3942 if (it.hasNext()) {
3943 for (;;) {
3944 Object e = it.next();
3945 sb.append(e == this ? "(this Collection)" : e);
3946 if (!it.hasNext())
3947 break;
3948 sb.append(',').append(' ');
3949 }
3950 }
3951 return sb.append(']').toString();
3952 }
3953
3954 public final boolean containsAll(Collection<?> c) {
3955 if (c != this) {
3956 for (Iterator<?> it = c.iterator(); it.hasNext();) {
3957 Object e = it.next();
3958 if (e == null || !contains(e))
3959 return false;
3960 }
3961 }
3962 return true;
3963 }
3964
3965 public final boolean removeAll(Collection<?> c) {
3966 boolean modified = false;
3967 for (Iterator<?> it = iterator(); it.hasNext();) {
3968 if (c.contains(it.next())) {
3969 it.remove();
3970 modified = true;
3971 }
3972 }
3973 return modified;
3974 }
3975
3976 public final boolean retainAll(Collection<?> c) {
3977 boolean modified = false;
3978 for (Iterator<?> it = iterator(); it.hasNext();) {
3979 if (!c.contains(it.next())) {
3980 it.remove();
3981 modified = true;
3982 }
3983 }
3984 return modified;
3985 }
3986
3987 }
3988
3989 /**
3990 * A view of a ConcurrentHashMap as a {@link Set} of keys, in
3991 * which additions may optionally be enabled by mapping to a
3992 * common value. This class cannot be directly instantiated. See
3993 * {@link #keySet}, {@link #keySet(Object)}, {@link #newKeySet()},
3994 * {@link #newKeySet(int)}.
3995 */
3996 public static class KeySetView<K,V> extends CHMView<K,V>
3997 implements Set<K>, java.io.Serializable {
3998 private static final long serialVersionUID = 7249069246763182397L;
3999 private final V value;
4000 KeySetView(ConcurrentHashMap<K, V> map, V value) { // non-public
4001 super(map);
4002 this.value = value;
4003 }
4004
4005 /**
4006 * Returns the default mapped value for additions,
4007 * or {@code null} if additions are not supported.
4008 *
4009 * @return the default mapped value for additions, or {@code null}
4010 * if not supported.
4011 */
4012 public V getMappedValue() { return value; }
4013
4014 // implement Set API
4015
4016 public boolean contains(Object o) { return map.containsKey(o); }
4017 public boolean remove(Object o) { return map.remove(o) != null; }
4018
4019 /**
4020 * Returns a "weakly consistent" iterator that will never
4021 * throw {@link ConcurrentModificationException}, and
4022 * guarantees to traverse elements as they existed upon
4023 * construction of the iterator, and may (but is not
4024 * guaranteed to) reflect any modifications subsequent to
4025 * construction.
4026 *
4027 * @return an iterator over the keys of this map
4028 */
4029 public Iterator<K> iterator() { return new KeyIterator<K,V>(map); }
4030 public boolean add(K e) {
4031 V v;
4032 if ((v = value) == null)
4033 throw new UnsupportedOperationException();
4034 if (e == null)
4035 throw new NullPointerException();
4036 return map.internalPut(e, v, true) == null;
4037 }
4038 public boolean addAll(Collection<? extends K> c) {
4039 boolean added = false;
4040 V v;
4041 if ((v = value) == null)
4042 throw new UnsupportedOperationException();
4043 for (K e : c) {
4044 if (e == null)
4045 throw new NullPointerException();
4046 if (map.internalPut(e, v, true) == null)
4047 added = true;
4048 }
4049 return added;
4050 }
4051 public boolean equals(Object o) {
4052 Set<?> c;
4053 return ((o instanceof Set) &&
4054 ((c = (Set<?>)o) == this ||
4055 (containsAll(c) && c.containsAll(this))));
4056 }
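        /*
         * Usage sketch (illustrative; relies on the keySet(V) view
         * factory linked from the class javadoc): a key-set view with a
         * default mapped value supports add(), which behaves like
         * putIfAbsent on the backing map.
         *
         *   ConcurrentHashMap<String, Boolean> m =
         *       new ConcurrentHashMap<String, Boolean>();
         *   Set<String> s = m.keySet(Boolean.TRUE);
         *   s.add("alpha");                       // maps "alpha" to TRUE if absent
         *   boolean b = m.containsKey("alpha");   // true
         */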
4057
4058 /**
4059 * Performs the given action for each key.
4060 *
4061 * @param action the action
4062 */
4063 public void forEach(Action<K> action) {
4064 ForkJoinTasks.forEachKey
4065 (map, action).invoke();
4066 }
4067
4068 /**
4069 * Performs the given action for each non-null transformation
4070 * of each key.
4071 *
4072 * @param transformer a function returning the transformation
4073 * for an element, or null if there is no transformation (in
4074 * which case the action is not applied).
4075 * @param action the action
4076 */
4077 public <U> void forEach(Fun<? super K, ? extends U> transformer,
4078 Action<U> action) {
4079 ForkJoinTasks.forEachKey
4080 (map, transformer, action).invoke();
4081 }
4082
4083 /**
4084 * Returns a non-null result from applying the given search
4085 * function on each key, or null if none. Upon success,
4086 * further element processing is suppressed and the results of
4087 * any other parallel invocations of the search function are
4088 * ignored.
4089 *
4090 * @param searchFunction a function returning a non-null
4091 * result on success, else null
4092 * @return a non-null result from applying the given search
4093 * function on each key, or null if none
4094 */
4095 public <U> U search(Fun<? super K, ? extends U> searchFunction) {
4096 return ForkJoinTasks.searchKeys
4097 (map, searchFunction).invoke();
4098 }
4099
4100 /**
4101 * Returns the result of accumulating all keys using the given
4102 * reducer to combine values, or null if none.
4103 *
4104 * @param reducer a commutative associative combining function
4105 * @return the result of accumulating all keys using the given
4106 * reducer to combine values, or null if none
4107 */
4108 public K reduce(BiFun<? super K, ? super K, ? extends K> reducer) {
4109 return ForkJoinTasks.reduceKeys
4110 (map, reducer).invoke();
4111 }
4112
4113 /**
4114 * Returns the result of accumulating the given transformation
4115 * of all keys using the given reducer to combine values, and
4116 * the given basis as an identity value.
4117 *
4118 * @param transformer a function returning the transformation
4119 * for an element
4120 * @param basis the identity (initial default value) for the reduction
4121 * @param reducer a commutative associative combining function
4122 * @return the result of accumulating the given transformation
4123 * of all keys
4124 */
4125 public double reduceToDouble(ObjectToDouble<? super K> transformer,
4126 double basis,
4127 DoubleByDoubleToDouble reducer) {
4128 return ForkJoinTasks.reduceKeysToDouble
4129 (map, transformer, basis, reducer).invoke();
4130 }
4131
4132 /**
4133 * Returns the result of accumulating the given transformation
4134 * of all keys using the given reducer to combine values, and
4135 * the given basis as an identity value.
4136 *
4137 * @param transformer a function returning the transformation
4138 * for an element
4139 * @param basis the identity (initial default value) for the reduction
4140 * @param reducer a commutative associative combining function
4141 * @return the result of accumulating the given transformation
4142 * of all keys
4143 */
4144 public long reduceToLong(ObjectToLong<? super K> transformer,
4145 long basis,
4146 LongByLongToLong reducer) {
4147 return ForkJoinTasks.reduceKeysToLong
4148 (map, transformer, basis, reducer).invoke();
4149 }
4150
4151 /**
4152 * Returns the result of accumulating the given transformation
4153 * of all keys using the given reducer to combine values, and
4154 * the given basis as an identity value.
4155 *
4156 * @param transformer a function returning the transformation
4157 * for an element
4158 * @param basis the identity (initial default value) for the reduction
4159 * @param reducer a commutative associative combining function
4160 * @return the result of accumulating the given transformation
4161 * of all keys
4162 */
4163 public int reduceToInt(ObjectToInt<? super K> transformer,
4164 int basis,
4165 IntByIntToInt reducer) {
4166 return ForkJoinTasks.reduceKeysToInt
4167 (map, transformer, basis, reducer).invoke();
4168 }
4169
4170 }
4171
4172 /**
4173 * A view of a ConcurrentHashMap as a {@link Collection} of
4174 * values, in which additions are disabled. This class cannot be
4175 * directly instantiated. See {@link #values}.
4176 *
4177 * <p>The view's {@code iterator} is a "weakly consistent" iterator
4178 * that will never throw {@link ConcurrentModificationException},
4179 * and guarantees to traverse elements as they existed upon
4180 * construction of the iterator, and may (but is not guaranteed to)
4181 * reflect any modifications subsequent to construction.
4182 */
4183 public static final class ValuesView<K,V> extends CHMView<K,V>
4184 implements Collection<V> {
4185 ValuesView(ConcurrentHashMap<K, V> map) { super(map); }
4186 public final boolean contains(Object o) { return map.containsValue(o); }
4187 public final boolean remove(Object o) {
4188 if (o != null) {
4189 Iterator<V> it = new ValueIterator<K,V>(map);
4190 while (it.hasNext()) {
4191 if (o.equals(it.next())) {
4192 it.remove();
4193 return true;
4194 }
4195 }
4196 }
4197 return false;
4198 }
4199
4200 /**
4201 * Returns a "weakly consistent" iterator that will never
4202 * throw {@link ConcurrentModificationException}, and
4203 * guarantees to traverse elements as they existed upon
4204 * construction of the iterator, and may (but is not
4205 * guaranteed to) reflect any modifications subsequent to
4206 * construction.
4207 *
4208 * @return an iterator over the values of this map
4209 */
4210 public final Iterator<V> iterator() {
4211 return new ValueIterator<K,V>(map);
4212 }
4213 public final boolean add(V e) {
4214 throw new UnsupportedOperationException();
4215 }
4216 public final boolean addAll(Collection<? extends V> c) {
4217 throw new UnsupportedOperationException();
4218 }
4219
4220 /**
4221 * Performs the given action for each value.
4222 *
4223 * @param action the action
4224 */
4225 public void forEach(Action<V> action) {
4226 ForkJoinTasks.forEachValue
4227 (map, action).invoke();
4228 }
4229
4230 /**
4231 * Performs the given action for each non-null transformation
4232 * of each value.
4233 *
4234 * @param transformer a function returning the transformation
4235 * for an element, or null if there is no transformation (in
4236 * which case the action is not applied).
     * @param action the action
4237 */
4238 public <U> void forEach(Fun<? super V, ? extends U> transformer,
4239 Action<U> action) {
4240 ForkJoinTasks.forEachValue
4241 (map, transformer, action).invoke();
4242 }
4243
4244 /**
4245 * Returns a non-null result from applying the given search
4246 * function on each value, or null if none. Upon success,
4247 * further element processing is suppressed and the results of
4248 * any other parallel invocations of the search function are
4249 * ignored.
4250 *
4251 * @param searchFunction a function returning a non-null
4252 * result on success, else null
4253 * @return a non-null result from applying the given search
4254 * function on each value, or null if none
4255 *
4256 */
4257 public <U> U search(Fun<? super V, ? extends U> searchFunction) {
4258 return ForkJoinTasks.searchValues
4259 (map, searchFunction).invoke();
4260 }
4261
4262 /**
4263 * Returns the result of accumulating all values using the
4264 * given reducer to combine values, or null if none.
4265 *
4266 * @param reducer a commutative associative combining function
4267 * @return the result of accumulating all values
4268 */
4269 public V reduce(BiFun<? super V, ? super V, ? extends V> reducer) {
4270 return ForkJoinTasks.reduceValues
4271 (map, reducer).invoke();
4272 }
4273
4274 /**
4275 * Returns the result of accumulating the given transformation
4276 * of all values using the given reducer to combine values, or
4277 * null if none.
4278 *
4279 * @param transformer a function returning the transformation
4280 * for an element, or null if there is no transformation (in
4281 * which case it is not combined).
4282 * @param reducer a commutative associative combining function
4283 * @return the result of accumulating the given transformation
4284 * of all values
4285 */
4286 public <U> U reduce(Fun<? super V, ? extends U> transformer,
4287 BiFun<? super U, ? super U, ? extends U> reducer) {
4288 return ForkJoinTasks.reduceValues
4289 (map, transformer, reducer).invoke();
4290 }
4291
4292 /**
4293 * Returns the result of accumulating the given transformation
4294 * of all values using the given reducer to combine values,
4295 * and the given basis as an identity value.
4296 *
4297 * @param transformer a function returning the transformation
4298 * for an element
4299 * @param basis the identity (initial default value) for the reduction
4300 * @param reducer a commutative associative combining function
4301 * @return the result of accumulating the given transformation
4302 * of all values
4303 */
4304 public double reduceToDouble(ObjectToDouble<? super V> transformer,
4305 double basis,
4306 DoubleByDoubleToDouble reducer) {
4307 return ForkJoinTasks.reduceValuesToDouble
4308 (map, transformer, basis, reducer).invoke();
4309 }
4310
4311 /**
4312 * Returns the result of accumulating the given transformation
4313 * of all values using the given reducer to combine values,
4314 * and the given basis as an identity value.
4315 *
4316 * @param transformer a function returning the transformation
4317 * for an element
4318 * @param basis the identity (initial default value) for the reduction
4319 * @param reducer a commutative associative combining function
4320 * @return the result of accumulating the given transformation
4321 * of all values
4322 */
4323 public long reduceToLong(ObjectToLong<? super V> transformer,
4324 long basis,
4325 LongByLongToLong reducer) {
4326 return ForkJoinTasks.reduceValuesToLong
4327 (map, transformer, basis, reducer).invoke();
4328 }
4329
4330 /**
4331 * Returns the result of accumulating the given transformation
4332 * of all values using the given reducer to combine values,
4333 * and the given basis as an identity value.
4334 *
4335 * @param transformer a function returning the transformation
4336 * for an element
4337 * @param basis the identity (initial default value) for the reduction
4338 * @param reducer a commutative associative combining function
4339 * @return the result of accumulating the given transformation
4340 * of all values
4341 */
4342 public int reduceToInt(ObjectToInt<? super V> transformer,
4343 int basis,
4344 IntByIntToInt reducer) {
4345 return ForkJoinTasks.reduceValuesToInt
4346 (map, transformer, basis, reducer).invoke();
4347 }
4348
4349 }
4350
4351 /**
4352 * A view of a ConcurrentHashMap as a {@link Set} of (key, value)
4353 * entries. This class cannot be directly instantiated. See
4354 * {@link #entrySet}.
4355 */
4356 public static final class EntrySetView<K,V> extends CHMView<K,V>
4357 implements Set<Map.Entry<K,V>> {
4358 EntrySetView(ConcurrentHashMap<K, V> map) { super(map); }
4359 public final boolean contains(Object o) {
4360 Object k, v, r; Map.Entry<?,?> e;
4361 return ((o instanceof Map.Entry) &&
4362 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4363 (r = map.get(k)) != null &&
4364 (v = e.getValue()) != null &&
4365 (v == r || v.equals(r)));
4366 }
4367 public final boolean remove(Object o) {
4368 Object k, v; Map.Entry<?,?> e;
4369 return ((o instanceof Map.Entry) &&
4370 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4371 (v = e.getValue()) != null &&
4372 map.remove(k, v));
4373 }
4374
4375 /**
4376 * Returns a "weakly consistent" iterator that will never
4377 * throw {@link ConcurrentModificationException}, and
4378 * guarantees to traverse elements as they existed upon
4379 * construction of the iterator, and may (but is not
4380 * guaranteed to) reflect any modifications subsequent to
4381 * construction.
4382 *
4383 * @return an iterator over the entries of this map
4384 */
4385 public final Iterator<Map.Entry<K,V>> iterator() {
4386 return new EntryIterator<K,V>(map);
4387 }
4388
4389 public final boolean add(Entry<K,V> e) {
4390 K key = e.getKey();
4391 V value = e.getValue();
4392 if (key == null || value == null)
4393 throw new NullPointerException();
4394 return map.internalPut(key, value, false) == null;
4395 }
4396 public final boolean addAll(Collection<? extends Entry<K,V>> c) {
4397 boolean added = false;
4398 for (Entry<K,V> e : c) {
4399 if (add(e))
4400 added = true;
4401 }
4402 return added;
4403 }
4404 public boolean equals(Object o) {
4405 Set<?> c;
4406 return ((o instanceof Set) &&
4407 ((c = (Set<?>)o) == this ||
4408 (containsAll(c) && c.containsAll(this))));
4409 }
4410
4411 /**
4412 * Performs the given action for each entry.
4413 *
4414 * @param action the action
4415 */
4416 public void forEach(Action<Map.Entry<K,V>> action) {
4417 ForkJoinTasks.forEachEntry
4418 (map, action).invoke();
4419 }
4420
4421 /**
4422 * Performs the given action for each non-null transformation
4423 * of each entry.
4424 *
4425 * @param transformer a function returning the transformation
4426 * for an element, or null if there is no transformation (in
4427 * which case the action is not applied).
4428 * @param action the action
4429 */
4430 public <U> void forEach(Fun<Map.Entry<K,V>, ? extends U> transformer,
4431 Action<U> action) {
4432 ForkJoinTasks.forEachEntry
4433 (map, transformer, action).invoke();
4434 }
4435
4436 /**
4437 * Returns a non-null result from applying the given search
4438 * function on each entry, or null if none. Upon success,
4439 * further element processing is suppressed and the results of
4440 * any other parallel invocations of the search function are
4441 * ignored.
4442 *
4443 * @param searchFunction a function returning a non-null
4444 * result on success, else null
4445 * @return a non-null result from applying the given search
4446 * function on each entry, or null if none
4447 */
4448 public <U> U search(Fun<Map.Entry<K,V>, ? extends U> searchFunction) {
4449 return ForkJoinTasks.searchEntries
4450 (map, searchFunction).invoke();
4451 }
4452
4453 /**
4454 * Returns the result of accumulating all entries using the
4455 * given reducer to combine values, or null if none.
4456 *
4457 * @param reducer a commutative associative combining function
4458 * @return the result of accumulating all entries
4459 */
4460 public Map.Entry<K,V> reduce(BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4461 return ForkJoinTasks.reduceEntries
4462 (map, reducer).invoke();
4463 }
4464
4465 /**
4466 * Returns the result of accumulating the given transformation
4467 * of all entries using the given reducer to combine values,
4468 * or null if none.
4469 *
4470 * @param transformer a function returning the transformation
4471 * for an element, or null if there is no transformation (in
4472 * which case it is not combined).
4473 * @param reducer a commutative associative combining function
4474 * @return the result of accumulating the given transformation
4475 * of all entries
4476 */
4477 public <U> U reduce(Fun<Map.Entry<K,V>, ? extends U> transformer,
4478 BiFun<? super U, ? super U, ? extends U> reducer) {
4479 return ForkJoinTasks.reduceEntries
4480 (map, transformer, reducer).invoke();
4481 }
4482
4483 /**
4484 * Returns the result of accumulating the given transformation
4485 * of all entries using the given reducer to combine values,
4486 * and the given basis as an identity value.
4487 *
4488 * @param transformer a function returning the transformation
4489 * for an element
4490 * @param basis the identity (initial default value) for the reduction
4491 * @param reducer a commutative associative combining function
4492 * @return the result of accumulating the given transformation
4493 * of all entries
4494 */
4495 public double reduceToDouble(ObjectToDouble<Map.Entry<K,V>> transformer,
4496 double basis,
4497 DoubleByDoubleToDouble reducer) {
4498 return ForkJoinTasks.reduceEntriesToDouble
4499 (map, transformer, basis, reducer).invoke();
4500 }
4501
4502 /**
4503 * Returns the result of accumulating the given transformation
4504 * of all entries using the given reducer to combine values,
4505 * and the given basis as an identity value.
4506 *
4507 * @param transformer a function returning the transformation
4508 * for an element
4509 * @param basis the identity (initial default value) for the reduction
4510 * @param reducer a commutative associative combining function
4511 * @return the result of accumulating the given transformation
4512 * of all entries
4513 */
4514 public long reduceToLong(ObjectToLong<Map.Entry<K,V>> transformer,
4515 long basis,
4516 LongByLongToLong reducer) {
4517 return ForkJoinTasks.reduceEntriesToLong
4518 (map, transformer, basis, reducer).invoke();
4519 }
4520
4521 /**
4522 * Returns the result of accumulating the given transformation
4523 * of all entries using the given reducer to combine values,
4524 * and the given basis as an identity value.
4525 *
4526 * @param transformer a function returning the transformation
4527 * for an element
4528 * @param basis the identity (initial default value) for the reduction
4529 * @param reducer a commutative associative combining function
4530 * @return the result of accumulating the given transformation
4531 * of all entries
4532 */
4533 public int reduceToInt(ObjectToInt<Map.Entry<K,V>> transformer,
4534 int basis,
4535 IntByIntToInt reducer) {
4536 return ForkJoinTasks.reduceEntriesToInt
4537 (map, transformer, basis, reducer).invoke();
4538 }
4539
4540 }
4541
4542 // ---------------------------------------------------------------------
4543
4544 /**
4545 * Predefined tasks for performing bulk parallel operations on
4546 * ConcurrentHashMaps. These tasks follow the forms and rules used
4547 * for bulk operations. Each method has the same name, but returns
4548 * a task rather than invoking it. These methods may be useful in
4549 * custom applications such as submitting a task without waiting
4550 * for completion, using a custom pool, or combining with other
4551 * tasks.
4552 */
4553 public static class ForkJoinTasks {
4554 private ForkJoinTasks() {}
4555
4556 /**
4557 * Returns a task that when invoked, performs the given
4558 * action for each (key, value).
4559 *
4560 * @param map the map
4561 * @param action the action
4562 * @return the task
4563 */
4564 public static <K,V> ForkJoinTask<Void> forEach
4565 (ConcurrentHashMap<K,V> map,
4566 BiAction<K,V> action) {
4567 if (action == null) throw new NullPointerException();
4568 return new ForEachMappingTask<K,V>(map, null, -1, action);
4569 }
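        /*
         * Usage sketch (illustrative; map is a hypothetical
         * ConcurrentHashMap<String,Long>): the returned task is not run
         * until invoked, so it can be submitted to a pool of the
         * caller's choice.
         *
         *   ForkJoinPool pool = new ForkJoinPool();
         *   ForkJoinTask<Void> task = ConcurrentHashMap.ForkJoinTasks.forEach(
         *       map,
         *       new ConcurrentHashMap.BiAction<String, Long>() {
         *           public void apply(String k, Long v) {
         *               System.out.println(k + "=" + v);
         *           }
         *       });
         *   pool.invoke(task);
         */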
4570
4571 /**
4572 * Returns a task that when invoked, performs the given
4573 * action for each non-null transformation of each (key, value).
4574 *
4575 * @param map the map
4576 * @param transformer a function returning the transformation
4577 * for an element, or null if there is no transformation (in
4578 * which case the action is not applied)
4579 * @param action the action
4580 * @return the task
4581 */
4582 public static <K,V,U> ForkJoinTask<Void> forEach
4583 (ConcurrentHashMap<K,V> map,
4584 BiFun<? super K, ? super V, ? extends U> transformer,
4585 Action<U> action) {
4586 if (transformer == null || action == null)
4587 throw new NullPointerException();
4588 return new ForEachTransformedMappingTask<K,V,U>
4589 (map, null, -1, transformer, action);
4590 }
4591
4592 /**
4593 * Returns a task that when invoked, returns a non-null result
4594 * from applying the given search function on each (key,
4595 * value), or null if none. Upon success, further element
4596 * processing is suppressed and the results of any other
4597 * parallel invocations of the search function are ignored.
4598 *
4599 * @param map the map
4600 * @param searchFunction a function returning a non-null
4601 * result on success, else null
4602 * @return the task
4603 */
4604 public static <K,V,U> ForkJoinTask<U> search
4605 (ConcurrentHashMap<K,V> map,
4606 BiFun<? super K, ? super V, ? extends U> searchFunction) {
4607 if (searchFunction == null) throw new NullPointerException();
4608 return new SearchMappingsTask<K,V,U>
4609 (map, null, -1, searchFunction,
4610 new AtomicReference<U>());
4611 }
4612
4613 /**
4614 * Returns a task that when invoked, returns the result of
4615 * accumulating the given transformation of all (key, value) pairs
4616 * using the given reducer to combine values, or null if none.
4617 *
4618 * @param map the map
4619 * @param transformer a function returning the transformation
4620 * for an element, or null if there is no transformation (in
4621 * which case it is not combined).
4622 * @param reducer a commutative associative combining function
4623 * @return the task
4624 */
4625 public static <K,V,U> ForkJoinTask<U> reduce
4626 (ConcurrentHashMap<K,V> map,
4627 BiFun<? super K, ? super V, ? extends U> transformer,
4628 BiFun<? super U, ? super U, ? extends U> reducer) {
4629 if (transformer == null || reducer == null)
4630 throw new NullPointerException();
4631 return new MapReduceMappingsTask<K,V,U>
4632 (map, null, -1, null, transformer, reducer);
4633 }
4634
4635 /**
4636 * Returns a task that when invoked, returns the result of
4637 * accumulating the given transformation of all (key, value) pairs
4638 * using the given reducer to combine values, and the given
4639 * basis as an identity value.
4640 *
4641 * @param map the map
4642 * @param transformer a function returning the transformation
4643 * for an element
4644 * @param basis the identity (initial default value) for the reduction
4645 * @param reducer a commutative associative combining function
4646 * @return the task
4647 */
4648 public static <K,V> ForkJoinTask<Double> reduceToDouble
4649 (ConcurrentHashMap<K,V> map,
4650 ObjectByObjectToDouble<? super K, ? super V> transformer,
4651 double basis,
4652 DoubleByDoubleToDouble reducer) {
4653 if (transformer == null || reducer == null)
4654 throw new NullPointerException();
4655 return new MapReduceMappingsToDoubleTask<K,V>
4656 (map, null, -1, null, transformer, basis, reducer);
4657 }
4658
4659 /**
4660 * Returns a task that when invoked, returns the result of
4661 * accumulating the given transformation of all (key, value) pairs
4662 * using the given reducer to combine values, and the given
4663 * basis as an identity value.
4664 *
4665 * @param map the map
4666 * @param transformer a function returning the transformation
4667 * for an element
4668 * @param basis the identity (initial default value) for the reduction
4669 * @param reducer a commutative associative combining function
4670 * @return the task
4671 */
4672 public static <K,V> ForkJoinTask<Long> reduceToLong
4673 (ConcurrentHashMap<K,V> map,
4674 ObjectByObjectToLong<? super K, ? super V> transformer,
4675 long basis,
4676 LongByLongToLong reducer) {
4677 if (transformer == null || reducer == null)
4678 throw new NullPointerException();
4679 return new MapReduceMappingsToLongTask<K,V>
4680 (map, null, -1, null, transformer, basis, reducer);
4681 }
4682
4683 /**
4684 * Returns a task that when invoked, returns the result of
4685 * accumulating the given transformation of all (key, value) pairs
4686 * using the given reducer to combine values, and the given
4687 * basis as an identity value.
4688 *
     * @param map the map
4689 * @param transformer a function returning the transformation
4690 * for an element
4691 * @param basis the identity (initial default value) for the reduction
4692 * @param reducer a commutative associative combining function
4693 * @return the task
4694 */
4695 public static <K,V> ForkJoinTask<Integer> reduceToInt
4696 (ConcurrentHashMap<K,V> map,
4697 ObjectByObjectToInt<? super K, ? super V> transformer,
4698 int basis,
4699 IntByIntToInt reducer) {
4700 if (transformer == null || reducer == null)
4701 throw new NullPointerException();
4702 return new MapReduceMappingsToIntTask<K,V>
4703 (map, null, -1, null, transformer, basis, reducer);
4704 }
4705
4706 /**
4707 * Returns a task that when invoked, performs the given action
4708 * for each key.
4709 *
4710 * @param map the map
4711 * @param action the action
4712 * @return the task
4713 */
4714 public static <K,V> ForkJoinTask<Void> forEachKey
4715 (ConcurrentHashMap<K,V> map,
4716 Action<K> action) {
4717 if (action == null) throw new NullPointerException();
4718 return new ForEachKeyTask<K,V>(map, null, -1, action);
4719 }
4720
4721 /**
4722 * Returns a task that when invoked, performs the given action
4723 * for each non-null transformation of each key.
4724 *
4725 * @param map the map
4726 * @param transformer a function returning the transformation
4727 * for an element, or null if there is no transformation (in
4728 * which case the action is not applied)
4729 * @param action the action
4730 * @return the task
4731 */
4732 public static <K,V,U> ForkJoinTask<Void> forEachKey
4733 (ConcurrentHashMap<K,V> map,
4734 Fun<? super K, ? extends U> transformer,
4735 Action<U> action) {
4736 if (transformer == null || action == null)
4737 throw new NullPointerException();
4738 return new ForEachTransformedKeyTask<K,V,U>
4739 (map, null, -1, transformer, action);
4740 }
4741
4742 /**
4743 * Returns a task that when invoked, returns a non-null result
4744 * from applying the given search function on each key, or
4745 * null if none. Upon success, further element processing is
4746 * suppressed and the results of any other parallel
4747 * invocations of the search function are ignored.
4748 *
4749 * @param map the map
4750 * @param searchFunction a function returning a non-null
4751 * result on success, else null
4752 * @return the task
4753 */
4754 public static <K,V,U> ForkJoinTask<U> searchKeys
4755 (ConcurrentHashMap<K,V> map,
4756 Fun<? super K, ? extends U> searchFunction) {
4757 if (searchFunction == null) throw new NullPointerException();
4758 return new SearchKeysTask<K,V,U>
4759 (map, null, -1, searchFunction,
4760 new AtomicReference<U>());
4761 }
4762
4763 /**
4764 * Returns a task that when invoked, returns the result of
4765 * accumulating all keys using the given reducer to combine
4766 * values, or null if none.
4767 *
4768 * @param map the map
4769 * @param reducer a commutative associative combining function
4770 * @return the task
4771 */
4772 public static <K,V> ForkJoinTask<K> reduceKeys
4773 (ConcurrentHashMap<K,V> map,
4774 BiFun<? super K, ? super K, ? extends K> reducer) {
4775 if (reducer == null) throw new NullPointerException();
4776 return new ReduceKeysTask<K,V>
4777 (map, null, -1, null, reducer);
4778 }
4779
4780 /**
4781 * Returns a task that when invoked, returns the result of
4782 * accumulating the given transformation of all keys using the given
4783 * reducer to combine values, or null if none.
4784 *
4785 * @param map the map
4786 * @param transformer a function returning the transformation
4787 * for an element, or null if there is no transformation (in
4788 * which case it is not combined).
4789 * @param reducer a commutative associative combining function
4790 * @return the task
4791 */
4792 public static <K,V,U> ForkJoinTask<U> reduceKeys
4793 (ConcurrentHashMap<K,V> map,
4794 Fun<? super K, ? extends U> transformer,
4795 BiFun<? super U, ? super U, ? extends U> reducer) {
4796 if (transformer == null || reducer == null)
4797 throw new NullPointerException();
4798 return new MapReduceKeysTask<K,V,U>
4799 (map, null, -1, null, transformer, reducer);
4800 }
4801
4802 /**
4803 * Returns a task that when invoked, returns the result of
4804 * accumulating the given transformation of all keys using the given
4805 * reducer to combine values, and the given basis as an
4806 * identity value.
4807 *
4808 * @param map the map
4809 * @param transformer a function returning the transformation
4810 * for an element
4811 * @param basis the identity (initial default value) for the reduction
4812 * @param reducer a commutative associative combining function
4813 * @return the task
4814 */
4815 public static <K,V> ForkJoinTask<Double> reduceKeysToDouble
4816 (ConcurrentHashMap<K,V> map,
4817 ObjectToDouble<? super K> transformer,
4818 double basis,
4819 DoubleByDoubleToDouble reducer) {
4820 if (transformer == null || reducer == null)
4821 throw new NullPointerException();
4822 return new MapReduceKeysToDoubleTask<K,V>
4823 (map, null, -1, null, transformer, basis, reducer);
4824 }
4825
4826 /**
4827 * Returns a task that when invoked, returns the result of
4828 * accumulating the given transformation of all keys using the given
4829 * reducer to combine values, and the given basis as an
4830 * identity value.
4831 *
4832 * @param map the map
4833 * @param transformer a function returning the transformation
4834 * for an element
4835 * @param basis the identity (initial default value) for the reduction
4836 * @param reducer a commutative associative combining function
4837 * @return the task
4838 */
4839 public static <K,V> ForkJoinTask<Long> reduceKeysToLong
4840 (ConcurrentHashMap<K,V> map,
4841 ObjectToLong<? super K> transformer,
4842 long basis,
4843 LongByLongToLong reducer) {
4844 if (transformer == null || reducer == null)
4845 throw new NullPointerException();
4846 return new MapReduceKeysToLongTask<K,V>
4847 (map, null, -1, null, transformer, basis, reducer);
4848 }
4849
4850 /**
4851 * Returns a task that when invoked, returns the result of
4852 * accumulating the given transformation of all keys using the given
4853 * reducer to combine values, and the given basis as an
4854 * identity value.
4855 *
4856 * @param map the map
4857 * @param transformer a function returning the transformation
4858 * for an element
4859 * @param basis the identity (initial default value) for the reduction
4860 * @param reducer a commutative associative combining function
4861 * @return the task
4862 */
4863 public static <K,V> ForkJoinTask<Integer> reduceKeysToInt
4864 (ConcurrentHashMap<K,V> map,
4865 ObjectToInt<? super K> transformer,
4866 int basis,
4867 IntByIntToInt reducer) {
4868 if (transformer == null || reducer == null)
4869 throw new NullPointerException();
4870 return new MapReduceKeysToIntTask<K,V>
4871 (map, null, -1, null, transformer, basis, reducer);
4872 }
4873
4874 /**
4875 * Returns a task that when invoked, performs the given action
4876 * for each value.
4877 *
4878 * @param map the map
4879 * @param action the action
     * @return the task
4880 */
4881 public static <K,V> ForkJoinTask<Void> forEachValue
4882 (ConcurrentHashMap<K,V> map,
4883 Action<V> action) {
4884 if (action == null) throw new NullPointerException();
4885 return new ForEachValueTask<K,V>(map, null, -1, action);
4886 }
4887
4888 /**
4889 * Returns a task that when invoked, performs the given action
4890 * for each non-null transformation of each value.
4891 *
4892 * @param map the map
4893 * @param transformer a function returning the transformation
4894 * for an element, or null if there is no transformation (in
4895 * which case the action is not applied)
4896 * @param action the action
     * @return the task
4897 */
4898 public static <K,V,U> ForkJoinTask<Void> forEachValue
4899 (ConcurrentHashMap<K,V> map,
4900 Fun<? super V, ? extends U> transformer,
4901 Action<U> action) {
4902 if (transformer == null || action == null)
4903 throw new NullPointerException();
4904 return new ForEachTransformedValueTask<K,V,U>
4905 (map, null, -1, transformer, action);
4906 }
4907
4908 /**
4909 * Returns a task that when invoked, returns a non-null result
4910 * from applying the given search function on each value, or
4911 * null if none. Upon success, further element processing is
4912 * suppressed and the results of any other parallel
4913 * invocations of the search function are ignored.
4914 *
4915 * @param map the map
4916 * @param searchFunction a function returning a non-null
4917 * result on success, else null
4918 * @return the task
4919 */
4920 public static <K,V,U> ForkJoinTask<U> searchValues
4921 (ConcurrentHashMap<K,V> map,
4922 Fun<? super V, ? extends U> searchFunction) {
4923 if (searchFunction == null) throw new NullPointerException();
4924 return new SearchValuesTask<K,V,U>
4925 (map, null, -1, searchFunction,
4926 new AtomicReference<U>());
4927 }
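        // Usage sketch (illustrative only; assumes the nested Fun interface of
        // this revision, plus some map and pool as above): find any value longer
        // than 100 characters, or null if none.
        //
        //   ForkJoinTask<String> task = ForkJoinTasks.searchValues
        //       (map,                                      // ConcurrentHashMap<?,String>
        //        new Fun<String,String>() {
        //            public String apply(String v) {
        //                return (v.length() > 100) ? v : null; // null keeps searching
        //            }
        //        });
        //   String found = pool.invoke(task);              // null if no match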
4928
4929 /**
4930 * Returns a task that when invoked, returns the result of
4931 * accumulating all values using the given reducer to combine
4932 * values, or null if none.
4933 *
4934 * @param map the map
4935 * @param reducer a commutative associative combining function
4936 * @return the task
4937 */
4938 public static <K,V> ForkJoinTask<V> reduceValues
4939 (ConcurrentHashMap<K,V> map,
4940 BiFun<? super V, ? super V, ? extends V> reducer) {
4941 if (reducer == null) throw new NullPointerException();
4942 return new ReduceValuesTask<K,V>
4943 (map, null, -1, null, reducer);
4944 }
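        // Usage sketch (illustrative only; assumes the nested BiFun interface of
        // this revision): reduce to the longest value, or null if the map is empty.
        //
        //   ForkJoinTask<String> task = ForkJoinTasks.reduceValues
        //       (map,                                      // ConcurrentHashMap<?,String>
        //        new BiFun<String,String,String>() {
        //            public String apply(String a, String b) {
        //                return (a.length() >= b.length()) ? a : b;
        //            }
        //        });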
4945
4946 /**
4947 * Returns a task that when invoked, returns the result of
4948 * accumulating the given transformation of all values using the
4949 * given reducer to combine values, or null if none.
4950 *
4951 * @param map the map
4952 * @param transformer a function returning the transformation
4953 * for an element, or null if there is no transformation (in
4954 * which case it is not combined).
4955 * @param reducer a commutative associative combining function
4956 * @return the task
4957 */
4958 public static <K,V,U> ForkJoinTask<U> reduceValues
4959 (ConcurrentHashMap<K,V> map,
4960 Fun<? super V, ? extends U> transformer,
4961 BiFun<? super U, ? super U, ? extends U> reducer) {
4962 if (transformer == null || reducer == null)
4963 throw new NullPointerException();
4964 return new MapReduceValuesTask<K,V,U>
4965 (map, null, -1, null, transformer, reducer);
4966 }
4967
4968 /**
4969 * Returns a task that when invoked, returns the result of
4970 * accumulating the given transformation of all values using the
4971 * given reducer to combine values, and the given basis as an
4972 * identity value.
4973 *
4974 * @param map the map
4975 * @param transformer a function returning the transformation
4976 * for an element
4977 * @param basis the identity (initial default value) for the reduction
4978 * @param reducer a commutative associative combining function
4979 * @return the task
4980 */
4981 public static <K,V> ForkJoinTask<Double> reduceValuesToDouble
4982 (ConcurrentHashMap<K,V> map,
4983 ObjectToDouble<? super V> transformer,
4984 double basis,
4985 DoubleByDoubleToDouble reducer) {
4986 if (transformer == null || reducer == null)
4987 throw new NullPointerException();
4988 return new MapReduceValuesToDoubleTask<K,V>
4989 (map, null, -1, null, transformer, basis, reducer);
4990 }
4991
4992 /**
4993 * Returns a task that when invoked, returns the result of
4994 * accumulating the given transformation of all values using the
4995 * given reducer to combine values, and the given basis as an
4996 * identity value.
4997 *
4998 * @param map the map
4999 * @param transformer a function returning the transformation
5000 * for an element
5001 * @param basis the identity (initial default value) for the reduction
5002 * @param reducer a commutative associative combining function
5003 * @return the task
5004 */
5005 public static <K,V> ForkJoinTask<Long> reduceValuesToLong
5006 (ConcurrentHashMap<K,V> map,
5007 ObjectToLong<? super V> transformer,
5008 long basis,
5009 LongByLongToLong reducer) {
5010 if (transformer == null || reducer == null)
5011 throw new NullPointerException();
5012 return new MapReduceValuesToLongTask<K,V>
5013 (map, null, -1, null, transformer, basis, reducer);
5014 }
5015
5016 /**
5017 * Returns a task that when invoked, returns the result of
5018 * accumulating the given transformation of all values using the
5019 * given reducer to combine values, and the given basis as an
5020 * identity value.
5021 *
5022 * @param map the map
5023 * @param transformer a function returning the transformation
5024 * for an element
5025 * @param basis the identity (initial default value) for the reduction
5026 * @param reducer a commutative associative combining function
5027 * @return the task
5028 */
5029 public static <K,V> ForkJoinTask<Integer> reduceValuesToInt
5030 (ConcurrentHashMap<K,V> map,
5031 ObjectToInt<? super V> transformer,
5032 int basis,
5033 IntByIntToInt reducer) {
5034 if (transformer == null || reducer == null)
5035 throw new NullPointerException();
5036 return new MapReduceValuesToIntTask<K,V>
5037 (map, null, -1, null, transformer, basis, reducer);
5038 }
5039
5040 /**
5041     * Returns a task that when invoked, performs the given action
5042 * for each entry.
5043 *
5044 * @param map the map
5045 * @param action the action
5046 */
5047 public static <K,V> ForkJoinTask<Void> forEachEntry
5048 (ConcurrentHashMap<K,V> map,
5049 Action<Map.Entry<K,V>> action) {
5050 if (action == null) throw new NullPointerException();
5051 return new ForEachEntryTask<K,V>(map, null, -1, action);
5052 }
5053
5054 /**
5055     * Returns a task that when invoked, performs the given action
5056 * for each non-null transformation of each entry.
5057 *
5058 * @param map the map
5059 * @param transformer a function returning the transformation
5060 * for an element, or null if there is no transformation (in
5061 * which case the action is not applied)
5062 * @param action the action
5063 */
5064 public static <K,V,U> ForkJoinTask<Void> forEachEntry
5065 (ConcurrentHashMap<K,V> map,
5066 Fun<Map.Entry<K,V>, ? extends U> transformer,
5067 Action<U> action) {
5068 if (transformer == null || action == null)
5069 throw new NullPointerException();
5070 return new ForEachTransformedEntryTask<K,V,U>
5071 (map, null, -1, transformer, action);
5072 }
5073
5074 /**
5075 * Returns a task that when invoked, returns a non-null result
5076 * from applying the given search function on each entry, or
5077 * null if none. Upon success, further element processing is
5078 * suppressed and the results of any other parallel
5079 * invocations of the search function are ignored.
5080 *
5081 * @param map the map
5082 * @param searchFunction a function returning a non-null
5083 * result on success, else null
5084 * @return the task
5085 */
5086 public static <K,V,U> ForkJoinTask<U> searchEntries
5087 (ConcurrentHashMap<K,V> map,
5088 Fun<Map.Entry<K,V>, ? extends U> searchFunction) {
5089 if (searchFunction == null) throw new NullPointerException();
5090 return new SearchEntriesTask<K,V,U>
5091 (map, null, -1, searchFunction,
5092 new AtomicReference<U>());
5093 }
5094
5095 /**
5096 * Returns a task that when invoked, returns the result of
5097 * accumulating all entries using the given reducer to combine
5098 * values, or null if none.
5099 *
5100 * @param map the map
5101 * @param reducer a commutative associative combining function
5102 * @return the task
5103 */
5104 public static <K,V> ForkJoinTask<Map.Entry<K,V>> reduceEntries
5105 (ConcurrentHashMap<K,V> map,
5106 BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
5107 if (reducer == null) throw new NullPointerException();
5108 return new ReduceEntriesTask<K,V>
5109 (map, null, -1, null, reducer);
5110 }
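        // Usage sketch (illustrative only; assumes the nested BiFun interface of
        // this revision): reduce to the entry with the greatest value, or null if
        // the map is empty.
        //
        //   ForkJoinTask<Map.Entry<String,Long>> task = ForkJoinTasks.reduceEntries
        //       (map,                                      // ConcurrentHashMap<String,Long>
        //        new BiFun<Map.Entry<String,Long>, Map.Entry<String,Long>,
        //                  Map.Entry<String,Long>>() {
        //            public Map.Entry<String,Long> apply(Map.Entry<String,Long> a,
        //                                                Map.Entry<String,Long> b) {
        //                return (a.getValue() >= b.getValue()) ? a : b;
        //            }
        //        });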
5111
5112 /**
5113 * Returns a task that when invoked, returns the result of
5114 * accumulating the given transformation of all entries using the
5115 * given reducer to combine values, or null if none.
5116 *
5117 * @param map the map
5118 * @param transformer a function returning the transformation
5119 * for an element, or null if there is no transformation (in
5120 * which case it is not combined).
5121 * @param reducer a commutative associative combining function
5122 * @return the task
5123 */
5124 public static <K,V,U> ForkJoinTask<U> reduceEntries
5125 (ConcurrentHashMap<K,V> map,
5126 Fun<Map.Entry<K,V>, ? extends U> transformer,
5127 BiFun<? super U, ? super U, ? extends U> reducer) {
5128 if (transformer == null || reducer == null)
5129 throw new NullPointerException();
5130 return new MapReduceEntriesTask<K,V,U>
5131 (map, null, -1, null, transformer, reducer);
5132 }
5133
5134 /**
5135 * Returns a task that when invoked, returns the result of
5136 * accumulating the given transformation of all entries using the
5137 * given reducer to combine values, and the given basis as an
5138 * identity value.
5139 *
5140 * @param map the map
5141 * @param transformer a function returning the transformation
5142 * for an element
5143 * @param basis the identity (initial default value) for the reduction
5144 * @param reducer a commutative associative combining function
5145 * @return the task
5146 */
5147 public static <K,V> ForkJoinTask<Double> reduceEntriesToDouble
5148 (ConcurrentHashMap<K,V> map,
5149 ObjectToDouble<Map.Entry<K,V>> transformer,
5150 double basis,
5151 DoubleByDoubleToDouble reducer) {
5152 if (transformer == null || reducer == null)
5153 throw new NullPointerException();
5154 return new MapReduceEntriesToDoubleTask<K,V>
5155 (map, null, -1, null, transformer, basis, reducer);
5156 }
5157
5158 /**
5159 * Returns a task that when invoked, returns the result of
5160 * accumulating the given transformation of all entries using the
5161 * given reducer to combine values, and the given basis as an
5162 * identity value.
5163 *
5164 * @param map the map
5165 * @param transformer a function returning the transformation
5166 * for an element
5167 * @param basis the identity (initial default value) for the reduction
5168 * @param reducer a commutative associative combining function
5169 * @return the task
5170 */
5171 public static <K,V> ForkJoinTask<Long> reduceEntriesToLong
5172 (ConcurrentHashMap<K,V> map,
5173 ObjectToLong<Map.Entry<K,V>> transformer,
5174 long basis,
5175 LongByLongToLong reducer) {
5176 if (transformer == null || reducer == null)
5177 throw new NullPointerException();
5178 return new MapReduceEntriesToLongTask<K,V>
5179 (map, null, -1, null, transformer, basis, reducer);
5180 }
5181
5182 /**
5183 * Returns a task that when invoked, returns the result of
5184 * accumulating the given transformation of all entries using the
5185 * given reducer to combine values, and the given basis as an
5186 * identity value.
5187 *
5188 * @param map the map
5189 * @param transformer a function returning the transformation
5190 * for an element
5191 * @param basis the identity (initial default value) for the reduction
5192 * @param reducer a commutative associative combining function
5193 * @return the task
5194 */
5195 public static <K,V> ForkJoinTask<Integer> reduceEntriesToInt
5196 (ConcurrentHashMap<K,V> map,
5197 ObjectToInt<Map.Entry<K,V>> transformer,
5198 int basis,
5199 IntByIntToInt reducer) {
5200 if (transformer == null || reducer == null)
5201 throw new NullPointerException();
5202 return new MapReduceEntriesToIntTask<K,V>
5203 (map, null, -1, null, transformer, basis, reducer);
5204 }
5205 }
5206
5207 // -------------------------------------------------------
5208
5209 /*
5210      * Task classes. Coded in a regular but ugly format/style to
5211      * simplify checks that each variant differs in the right way from
5212      * the others. The null screenings exist because compilers cannot tell
5213      * that task arguments have already been null-checked, so we force the
5214      * simplest hoisted bypass to help avoid convoluted traps.
5215 */
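    /*
     * Illustrative sketch (a summary, not verbatim from this file) of the
     * skeleton that the task classes below share; the variants differ only in
     * which function they apply and how partial results are combined:
     *
     *   public final void compute() {
     *       if (the captured function arguments are non-null) {
     *           for (int b; (b = preSplit()) > 0;)          // repeatedly fork off
     *               new SameTask(map, this, b, ...).fork();  //   right-hand subtasks
     *           while (advance() != null)                    // then traverse own portion,
     *               ... apply the function to nextKey and/or the current value ...
     *           // forEach variants finish via propagateCompletion(), searches
     *           // CAS the shared result and quietlyCompleteRoot(), and
     *           // reductions merge subtask results via firstComplete()/
     *           // nextComplete().
     *       }
     *   }
     */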
5216
5217 @SuppressWarnings("serial") static final class ForEachKeyTask<K,V>
5218 extends Traverser<K,V,Void> {
5219 final Action<K> action;
5220 ForEachKeyTask
5221 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5222 Action<K> action) {
5223 super(m, p, b);
5224 this.action = action;
5225 }
5226 @SuppressWarnings("unchecked") public final void compute() {
5227 final Action<K> action;
5228 if ((action = this.action) != null) {
5229 for (int b; (b = preSplit()) > 0;)
5230 new ForEachKeyTask<K,V>(map, this, b, action).fork();
5231 while (advance() != null)
5232 action.apply((K)nextKey);
5233 propagateCompletion();
5234 }
5235 }
5236 }
5237
5238 @SuppressWarnings("serial") static final class ForEachValueTask<K,V>
5239 extends Traverser<K,V,Void> {
5240 final Action<V> action;
5241 ForEachValueTask
5242 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5243 Action<V> action) {
5244 super(m, p, b);
5245 this.action = action;
5246 }
5247 @SuppressWarnings("unchecked") public final void compute() {
5248 final Action<V> action;
5249 if ((action = this.action) != null) {
5250 for (int b; (b = preSplit()) > 0;)
5251 new ForEachValueTask<K,V>(map, this, b, action).fork();
5252 Object v;
5253 while ((v = advance()) != null)
5254 action.apply((V)v);
5255 propagateCompletion();
5256 }
5257 }
5258 }
5259
5260 @SuppressWarnings("serial") static final class ForEachEntryTask<K,V>
5261 extends Traverser<K,V,Void> {
5262 final Action<Entry<K,V>> action;
5263 ForEachEntryTask
5264 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5265 Action<Entry<K,V>> action) {
5266 super(m, p, b);
5267 this.action = action;
5268 }
5269 @SuppressWarnings("unchecked") public final void compute() {
5270 final Action<Entry<K,V>> action;
5271 if ((action = this.action) != null) {
5272 for (int b; (b = preSplit()) > 0;)
5273 new ForEachEntryTask<K,V>(map, this, b, action).fork();
5274 Object v;
5275 while ((v = advance()) != null)
5276 action.apply(entryFor((K)nextKey, (V)v));
5277 propagateCompletion();
5278 }
5279 }
5280 }
5281
5282 @SuppressWarnings("serial") static final class ForEachMappingTask<K,V>
5283 extends Traverser<K,V,Void> {
5284 final BiAction<K,V> action;
5285 ForEachMappingTask
5286 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5287 BiAction<K,V> action) {
5288 super(m, p, b);
5289 this.action = action;
5290 }
5291 @SuppressWarnings("unchecked") public final void compute() {
5292 final BiAction<K,V> action;
5293 if ((action = this.action) != null) {
5294 for (int b; (b = preSplit()) > 0;)
5295 new ForEachMappingTask<K,V>(map, this, b, action).fork();
5296 Object v;
5297 while ((v = advance()) != null)
5298 action.apply((K)nextKey, (V)v);
5299 propagateCompletion();
5300 }
5301 }
5302 }
5303
5304 @SuppressWarnings("serial") static final class ForEachTransformedKeyTask<K,V,U>
5305 extends Traverser<K,V,Void> {
5306 final Fun<? super K, ? extends U> transformer;
5307 final Action<U> action;
5308 ForEachTransformedKeyTask
5309 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5310 Fun<? super K, ? extends U> transformer, Action<U> action) {
5311 super(m, p, b);
5312 this.transformer = transformer; this.action = action;
5313 }
5314 @SuppressWarnings("unchecked") public final void compute() {
5315 final Fun<? super K, ? extends U> transformer;
5316 final Action<U> action;
5317 if ((transformer = this.transformer) != null &&
5318 (action = this.action) != null) {
5319 for (int b; (b = preSplit()) > 0;)
5320 new ForEachTransformedKeyTask<K,V,U>
5321 (map, this, b, transformer, action).fork();
5322 U u;
5323 while (advance() != null) {
5324 if ((u = transformer.apply((K)nextKey)) != null)
5325 action.apply(u);
5326 }
5327 propagateCompletion();
5328 }
5329 }
5330 }
5331
5332 @SuppressWarnings("serial") static final class ForEachTransformedValueTask<K,V,U>
5333 extends Traverser<K,V,Void> {
5334 final Fun<? super V, ? extends U> transformer;
5335 final Action<U> action;
5336 ForEachTransformedValueTask
5337 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5338 Fun<? super V, ? extends U> transformer, Action<U> action) {
5339 super(m, p, b);
5340 this.transformer = transformer; this.action = action;
5341 }
5342 @SuppressWarnings("unchecked") public final void compute() {
5343 final Fun<? super V, ? extends U> transformer;
5344 final Action<U> action;
5345 if ((transformer = this.transformer) != null &&
5346 (action = this.action) != null) {
5347 for (int b; (b = preSplit()) > 0;)
5348 new ForEachTransformedValueTask<K,V,U>
5349 (map, this, b, transformer, action).fork();
5350 Object v; U u;
5351 while ((v = advance()) != null) {
5352 if ((u = transformer.apply((V)v)) != null)
5353 action.apply(u);
5354 }
5355 propagateCompletion();
5356 }
5357 }
5358 }
5359
5360 @SuppressWarnings("serial") static final class ForEachTransformedEntryTask<K,V,U>
5361 extends Traverser<K,V,Void> {
5362 final Fun<Map.Entry<K,V>, ? extends U> transformer;
5363 final Action<U> action;
5364 ForEachTransformedEntryTask
5365 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5366 Fun<Map.Entry<K,V>, ? extends U> transformer, Action<U> action) {
5367 super(m, p, b);
5368 this.transformer = transformer; this.action = action;
5369 }
5370 @SuppressWarnings("unchecked") public final void compute() {
5371 final Fun<Map.Entry<K,V>, ? extends U> transformer;
5372 final Action<U> action;
5373 if ((transformer = this.transformer) != null &&
5374 (action = this.action) != null) {
5375 for (int b; (b = preSplit()) > 0;)
5376 new ForEachTransformedEntryTask<K,V,U>
5377 (map, this, b, transformer, action).fork();
5378 Object v; U u;
5379 while ((v = advance()) != null) {
5380 if ((u = transformer.apply(entryFor((K)nextKey,
5381 (V)v))) != null)
5382 action.apply(u);
5383 }
5384 propagateCompletion();
5385 }
5386 }
5387 }
5388
5389 @SuppressWarnings("serial") static final class ForEachTransformedMappingTask<K,V,U>
5390 extends Traverser<K,V,Void> {
5391 final BiFun<? super K, ? super V, ? extends U> transformer;
5392 final Action<U> action;
5393 ForEachTransformedMappingTask
5394 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5395 BiFun<? super K, ? super V, ? extends U> transformer,
5396 Action<U> action) {
5397 super(m, p, b);
5398 this.transformer = transformer; this.action = action;
5399 }
5400 @SuppressWarnings("unchecked") public final void compute() {
5401 final BiFun<? super K, ? super V, ? extends U> transformer;
5402 final Action<U> action;
5403 if ((transformer = this.transformer) != null &&
5404 (action = this.action) != null) {
5405 for (int b; (b = preSplit()) > 0;)
5406 new ForEachTransformedMappingTask<K,V,U>
5407 (map, this, b, transformer, action).fork();
5408 Object v; U u;
5409 while ((v = advance()) != null) {
5410 if ((u = transformer.apply((K)nextKey, (V)v)) != null)
5411 action.apply(u);
5412 }
5413 propagateCompletion();
5414 }
5415 }
5416 }
5417
5418 @SuppressWarnings("serial") static final class SearchKeysTask<K,V,U>
5419 extends Traverser<K,V,U> {
5420 final Fun<? super K, ? extends U> searchFunction;
5421 final AtomicReference<U> result;
5422 SearchKeysTask
5423 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5424 Fun<? super K, ? extends U> searchFunction,
5425 AtomicReference<U> result) {
5426 super(m, p, b);
5427 this.searchFunction = searchFunction; this.result = result;
5428 }
5429 public final U getRawResult() { return result.get(); }
5430 @SuppressWarnings("unchecked") public final void compute() {
5431 final Fun<? super K, ? extends U> searchFunction;
5432 final AtomicReference<U> result;
5433 if ((searchFunction = this.searchFunction) != null &&
5434 (result = this.result) != null) {
5435 for (int b;;) {
5436 if (result.get() != null)
5437 return;
5438 if ((b = preSplit()) <= 0)
5439 break;
5440 new SearchKeysTask<K,V,U>
5441 (map, this, b, searchFunction, result).fork();
5442 }
5443 while (result.get() == null) {
5444 U u;
5445 if (advance() == null) {
5446 propagateCompletion();
5447 break;
5448 }
5449 if ((u = searchFunction.apply((K)nextKey)) != null) {
5450 if (result.compareAndSet(null, u))
5451 quietlyCompleteRoot();
5452 break;
5453 }
5454 }
5455 }
5456 }
5457 }
5458
5459 @SuppressWarnings("serial") static final class SearchValuesTask<K,V,U>
5460 extends Traverser<K,V,U> {
5461 final Fun<? super V, ? extends U> searchFunction;
5462 final AtomicReference<U> result;
5463 SearchValuesTask
5464 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5465 Fun<? super V, ? extends U> searchFunction,
5466 AtomicReference<U> result) {
5467 super(m, p, b);
5468 this.searchFunction = searchFunction; this.result = result;
5469 }
5470 public final U getRawResult() { return result.get(); }
5471 @SuppressWarnings("unchecked") public final void compute() {
5472 final Fun<? super V, ? extends U> searchFunction;
5473 final AtomicReference<U> result;
5474 if ((searchFunction = this.searchFunction) != null &&
5475 (result = this.result) != null) {
5476 for (int b;;) {
5477 if (result.get() != null)
5478 return;
5479 if ((b = preSplit()) <= 0)
5480 break;
5481 new SearchValuesTask<K,V,U>
5482 (map, this, b, searchFunction, result).fork();
5483 }
5484 while (result.get() == null) {
5485 Object v; U u;
5486 if ((v = advance()) == null) {
5487 propagateCompletion();
5488 break;
5489 }
5490 if ((u = searchFunction.apply((V)v)) != null) {
5491 if (result.compareAndSet(null, u))
5492 quietlyCompleteRoot();
5493 break;
5494 }
5495 }
5496 }
5497 }
5498 }
5499
5500 @SuppressWarnings("serial") static final class SearchEntriesTask<K,V,U>
5501 extends Traverser<K,V,U> {
5502 final Fun<Entry<K,V>, ? extends U> searchFunction;
5503 final AtomicReference<U> result;
5504 SearchEntriesTask
5505 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5506 Fun<Entry<K,V>, ? extends U> searchFunction,
5507 AtomicReference<U> result) {
5508 super(m, p, b);
5509 this.searchFunction = searchFunction; this.result = result;
5510 }
5511 public final U getRawResult() { return result.get(); }
5512 @SuppressWarnings("unchecked") public final void compute() {
5513 final Fun<Entry<K,V>, ? extends U> searchFunction;
5514 final AtomicReference<U> result;
5515 if ((searchFunction = this.searchFunction) != null &&
5516 (result = this.result) != null) {
5517 for (int b;;) {
5518 if (result.get() != null)
5519 return;
5520 if ((b = preSplit()) <= 0)
5521 break;
5522 new SearchEntriesTask<K,V,U>
5523 (map, this, b, searchFunction, result).fork();
5524 }
5525 while (result.get() == null) {
5526 Object v; U u;
5527 if ((v = advance()) == null) {
5528 propagateCompletion();
5529 break;
5530 }
5531 if ((u = searchFunction.apply(entryFor((K)nextKey,
5532 (V)v))) != null) {
5533 if (result.compareAndSet(null, u))
5534 quietlyCompleteRoot();
5535 return;
5536 }
5537 }
5538 }
5539 }
5540 }
5541
5542 @SuppressWarnings("serial") static final class SearchMappingsTask<K,V,U>
5543 extends Traverser<K,V,U> {
5544 final BiFun<? super K, ? super V, ? extends U> searchFunction;
5545 final AtomicReference<U> result;
5546 SearchMappingsTask
5547 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5548 BiFun<? super K, ? super V, ? extends U> searchFunction,
5549 AtomicReference<U> result) {
5550 super(m, p, b);
5551 this.searchFunction = searchFunction; this.result = result;
5552 }
5553 public final U getRawResult() { return result.get(); }
5554 @SuppressWarnings("unchecked") public final void compute() {
5555 final BiFun<? super K, ? super V, ? extends U> searchFunction;
5556 final AtomicReference<U> result;
5557 if ((searchFunction = this.searchFunction) != null &&
5558 (result = this.result) != null) {
5559 for (int b;;) {
5560 if (result.get() != null)
5561 return;
5562 if ((b = preSplit()) <= 0)
5563 break;
5564 new SearchMappingsTask<K,V,U>
5565 (map, this, b, searchFunction, result).fork();
5566 }
5567 while (result.get() == null) {
5568 Object v; U u;
5569 if ((v = advance()) == null) {
5570 propagateCompletion();
5571 break;
5572 }
5573 if ((u = searchFunction.apply((K)nextKey, (V)v)) != null) {
5574 if (result.compareAndSet(null, u))
5575 quietlyCompleteRoot();
5576 break;
5577 }
5578 }
5579 }
5580 }
5581 }
5582
5583 @SuppressWarnings("serial") static final class ReduceKeysTask<K,V>
5584 extends Traverser<K,V,K> {
5585 final BiFun<? super K, ? super K, ? extends K> reducer;
5586 K result;
5587 ReduceKeysTask<K,V> rights, nextRight;
5588 ReduceKeysTask
5589 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5590 ReduceKeysTask<K,V> nextRight,
5591 BiFun<? super K, ? super K, ? extends K> reducer) {
5592 super(m, p, b); this.nextRight = nextRight;
5593 this.reducer = reducer;
5594 }
5595 public final K getRawResult() { return result; }
5596 @SuppressWarnings("unchecked") public final void compute() {
5597 final BiFun<? super K, ? super K, ? extends K> reducer;
5598 if ((reducer = this.reducer) != null) {
5599 for (int b; (b = preSplit()) > 0;)
5600 (rights = new ReduceKeysTask<K,V>
5601 (map, this, b, rights, reducer)).fork();
5602 K r = null;
5603 while (advance() != null) {
5604 K u = (K)nextKey;
5605 r = (r == null) ? u : reducer.apply(r, u);
5606 }
5607 result = r;
5608 CountedCompleter<?> c;
5609 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5610 ReduceKeysTask<K,V>
5611 t = (ReduceKeysTask<K,V>)c,
5612 s = t.rights;
5613 while (s != null) {
5614 K tr, sr;
5615 if ((sr = s.result) != null)
5616 t.result = (((tr = t.result) == null) ? sr :
5617 reducer.apply(tr, sr));
5618 s = t.rights = s.nextRight;
5619 }
5620 }
5621 }
5622 }
5623 }
5624
5625 @SuppressWarnings("serial") static final class ReduceValuesTask<K,V>
5626 extends Traverser<K,V,V> {
5627 final BiFun<? super V, ? super V, ? extends V> reducer;
5628 V result;
5629 ReduceValuesTask<K,V> rights, nextRight;
5630 ReduceValuesTask
5631 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5632 ReduceValuesTask<K,V> nextRight,
5633 BiFun<? super V, ? super V, ? extends V> reducer) {
5634 super(m, p, b); this.nextRight = nextRight;
5635 this.reducer = reducer;
5636 }
5637 public final V getRawResult() { return result; }
5638 @SuppressWarnings("unchecked") public final void compute() {
5639 final BiFun<? super V, ? super V, ? extends V> reducer;
5640 if ((reducer = this.reducer) != null) {
5641 for (int b; (b = preSplit()) > 0;)
5642 (rights = new ReduceValuesTask<K,V>
5643 (map, this, b, rights, reducer)).fork();
5644 V r = null;
5645 Object v;
5646 while ((v = advance()) != null) {
5647 V u = (V)v;
5648 r = (r == null) ? u : reducer.apply(r, u);
5649 }
5650 result = r;
5651 CountedCompleter<?> c;
5652 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5653 ReduceValuesTask<K,V>
5654 t = (ReduceValuesTask<K,V>)c,
5655 s = t.rights;
5656 while (s != null) {
5657 V tr, sr;
5658 if ((sr = s.result) != null)
5659 t.result = (((tr = t.result) == null) ? sr :
5660 reducer.apply(tr, sr));
5661 s = t.rights = s.nextRight;
5662 }
5663 }
5664 }
5665 }
5666 }
5667
5668 @SuppressWarnings("serial") static final class ReduceEntriesTask<K,V>
5669 extends Traverser<K,V,Map.Entry<K,V>> {
5670 final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
5671 Map.Entry<K,V> result;
5672 ReduceEntriesTask<K,V> rights, nextRight;
5673 ReduceEntriesTask
5674 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5675 ReduceEntriesTask<K,V> nextRight,
5676          BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
5677 super(m, p, b); this.nextRight = nextRight;
5678 this.reducer = reducer;
5679 }
5680 public final Map.Entry<K,V> getRawResult() { return result; }
5681 @SuppressWarnings("unchecked") public final void compute() {
5682 final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
5683 if ((reducer = this.reducer) != null) {
5684 for (int b; (b = preSplit()) > 0;)
5685 (rights = new ReduceEntriesTask<K,V>
5686 (map, this, b, rights, reducer)).fork();
5687 Map.Entry<K,V> r = null;
5688 Object v;
5689 while ((v = advance()) != null) {
5690 Map.Entry<K,V> u = entryFor((K)nextKey, (V)v);
5691 r = (r == null) ? u : reducer.apply(r, u);
5692 }
5693 result = r;
5694 CountedCompleter<?> c;
5695 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5696 ReduceEntriesTask<K,V>
5697 t = (ReduceEntriesTask<K,V>)c,
5698 s = t.rights;
5699 while (s != null) {
5700 Map.Entry<K,V> tr, sr;
5701 if ((sr = s.result) != null)
5702 t.result = (((tr = t.result) == null) ? sr :
5703 reducer.apply(tr, sr));
5704 s = t.rights = s.nextRight;
5705 }
5706 }
5707 }
5708 }
5709 }
5710
5711 @SuppressWarnings("serial") static final class MapReduceKeysTask<K,V,U>
5712 extends Traverser<K,V,U> {
5713 final Fun<? super K, ? extends U> transformer;
5714 final BiFun<? super U, ? super U, ? extends U> reducer;
5715 U result;
5716 MapReduceKeysTask<K,V,U> rights, nextRight;
5717 MapReduceKeysTask
5718 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5719 MapReduceKeysTask<K,V,U> nextRight,
5720 Fun<? super K, ? extends U> transformer,
5721 BiFun<? super U, ? super U, ? extends U> reducer) {
5722 super(m, p, b); this.nextRight = nextRight;
5723 this.transformer = transformer;
5724 this.reducer = reducer;
5725 }
5726 public final U getRawResult() { return result; }
5727 @SuppressWarnings("unchecked") public final void compute() {
5728 final Fun<? super K, ? extends U> transformer;
5729 final BiFun<? super U, ? super U, ? extends U> reducer;
5730 if ((transformer = this.transformer) != null &&
5731 (reducer = this.reducer) != null) {
5732 for (int b; (b = preSplit()) > 0;)
5733 (rights = new MapReduceKeysTask<K,V,U>
5734 (map, this, b, rights, transformer, reducer)).fork();
5735 U r = null, u;
5736 while (advance() != null) {
5737 if ((u = transformer.apply((K)nextKey)) != null)
5738 r = (r == null) ? u : reducer.apply(r, u);
5739 }
5740 result = r;
5741 CountedCompleter<?> c;
5742 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5743 MapReduceKeysTask<K,V,U>
5744 t = (MapReduceKeysTask<K,V,U>)c,
5745 s = t.rights;
5746 while (s != null) {
5747 U tr, sr;
5748 if ((sr = s.result) != null)
5749 t.result = (((tr = t.result) == null) ? sr :
5750 reducer.apply(tr, sr));
5751 s = t.rights = s.nextRight;
5752 }
5753 }
5754 }
5755 }
5756 }
5757
5758 @SuppressWarnings("serial") static final class MapReduceValuesTask<K,V,U>
5759 extends Traverser<K,V,U> {
5760 final Fun<? super V, ? extends U> transformer;
5761 final BiFun<? super U, ? super U, ? extends U> reducer;
5762 U result;
5763 MapReduceValuesTask<K,V,U> rights, nextRight;
5764 MapReduceValuesTask
5765 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5766 MapReduceValuesTask<K,V,U> nextRight,
5767 Fun<? super V, ? extends U> transformer,
5768 BiFun<? super U, ? super U, ? extends U> reducer) {
5769 super(m, p, b); this.nextRight = nextRight;
5770 this.transformer = transformer;
5771 this.reducer = reducer;
5772 }
5773 public final U getRawResult() { return result; }
5774 @SuppressWarnings("unchecked") public final void compute() {
5775 final Fun<? super V, ? extends U> transformer;
5776 final BiFun<? super U, ? super U, ? extends U> reducer;
5777 if ((transformer = this.transformer) != null &&
5778 (reducer = this.reducer) != null) {
5779 for (int b; (b = preSplit()) > 0;)
5780 (rights = new MapReduceValuesTask<K,V,U>
5781 (map, this, b, rights, transformer, reducer)).fork();
5782 U r = null, u;
5783 Object v;
5784 while ((v = advance()) != null) {
5785 if ((u = transformer.apply((V)v)) != null)
5786 r = (r == null) ? u : reducer.apply(r, u);
5787 }
5788 result = r;
5789 CountedCompleter<?> c;
5790 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5791 MapReduceValuesTask<K,V,U>
5792 t = (MapReduceValuesTask<K,V,U>)c,
5793 s = t.rights;
5794 while (s != null) {
5795 U tr, sr;
5796 if ((sr = s.result) != null)
5797 t.result = (((tr = t.result) == null) ? sr :
5798 reducer.apply(tr, sr));
5799 s = t.rights = s.nextRight;
5800 }
5801 }
5802 }
5803 }
5804 }
5805
5806 @SuppressWarnings("serial") static final class MapReduceEntriesTask<K,V,U>
5807 extends Traverser<K,V,U> {
5808 final Fun<Map.Entry<K,V>, ? extends U> transformer;
5809 final BiFun<? super U, ? super U, ? extends U> reducer;
5810 U result;
5811 MapReduceEntriesTask<K,V,U> rights, nextRight;
5812 MapReduceEntriesTask
5813 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5814 MapReduceEntriesTask<K,V,U> nextRight,
5815 Fun<Map.Entry<K,V>, ? extends U> transformer,
5816 BiFun<? super U, ? super U, ? extends U> reducer) {
5817 super(m, p, b); this.nextRight = nextRight;
5818 this.transformer = transformer;
5819 this.reducer = reducer;
5820 }
5821 public final U getRawResult() { return result; }
5822 @SuppressWarnings("unchecked") public final void compute() {
5823 final Fun<Map.Entry<K,V>, ? extends U> transformer;
5824 final BiFun<? super U, ? super U, ? extends U> reducer;
5825 if ((transformer = this.transformer) != null &&
5826 (reducer = this.reducer) != null) {
5827 for (int b; (b = preSplit()) > 0;)
5828 (rights = new MapReduceEntriesTask<K,V,U>
5829 (map, this, b, rights, transformer, reducer)).fork();
5830 U r = null, u;
5831 Object v;
5832 while ((v = advance()) != null) {
5833 if ((u = transformer.apply(entryFor((K)nextKey,
5834 (V)v))) != null)
5835 r = (r == null) ? u : reducer.apply(r, u);
5836 }
5837 result = r;
5838 CountedCompleter<?> c;
5839 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5840 MapReduceEntriesTask<K,V,U>
5841 t = (MapReduceEntriesTask<K,V,U>)c,
5842 s = t.rights;
5843 while (s != null) {
5844 U tr, sr;
5845 if ((sr = s.result) != null)
5846 t.result = (((tr = t.result) == null) ? sr :
5847 reducer.apply(tr, sr));
5848 s = t.rights = s.nextRight;
5849 }
5850 }
5851 }
5852 }
5853 }
5854
5855 @SuppressWarnings("serial") static final class MapReduceMappingsTask<K,V,U>
5856 extends Traverser<K,V,U> {
5857 final BiFun<? super K, ? super V, ? extends U> transformer;
5858 final BiFun<? super U, ? super U, ? extends U> reducer;
5859 U result;
5860 MapReduceMappingsTask<K,V,U> rights, nextRight;
5861 MapReduceMappingsTask
5862 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5863 MapReduceMappingsTask<K,V,U> nextRight,
5864 BiFun<? super K, ? super V, ? extends U> transformer,
5865 BiFun<? super U, ? super U, ? extends U> reducer) {
5866 super(m, p, b); this.nextRight = nextRight;
5867 this.transformer = transformer;
5868 this.reducer = reducer;
5869 }
5870 public final U getRawResult() { return result; }
5871 @SuppressWarnings("unchecked") public final void compute() {
5872 final BiFun<? super K, ? super V, ? extends U> transformer;
5873 final BiFun<? super U, ? super U, ? extends U> reducer;
5874 if ((transformer = this.transformer) != null &&
5875 (reducer = this.reducer) != null) {
5876 for (int b; (b = preSplit()) > 0;)
5877 (rights = new MapReduceMappingsTask<K,V,U>
5878 (map, this, b, rights, transformer, reducer)).fork();
5879 U r = null, u;
5880 Object v;
5881 while ((v = advance()) != null) {
5882 if ((u = transformer.apply((K)nextKey, (V)v)) != null)
5883 r = (r == null) ? u : reducer.apply(r, u);
5884 }
5885 result = r;
5886 CountedCompleter<?> c;
5887 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5888 MapReduceMappingsTask<K,V,U>
5889 t = (MapReduceMappingsTask<K,V,U>)c,
5890 s = t.rights;
5891 while (s != null) {
5892 U tr, sr;
5893 if ((sr = s.result) != null)
5894 t.result = (((tr = t.result) == null) ? sr :
5895 reducer.apply(tr, sr));
5896 s = t.rights = s.nextRight;
5897 }
5898 }
5899 }
5900 }
5901 }
5902
5903 @SuppressWarnings("serial") static final class MapReduceKeysToDoubleTask<K,V>
5904 extends Traverser<K,V,Double> {
5905 final ObjectToDouble<? super K> transformer;
5906 final DoubleByDoubleToDouble reducer;
5907 final double basis;
5908 double result;
5909 MapReduceKeysToDoubleTask<K,V> rights, nextRight;
5910 MapReduceKeysToDoubleTask
5911 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5912 MapReduceKeysToDoubleTask<K,V> nextRight,
5913 ObjectToDouble<? super K> transformer,
5914 double basis,
5915 DoubleByDoubleToDouble reducer) {
5916 super(m, p, b); this.nextRight = nextRight;
5917 this.transformer = transformer;
5918 this.basis = basis; this.reducer = reducer;
5919 }
5920 public final Double getRawResult() { return result; }
5921 @SuppressWarnings("unchecked") public final void compute() {
5922 final ObjectToDouble<? super K> transformer;
5923 final DoubleByDoubleToDouble reducer;
5924 if ((transformer = this.transformer) != null &&
5925 (reducer = this.reducer) != null) {
5926 double r = this.basis;
5927 for (int b; (b = preSplit()) > 0;)
5928 (rights = new MapReduceKeysToDoubleTask<K,V>
5929 (map, this, b, rights, transformer, r, reducer)).fork();
5930 while (advance() != null)
5931 r = reducer.apply(r, transformer.apply((K)nextKey));
5932 result = r;
5933 CountedCompleter<?> c;
5934 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5935 MapReduceKeysToDoubleTask<K,V>
5936 t = (MapReduceKeysToDoubleTask<K,V>)c,
5937 s = t.rights;
5938 while (s != null) {
5939 t.result = reducer.apply(t.result, s.result);
5940 s = t.rights = s.nextRight;
5941 }
5942 }
5943 }
5944 }
5945 }
5946
5947 @SuppressWarnings("serial") static final class MapReduceValuesToDoubleTask<K,V>
5948 extends Traverser<K,V,Double> {
5949 final ObjectToDouble<? super V> transformer;
5950 final DoubleByDoubleToDouble reducer;
5951 final double basis;
5952 double result;
5953 MapReduceValuesToDoubleTask<K,V> rights, nextRight;
5954 MapReduceValuesToDoubleTask
5955 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
5956 MapReduceValuesToDoubleTask<K,V> nextRight,
5957 ObjectToDouble<? super V> transformer,
5958 double basis,
5959 DoubleByDoubleToDouble reducer) {
5960 super(m, p, b); this.nextRight = nextRight;
5961 this.transformer = transformer;
5962 this.basis = basis; this.reducer = reducer;
5963 }
5964 public final Double getRawResult() { return result; }
5965 @SuppressWarnings("unchecked") public final void compute() {
5966 final ObjectToDouble<? super V> transformer;
5967 final DoubleByDoubleToDouble reducer;
5968 if ((transformer = this.transformer) != null &&
5969 (reducer = this.reducer) != null) {
5970 double r = this.basis;
5971 for (int b; (b = preSplit()) > 0;)
5972 (rights = new MapReduceValuesToDoubleTask<K,V>
5973 (map, this, b, rights, transformer, r, reducer)).fork();
5974 Object v;
5975 while ((v = advance()) != null)
5976 r = reducer.apply(r, transformer.apply((V)v));
5977 result = r;
5978 CountedCompleter<?> c;
5979 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5980 MapReduceValuesToDoubleTask<K,V>
5981 t = (MapReduceValuesToDoubleTask<K,V>)c,
5982 s = t.rights;
5983 while (s != null) {
5984 t.result = reducer.apply(t.result, s.result);
5985 s = t.rights = s.nextRight;
5986 }
5987 }
5988 }
5989 }
5990 }
5991
5992 @SuppressWarnings("serial") static final class MapReduceEntriesToDoubleTask<K,V>
5993 extends Traverser<K,V,Double> {
5994 final ObjectToDouble<Map.Entry<K,V>> transformer;
5995 final DoubleByDoubleToDouble reducer;
5996 final double basis;
5997 double result;
5998 MapReduceEntriesToDoubleTask<K,V> rights, nextRight;
5999 MapReduceEntriesToDoubleTask
6000 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
6001 MapReduceEntriesToDoubleTask<K,V> nextRight,
6002 ObjectToDouble<Map.Entry<K,V>> transformer,
6003 double basis,
6004 DoubleByDoubleToDouble reducer) {
6005 super(m, p, b); this.nextRight = nextRight;
6006 this.transformer = transformer;
6007 this.basis = basis; this.reducer = reducer;
6008 }
6009 public final Double getRawResult() { return result; }
6010 @SuppressWarnings("unchecked") public final void compute() {
6011 final ObjectToDouble<Map.Entry<K,V>> transformer;
6012 final DoubleByDoubleToDouble reducer;
6013 if ((transformer = this.transformer) != null &&
6014 (reducer = this.reducer) != null) {
6015 double r = this.basis;
6016 for (int b; (b = preSplit()) > 0;)
6017 (rights = new MapReduceEntriesToDoubleTask<K,V>
6018 (map, this, b, rights, transformer, r, reducer)).fork();
6019 Object v;
6020 while ((v = advance()) != null)
6021 r = reducer.apply(r, transformer.apply(entryFor((K)nextKey,
6022 (V)v)));
6023 result = r;
6024 CountedCompleter<?> c;
6025 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6026 MapReduceEntriesToDoubleTask<K,V>
6027 t = (MapReduceEntriesToDoubleTask<K,V>)c,
6028 s = t.rights;
6029 while (s != null) {
6030 t.result = reducer.apply(t.result, s.result);
6031 s = t.rights = s.nextRight;
6032 }
6033 }
6034 }
6035 }
6036 }
6037
6038 @SuppressWarnings("serial") static final class MapReduceMappingsToDoubleTask<K,V>
6039 extends Traverser<K,V,Double> {
6040 final ObjectByObjectToDouble<? super K, ? super V> transformer;
6041 final DoubleByDoubleToDouble reducer;
6042 final double basis;
6043 double result;
6044 MapReduceMappingsToDoubleTask<K,V> rights, nextRight;
6045 MapReduceMappingsToDoubleTask
6046 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
6047 MapReduceMappingsToDoubleTask<K,V> nextRight,
6048 ObjectByObjectToDouble<? super K, ? super V> transformer,
6049 double basis,
6050 DoubleByDoubleToDouble reducer) {
6051 super(m, p, b); this.nextRight = nextRight;
6052 this.transformer = transformer;
6053 this.basis = basis; this.reducer = reducer;
6054 }
6055 public final Double getRawResult() { return result; }
6056 @SuppressWarnings("unchecked") public final void compute() {
6057 final ObjectByObjectToDouble<? super K, ? super V> transformer;
6058 final DoubleByDoubleToDouble reducer;
6059 if ((transformer = this.transformer) != null &&
6060 (reducer = this.reducer) != null) {
6061 double r = this.basis;
6062 for (int b; (b = preSplit()) > 0;)
6063 (rights = new MapReduceMappingsToDoubleTask<K,V>
6064 (map, this, b, rights, transformer, r, reducer)).fork();
6065 Object v;
6066 while ((v = advance()) != null)
6067 r = reducer.apply(r, transformer.apply((K)nextKey, (V)v));
6068 result = r;
6069 CountedCompleter<?> c;
6070 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6071 MapReduceMappingsToDoubleTask<K,V>
6072 t = (MapReduceMappingsToDoubleTask<K,V>)c,
6073 s = t.rights;
6074 while (s != null) {
6075 t.result = reducer.apply(t.result, s.result);
6076 s = t.rights = s.nextRight;
6077 }
6078 }
6079 }
6080 }
6081 }
6082
6083 @SuppressWarnings("serial") static final class MapReduceKeysToLongTask<K,V>
6084 extends Traverser<K,V,Long> {
6085 final ObjectToLong<? super K> transformer;
6086 final LongByLongToLong reducer;
6087 final long basis;
6088 long result;
6089 MapReduceKeysToLongTask<K,V> rights, nextRight;
6090 MapReduceKeysToLongTask
6091 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
6092 MapReduceKeysToLongTask<K,V> nextRight,
6093 ObjectToLong<? super K> transformer,
6094 long basis,
6095 LongByLongToLong reducer) {
6096 super(m, p, b); this.nextRight = nextRight;
6097 this.transformer = transformer;
6098 this.basis = basis; this.reducer = reducer;
6099 }
6100 public final Long getRawResult() { return result; }
6101 @SuppressWarnings("unchecked") public final void compute() {
6102 final ObjectToLong<? super K> transformer;
6103 final LongByLongToLong reducer;
6104 if ((transformer = this.transformer) != null &&
6105 (reducer = this.reducer) != null) {
6106 long r = this.basis;
6107 for (int b; (b = preSplit()) > 0;)
6108 (rights = new MapReduceKeysToLongTask<K,V>
6109 (map, this, b, rights, transformer, r, reducer)).fork();
6110 while (advance() != null)
6111 r = reducer.apply(r, transformer.apply((K)nextKey));
6112 result = r;
6113 CountedCompleter<?> c;
6114 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6115 MapReduceKeysToLongTask<K,V>
6116 t = (MapReduceKeysToLongTask<K,V>)c,
6117 s = t.rights;
6118 while (s != null) {
6119 t.result = reducer.apply(t.result, s.result);
6120 s = t.rights = s.nextRight;
6121 }
6122 }
6123 }
6124 }
6125 }
6126
6127 @SuppressWarnings("serial") static final class MapReduceValuesToLongTask<K,V>
6128 extends Traverser<K,V,Long> {
6129 final ObjectToLong<? super V> transformer;
6130 final LongByLongToLong reducer;
6131 final long basis;
6132 long result;
6133 MapReduceValuesToLongTask<K,V> rights, nextRight;
6134 MapReduceValuesToLongTask
6135 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
6136 MapReduceValuesToLongTask<K,V> nextRight,
6137 ObjectToLong<? super V> transformer,
6138 long basis,
6139 LongByLongToLong reducer) {
6140 super(m, p, b); this.nextRight = nextRight;
6141 this.transformer = transformer;
6142 this.basis = basis; this.reducer = reducer;
6143 }
6144 public final Long getRawResult() { return result; }
6145 @SuppressWarnings("unchecked") public final void compute() {
6146 final ObjectToLong<? super V> transformer;
6147 final LongByLongToLong reducer;
6148 if ((transformer = this.transformer) != null &&
6149 (reducer = this.reducer) != null) {
6150 long r = this.basis;
6151 for (int b; (b = preSplit()) > 0;)
6152 (rights = new MapReduceValuesToLongTask<K,V>
6153 (map, this, b, rights, transformer, r, reducer)).fork();
6154 Object v;
6155 while ((v = advance()) != null)
6156 r = reducer.apply(r, transformer.apply((V)v));
6157 result = r;
6158 CountedCompleter<?> c;
6159 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6160 MapReduceValuesToLongTask<K,V>
6161 t = (MapReduceValuesToLongTask<K,V>)c,
6162 s = t.rights;
6163 while (s != null) {
6164 t.result = reducer.apply(t.result, s.result);
6165 s = t.rights = s.nextRight;
6166 }
6167 }
6168 }
6169 }
6170 }
6171
6172 @SuppressWarnings("serial") static final class MapReduceEntriesToLongTask<K,V>
6173 extends Traverser<K,V,Long> {
6174 final ObjectToLong<Map.Entry<K,V>> transformer;
6175 final LongByLongToLong reducer;
6176 final long basis;
6177 long result;
6178 MapReduceEntriesToLongTask<K,V> rights, nextRight;
6179 MapReduceEntriesToLongTask
6180 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
6181 MapReduceEntriesToLongTask<K,V> nextRight,
6182 ObjectToLong<Map.Entry<K,V>> transformer,
6183 long basis,
6184 LongByLongToLong reducer) {
6185 super(m, p, b); this.nextRight = nextRight;
6186 this.transformer = transformer;
6187 this.basis = basis; this.reducer = reducer;
6188 }
6189 public final Long getRawResult() { return result; }
6190 @SuppressWarnings("unchecked") public final void compute() {
6191 final ObjectToLong<Map.Entry<K,V>> transformer;
6192 final LongByLongToLong reducer;
6193 if ((transformer = this.transformer) != null &&
6194 (reducer = this.reducer) != null) {
6195 long r = this.basis;
6196 for (int b; (b = preSplit()) > 0;)
6197 (rights = new MapReduceEntriesToLongTask<K,V>
6198 (map, this, b, rights, transformer, r, reducer)).fork();
6199 Object v;
6200 while ((v = advance()) != null)
6201 r = reducer.apply(r, transformer.apply(entryFor((K)nextKey,
6202 (V)v)));
6203 result = r;
6204 CountedCompleter<?> c;
6205 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6206 MapReduceEntriesToLongTask<K,V>
6207 t = (MapReduceEntriesToLongTask<K,V>)c,
6208 s = t.rights;
6209 while (s != null) {
6210 t.result = reducer.apply(t.result, s.result);
6211 s = t.rights = s.nextRight;
6212 }
6213 }
6214 }
6215 }
6216 }
6217
6218 @SuppressWarnings("serial") static final class MapReduceMappingsToLongTask<K,V>
6219 extends Traverser<K,V,Long> {
6220 final ObjectByObjectToLong<? super K, ? super V> transformer;
6221 final LongByLongToLong reducer;
6222 final long basis;
6223 long result;
6224 MapReduceMappingsToLongTask<K,V> rights, nextRight;
6225 MapReduceMappingsToLongTask
6226 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
6227 MapReduceMappingsToLongTask<K,V> nextRight,
6228 ObjectByObjectToLong<? super K, ? super V> transformer,
6229 long basis,
6230 LongByLongToLong reducer) {
6231 super(m, p, b); this.nextRight = nextRight;
6232 this.transformer = transformer;
6233 this.basis = basis; this.reducer = reducer;
6234 }
6235 public final Long getRawResult() { return result; }
6236 @SuppressWarnings("unchecked") public final void compute() {
6237 final ObjectByObjectToLong<? super K, ? super V> transformer;
6238 final LongByLongToLong reducer;
6239 if ((transformer = this.transformer) != null &&
6240 (reducer = this.reducer) != null) {
6241 long r = this.basis;
6242 for (int b; (b = preSplit()) > 0;)
6243 (rights = new MapReduceMappingsToLongTask<K,V>
6244 (map, this, b, rights, transformer, r, reducer)).fork();
6245 Object v;
6246 while ((v = advance()) != null)
6247 r = reducer.apply(r, transformer.apply((K)nextKey, (V)v));
6248 result = r;
6249 CountedCompleter<?> c;
6250 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6251 MapReduceMappingsToLongTask<K,V>
6252 t = (MapReduceMappingsToLongTask<K,V>)c,
6253 s = t.rights;
6254 while (s != null) {
6255 t.result = reducer.apply(t.result, s.result);
6256 s = t.rights = s.nextRight;
6257 }
6258 }
6259 }
6260 }
6261 }
6262
6263 @SuppressWarnings("serial") static final class MapReduceKeysToIntTask<K,V>
6264 extends Traverser<K,V,Integer> {
6265 final ObjectToInt<? super K> transformer;
6266 final IntByIntToInt reducer;
6267 final int basis;
6268 int result;
6269 MapReduceKeysToIntTask<K,V> rights, nextRight;
6270 MapReduceKeysToIntTask
6271 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
6272 MapReduceKeysToIntTask<K,V> nextRight,
6273 ObjectToInt<? super K> transformer,
6274 int basis,
6275 IntByIntToInt reducer) {
6276 super(m, p, b); this.nextRight = nextRight;
6277 this.transformer = transformer;
6278 this.basis = basis; this.reducer = reducer;
6279 }
6280 public final Integer getRawResult() { return result; }
6281 @SuppressWarnings("unchecked") public final void compute() {
6282 final ObjectToInt<? super K> transformer;
6283 final IntByIntToInt reducer;
6284 if ((transformer = this.transformer) != null &&
6285 (reducer = this.reducer) != null) {
6286 int r = this.basis;
6287 for (int b; (b = preSplit()) > 0;)
6288 (rights = new MapReduceKeysToIntTask<K,V>
6289 (map, this, b, rights, transformer, r, reducer)).fork();
6290 while (advance() != null)
6291 r = reducer.apply(r, transformer.apply((K)nextKey));
6292 result = r;
6293 CountedCompleter<?> c;
6294 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6295 MapReduceKeysToIntTask<K,V>
6296 t = (MapReduceKeysToIntTask<K,V>)c,
6297 s = t.rights;
6298 while (s != null) {
6299 t.result = reducer.apply(t.result, s.result);
6300 s = t.rights = s.nextRight;
6301 }
6302 }
6303 }
6304 }
6305 }
6306
6307 @SuppressWarnings("serial") static final class MapReduceValuesToIntTask<K,V>
6308 extends Traverser<K,V,Integer> {
6309 final ObjectToInt<? super V> transformer;
6310 final IntByIntToInt reducer;
6311 final int basis;
6312 int result;
6313 MapReduceValuesToIntTask<K,V> rights, nextRight;
6314 MapReduceValuesToIntTask
6315 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
6316 MapReduceValuesToIntTask<K,V> nextRight,
6317 ObjectToInt<? super V> transformer,
6318 int basis,
6319 IntByIntToInt reducer) {
6320 super(m, p, b); this.nextRight = nextRight;
6321 this.transformer = transformer;
6322 this.basis = basis; this.reducer = reducer;
6323 }
6324 public final Integer getRawResult() { return result; }
6325 @SuppressWarnings("unchecked") public final void compute() {
6326 final ObjectToInt<? super V> transformer;
6327 final IntByIntToInt reducer;
6328 if ((transformer = this.transformer) != null &&
6329 (reducer = this.reducer) != null) {
6330 int r = this.basis;
6331 for (int b; (b = preSplit()) > 0;)
6332 (rights = new MapReduceValuesToIntTask<K,V>
6333 (map, this, b, rights, transformer, r, reducer)).fork();
6334 Object v;
6335 while ((v = advance()) != null)
6336 r = reducer.apply(r, transformer.apply((V)v));
6337 result = r;
6338 CountedCompleter<?> c;
6339 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6340 MapReduceValuesToIntTask<K,V>
6341 t = (MapReduceValuesToIntTask<K,V>)c,
6342 s = t.rights;
6343 while (s != null) {
6344 t.result = reducer.apply(t.result, s.result);
6345 s = t.rights = s.nextRight;
6346 }
6347 }
6348 }
6349 }
6350 }
6351
6352 @SuppressWarnings("serial") static final class MapReduceEntriesToIntTask<K,V>
6353 extends Traverser<K,V,Integer> {
6354 final ObjectToInt<Map.Entry<K,V>> transformer;
6355 final IntByIntToInt reducer;
6356 final int basis;
6357 int result;
6358 MapReduceEntriesToIntTask<K,V> rights, nextRight;
6359 MapReduceEntriesToIntTask
6360 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
6361 MapReduceEntriesToIntTask<K,V> nextRight,
6362 ObjectToInt<Map.Entry<K,V>> transformer,
6363 int basis,
6364 IntByIntToInt reducer) {
6365 super(m, p, b); this.nextRight = nextRight;
6366 this.transformer = transformer;
6367 this.basis = basis; this.reducer = reducer;
6368 }
6369 public final Integer getRawResult() { return result; }
6370 @SuppressWarnings("unchecked") public final void compute() {
6371 final ObjectToInt<Map.Entry<K,V>> transformer;
6372 final IntByIntToInt reducer;
6373 if ((transformer = this.transformer) != null &&
6374 (reducer = this.reducer) != null) {
6375 int r = this.basis;
6376 for (int b; (b = preSplit()) > 0;)
6377 (rights = new MapReduceEntriesToIntTask<K,V>
6378 (map, this, b, rights, transformer, r, reducer)).fork();
6379 Object v;
6380 while ((v = advance()) != null)
6381 r = reducer.apply(r, transformer.apply(entryFor((K)nextKey,
6382 (V)v)));
6383 result = r;
6384 CountedCompleter<?> c;
6385 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6386 MapReduceEntriesToIntTask<K,V>
6387 t = (MapReduceEntriesToIntTask<K,V>)c,
6388 s = t.rights;
6389 while (s != null) {
6390 t.result = reducer.apply(t.result, s.result);
6391 s = t.rights = s.nextRight;
6392 }
6393 }
6394 }
6395 }
6396 }
6397
6398 @SuppressWarnings("serial") static final class MapReduceMappingsToIntTask<K,V>
6399 extends Traverser<K,V,Integer> {
6400 final ObjectByObjectToInt<? super K, ? super V> transformer;
6401 final IntByIntToInt reducer;
6402 final int basis;
6403 int result;
6404 MapReduceMappingsToIntTask<K,V> rights, nextRight;
6405 MapReduceMappingsToIntTask
6406 (ConcurrentHashMap<K,V> m, Traverser<K,V,?> p, int b,
6407 MapReduceMappingsToIntTask<K,V> nextRight,
6408 ObjectByObjectToInt<? super K, ? super V> transformer,
6409 int basis,
6410 IntByIntToInt reducer) {
6411 super(m, p, b); this.nextRight = nextRight;
6412 this.transformer = transformer;
6413 this.basis = basis; this.reducer = reducer;
6414 }
6415 public final Integer getRawResult() { return result; }
6416 @SuppressWarnings("unchecked") public final void compute() {
6417 final ObjectByObjectToInt<? super K, ? super V> transformer;
6418 final IntByIntToInt reducer;
6419 if ((transformer = this.transformer) != null &&
6420 (reducer = this.reducer) != null) {
6421 int r = this.basis;
6422 for (int b; (b = preSplit()) > 0;)
6423 (rights = new MapReduceMappingsToIntTask<K,V>
6424 (map, this, b, rights, transformer, r, reducer)).fork();
6425 Object v;
6426 while ((v = advance()) != null)
6427 r = reducer.apply(r, transformer.apply((K)nextKey, (V)v));
6428 result = r;
6429 CountedCompleter<?> c;
6430 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6431 MapReduceMappingsToIntTask<K,V>
6432 t = (MapReduceMappingsToIntTask<K,V>)c,
6433 s = t.rights;
6434 while (s != null) {
6435 t.result = reducer.apply(t.result, s.result);
6436 s = t.rights = s.nextRight;
6437 }
6438 }
6439 }
6440 }
6441 }
6442
6443 // Unsafe mechanics
6444 private static final sun.misc.Unsafe U;
6445 private static final long SIZECTL;
6446 private static final long TRANSFERINDEX;
6447 private static final long TRANSFERORIGIN;
6448 private static final long BASECOUNT;
6449 private static final long COUNTERBUSY;
6450 private static final long CELLVALUE;
6451 private static final long ABASE;
6452 private static final int ASHIFT;
6453
6454 static {
6455 int ss;
6456 try {
6457 U = sun.misc.Unsafe.getUnsafe();
6458 Class<?> k = ConcurrentHashMap.class;
6459 SIZECTL = U.objectFieldOffset
6460 (k.getDeclaredField("sizeCtl"));
6461 TRANSFERINDEX = U.objectFieldOffset
6462 (k.getDeclaredField("transferIndex"));
6463 TRANSFERORIGIN = U.objectFieldOffset
6464 (k.getDeclaredField("transferOrigin"));
6465 BASECOUNT = U.objectFieldOffset
6466 (k.getDeclaredField("baseCount"));
6467 COUNTERBUSY = U.objectFieldOffset
6468 (k.getDeclaredField("counterBusy"));
6469 Class<?> ck = CounterCell.class;
6470 CELLVALUE = U.objectFieldOffset
6471 (ck.getDeclaredField("value"));
6472 Class<?> sc = Node[].class;
6473 ABASE = U.arrayBaseOffset(sc);
6474 ss = U.arrayIndexScale(sc);
6475 ASHIFT = 31 - Integer.numberOfLeadingZeros(ss);
6476 } catch (Exception e) {
6477 throw new Error(e);
6478 }
6479 if ((ss & (ss-1)) != 0)
6480 throw new Error("data type scale not a power of two");
6481 }
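    // Illustrative note (a sketch, not verbatim from this file): the offsets
    // above are used elsewhere in this class with sun.misc.Unsafe primitives,
    // along the lines of:
    //
    //   U.compareAndSwapInt(this, SIZECTL, expected, update);  // CAS the sizeCtl field
    //   Object head = U.getObjectVolatile
    //       (tab, ((long)i << ASHIFT) + ABASE);                // volatile read of tab[i]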
6482 }