--- jsr166/src/jsr166e/ConcurrentHashMapV8.java 2011/09/10 01:38:28 1.19 +++ jsr166/src/jsr166e/ConcurrentHashMapV8.java 2012/12/14 16:33:42 1.83 @@ -5,7 +5,9 @@ */ package jsr166e; -import jsr166e.LongAdder; + +import java.util.Comparator; +import java.util.Arrays; import java.util.Map; import java.util.Set; import java.util.Collection; @@ -19,6 +21,9 @@ import java.util.Enumeration; import java.util.ConcurrentModificationException; import java.util.NoSuchElementException; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.locks.AbstractQueuedSynchronizer; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import java.io.Serializable; /** @@ -33,33 +38,37 @@ import java.io.Serializable; * interoperable with {@code Hashtable} in programs that rely on its * thread safety but not on its synchronization details. * - *

Retrieval operations (including {@code get}) generally do not + *

Retrieval operations (including {@code get}) generally do not * block, so may overlap with update operations (including {@code put} * and {@code remove}). Retrievals reflect the results of the most * recently completed update operations holding upon their - * onset. For aggregate operations such as {@code putAll} and {@code - * clear}, concurrent retrievals may reflect insertion or removal of - * only some entries. Similarly, Iterators and Enumerations return - * elements reflecting the state of the hash table at some point at or - * since the creation of the iterator/enumeration. They do - * not throw {@link ConcurrentModificationException}. - * However, iterators are designed to be used by only one thread at a - * time. Bear in mind that the results of aggregate status methods - * including {@code size}, {@code isEmpty}, and {@code containsValue} - * are typically useful only when a map is not undergoing concurrent - * updates in other threads. Otherwise the results of these methods - * reflect transient states that may be adequate for monitoring - * or estimation purposes, but not for program control. + * onset. (More formally, an update operation for a given key bears a + * happens-before relation with any (non-null) retrieval for + * that key reporting the updated value.) For aggregate operations + * such as {@code putAll} and {@code clear}, concurrent retrievals may + * reflect insertion or removal of only some entries. Similarly, + * Iterators and Enumerations return elements reflecting the state of + * the hash table at some point at or since the creation of the + * iterator/enumeration. They do not throw {@link + * ConcurrentModificationException}. However, iterators are designed + * to be used by only one thread at a time. Bear in mind that the + * results of aggregate status methods including {@code size}, {@code + * isEmpty}, and {@code containsValue} are typically useful only when + * a map is not undergoing concurrent updates in other threads. + * Otherwise the results of these methods reflect transient states + * that may be adequate for monitoring or estimation purposes, but not + * for program control. * - *
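To make the happens-before guarantee concrete, here is a minimal sketch (the Box type, its payload field, and the key are illustrative, not part of this class): a retrieval that returns non-null is guaranteed to observe every write performed before the corresponding insertion.

    import jsr166e.ConcurrentHashMapV8;

    class HappensBeforeSketch {
        static final class Box { int payload; }

        final ConcurrentHashMapV8<String, Box> map =
            new ConcurrentHashMapV8<String, Box>();

        void writer() {
            Box b = new Box();
            b.payload = 42;      // ordinary write ...
            map.put("key", b);   // ... published by the completed update
        }

        void reader() {
            Box b = map.get("key");       // non-null only after a completed put
            if (b != null)
                assert b.payload == 42;   // visible via the happens-before edge
        }
    }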

The table is dynamically expanded when there are too many + *

The table is dynamically expanded when there are too many * collisions (i.e., keys that have distinct hash codes but fall into * the same slot modulo the table size), with the expected average - * effect of maintaining roughly two bins per mapping. There may be - * much variance around this average as mappings are added and - * removed, but overall, this maintains a commonly accepted time/space - * tradeoff for hash tables. However, resizing this or any other kind - * of hash table may be a relatively slow operation. When possible, it - * is a good idea to provide a size estimate as an optional {@code + * effect of maintaining roughly two bins per mapping (corresponding + * to a 0.75 load factor threshold for resizing). There may be much + * variance around this average as mappings are added and removed, but + * overall, this maintains a commonly accepted time/space tradeoff for + * hash tables. However, resizing this or any other kind of hash + * table may be a relatively slow operation. When possible, it is a + * good idea to provide a size estimate as an optional {@code * initialCapacity} constructor argument. An additional optional * {@code loadFactor} constructor argument provides a further means of * customizing initial table capacity by specifying the table density @@ -68,51 +77,212 @@ import java.io.Serializable; * versions of this class, constructors may optionally specify an * expected {@code concurrencyLevel} as an additional hint for * internal sizing. Note that using many keys with exactly the same - * {@code hashCode{}} is a sure way to slow down performance of any + * {@code hashCode()} is a sure way to slow down performance of any * hash table. * + *

A {@link Set} projection of a ConcurrentHashMapV8 may be created + * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed + * (using {@link #keySet(Object)}) when only keys are of interest, and the + * mapped values are (perhaps transiently) not used or all take the + * same mapping value. + * + *
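For illustration, a minimal sketch of both projections, assuming the returned views implement Set and that keys added through the keySet(Object) view are bound to the supplied default value (the String elements and Boolean.TRUE default are example choices):

    import java.util.Set;
    import jsr166e.ConcurrentHashMapV8;

    class KeySetSketch {
        void demo() {
            // Standalone concurrent set backed by a fresh ConcurrentHashMapV8
            Set<String> ids = ConcurrentHashMapV8.<String>newKeySet();
            ids.add("alpha");

            // Set view of an existing map; additions use the supplied default value
            ConcurrentHashMapV8<String, Boolean> seen =
                new ConcurrentHashMapV8<String, Boolean>();
            Set<String> seenKeys = seen.keySet(Boolean.TRUE);
            seenKeys.add("beta");              // same effect as seen.put("beta", Boolean.TRUE)
            boolean sawAlpha = seenKeys.contains("alpha");
        }
    }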

A ConcurrentHashMapV8 can be used as a scalable frequency map (a + * form of histogram or multiset) by using {@link LongAdder} values + * and initializing via {@link #computeIfAbsent}. For example, to add + * a count to a {@code ConcurrentHashMapV8<String,LongAdder> freqs}, you + * can use {@code freqs.computeIfAbsent(k, k -> new + * LongAdder()).increment();} + * *
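A slightly fuller sketch of that pattern; the word-counting scenario and names are illustrative, and the lambda notation assumes a Java 8 compiler (an anonymous implementation of the transitional Fun interface is the pre-8 equivalent):

    import jsr166e.ConcurrentHashMapV8;
    import jsr166e.LongAdder;

    class FrequencySketch {
        final ConcurrentHashMapV8<String, LongAdder> freqs =
            new ConcurrentHashMapV8<String, LongAdder>();

        void count(String[] words) {
            for (String w : words)
                freqs.computeIfAbsent(w, k -> new LongAdder()).increment();
        }

        long countOf(String word) {
            LongAdder a = freqs.get(word);    // may race with writers; counts are a snapshot
            return (a == null) ? 0L : a.sum();
        }
    }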

This class and its views and iterators implement all of the * optional methods of the {@link Map} and {@link Iterator} * interfaces. * - *

Like {@link Hashtable} but unlike {@link HashMap}, this class + *

Like {@link Hashtable} but unlike {@link HashMap}, this class * does not allow {@code null} to be used as a key or value. * + *

ConcurrentHashMapV8s support parallel operations using the {@link + * ForkJoinPool#commonPool}. (Tasks that may be used in other contexts + * are available in class {@link ForkJoinTasks}). These operations are + * designed to be safely, and often sensibly, applied even with maps + * that are being concurrently updated by other threads; for example, + * when computing a snapshot summary of the values in a shared + * registry. There are three kinds of operation, each with four + * forms, accepting functions with Keys, Values, Entries, and (Key, + * Value) arguments and/or return values. (The first three forms are + * also available via the {@link #keySet()}, {@link #values()} and + * {@link #entrySet()} views). Because the elements of a + * ConcurrentHashMapV8 are not ordered in any particular way, and may be + * processed in different orders in different parallel executions, the + * correctness of supplied functions should not depend on any + * ordering, or on any other objects or values that may transiently + * change while computation is in progress; and except for forEach + * actions, should ideally be side-effect-free. + * + *
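As an illustration of that requirement, a sketch contrasting a function that is suitable to supply to such operations with one that is not; the Fun shape follows the transitional interfaces this class declares (see the jsr166e note below), and the constants and trace buffer are example names:

    import jsr166e.ConcurrentHashMapV8;

    class BulkFunctionSketch {
        // Suitable: pure, and independent of traversal order
        static final ConcurrentHashMapV8.Fun<Long, Long> DOUBLED =
            new ConcurrentHashMapV8.Fun<Long, Long>() {
                public Long apply(Long v) { return v * 2; }
            };

        // Unsuitable: appends to shared mutable state, so its output depends
        // on encounter order and on concurrent interleavings
        static final StringBuilder trace = new StringBuilder();
        static final ConcurrentHashMapV8.Fun<Long, Long> TRACED =
            new ConcurrentHashMapV8.Fun<Long, Long>() {
                public Long apply(Long v) { trace.append(v).append(' '); return v; }
            };
    }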

+ * + *

The concurrency properties of bulk operations follow + * from those of ConcurrentHashMapV8: Any non-null result returned + * from {@code get(key)} and related access methods bears a + * happens-before relation with the associated insertion or + * update. The result of any bulk operation reflects the + * composition of these per-element relations (but is not + * necessarily atomic with respect to the map as a whole unless it + * is somehow known to be quiescent). Conversely, because keys + * and values in the map are never null, null serves as a reliable + * atomic indicator of the current lack of any result. To + * maintain this property, null serves as an implicit basis for + * all non-scalar reduction operations. For the double, long, and + * int versions, the basis should be one that, when combined with + * any other value, returns that other value (more formally, it + * should be the identity element for the reduction). Most common + * reductions have these properties; for example, computing a sum + * with basis 0 or a minimum with basis MAX_VALUE. + * + *
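A standalone sketch of the identity-element requirement, not tied to any particular bulk method: folding with the identity as the basis leaves every other value unchanged, so an empty or filtered-out contribution cannot perturb the result.

    class BasisSketch {
        static long minOf(long[] values) {
            long min = Long.MAX_VALUE;      // identity for min: min(MAX_VALUE, x) == x
            for (long v : values) min = Math.min(min, v);
            return min;
        }

        static long sumOf(long[] values) {
            long sum = 0L;                  // identity for +: 0 + x == x
            for (long v : values) sum += v;
            return sum;
        }
    }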

Search and transformation functions provided as arguments + * should similarly return null to indicate the lack of any result + * (in which case it is not used). In the case of mapped + * reductions, this also enables transformations to serve as + * filters, returning null (or, in the case of primitive + * specializations, the identity basis) if the element should not + * be combined. You can create compound transformations and + * filterings by composing them yourself under this "null means + * there is nothing there now" rule before using them in search or + * reduce operations. + * + *

Methods accepting and/or returning Entry arguments maintain + * key-value associations. They may be useful for example when + * finding the key for the greatest value. Note that "plain" Entry + * arguments can be supplied using {@code new + * AbstractMap.SimpleEntry(k,v)}. + * + *

Bulk operations may complete abruptly, throwing an + * exception encountered in the application of a supplied + * function. Bear in mind when handling such exceptions that other + * concurrently executing functions could also have thrown + * exceptions, or would have done so if the first exception had + * not occurred. + * + *

Parallel speedups for bulk operations compared to sequential + * processing are common but not guaranteed. Operations involving + * brief functions on small maps may execute more slowly than + * sequential loops if the underlying work to parallelize the + * computation is more expensive than the computation itself. + * Similarly, parallelization may not lead to much actual parallelism + * if all processors are busy performing unrelated tasks. + * + *

All arguments to all task methods must be non-null. + * + *

jsr166e note: During transition, this class + * uses nested functional interfaces with different names but the + * same forms as those expected for JDK8. + * *
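For example, a sketch of invoking computeIfAbsent through one of these transitional interfaces from pre-lambda source; the multimap-style use case and names are illustrative:

    import java.util.ArrayList;
    import java.util.List;
    import jsr166e.ConcurrentHashMapV8;

    class TransitionalInterfaceSketch {
        final ConcurrentHashMapV8<String, List<String>> index =
            new ConcurrentHashMapV8<String, List<String>>();

        List<String> bucketFor(String key) {
            // Fun<A,T> plays the role of JDK8's Function: T apply(A a)
            return index.computeIfAbsent(key,
                new ConcurrentHashMapV8.Fun<String, List<String>>() {
                    public List<String> apply(String k) {
                        return new ArrayList<String>();   // evaluated only if key is absent
                    }
                });
        }
    }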

This class is a member of the * * Java Collections Framework. * - *

jsr166e note: This class is a candidate replacement for - * java.util.concurrent.ConcurrentHashMap. - * - * @since 1.8 + * @since 1.5 * @author Doug Lea * @param the type of keys maintained by this map * @param the type of mapped values */ public class ConcurrentHashMapV8 - implements ConcurrentMap, Serializable { + implements ConcurrentMap, Serializable { private static final long serialVersionUID = 7249069246763182397L; /** - * A function computing a mapping from the given key to a value, - * or {@code null} if there is no mapping. This is a place-holder - * for an upcoming JDK8 interface. - */ - public static interface MappingFunction { - /** - * Returns a value for the given key, or null if there is no - * mapping. If this function throws an (unchecked) exception, - * the exception is rethrown to its caller, and no mapping is - * recorded. Because this function is invoked within - * atomicity control, the computation should be short and - * simple. The most common usage is to construct a new object - * serving as an initial mapped value. + * A partitionable iterator. A Spliterator can be traversed + * directly, but can also be partitioned (before traversal) by + * creating another Spliterator that covers a non-overlapping + * portion of the elements, and so may be amenable to parallel + * execution. + * + *

This interface exports a subset of expected JDK8 + * functionality. + * + *

Sample usage: Here is one (of the several) ways to compute + * the sum of the values held in a map using the ForkJoin + * framework. As illustrated here, Spliterators are well suited to + * designs in which a task repeatedly splits off half its work + * into forked subtasks until small enough to process directly, + * and then joins these subtasks. Variants of this style can also + * be used in completion-based designs. + * + *

+     * {@code ConcurrentHashMapV8<String, Long> m = ...
+     * // split as if have 8 * parallelism, for load balance
+     * int n = m.size();
+     * int p = aForkJoinPool.getParallelism() * 8;
+     * int split = (n < p)? n : p;
+     * long sum = aForkJoinPool.invoke(new SumValues(m.valueSpliterator(), split, null));
+     * // ...
+     * static class SumValues extends RecursiveTask<Long> {
+     *   final Spliterator<Long> s;
+     *   final int split;             // split while > 1
+     *   final SumValues nextJoin;    // records forked subtasks to join
+     *   SumValues(Spliterator<Long> s, int split, SumValues nextJoin) {
+     *     this.s = s; this.split = split; this.nextJoin = nextJoin;
+     *   }
+     *   public Long compute() {
+     *     long sum = 0;
+     *     SumValues subtasks = null; // fork subtasks
+     *     for (int h = split >>> 1; h > 0; h >>>= 1)
+     *       (subtasks = new SumValues(s.split(), h, subtasks)).fork();
+     *     while (s.hasNext())        // directly process remaining elements
+     *       sum += s.next();
+     *     for (SumValues t = subtasks; t != null; t = t.nextJoin)
+     *       sum += t.join();         // collect subtask results
+     *     return sum;
+     *   }
+     * }
+     * }
+ */ + public static interface Spliterator extends Iterator { + /** + * Returns a Spliterator covering approximately half of the + * elements, guaranteed not to overlap with those subsequently + * returned by this Spliterator. After invoking this method, + * the current Spliterator will not produce any of + * the elements of the returned Spliterator, but the two + * Spliterators together will produce all of the elements that + * would have been produced by this Spliterator had this + * method not been called. The exact number of elements + * produced by the returned Spliterator is not guaranteed, and + * may be zero (i.e., with {@code hasNext()} reporting {@code + * false}) if this Spliterator cannot be further split. * - * @param key the (non-null) key - * @return a value, or null if none + * @return a Spliterator covering approximately half of the + * elements + * @throws IllegalStateException if this Spliterator has + * already commenced traversing elements */ - V map(K key); + Spliterator split(); } /* @@ -121,128 +291,183 @@ public class ConcurrentHashMapV8 * The primary design goal of this hash table is to maintain * concurrent readability (typically method get(), but also * iterators and related methods) while minimizing update - * contention. + * contention. Secondary goals are to keep space consumption about + * the same or better than java.util.HashMap, and to support high + * initial insertion rates on an empty table by many threads. * * Each key-value mapping is held in a Node. Because Node fields * can contain special values, they are defined using plain Object * types. Similarly in turn, all internal methods that use them * work off Object types. And similarly, so do the internal - * methods of auxiliary iterator and view classes. All public - * generic typed methods relay in/out of these internal methods, - * supplying null-checks and casts as needed. + * methods of auxiliary iterator and view classes. This also + * allows many of the public methods to be factored into a smaller + * number of internal methods (although sadly not so for the five + * variants of put-related operations). The validation-based + * approach explained below leads to a lot of code sprawl because + * retry-control precludes factoring into smaller methods. * * The table is lazily initialized to a power-of-two size upon the - * first insertion. Each bin in the table contains a list of - * Nodes (most often, zero or one Node). Table accesses require - * volatile/atomic reads, writes, and CASes. Because there is no - * other way to arrange this without adding further indirections, - * we use intrinsics (sun.misc.Unsafe) operations. The lists of - * nodes within bins are always accurately traversable under - * volatile reads, so long as lookups check hash code and - * non-nullness of value before checking key equality. (All valid - * hash codes are nonnegative. Negative values are reserved for - * special forwarding nodes; see below.) + * first insertion. Each bin in the table normally contains a + * list of Nodes (most often, the list has only zero or one Node). + * Table accesses require volatile/atomic reads, writes, and + * CASes. Because there is no other way to arrange this without + * adding further indirections, we use intrinsics + * (sun.misc.Unsafe) operations. The lists of nodes within bins + * are always accurately traversable under volatile reads, so long + * as lookups check hash code and non-nullness of value before + * checking key equality. 
+ * + * We use the top (sign) bit of Node hash fields for control + * purposes -- it is available anyway because of addressing + * constraints. Nodes with negative hash fields are forwarding + * nodes to either TreeBins or resized tables. The lower 31 bits + * of each normal Node's hash field contain a transformation of + * the key's hash code. * - * Insertion (via put or putIfAbsent) of the first node in an + * Insertion (via put or its variants) of the first node in an * empty bin is performed by just CASing it to the bin. This is - * on average by far the most common case for put operations. - * Other update operations (insert, delete, and replace) require - * locks. We do not want to waste the space required to associate - * a distinct lock object with each bin, so instead use the first - * node of a bin list itself as a lock, using plain "synchronized" - * locks. These save space and we can live with block-structured - * lock/unlock operations. Using the first node of a list as a - * lock does not by itself suffice though: When a node is locked, - * any update must first validate that it is still the first node, - * and retry if not. Because new nodes are always appended to - * lists, once a node is first in a bin, it remains first until - * deleted or the bin becomes invalidated. However, operations - * that only conditionally update can and sometimes do inspect - * nodes until the point of update. This is a converse of sorts to - * the lazy locking technique described by Herlihy & Shavit. + * by far the most common case for put operations under most + * key/hash distributions. Other update operations (insert, + * delete, and replace) require locks. We do not want to waste + * the space required to associate a distinct lock object with + * each bin, so instead use the first node of a bin list itself as + * a lock. Locking support for these locks relies on builtin + * "synchronized" monitors. + * + * Using the first node of a list as a lock does not by itself + * suffice though: When a node is locked, any update must first + * validate that it is still the first node after locking it, and + * retry if not. Because new nodes are always appended to lists, + * once a node is first in a bin, it remains first until deleted + * or the bin becomes invalidated (upon resizing). However, + * operations that only conditionally update may inspect nodes + * until the point of update. This is a converse of sorts to the + * lazy locking technique described by Herlihy & Shavit. * - * The main disadvantage of this approach is that most update + * The main disadvantage of per-bin locks is that other update * operations on other nodes in a bin list protected by the same * lock can stall, for example when user equals() or mapping - * functions take a long time. However, statistically, this is - * not a common enough problem to outweigh the time/space overhead - * of alternatives: Under random hash codes, the frequency of - * nodes in bins follows a Poisson distribution + * functions take a long time. However, statistically, under + * random hash codes, this is not a common problem. Ideally, the + * frequency of nodes in bins follows a Poisson distribution * (http://en.wikipedia.org/wiki/Poisson_distribution) with a * parameter of about 0.5 on average, given the resizing threshold * of 0.75, although with a large variance because of resizing * granularity. Ignoring variance, the expected occurrences of * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). 
The - * first few values are: + * first values are: * - * 0: 0.607 - * 1: 0.303 - * 2: 0.076 - * 3: 0.012 - * more: 0.002 + * 0: 0.60653066 + * 1: 0.30326533 + * 2: 0.07581633 + * 3: 0.01263606 + * 4: 0.00157952 + * 5: 0.00015795 + * 6: 0.00001316 + * 7: 0.00000094 + * 8: 0.00000006 + * more: less than 1 in ten million * * Lock contention probability for two threads accessing distinct - * elements is roughly 1 / (8 * #elements). Function "spread" - * performs hashCode randomization that improves the likelihood - * that these assumptions hold unless users define exactly the - * same value for too many hashCodes. - * - * The table is resized when occupancy exceeds a threshold. Only - * a single thread performs the resize (using field "resizing", to - * arrange exclusion), but the table otherwise remains usable for - * reads and updates. Resizing proceeds by transferring bins, one - * by one, from the table to the next table. Upon transfer, the - * old table bin contains only a special forwarding node (with - * negative hash field) that contains the next table as its - * key. On encountering a forwarding node, access and update - * operations restart, using the new table. To ensure concurrent - * readability of traversals, transfers must proceed from the last - * bin (table.length - 1) up towards the first. Upon seeing a - * forwarding node, traversals (see class InternalIterator) - * arrange to move to the new table for the rest of the traversal - * without revisiting nodes. This constrains bin transfers to a - * particular order, and so can block indefinitely waiting for the - * next lock, and other threads cannot help with the transfer. - * However, expected stalls are infrequent enough to not warrant - * the additional overhead of access and iteration schemes that - * could admit out-of-order or concurrent bin transfers. - * - * This traversal scheme also applies to partial traversals of - * ranges of bins (via an alternate InternalIterator constructor) - * to support partitioned aggregate operations (that are not - * otherwise implemented yet). Also, read-only operations give up - * if ever forwarded to a null table, which provides support for - * shutdown-style clearing, which is also not currently - * implemented. + * elements is roughly 1 / (8 * #elements) under random hashes. + * + * Actual hash code distributions encountered in practice + * sometimes deviate significantly from uniform randomness. This + * includes the case when N > (1<<30), so some keys MUST collide. + * Similarly for dumb or hostile usages in which multiple keys are + * designed to have identical hash codes. Also, although we guard + * against the worst effects of this (see method spread), sets of + * hashes may differ only in bits that do not impact their bin + * index for a given power-of-two mask. So we use a secondary + * strategy that applies when the number of nodes in a bin exceeds + * a threshold, and at least one of the keys implements + * Comparable. These TreeBins use a balanced tree to hold nodes + * (a specialized form of red-black trees), bounding search time + * to O(log N). Each search step in a TreeBin is around twice as + * slow as in a regular list, but given that N cannot exceed + * (1<<64) (before running out of addresses) this bounds search + * steps, lock hold times, etc, to reasonable constants (roughly + * 100 nodes inspected per operation worst case) so long as keys + * are Comparable (which is very common -- String, Long, etc). 
+ * TreeBin nodes (TreeNodes) also maintain the same "next" + * traversal pointers as regular nodes, so can be traversed in + * iterators in the same way. + * + * The table is resized when occupancy exceeds a percentage + * threshold (nominally, 0.75, but see below). Any thread + * noticing an overfull bin may assist in resizing after the + * initiating thread allocates and sets up the replacement + * array. However, rather than stalling, these other threads may + * proceed with insertions etc. The use of TreeBins shields us + * from the worst case effects of overfilling while resizes are in + * progress. Resizing proceeds by transferring bins, one by one, + * from the table to the next table. To enable concurrency, the + * next table must be (incrementally) prefilled with place-holders + * serving as reverse forwarders to the old table. Because we are + * using power-of-two expansion, the elements from each bin must + * either stay at same index, or move with a power of two + * offset. We eliminate unnecessary node creation by catching + * cases where old nodes can be reused because their next fields + * won't change. On average, only about one-sixth of them need + * cloning when a table doubles. The nodes they replace will be + * garbage collectable as soon as they are no longer referenced by + * any reader thread that may be in the midst of concurrently + * traversing table. Upon transfer, the old table bin contains + * only a special forwarding node (with hash field "MOVED") that + * contains the next table as its key. On encountering a + * forwarding node, access and update operations restart, using + * the new table. + * + * Each bin transfer requires its bin lock, which can stall + * waiting for locks while resizing. However, because other + * threads can join in and help resize rather than contend for + * locks, average aggregate waits become shorter as resizing + * progresses. The transfer operation must also ensure that all + * accessible bins in both the old and new table are usable by any + * traversal. This is arranged by proceeding from the last bin + * (table.length - 1) up towards the first. Upon seeing a + * forwarding node, traversals (see class Traverser) arrange to + * move to the new table without revisiting nodes. However, to + * ensure that no intervening nodes are skipped, bin splitting can + * only begin after the associated reverse-forwarders are in + * place. + * + * The traversal scheme also applies to partial traversals of + * ranges of bins (via an alternate Traverser constructor) + * to support partitioned aggregate operations. Also, read-only + * operations give up if ever forwarded to a null table, which + * provides support for shutdown-style clearing, which is also not + * currently implemented. * * Lazy table initialization minimizes footprint until first use, * and also avoids resizings when the first operation is from a * putAll, constructor with map argument, or deserialization. - * These cases attempt to override the targetCapacity used in - * growTable. These harmlessly fail to take effect in cases of - * races with other ongoing resizings. Uses of the threshold and - * targetCapacity during attempted initializations or resizings - * are racy but fall back on checks to preserve correctness. - * - * The element count is maintained using a LongAdder, which avoids - * contention on updates but can encounter cache thrashing if read - * too frequently during concurrent access. 
To avoid reading so - * often, resizing is normally attempted only upon adding to a bin - * already holding two or more nodes. Under uniform hash + * These cases attempt to override the initial capacity settings, + * but harmlessly fail to take effect in cases of races. + * + * The element count is maintained using a specialization of + * LongAdder. We need to incorporate a specialization rather than + * just use a LongAdder in order to access implicit + * contention-sensing that leads to creation of multiple + * CounterCells. The counter mechanics avoid contention on + * updates but can encounter cache thrashing if read too + * frequently during concurrent access. To avoid reading so often, + * resizing under contention is attempted only upon adding to a + * bin already holding two or more nodes. Under uniform hash * distributions, the probability of this occurring at threshold * is around 13%, meaning that only about 1 in 8 puts check - * threshold (and after resizing, many fewer do so). But this - * approximation has high variance for small table sizes, so we - * check on any collision for sizes <= 64. Further, to increase - * the probability that a resize occurs soon enough, we offset the - * threshold (see THRESHOLD_OFFSET) by the expected number of puts - * between checks. + * threshold (and after resizing, many fewer do so). The bulk + * putAll operation further reduces contention by only committing + * count updates upon these size checks. * * Maintaining API and serialization compatibility with previous * versions of this class introduces several oddities. Mainly: We * leave untouched but unused constructor arguments refering to - * concurrencyLevel. We also declare an unused "Segment" class - * that is instantiated in minimal form only when serializing. + * concurrencyLevel. We accept a loadFactor constructor argument, + * but apply it only to initial table capacity (which is the only + * time that we can guarantee to honor it.) We also declare an + * unused "Segment" class that is instantiated in minimal form + * only when serializing. */ /* ---------------- Constants -------------- */ @@ -250,7 +475,9 @@ public class ConcurrentHashMapV8 /** * The largest possible table capacity. This value must be * exactly 1<<30 to stay within Java array allocation and indexing - * bounds for power of two table sizes. + * bounds for power of two table sizes, and is further required + * because the top two bits of 32bit hash fields are used for + * control purposes. */ private static final int MAXIMUM_CAPACITY = 1 << 30; @@ -261,58 +488,89 @@ public class ConcurrentHashMapV8 private static final int DEFAULT_CAPACITY = 16; /** + * The largest possible (non-power of two) array size. + * Needed by toArray and related methods. + */ + static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; + + /** + * The default concurrency level for this table. Unused but + * defined for compatibility with previous versions of this class. + */ + private static final int DEFAULT_CONCURRENCY_LEVEL = 16; + + /** * The load factor for this table. Overrides of this value in * constructors affect only the initial table capacity. The - * actual floating point value isn't normally used, because it is - * simpler to rely on the expression {@code n - (n >>> 2)} for the - * associated resizing threshold. + * actual floating point value isn't normally used -- it is + * simpler to use expressions such as {@code n - (n >>> 2)} for + * the associated resizing threshold. 
*/ private static final float LOAD_FACTOR = 0.75f; /** - * The count value to offset thresholds to compensate for checking - * for the need to resize only when inserting into bins with two - * or more elements. See above for explanation. + * The bin count threshold for using a tree rather than list for a + * bin. The value reflects the approximate break-even point for + * using tree-based operations. */ - private static final int THRESHOLD_OFFSET = 8; + private static final int TREE_THRESHOLD = 8; /** - * The default concurrency level for this table. Unused except as - * a sizing hint, but defined for compatibility with previous - * versions of this class. + * Minimum number of rebinnings per transfer step. Ranges are + * subdivided to allow multiple resizer threads. This value + * serves as a lower bound to avoid resizers encountering + * excessive memory contention. The value should be at least + * DEFAULT_CAPACITY. */ - private static final int DEFAULT_CONCURRENCY_LEVEL = 16; + private static final int MIN_TRANSFER_STRIDE = 16; - /* ---------------- Nodes -------------- */ + /* + * Encodings for Node hash fields. See above for explanation. + */ + static final int MOVED = 0x80000000; // hash field for forwarding nodes + static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash + + /** Number of CPUS, to place bounds on some sizings */ + static final int NCPU = Runtime.getRuntime().availableProcessors(); + + /* ---------------- Counters -------------- */ + + // Adapted from LongAdder and Striped64. + // See their internal docs for explanation. + + // A padded cell for distributing counts + static final class CounterCell { + volatile long p0, p1, p2, p3, p4, p5, p6; + volatile long value; + volatile long q0, q1, q2, q3, q4, q5, q6; + CounterCell(long x) { value = x; } + } /** - * Key-value entry. Note that this is never exported out as a - * user-visible Map.Entry. Nodes with a negative hash field are - * special, and do not contain user keys or values. Otherwise, - * keys are never null, and null val fields indicate that a node - * is in the process of being deleted or created. For purposes of - * read-only, access, a key may be read before a val, but can only - * be used after checking val. (For an update operation, when a - * lock is held on a node, order doesn't matter.) + * Holder for the thread-local hash code determining which + * CounterCell to use. The code is initialized via the + * counterHashCodeGenerator, but may be moved upon collisions. */ - static final class Node { - final int hash; - final Object key; - volatile Object val; - volatile Node next; - - Node(int hash, Object key, Object val, Node next) { - this.hash = hash; - this.key = key; - this.val = val; - this.next = next; - } + static final class CounterHashCode { + int code; } /** - * Sign bit of node hash value indicating to use table in node.key. + * Generates initial value for per-thread CounterHashCodes + */ + static final AtomicInteger counterHashCodeGenerator = new AtomicInteger(); + + /** + * Increment for counterHashCodeGenerator. See class ThreadLocal + * for explanation. */ - private static final int SIGN_BIT = 0x80000000; + static final int SEED_INCREMENT = 0x61c88647; + + /** + * Per-thread counter hash codes. 
Shared across all instances + */ + static final ThreadLocal threadCounterHashCode = + new ThreadLocal(); /* ---------------- Fields -------------- */ @@ -322,19 +580,52 @@ public class ConcurrentHashMapV8 */ transient volatile Node[] table; - /** The counter maintaining number of elements. */ - private transient final LongAdder counter; - /** Nonzero when table is being initialized or resized. Updated via CAS. */ - private transient volatile int resizing; - /** The next element count value upon which to resize the table. */ - private transient int threshold; - /** The target capacity; volatile to cover initialization races. */ - private transient volatile int targetCapacity; + /** + * The next table to use; non-null only while resizing. + */ + private transient volatile Node[] nextTable; + + /** + * Base counter value, used mainly when there is no contention, + * but also as a fallback during table initialization + * races. Updated via CAS. + */ + private transient volatile long baseCount; + + /** + * Table initialization and resizing control. When negative, the + * table is being initialized or resized: -1 for initialization, + * else -(1 + the number of active resizing threads). Otherwise, + * when table is null, holds the initial table size to use upon + * creation, or 0 for default. After initialization, holds the + * next element count value upon which to resize the table. + */ + private transient volatile int sizeCtl; + + /** + * The next table index (plus one) to split while resizing. + */ + private transient volatile int transferIndex; + + /** + * The least available table index to split while resizing. + */ + private transient volatile int transferOrigin; + + /** + * Spinlock (locked via CAS) used when resizing and/or creating Cells. + */ + private transient volatile int counterBusy; + + /** + * Table of counter cells. When non-null, size is a power of 2. + */ + private transient volatile CounterCell[] counterCells; // views - private transient KeySet keySet; - private transient Values values; - private transient EntrySet entrySet; + private transient KeySetView keySet; + private transient ValuesView values; + private transient EntrySetView entrySet; /** For serialization compatibility. Null unless serialized; see below */ private Segment[] segments; @@ -353,437 +644,1613 @@ public class ConcurrentHashMapV8 * inline assignments below. */ - static final Node tabAt(Node[] tab, int i) { // used by InternalIterator - return (Node)UNSAFE.getObjectVolatile(tab, ((long)i<>> 1; - n |= n >>> 2; - n |= n >>> 4; - n |= n >>> 8; - n |= n >>> 16; - return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1; + static class Node { + final int hash; + final Object key; + volatile Object val; + volatile Node next; + + Node(int hash, Object key, Object val, Node next) { + this.hash = hash; + this.key = key; + this.val = val; + this.next = next; + } } + /* ---------------- TreeBins -------------- */ + /** - * If not already resizing, initializes or creates next table and - * transfers bins. Initial table size uses the capacity recorded - * in targetCapacity. Rechecks occupancy after a transfer to see - * if another resize is already needed because resizings are - * lagging additions. - * - * @return current table - */ - private final Node[] growTable() { - if (resizing == 0 && - UNSAFE.compareAndSwapInt(this, resizingOffset, 0, 1)) { - try { - for (;;) { - Node[] tab = table; - int n, c, m; - if (tab == null) - n = (c = targetCapacity) > 0 ? 
c : DEFAULT_CAPACITY; - else if ((m = tab.length) < MAXIMUM_CAPACITY && - counter.sum() >= (long)threshold) - n = m << 1; - else - break; - threshold = n - (n >>> 2) - THRESHOLD_OFFSET; - Node[] nextTab = new Node[n]; - if (tab != null) - transfer(tab, nextTab, - new Node(SIGN_BIT, nextTab, null, null)); - table = nextTab; - if (tab == null) - break; - } - } finally { - resizing = 0; - } + * Nodes for use in TreeBins + */ + static final class TreeNode extends Node { + TreeNode parent; // red-black tree links + TreeNode left; + TreeNode right; + TreeNode prev; // needed to unlink next upon deletion + boolean red; + + TreeNode(int hash, Object key, Object val, Node next, TreeNode parent) { + super(hash, key, val, next); + this.parent = parent; } - else if (table == null) - Thread.yield(); // lost initialization race; just spin - return table; } - /* - * Reclassifies nodes in each bin to new table. Because we are - * using power-of-two expansion, the elements from each bin must - * either stay at same index, or move with a power of two - * offset. We eliminate unnecessary node creation by catching - * cases where old nodes can be reused because their next fields - * won't change. Statistically, only about one-sixth of them need - * cloning when a table doubles. The nodes they replace will be - * garbage collectable as soon as they are no longer referenced by - * any reader thread that may be in the midst of concurrently - * traversing table. + /** + * A specialized form of red-black tree for use in bins + * whose size exceeds a threshold. * - * Transfers are done from the bottom up to preserve iterator - * traversability. On each step, the old bin is locked, - * moved/copied, and then replaced with a forwarding node. - */ - private static final void transfer(Node[] tab, Node[] nextTab, Node fwd) { - int n = tab.length; - Node ignore = nextTab[n + n - 1]; // force bounds check - for (int i = n - 1; i >= 0; --i) { - for (Node e;;) { - if ((e = tabAt(tab, i)) != null) { - boolean validated = false; - synchronized (e) { - if (tabAt(tab, i) == e) { - validated = true; - Node lo = null, hi = null, lastRun = e; - int runBit = e.hash & n; - for (Node p = e.next; p != null; p = p.next) { - int b = p.hash & n; - if (b != runBit) { - runBit = b; - lastRun = p; + * TreeBins use a special form of comparison for search and + * related operations (which is the main reason we cannot use + * existing collections such as TreeMaps). TreeBins contain + * Comparable elements, but may contain others, as well as + * elements that are Comparable but not necessarily Comparable + * for the same T, so we cannot invoke compareTo among them. To + * handle this, the tree is ordered primarily by hash value, then + * by getClass().getName() order, and then by Comparator order + * among elements of the same class. On lookup at a node, if + * elements are not comparable or compare as 0, both left and + * right children may need to be searched in the case of tied hash + * values. (This corresponds to the full list search that would be + * necessary if all elements were non-Comparable and had tied + * hashes.) The red-black balancing code is updated from + * pre-jdk-collections + * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) + * based in turn on Cormen, Leiserson, and Rivest "Introduction to + * Algorithms" (CLR). + * + * TreeBins also maintain a separate locking discipline than + * regular bins. 
Because they are forwarded via special MOVED + * nodes at bin heads (which can never change once established), + * we cannot use those nodes as locks. Instead, TreeBin + * extends AbstractQueuedSynchronizer to support a simple form of + * read-write lock. For update operations and table validation, + * the exclusive form of lock behaves in the same way as bin-head + * locks. However, lookups use shared read-lock mechanics to allow + * multiple readers in the absence of writers. Additionally, + * these lookups do not ever block: While the lock is not + * available, they proceed along the slow traversal path (via + * next-pointers) until the lock becomes available or the list is + * exhausted, whichever comes first. (These cases are not fast, + * but maximize aggregate expected throughput.) The AQS mechanics + * for doing this are straightforward. The lock state is held as + * AQS getState(). Read counts are negative; the write count (1) + * is positive. There are no signalling preferences among readers + * and writers. Since we don't need to export full Lock API, we + * just override the minimal AQS methods and use them directly. + */ + static final class TreeBin extends AbstractQueuedSynchronizer { + private static final long serialVersionUID = 2249069246763182397L; + transient TreeNode root; // root of tree + transient TreeNode first; // head of next-pointer list + + /* AQS overrides */ + public final boolean isHeldExclusively() { return getState() > 0; } + public final boolean tryAcquire(int ignore) { + if (compareAndSetState(0, 1)) { + setExclusiveOwnerThread(Thread.currentThread()); + return true; + } + return false; + } + public final boolean tryRelease(int ignore) { + setExclusiveOwnerThread(null); + setState(0); + return true; + } + public final int tryAcquireShared(int ignore) { + for (int c;;) { + if ((c = getState()) > 0) + return -1; + if (compareAndSetState(c, c -1)) + return 1; + } + } + public final boolean tryReleaseShared(int ignore) { + int c; + do {} while (!compareAndSetState(c = getState(), c + 1)); + return c == -1; + } + + /** From CLR */ + private void rotateLeft(TreeNode p) { + if (p != null) { + TreeNode r = p.right, pp, rl; + if ((rl = p.right = r.left) != null) + rl.parent = p; + if ((pp = r.parent = p.parent) == null) + root = r; + else if (pp.left == p) + pp.left = r; + else + pp.right = r; + r.left = p; + p.parent = r; + } + } + + /** From CLR */ + private void rotateRight(TreeNode p) { + if (p != null) { + TreeNode l = p.left, pp, lr; + if ((lr = p.left = l.right) != null) + lr.parent = p; + if ((pp = l.parent = p.parent) == null) + root = l; + else if (pp.right == p) + pp.right = l; + else + pp.left = l; + l.right = p; + p.parent = l; + } + } + + /** + * Returns the TreeNode (or null if not found) for the given key + * starting at given root. + */ + @SuppressWarnings("unchecked") final TreeNode getTreeNode + (int h, Object k, TreeNode p) { + Class c = k.getClass(); + while (p != null) { + int dir, ph; Object pk; Class pc; + if ((ph = p.hash) == h) { + if ((pk = p.key) == k || k.equals(pk)) + return p; + if (c != (pc = pk.getClass()) || + !(k instanceof Comparable) || + (dir = ((Comparable)k).compareTo((Comparable)pk)) == 0) { + if ((dir = (c == pc) ? 
0 : + c.getName().compareTo(pc.getName())) == 0) { + TreeNode r = null, pl, pr; // check both sides + if ((pr = p.right) != null && h >= pr.hash && + (r = getTreeNode(h, k, pr)) != null) + return r; + else if ((pl = p.left) != null && h <= pl.hash) + dir = -1; + else // nothing there + return null; + } + } + } + else + dir = (h < ph) ? -1 : 1; + p = (dir > 0) ? p.right : p.left; + } + return null; + } + + /** + * Wrapper for getTreeNode used by CHM.get. Tries to obtain + * read-lock to call getTreeNode, but during failure to get + * lock, searches along next links. + */ + final Object getValue(int h, Object k) { + Node r = null; + int c = getState(); // Must read lock state first + for (Node e = first; e != null; e = e.next) { + if (c <= 0 && compareAndSetState(c, c - 1)) { + try { + r = getTreeNode(h, k, root); + } finally { + releaseShared(0); + } + break; + } + else if (e.hash == h && k.equals(e.key)) { + r = e; + break; + } + else + c = getState(); + } + return r == null ? null : r.val; + } + + /** + * Finds or adds a node. + * @return null if added + */ + @SuppressWarnings("unchecked") final TreeNode putTreeNode + (int h, Object k, Object v) { + Class c = k.getClass(); + TreeNode pp = root, p = null; + int dir = 0; + while (pp != null) { // find existing node or leaf to insert at + int ph; Object pk; Class pc; + p = pp; + if ((ph = p.hash) == h) { + if ((pk = p.key) == k || k.equals(pk)) + return p; + if (c != (pc = pk.getClass()) || + !(k instanceof Comparable) || + (dir = ((Comparable)k).compareTo((Comparable)pk)) == 0) { + TreeNode s = null, r = null, pr; + if ((dir = (c == pc) ? 0 : + c.getName().compareTo(pc.getName())) == 0) { + if ((pr = p.right) != null && h >= pr.hash && + (r = getTreeNode(h, k, pr)) != null) + return r; + else // continue left + dir = -1; + } + else if ((pr = p.right) != null && h >= pr.hash) + s = pr; + if (s != null && (r = getTreeNode(h, k, s)) != null) + return r; + } + } + else + dir = (h < ph) ? -1 : 1; + pp = (dir > 0) ? p.right : p.left; + } + + TreeNode f = first; + TreeNode x = first = new TreeNode(h, k, v, f, p); + if (p == null) + root = x; + else { // attach and rebalance; adapted from CLR + TreeNode xp, xpp; + if (f != null) + f.prev = x; + if (dir <= 0) + p.left = x; + else + p.right = x; + x.red = true; + while (x != null && (xp = x.parent) != null && xp.red && + (xpp = xp.parent) != null) { + TreeNode xppl = xpp.left; + if (xp == xppl) { + TreeNode y = xpp.right; + if (y != null && y.red) { + y.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.right) { + rotateLeft(x = xp); + xpp = (xp = x.parent) == null ? null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateRight(xpp); + } + } + } + } + else { + TreeNode y = xppl; + if (y != null && y.red) { + y.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.left) { + rotateRight(x = xp); + xpp = (xp = x.parent) == null ? 
null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateLeft(xpp); } } - if (runBit == 0) - lo = lastRun; - else - hi = lastRun; - for (Node p = e; p != lastRun; p = p.next) { - int ph = p.hash; - Object pk = p.key, pv = p.val; - if ((ph & n) == 0) - lo = new Node(ph, pk, pv, lo); - else - hi = new Node(ph, pk, pv, hi); - } - setTabAt(nextTab, i, lo); - setTabAt(nextTab, i + n, hi); - setTabAt(tab, i, fwd); } } - if (validated) + } + TreeNode r = root; + if (r != null && r.red) + r.red = false; + } + return null; + } + + /** + * Removes the given node, that must be present before this + * call. This is messier than typical red-black deletion code + * because we cannot swap the contents of an interior node + * with a leaf successor that is pinned by "next" pointers + * that are accessible independently of lock. So instead we + * swap the tree linkages. + */ + final void deleteTreeNode(TreeNode p) { + TreeNode next = (TreeNode)p.next; // unlink traversal pointers + TreeNode pred = p.prev; + if (pred == null) + first = next; + else + pred.next = next; + if (next != null) + next.prev = pred; + TreeNode replacement; + TreeNode pl = p.left; + TreeNode pr = p.right; + if (pl != null && pr != null) { + TreeNode s = pr, sl; + while ((sl = s.left) != null) // find successor + s = sl; + boolean c = s.red; s.red = p.red; p.red = c; // swap colors + TreeNode sr = s.right; + TreeNode pp = p.parent; + if (s == pr) { // p was s's direct parent + p.parent = s; + s.right = p; + } + else { + TreeNode sp = s.parent; + if ((p.parent = sp) != null) { + if (s == sp.left) + sp.left = p; + else + sp.right = p; + } + if ((s.right = pr) != null) + pr.parent = s; + } + p.left = null; + if ((p.right = sr) != null) + sr.parent = p; + if ((s.left = pl) != null) + pl.parent = s; + if ((s.parent = pp) == null) + root = s; + else if (p == pp.left) + pp.left = s; + else + pp.right = s; + replacement = sr; + } + else + replacement = (pl != null) ? pl : pr; + TreeNode pp = p.parent; + if (replacement == null) { + if (pp == null) { + root = null; + return; + } + replacement = p; + } + else { + replacement.parent = pp; + if (pp == null) + root = replacement; + else if (p == pp.left) + pp.left = replacement; + else + pp.right = replacement; + p.left = p.right = p.parent = null; + } + if (!p.red) { // rebalance, from CLR + TreeNode x = replacement; + while (x != null) { + TreeNode xp, xpl; + if (x.red || (xp = x.parent) == null) { + x.red = false; break; + } + if (x == (xpl = xp.left)) { + TreeNode sib = xp.right; + if (sib != null && sib.red) { + sib.red = false; + xp.red = true; + rotateLeft(xp); + sib = (xp = x.parent) == null ? null : xp.right; + } + if (sib == null) + x = xp; + else { + TreeNode sl = sib.left, sr = sib.right; + if ((sr == null || !sr.red) && + (sl == null || !sl.red)) { + sib.red = true; + x = xp; + } + else { + if (sr == null || !sr.red) { + if (sl != null) + sl.red = false; + sib.red = true; + rotateRight(sib); + sib = (xp = x.parent) == null ? + null : xp.right; + } + if (sib != null) { + sib.red = (xp == null) ? false : xp.red; + if ((sr = sib.right) != null) + sr.red = false; + } + if (xp != null) { + xp.red = false; + rotateLeft(xp); + } + x = root; + } + } + } + else { // symmetric + TreeNode sib = xpl; + if (sib != null && sib.red) { + sib.red = false; + xp.red = true; + rotateRight(xp); + sib = (xp = x.parent) == null ? 
null : xp.left; + } + if (sib == null) + x = xp; + else { + TreeNode sl = sib.left, sr = sib.right; + if ((sl == null || !sl.red) && + (sr == null || !sr.red)) { + sib.red = true; + x = xp; + } + else { + if (sl == null || !sl.red) { + if (sr != null) + sr.red = false; + sib.red = true; + rotateLeft(sib); + sib = (xp = x.parent) == null ? + null : xp.left; + } + if (sib != null) { + sib.red = (xp == null) ? false : xp.red; + if ((sl = sib.left) != null) + sl.red = false; + } + if (xp != null) { + xp.red = false; + rotateRight(xp); + } + x = root; + } + } + } } - else if (casTabAt(tab, i, e, fwd)) - break; + } + if (p == replacement && (pp = p.parent) != null) { + if (p == pp.left) // detach pointers + pp.left = null; + else if (p == pp.right) + pp.right = null; + p.parent = null; } } } - /* ---------------- Internal access and update methods -------------- */ + /* ---------------- Collision reduction methods -------------- */ /** - * Applies a supplemental hash function to a given hashCode, which - * defends against poor quality hash functions. The result must - * be non-negative, and for reasonable performance must have good - * avalanche properties; i.e., that each bit of the argument - * affects each bit (except sign bit) of the result. + * Spreads higher bits to lower, and also forces top bit to 0. + * Because the table uses power-of-two masking, sets of hashes + * that vary only in bits above the current mask will always + * collide. (Among known examples are sets of Float keys holding + * consecutive whole numbers in small tables.) To counter this, + * we apply a transform that spreads the impact of higher bits + * downward. There is a tradeoff between speed, utility, and + * quality of bit-spreading. Because many common sets of hashes + * are already reasonably distributed across bits (so don't benefit + * from spreading), and because we use trees to handle large sets + * of collisions in bins, we don't need excessively high quality. */ private static final int spread(int h) { - // Apply base step of MurmurHash; see http://code.google.com/p/smhasher/ - h ^= h >>> 16; - h *= 0x85ebca6b; - h ^= h >>> 13; - h *= 0xc2b2ae35; - return (h >>> 16) ^ (h & 0x7fffffff); // mask out sign bit + h ^= (h >>> 18) ^ (h >>> 12); + return (h ^ (h >>> 10)) & HASH_BITS; + } + + /** + * Replaces a list bin with a tree bin if key is comparable. Call + * only when locked. 
+ */ + private final void replaceWithTreeBin(Node[] tab, int index, Object key) { + if (key instanceof Comparable) { + TreeBin t = new TreeBin(); + for (Node e = tabAt(tab, index); e != null; e = e.next) + t.putTreeNode(e.hash, e.key, e.val); + setTabAt(tab, index, new Node(MOVED, t, null, null)); + } } + /* ---------------- Internal access and update methods -------------- */ + /** Implementation for get and containsKey */ - private final Object internalGet(Object k) { + @SuppressWarnings("unchecked") private final V internalGet(Object k) { int h = spread(k.hashCode()); retry: for (Node[] tab = table; tab != null;) { - Node e; Object ek, ev; int eh; // locals to read fields once + Node e; Object ek, ev; int eh; // locals to read fields once for (e = tabAt(tab, (tab.length - 1) & h); e != null; e = e.next) { - if ((eh = e.hash) == h) { - if ((ev = e.val) != null && - ((ek = e.key) == k || k.equals(ek))) - return ev; - } - else if (eh < 0) { // sign bit set - tab = (Node[])e.key; // bin was moved during resize - continue retry; + if ((eh = e.hash) < 0) { + if ((ek = e.key) instanceof TreeBin) // search TreeBin + return (V)((TreeBin)ek).getValue(h, k); + else { // restart with new table + tab = (Node[])ek; + continue retry; + } } + else if (eh == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return (V)ev; } break; } return null; } + /** + * Implementation for the four public remove/replace methods: + * Replaces node value with v, conditional upon match of cv if + * non-null. If resulting value is null, delete. + */ + @SuppressWarnings("unchecked") private final V internalReplace + (Object k, V v, Object cv) { + int h = spread(k.hashCode()); + Object oldVal = null; + for (Node[] tab = table;;) { + Node f; int i, fh; Object fk; + if (tab == null || + (f = tabAt(tab, i = (tab.length - 1) & h)) == null) + break; + else if ((fh = f.hash) < 0) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + boolean deleted = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) { + Object pv = p.val; + if (cv == null || cv == pv || cv.equals(pv)) { + oldVal = pv; + if ((p.val = v) == null) { + deleted = true; + t.deleteTreeNode(p); + } + } + } + } + } finally { + t.release(0); + } + if (validated) { + if (deleted) + addCount(-1L, -1); + break; + } + } + else + tab = (Node[])fk; + } + else if (fh != h && f.next == null) // precheck + break; // rules out possible existence + else { + boolean validated = false; + boolean deleted = false; + synchronized (f) { + if (tabAt(tab, i) == f) { + validated = true; + for (Node e = f, pred = null;;) { + Object ek, ev; + if (e.hash == h && + ((ev = e.val) != null) && + ((ek = e.key) == k || k.equals(ek))) { + if (cv == null || cv == ev || cv.equals(ev)) { + oldVal = ev; + if ((e.val = v) == null) { + deleted = true; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + } + break; + } + pred = e; + if ((e = e.next) == null) + break; + } + } + } + if (validated) { + if (deleted) + addCount(-1L, -1); + break; + } + } + } + return (V)oldVal; + } + + /* + * Internal versions of insertion methods + * All have the same basic structure as the first (internalPut): + * 1. If table uninitialized, create + * 2. If bin empty, try to CAS new node + * 3. If bin stale, use new table + * 4. if bin converted to TreeBin, validate and relay to TreeBin methods + * 5. 
Lock and validate; if valid, scan and add or update + * + * The putAll method differs mainly in attempting to pre-allocate + * enough table space, and also more lazily performs count updates + * and checks. + * + * Most of the function-accepting methods can't be factored nicely + * because they require different functional forms, so instead + * sprawl out similar mechanics. + */ + /** Implementation for put and putIfAbsent */ - private final Object internalPut(Object k, Object v, boolean replace) { + @SuppressWarnings("unchecked") private final V internalPut + (K k, V v, boolean onlyIfAbsent) { + if (k == null || v == null) throw new NullPointerException(); int h = spread(k.hashCode()); - Object oldVal = null; // previous value or null if none + int len = 0; for (Node[] tab = table;;) { - Node e; int i; Object ek, ev; + int i, fh; Node f; Object fk, fv; if (tab == null) - tab = growTable(); - else if ((e = tabAt(tab, i = (tab.length - 1) & h)) == null) { + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { if (casTabAt(tab, i, null, new Node(h, k, v, null))) break; // no lock when adding to empty bin } - else if (e.hash < 0) // resized -- restart with new table - tab = (Node[])e.key; - else if (!replace && e.hash == h && (ev = e.val) != null && - ((ek = e.key) == k || k.equals(ek))) { - if (tabAt(tab, i) == e) { // inspect and validate 1st node - oldVal = ev; // without lock for putIfAbsent - break; + else if ((fh = f.hash) < 0) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + Object oldVal = null; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + len = 2; + TreeNode p = t.putTreeNode(h, k, v); + if (p != null) { + oldVal = p.val; + if (!onlyIfAbsent) + p.val = v; + } + } + } finally { + t.release(0); + } + if (len != 0) { + if (oldVal != null) + return (V)oldVal; + break; + } } + else + tab = (Node[])fk; } + else if (onlyIfAbsent && fh == h && (fv = f.val) != null && + ((fk = f.key) == k || k.equals(fk))) // peek while nearby + return (V)fv; else { - boolean validated = false; - boolean checkSize = false; - synchronized (e) { // lock the 1st node of bin list - if (tabAt(tab, i) == e) { - validated = true; // retry if 1st already deleted - for (Node first = e;;) { + Object oldVal = null; + synchronized (f) { + if (tabAt(tab, i) == f) { + len = 1; + for (Node e = f;; ++len) { + Object ek, ev; if (e.hash == h && - ((ek = e.key) == k || k.equals(ek)) && - (ev = e.val) != null) { + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { oldVal = ev; - if (replace) + if (!onlyIfAbsent) e.val = v; break; } Node last = e; if ((e = e.next) == null) { last.next = new Node(h, k, v, null); - if (last != first || tab.length <= 64) - checkSize = true; + if (len >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); break; } } } } - if (validated) { - if (checkSize && tab.length < MAXIMUM_CAPACITY && - resizing == 0 && counter.sum() >= (long)threshold) - growTable(); + if (len != 0) { + if (oldVal != null) + return (V)oldVal; break; } } } - if (oldVal == null) - counter.increment(); // update counter outside of locks - return oldVal; + addCount(1L, len); + return null; } - /** - * Implementation for the four public remove/replace methods: - * Replaces node value with v, conditional upon match of cv if - * non-null. If resulting value is null, delete. 
- */ - private final Object internalReplace(Object k, Object v, Object cv) { + /** Implementation for computeIfAbsent */ + @SuppressWarnings("unchecked") private final V internalComputeIfAbsent + (K k, Fun mf) { + if (k == null || mf == null) + throw new NullPointerException(); int h = spread(k.hashCode()); + Object val = null; + int len = 0; for (Node[] tab = table;;) { - Node e; int i; - if (tab == null || - (e = tabAt(tab, i = (tab.length - 1) & h)) == null) - return null; - else if (e.hash < 0) - tab = (Node[])e.key; + Node f; int i; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + Node node = new Node(h, k, null, null); + synchronized (node) { + if (casTabAt(tab, i, null, node)) { + len = 1; + try { + if ((val = mf.apply(k)) != null) + node.val = val; + } finally { + if (val == null) + setTabAt(tab, i, null); + } + } + } + if (len != 0) + break; + } + else if (f.hash < 0) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean added = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + len = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) + val = p.val; + else if ((val = mf.apply(k)) != null) { + added = true; + len = 2; + t.putTreeNode(h, k, val); + } + } + } finally { + t.release(0); + } + if (len != 0) { + if (!added) + return (V)val; + break; + } + } + else + tab = (Node[])fk; + } else { - Object oldVal = null; - boolean validated = false; - boolean deleted = false; - synchronized (e) { - if (tabAt(tab, i) == e) { - validated = true; - Node pred = null; - do { + for (Node e = f; e != null; e = e.next) { // prescan + Object ek, ev; + if (e.hash == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return (V)ev; + } + boolean added = false; + synchronized (f) { + if (tabAt(tab, i) == f) { + len = 1; + for (Node e = f;; ++len) { Object ek, ev; if (e.hash == h && - ((ek = e.key) == k || k.equals(ek)) && - ((ev = e.val) != null)) { - if (cv == null || cv == ev || cv.equals(ev)) { - oldVal = ev; - if ((e.val = v) == null) { - deleted = true; - Node en = e.next; - if (pred != null) - pred.next = en; - else - setTabAt(tab, i, en); - } + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = ev; + break; + } + Node last = e; + if ((e = e.next) == null) { + if ((val = mf.apply(k)) != null) { + added = true; + last.next = new Node(h, k, val, null); + if (len >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); } break; } - } while ((e = (pred = e).next) != null); + } } } - if (validated) { - if (deleted) - counter.decrement(); - return oldVal; + if (len != 0) { + if (!added) + return (V)val; + break; } } } + if (val != null) + addCount(1L, len); + return (V)val; } - /** Implementation for computeIfAbsent and compute. Like put, but messier. 
*/ - @SuppressWarnings("unchecked") - private final V internalCompute(K k, - MappingFunction f, - boolean replace) { + /** Implementation for compute */ + @SuppressWarnings("unchecked") private final V internalCompute + (K k, boolean onlyIfPresent, + BiFun mf) { + if (k == null || mf == null) + throw new NullPointerException(); int h = spread(k.hashCode()); - V val = null; - boolean added = false; - Node[] tab = table; - outer:for (;;) { - Node e; int i; Object ek, ev; + Object val = null; + int delta = 0; + int len = 0; + for (Node[] tab = table;;) { + Node f; int i, fh; Object fk; if (tab == null) - tab = growTable(); - else if ((e = tabAt(tab, i = (tab.length - 1) & h)) == null) { + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (onlyIfPresent) + break; Node node = new Node(h, k, null, null); - boolean validated = false; - synchronized (node) { // must lock while computing value + synchronized (node) { if (casTabAt(tab, i, null, node)) { - validated = true; try { - val = f.map(k); - if (val != null) { + len = 1; + if ((val = mf.apply(k, null)) != null) { node.val = val; - added = true; + delta = 1; } } finally { - if (!added) + if (delta == 0) setTabAt(tab, i, null); } } } - if (validated) + if (len != 0) break; } - else if (e.hash < 0) - tab = (Node[])e.key; - else if (!replace && e.hash == h && (ev = e.val) != null && - ((ek = e.key) == k || k.equals(ek))) { - if (tabAt(tab, i) == e) { - val = (V)ev; - break; + else if ((fh = f.hash) < 0) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + len = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p == null && onlyIfPresent) + break; + Object pv = (p == null) ? null : p.val; + if ((val = mf.apply(k, (V)pv)) != null) { + if (p != null) + p.val = val; + else { + len = 2; + delta = 1; + t.putTreeNode(h, k, val); + } + } + else if (p != null) { + delta = -1; + t.deleteTreeNode(p); + } + } + } finally { + t.release(0); + } + if (len != 0) + break; } + else + tab = (Node[])fk; } - else if (Thread.holdsLock(e)) - throw new IllegalStateException("Recursive map computation"); else { - boolean validated = false; - boolean checkSize = false; - synchronized (e) { - if (tabAt(tab, i) == e) { - validated = true; - for (Node first = e;;) { + synchronized (f) { + if (tabAt(tab, i) == f) { + len = 1; + for (Node e = f, pred = null;; ++len) { + Object ek, ev; if (e.hash == h && - ((ek = e.key) == k || k.equals(ek)) && - ((ev = e.val) != null)) { - Object fv; - if (replace && (fv = f.map(k)) != null) - ev = e.val = fv; - val = (V)ev; + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = mf.apply(k, (V)ev); + if (val != null) + e.val = val; + else { + delta = -1; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } break; } - Node last = e; + pred = e; if ((e = e.next) == null) { - if ((val = f.map(k)) != null) { - last.next = new Node(h, k, val, null); - added = true; - if (last != first || tab.length <= 64) - checkSize = true; + if (!onlyIfPresent && + (val = mf.apply(k, null)) != null) { + pred.next = new Node(h, k, val, null); + delta = 1; + if (len >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); } break; } } } } - if (validated) { - if (checkSize && tab.length < MAXIMUM_CAPACITY && - resizing == 0 && counter.sum() >= (long)threshold) - growTable(); + if (len != 0) + break; + } + } + if (delta != 0) + addCount((long)delta, len); + return (V)val; + } + + /** Implementation 
for merge */ + @SuppressWarnings("unchecked") private final V internalMerge + (K k, V v, BiFun mf) { + if (k == null || v == null || mf == null) + throw new NullPointerException(); + int h = spread(k.hashCode()); + Object val = null; + int delta = 0; + int len = 0; + for (Node[] tab = table;;) { + int i; Node f; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) { + delta = 1; + val = v; + break; + } + } + else if (f.hash < 0) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + len = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + val = (p == null) ? v : mf.apply((V)p.val, v); + if (val != null) { + if (p != null) + p.val = val; + else { + len = 2; + delta = 1; + t.putTreeNode(h, k, val); + } + } + else if (p != null) { + delta = -1; + t.deleteTreeNode(p); + } + } + } finally { + t.release(0); + } + if (len != 0) + break; + } + else + tab = (Node[])fk; + } + else { + synchronized (f) { + if (tabAt(tab, i) == f) { + len = 1; + for (Node e = f, pred = null;; ++len) { + Object ek, ev; + if (e.hash == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = mf.apply((V)ev, v); + if (val != null) + e.val = val; + else { + delta = -1; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) { + val = v; + pred.next = new Node(h, k, val, null); + delta = 1; + if (len >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } + if (len != 0) + break; + } + } + if (delta != 0) + addCount((long)delta, len); + return (V)val; + } + + /** Implementation for putAll */ + private final void internalPutAll(Map m) { + tryPresize(m.size()); + long delta = 0L; // number of uncommitted additions + boolean npe = false; // to throw exception on exit for nulls + try { // to clean up counts on other exceptions + for (Map.Entry entry : m.entrySet()) { + Object k, v; + if (entry == null || (k = entry.getKey()) == null || + (v = entry.getValue()) == null) { + npe = true; break; } + int h = spread(k.hashCode()); + for (Node[] tab = table;;) { + int i; Node f; int fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null){ + if (casTabAt(tab, i, null, new Node(h, k, v, null))) { + ++delta; + break; + } + } + else if ((fh = f.hash) < 0) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) + p.val = v; + else { + t.putTreeNode(h, k, v); + ++delta; + } + } + } finally { + t.release(0); + } + if (validated) + break; + } + else + tab = (Node[])fk; + } + else { + int len = 0; + synchronized (f) { + if (tabAt(tab, i) == f) { + len = 1; + for (Node e = f;; ++len) { + Object ek, ev; + if (e.hash == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + e.val = v; + break; + } + Node last = e; + if ((e = e.next) == null) { + ++delta; + last.next = new Node(h, k, v, null); + if (len >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } + if (len != 0) { + if (len > 1) + addCount(delta, len); + break; + } + } + } } + } finally { + if (delta != 0L) + addCount(delta, 2); } - if (added) - counter.increment(); - return val; + if (npe) + 
throw new NullPointerException(); } /** - * Implementation for clear. Steps through each bin, removing all nodes. + * Implementation for clear. Steps through each bin, removing all + * nodes. */ private final void internalClear() { long delta = 0L; // negative number of deletions int i = 0; Node[] tab = table; while (tab != null && i < tab.length) { - Node e = tabAt(tab, i); - if (e == null) + Node f = tabAt(tab, i); + if (f == null) ++i; - else if (e.hash < 0) - tab = (Node[])e.key; + else if (f.hash < 0) { + Object fk; + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + for (Node p = t.first; p != null; p = p.next) { + if (p.val != null) { // (currently always true) + p.val = null; + --delta; + } + } + t.first = null; + t.root = null; + ++i; + } + } finally { + t.release(0); + } + } + else + tab = (Node[])fk; + } else { - boolean validated = false; - synchronized (e) { - if (tabAt(tab, i) == e) { - validated = true; - Node en; - do { - en = e.next; - if (e.val != null) { // currently always true + synchronized (f) { + if (tabAt(tab, i) == f) { + for (Node e = f; e != null; e = e.next) { + if (e.val != null) { // (currently always true) e.val = null; --delta; } - } while ((e = en) != null); + } setTabAt(tab, i, null); + ++i; + } + } + } + } + if (delta != 0L) + addCount(delta, -1); + } + + /* ---------------- Table Initialization and Resizing -------------- */ + + /** + * Returns a power of two table size for the given desired capacity. + * See Hackers Delight, sec 3.2 + */ + private static final int tableSizeFor(int c) { + int n = c - 1; + n |= n >>> 1; + n |= n >>> 2; + n |= n >>> 4; + n |= n >>> 8; + n |= n >>> 16; + return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1; + } + + /** + * Initializes table, using the size recorded in sizeCtl. + */ + private final Node[] initTable() { + Node[] tab; int sc; + while ((tab = table) == null) { + if ((sc = sizeCtl) < 0) + Thread.yield(); // lost initialization race; just spin + else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) { + try { + if ((tab = table) == null) { + int n = (sc > 0) ? sc : DEFAULT_CAPACITY; + tab = table = new Node[n]; + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + break; + } + } + return tab; + } + + /** + * Adds to count, and if table is too small and not already + * resizing, initiates transfer. If already resizing, helps + * perform transfer if work is available. Rechecks occupancy + * after a transfer to see if another resize is already needed + * because resizings are lagging additions. 
+ * + * @param x the count to add + * @param check if <0, don't check resize, if <= 1 only check if uncontended + */ + private final void addCount(long x, int check) { + CounterCell[] as; long b, s; + if ((as = counterCells) != null || + !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) { + CounterHashCode hc; CounterCell a; long v; int m; + boolean uncontended = true; + if ((hc = threadCounterHashCode.get()) == null || + as == null || (m = as.length - 1) < 0 || + (a = as[m & hc.code]) == null || + !(uncontended = + U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) { + fullAddCount(x, hc, uncontended); + return; + } + if (check <= 1) + return; + s = sumCount(); + } + if (check >= 0) { + Node[] tab, nt; int sc; + while (s >= (long)(sc = sizeCtl) && (tab = table) != null && + tab.length < MAXIMUM_CAPACITY) { + if (sc < 0) { + if (sc == -1 || transferIndex <= transferOrigin || + (nt = nextTable) == null) + break; + if (U.compareAndSwapInt(this, SIZECTL, sc, sc - 1)) + transfer(tab, nt); + } + else if (U.compareAndSwapInt(this, SIZECTL, sc, -2)) + transfer(tab, null); + s = sumCount(); + } + } + } + + /** + * Tries to presize table to accommodate the given number of elements. + * + * @param size number of elements (doesn't need to be perfectly accurate) + */ + private final void tryPresize(int size) { + int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY : + tableSizeFor(size + (size >>> 1) + 1); + int sc; + while ((sc = sizeCtl) >= 0) { + Node[] tab = table; int n; + if (tab == null || (n = tab.length) == 0) { + n = (sc > c) ? sc : c; + if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) { + try { + if (table == tab) { + table = new Node[n]; + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + } + } + else if (c <= sc || n >= MAXIMUM_CAPACITY) + break; + else if (tab == table && + U.compareAndSwapInt(this, SIZECTL, sc, -2)) + transfer(tab, null); + } + } + + /* + * Moves and/or copies the nodes in each bin to new table. See + * above for explanation. + */ + private final void transfer(Node[] tab, Node[] nextTab) { + int n = tab.length, stride; + if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE) + stride = MIN_TRANSFER_STRIDE; // subdivide range + if (nextTab == null) { // initiating + try { + nextTab = new Node[n << 1]; + } catch (Throwable ex) { // try to cope with OOME + sizeCtl = Integer.MAX_VALUE; + return; + } + nextTable = nextTab; + transferOrigin = n; + transferIndex = n; + Node rev = new Node(MOVED, tab, null, null); + for (int k = n; k > 0;) { // progressively reveal ready slots + int nextk = (k > stride) ? k - stride : 0; + for (int m = nextk; m < k; ++m) + nextTab[m] = rev; + for (int m = n + nextk; m < n + k; ++m) + nextTab[m] = rev; + U.putOrderedInt(this, TRANSFERORIGIN, k = nextk); + } + } + int nextn = nextTab.length; + Node fwd = new Node(MOVED, nextTab, null, null); + boolean advance = true; + for (int i = 0, bound = 0;;) { + int nextIndex, nextBound; Node f; Object fk; + while (advance) { + if (--i >= bound) + advance = false; + else if ((nextIndex = transferIndex) <= transferOrigin) { + i = -1; + advance = false; + } + else if (U.compareAndSwapInt + (this, TRANSFERINDEX, nextIndex, + nextBound = (nextIndex > stride ? 
+ nextIndex - stride : 0))) { + bound = nextBound; + i = nextIndex - 1; + advance = false; + } + } + if (i < 0 || i >= n || i + n >= nextn) { + for (int sc;;) { + if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, ++sc)) { + if (sc == -1) { + nextTable = null; + table = nextTab; + sizeCtl = (n << 1) - (n >>> 1); + } + return; + } + } + } + else if ((f = tabAt(tab, i)) == null) { + if (casTabAt(tab, i, null, fwd)) { + setTabAt(nextTab, i, null); + setTabAt(nextTab, i + n, null); + advance = true; + } + } + else if (f.hash >= 0) { + synchronized (f) { + if (tabAt(tab, i) == f) { + int runBit = f.hash & n; + Node lastRun = f, lo = null, hi = null; + for (Node p = f.next; p != null; p = p.next) { + int b = p.hash & n; + if (b != runBit) { + runBit = b; + lastRun = p; + } + } + if (runBit == 0) + lo = lastRun; + else + hi = lastRun; + for (Node p = f; p != lastRun; p = p.next) { + int ph = p.hash; + Object pk = p.key, pv = p.val; + if ((ph & n) == 0) + lo = new Node(ph, pk, pv, lo); + else + hi = new Node(ph, pk, pv, hi); + } + setTabAt(nextTab, i, lo); + setTabAt(nextTab, i + n, hi); + setTabAt(tab, i, fwd); + advance = true; + } + } + } + else if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + TreeBin lt = new TreeBin(); + TreeBin ht = new TreeBin(); + int lc = 0, hc = 0; + for (Node e = t.first; e != null; e = e.next) { + int h = e.hash; + Object k = e.key, v = e.val; + if ((h & n) == 0) { + ++lc; + lt.putTreeNode(h, k, v); + } + else { + ++hc; + ht.putTreeNode(h, k, v); + } + } + Node ln, hn; // throw away trees if too small + if (lc < TREE_THRESHOLD) { + ln = null; + for (Node p = lt.first; p != null; p = p.next) + ln = new Node(p.hash, p.key, p.val, ln); + } + else + ln = new Node(MOVED, lt, null, null); + setTabAt(nextTab, i, ln); + if (hc < TREE_THRESHOLD) { + hn = null; + for (Node p = ht.first; p != null; p = p.next) + hn = new Node(p.hash, p.key, p.val, hn); + } + else + hn = new Node(MOVED, ht, null, null); + setTabAt(nextTab, i + n, hn); + setTabAt(tab, i, fwd); + advance = true; + } + } finally { + t.release(0); + } + } + else + advance = true; // already processed + } + } + + /* ---------------- Counter support -------------- */ + + final long sumCount() { + CounterCell[] as = counterCells; CounterCell a; + long sum = baseCount; + if (as != null) { + for (int i = 0; i < as.length; ++i) { + if ((a = as[i]) != null) + sum += a.value; + } + } + return sum; + } + + // See LongAdder version for explanation + private final void fullAddCount(long x, CounterHashCode hc, + boolean wasUncontended) { + int h; + if (hc == null) { + hc = new CounterHashCode(); + int s = counterHashCodeGenerator.addAndGet(SEED_INCREMENT); + h = hc.code = (s == 0) ? 
1 : s; // Avoid zero + threadCounterHashCode.set(hc); + } + else + h = hc.code; + boolean collide = false; // True if last slot nonempty + for (;;) { + CounterCell[] as; CounterCell a; int n; long v; + if ((as = counterCells) != null && (n = as.length) > 0) { + if ((a = as[(n - 1) & h]) == null) { + if (counterBusy == 0) { // Try to attach new Cell + CounterCell r = new CounterCell(x); // Optimistic create + if (counterBusy == 0 && + U.compareAndSwapInt(this, COUNTERBUSY, 0, 1)) { + boolean created = false; + try { // Recheck under lock + CounterCell[] rs; int m, j; + if ((rs = counterCells) != null && + (m = rs.length) > 0 && + rs[j = (m - 1) & h] == null) { + rs[j] = r; + created = true; + } + } finally { + counterBusy = 0; + } + if (created) + break; + continue; // Slot is now non-empty + } } + collide = false; } - if (validated) - ++i; + else if (!wasUncontended) // CAS already known to fail + wasUncontended = true; // Continue after rehash + else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x)) + break; + else if (counterCells != as || n >= NCPU) + collide = false; // At max size or stale + else if (!collide) + collide = true; + else if (counterBusy == 0 && + U.compareAndSwapInt(this, COUNTERBUSY, 0, 1)) { + try { + if (counterCells == as) {// Expand table unless stale + CounterCell[] rs = new CounterCell[n << 1]; + for (int i = 0; i < n; ++i) + rs[i] = as[i]; + counterCells = rs; + } + } finally { + counterBusy = 0; + } + collide = false; + continue; // Retry with expanded table + } + h ^= h << 13; // Rehash + h ^= h >>> 17; + h ^= h << 5; } + else if (counterBusy == 0 && counterCells == as && + U.compareAndSwapInt(this, COUNTERBUSY, 0, 1)) { + boolean init = false; + try { // Initialize table + if (counterCells == as) { + CounterCell[] rs = new CounterCell[2]; + rs[h & 1] = new CounterCell(x); + counterCells = rs; + init = true; + } + } finally { + counterBusy = 0; + } + if (init) + break; + } + else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x)) + break; // Fall back on using base } - counter.add(delta); + hc.code = h; // Record index for next time } /* ----------------Table Traversal -------------- */ /** * Encapsulates traversal for methods such as containsValue; also - * serves as a base class for other iterators. + * serves as a base class for other iterators and bulk tasks. * * At each step, the iterator snapshots the key ("nextKey") and * value ("nextVal") of a valid node (i.e., one that, at point of - * snapshot, has a nonnull user value). Because val fields can + * snapshot, has a non-null user value). Because val fields can * change (including to null, indicating deletion), field nextVal * might not be accurate at point of use, but still maintains the * weak consistency property of holding a value that was once - * valid. + * valid. To support iterator.remove, the nextKey field is not + * updated (nulled out) when the iterator cannot advance. * * Internal traversals directly access these fields, as in: - * {@code while (it.next != null) { process(nextKey); it.advance(); }} + * {@code while (it.advance() != null) { process(it.nextKey); }} * - * Exported iterators (subclasses of ViewIterator) extract key, - * value, or key-value pairs as return values of Iterator.next(), - * and encapsulate the it.next check as hasNext(); - * - * The iterator visits each valid node that was reachable upon - * iterator construction once. It might miss some that were added - * to a bin after the bin was visited, which is OK wrt consistency - * guarantees. 
Maintaining this property in the face of possible - * ongoing resizes requires a fair amount of bookkeeping state - * that is difficult to optimize away amidst volatile accesses. - * Even so, traversal maintains reasonable throughput. + * Exported iterators must track whether the iterator has advanced + * (in hasNext vs next) (by setting/checking/nulling field + * nextVal), and then extract key, value, or key-value pairs as + * return values of next(). + * + * The iterator visits once each still-valid node that was + * reachable upon iterator construction. It might miss some that + * were added to a bin after the bin was visited, which is OK wrt + * consistency guarantees. Maintaining this property in the face + * of possible ongoing resizes requires a fair amount of + * bookkeeping state that is difficult to optimize away amidst + * volatile accesses. Even so, traversal maintains reasonable + * throughput. * * Normally, iteration proceeds bin-by-bin traversing lists. * However, if the table has been resized, then all future steps @@ -793,69 +2260,139 @@ public class ConcurrentHashMapV8 * across threads, iteration terminates if a bounds checks fails * for a table read. * - * The range-based constructor enables creation of parallel - * range-splitting traversals. (Not yet implemented.) + * This class extends CountedCompleter to streamline parallel + * iteration in bulk operations. This adds only a few fields of + * space overhead, which is small enough in cases where it is not + * needed to not worry about it. Because CountedCompleter is + * Serializable, but iterators need not be, we need to add warning + * suppressions. */ - static class InternalIterator { + @SuppressWarnings("serial") static class Traverser + extends CountedCompleter { + final ConcurrentHashMapV8 map; Node next; // the next entry to use - Node last; // the last entry used Object nextKey; // cached key field of next Object nextVal; // cached val field of next Node[] tab; // current table; updated if resized int index; // index of bin to use next int baseIndex; // current index of initial table - final int baseLimit; // index bound for initial table - final int baseSize; // initial table size + int baseLimit; // index bound for initial table + int baseSize; // initial table size + int batch; // split control /** Creates iterator for all entries in the table. */ - InternalIterator(Node[] tab) { - this.tab = tab; - baseLimit = baseSize = (tab == null) ? 0 : tab.length; - index = baseIndex = 0; - next = null; - advance(); - } - - /** Creates iterator for the given range of the table */ - InternalIterator(Node[] tab, int lo, int hi) { - this.tab = tab; - baseSize = (tab == null) ? 0 : tab.length; - baseLimit = (hi <= baseSize) ? hi : baseSize; - index = baseIndex = lo; - next = null; - advance(); - } - - /** Advances next. See above for explanation. 
*/ - final void advance() { - Node e = last = next; + Traverser(ConcurrentHashMapV8 map) { + this.map = map; + } + + /** Creates iterator for split() methods and task constructors */ + Traverser(ConcurrentHashMapV8 map, Traverser it, int batch) { + super(it); + this.batch = batch; + if ((this.map = map) != null && it != null) { // split parent + Node[] t; + if ((t = it.tab) == null && + (t = it.tab = map.table) != null) + it.baseLimit = it.baseSize = t.length; + this.tab = t; + this.baseSize = it.baseSize; + int hi = this.baseLimit = it.baseLimit; + it.baseLimit = this.index = this.baseIndex = + (hi + it.baseIndex + 1) >>> 1; + } + } + + /** + * Advances next; returns nextVal or null if terminated. + * See above for explanation. + */ + final Object advance() { + Node e = next; + Object ev = null; outer: do { - if (e != null) // pass used or skipped node + if (e != null) // advance past used/skipped node e = e.next; - while (e == null) { // get to next non-null bin - Node[] t; int b, i, n; // checks must use locals - if ((b = baseIndex) >= baseLimit || (i = index) < 0 || - (t = tab) == null || i >= (n = t.length)) + while (e == null) { // get to next non-null bin + ConcurrentHashMapV8 m; + Node[] t; int b, i, n; Object ek; // checks must use locals + if ((t = tab) != null) + n = t.length; + else if ((m = map) != null && (t = tab = m.table) != null) + n = baseLimit = baseSize = t.length; + else break outer; - else if ((e = tabAt(t, i)) != null && e.hash < 0) - tab = (Node[])e.key; // restarts due to null val - else // visit upper slots if present - index = (i += baseSize) < n ? i : (baseIndex = b + 1); + if ((b = baseIndex) >= baseLimit || + (i = index) < 0 || i >= n) + break outer; + if ((e = tabAt(t, i)) != null && e.hash < 0) { + if ((ek = e.key) instanceof TreeBin) + e = ((TreeBin)ek).first; + else { + tab = (Node[])ek; + continue; // restarts due to null val + } + } // visit upper slots if present + index = (i += baseSize) < n ? i : (baseIndex = b + 1); } nextKey = e.key; - } while ((nextVal = e.val) == null); // skip deleted or special nodes + } while ((ev = e.val) == null); // skip deleted or special nodes next = e; + return nextVal = ev; } + + public final void remove() { + Object k = nextKey; + if (k == null && (advance() == null || (k = nextKey) == null)) + throw new IllegalStateException(); + map.internalReplace(k, null, null); + } + + public final boolean hasNext() { + return nextVal != null || advance() != null; + } + + public final boolean hasMoreElements() { return hasNext(); } + + public void compute() { } // default no-op CountedCompleter body + + /** + * Returns a batch value > 0 if this task should (and must) be + * split, if so, adding to pending count, and in any case + * updating batch value. The initial batch value is approx + * exp2 of the number of times (minus one) to split task by + * two before executing leaf action. This value is faster to + * compute and more convenient to use as a guide to splitting + * than is the depth, since it is used while dividing by two + * anyway. + */ + final int preSplit() { + ConcurrentHashMapV8 m; int b; Node[] t; ForkJoinPool pool; + if ((b = batch) < 0 && (m = map) != null) { // force initialization + if ((t = tab) == null && (t = tab = m.table) != null) + baseLimit = baseSize = t.length; + if (t != null) { + long n = m.sumCount(); + int par = ((pool = getPool()) == null) ? + ForkJoinPool.getCommonPoolParallelism() : + pool.getParallelism(); + int sp = par << 3; // slack of 8 + b = (n <= 0L) ? 0 : (n < (long)sp) ? 
(int)n : sp; + } + } + b = (b <= 1 || baseIndex == baseLimit) ? 0 : (b >>> 1); + if ((batch = b) > 0) + addToPendingCount(1); + return b; + } + } /* ---------------- Public operations -------------- */ /** - * Creates a new, empty map with the default initial table size (16), + * Creates a new, empty map with the default initial table size (16). */ public ConcurrentHashMapV8() { - this.counter = new LongAdder(); - this.targetCapacity = DEFAULT_CAPACITY; } /** @@ -874,8 +2411,7 @@ public class ConcurrentHashMapV8 int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY : tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1)); - this.counter = new LongAdder(); - this.targetCapacity = cap; + this.sizeCtl = cap; } /** @@ -884,9 +2420,8 @@ public class ConcurrentHashMapV8 * @param m the map */ public ConcurrentHashMapV8(Map m) { - this.counter = new LongAdder(); - this.targetCapacity = DEFAULT_CAPACITY; - putAll(m); + this.sizeCtl = DEFAULT_CAPACITY; + internalPutAll(m); } /** @@ -901,6 +2436,8 @@ public class ConcurrentHashMapV8 * establishing the initial table size * @throws IllegalArgumentException if the initial capacity of * elements is negative or the load factor is nonpositive + * + * @since 1.6 */ public ConcurrentHashMapV8(int initialCapacity, float loadFactor) { this(initialCapacity, loadFactor, 1); @@ -931,30 +2468,69 @@ public class ConcurrentHashMapV8 if (initialCapacity < concurrencyLevel) // Use at least as many bins initialCapacity = concurrencyLevel; // as estimated threads long size = (long)(1.0 + (long)initialCapacity / loadFactor); - int cap = ((size >= (long)MAXIMUM_CAPACITY) ? - MAXIMUM_CAPACITY: tableSizeFor((int)size)); - this.counter = new LongAdder(); - this.targetCapacity = cap; + int cap = (size >= (long)MAXIMUM_CAPACITY) ? + MAXIMUM_CAPACITY : tableSizeFor((int)size); + this.sizeCtl = cap; + } + + /** + * Creates a new {@link Set} backed by a ConcurrentHashMapV8 + * from the given type to {@code Boolean.TRUE}. + * + * @return the new set + */ + public static KeySetView newKeySet() { + return new KeySetView(new ConcurrentHashMapV8(), + Boolean.TRUE); + } + + /** + * Creates a new {@link Set} backed by a ConcurrentHashMapV8 + * from the given type to {@code Boolean.TRUE}. + * + * @param initialCapacity The implementation performs internal + * sizing to accommodate this many elements. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative + * @return the new set + */ + public static KeySetView newKeySet(int initialCapacity) { + return new KeySetView + (new ConcurrentHashMapV8(initialCapacity), Boolean.TRUE); } /** * {@inheritDoc} */ public boolean isEmpty() { - return counter.sum() <= 0L; // ignore transient negative values + return sumCount() <= 0L; // ignore transient negative values } /** * {@inheritDoc} */ public int size() { - long n = counter.sum(); + long n = sumCount(); return ((n < 0L) ? 0 : (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE : (int)n); } /** + * Returns the number of mappings. This method should be used + * instead of {@link #size} because a ConcurrentHashMapV8 may + * contain more mappings than can be represented as an int. The + * value returned is an estimate; the actual count may differ if + * there are concurrent insertions or removals. + * + * @return the number of mappings + */ + public long mappingCount() { + long n = sumCount(); + return (n < 0L) ? 
0L : n; // ignore transient negative values + } + + /** * Returns the value to which the specified key is mapped, * or {@code null} if this map contains no mapping for the key. * @@ -965,11 +2541,23 @@ public class ConcurrentHashMapV8 * * @throws NullPointerException if the specified key is null */ - @SuppressWarnings("unchecked") public V get(Object key) { - if (key == null) - throw new NullPointerException(); - return (V)internalGet(key); + return internalGet(key); + } + + /** + * Returns the value to which the specified key is mapped, + * or the given defaultValue if this map contains no mapping for the key. + * + * @param key the key + * @param defaultValue the value to return if this map contains + * no mapping for the given key + * @return the mapping for the key, if present; else the defaultValue + * @throws NullPointerException if the specified key is null + */ + public V getValueOrDefault(Object key, V defaultValue) { + V v; + return (v = internalGet(key)) == null ? defaultValue : v; } /** @@ -982,8 +2570,6 @@ public class ConcurrentHashMapV8 * @throws NullPointerException if the specified key is null */ public boolean containsKey(Object key) { - if (key == null) - throw new NullPointerException(); return internalGet(key) != null; } @@ -1001,11 +2587,10 @@ public class ConcurrentHashMapV8 if (value == null) throw new NullPointerException(); Object v; - InternalIterator it = new InternalIterator(table); - while (it.next != null) { - if ((v = it.nextVal) == value || value.equals(v)) + Traverser it = new Traverser(this); + while ((v = it.advance()) != null) { + if (v == value || value.equals(v)) return true; - it.advance(); } return false; } @@ -1033,7 +2618,7 @@ public class ConcurrentHashMapV8 * Maps the specified key to the specified value in this table. * Neither the key nor the value can be null. * - *

The value can be retrieved by calling the {@code get} method + *

The value can be retrieved by calling the {@code get} method * with a key that is equal to the original key. * * @param key key with which the specified value is to be associated @@ -1042,11 +2627,8 @@ public class ConcurrentHashMapV8 * {@code null} if there was no mapping for {@code key} * @throws NullPointerException if the specified key or value is null */ - @SuppressWarnings("unchecked") public V put(K key, V value) { - if (key == null || value == null) - throw new NullPointerException(); - return (V)internalPut(key, value, true); + return internalPut(key, value, false); } /** @@ -1056,11 +2638,8 @@ public class ConcurrentHashMapV8 * or {@code null} if there was no mapping for the key * @throws NullPointerException if the specified key or value is null */ - @SuppressWarnings("unchecked") public V putIfAbsent(K key, V value) { - if (key == null || value == null) - throw new NullPointerException(); - return (V)internalPut(key, value, false); + return internalPut(key, value, true); } /** @@ -1071,51 +2650,40 @@ public class ConcurrentHashMapV8 * @param m mappings to be stored in this map */ public void putAll(Map m) { - if (m == null) - throw new NullPointerException(); - /* - * If uninitialized, try to adjust targetCapacity to - * accommodate the given number of elements. - */ - if (table == null) { - int size = m.size(); - int cap = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY : - tableSizeFor(size + (size >>> 1) + 1); - if (cap > targetCapacity) - targetCapacity = cap; - } - for (Map.Entry e : m.entrySet()) - put(e.getKey(), e.getValue()); + internalPutAll(m); } /** * If the specified key is not already associated with a value, - * computes its value using the given mappingFunction, and if - * non-null, enters it into the map. This is equivalent to - *

 {@code
+     * computes its value using the given mappingFunction and enters
+     * it into the map unless null.  This is equivalent to
+     * 
 {@code
      * if (map.containsKey(key))
      *   return map.get(key);
-     * value = mappingFunction.map(key);
+     * value = mappingFunction.apply(key);
      * if (value != null)
      *   map.put(key, value);
      * return value;}
* - * except that the action is performed atomically. Some attempted - * update operations on this map by other threads may be blocked - * while computation is in progress, so the computation should be - * short and simple, and must not attempt to update any other - * mappings of this Map. The most appropriate usage is to + * except that the action is performed atomically. If the + * function returns {@code null} no mapping is recorded. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and no mapping is recorded. Some + * attempted update operations on this map by other threads may be + * blocked while computation is in progress, so the computation + * should be short and simple, and must not attempt to update any + * other mappings of this Map. The most appropriate usage is to * construct a new object serving as an initial mapped value, or * memoized result, as in: + * *
 {@code
-     * map.computeIfAbsent(key, new MappingFunction() {
+     * map.computeIfAbsent(key, new Fun() {
      *   public V map(K k) { return new Value(f(k)); }});}
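A minimal, compilable sketch of this memoization idiom, assuming {@code jsr166e.ConcurrentHashMapV8} is imported, that the {@code Fun} interface declared later in this file is used (its single method is {@code apply}), and that the {@code Config} type, {@code loadConfig} helper, and {@code name} key are hypothetical placeholders:

    final ConcurrentHashMapV8<String, Config> cache =
        new ConcurrentHashMapV8<String, Config>();

    // The function runs only when no mapping for name is present; a null
    // result leaves the map unchanged.
    Config cfg = cache.computeIfAbsent(name,
        new ConcurrentHashMapV8.Fun<String, Config>() {
            public Config apply(String key) {
                return loadConfig(key);
            }
        });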
* * @param key key with which the specified value is to be associated * @param mappingFunction the function to compute a value * @return the current (existing or computed) value associated with - * the specified key, or {@code null} if the computation - * returned {@code null} + * the specified key, or null if the computed value is null * @throws NullPointerException if the specified key or mappingFunction * is null * @throws IllegalStateException if the computation detectably @@ -1124,47 +2692,124 @@ public class ConcurrentHashMapV8 * @throws RuntimeException or Error if the mappingFunction does so, * in which case the mapping is left unestablished */ - public V computeIfAbsent(K key, MappingFunction mappingFunction) { - if (key == null || mappingFunction == null) - throw new NullPointerException(); - return internalCompute(key, mappingFunction, false); + public V computeIfAbsent + (K key, Fun mappingFunction) { + return internalComputeIfAbsent(key, mappingFunction); } /** - * Computes the value associated with the given key using the given - * mappingFunction, and if non-null, enters it into the map. This - * is equivalent to + * If the given key is present, computes a new mapping value given a key and + * its current mapped value. This is equivalent to *
 {@code
-     * value = mappingFunction.map(key);
-     * if (value != null)
-     *   map.put(key, value);
-     * else
-     *   value = map.get(key);
-     * return value;}
+     * if (map.containsKey(key)) {
+     *   value = remappingFunction.apply(key, map.get(key));
+     *   if (value != null)
+     *     map.put(key, value);
+     *   else
+     *     map.remove(key);
+     * }
+     * }
+ * + * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. For example, + * to either create or append new messages to a value mapping: * - * except that the action is performed atomically. Some attempted - * update operations on this map by other threads may be blocked - * while computation is in progress, so the computation should be - * short and simple, and must not attempt to update any other - * mappings of this Map. + * @param key key with which the specified value is to be associated + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or remappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the remappingFunction does so, + * in which case the mapping is unchanged + */ + public V computeIfPresent + (K key, BiFun remappingFunction) { + return internalCompute(key, true, remappingFunction); + } + + /** + * Computes a new mapping value given a key and + * its current mapped value (or {@code null} if there is no current + * mapping). This is equivalent to + *
 {@code
+     *   value = remappingFunction.apply(key, map.get(key));
+     *   if (value != null)
+     *     map.put(key, value);
+     *   else
+     *     map.remove(key);
+     * }
+ * + * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. For example, + * to either create or append new messages to a value mapping: + * + *
 {@code
+     * Map map = ...;
+     * final String msg = ...;
+     * map.compute(key, new BiFun() {
+     *   public String apply(Key k, String v) {
+     *    return (v == null) ? msg : v + msg;});}}
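A fully balanced, compilable form of the sketch above, assuming concrete {@code String} key and value types (the generic parameters are stripped by the rendered diff and are restored here as an assumption):

    final ConcurrentHashMapV8<String, String> map =
        new ConcurrentHashMapV8<String, String>();
    final String msg = " world";

    // Creates the mapping if absent, otherwise appends to the existing value.
    map.compute("greeting", new ConcurrentHashMapV8.BiFun<String, String, String>() {
        public String apply(String k, String v) {
            return (v == null) ? msg : v + msg;
        }
    });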
* * @param key key with which the specified value is to be associated - * @param mappingFunction the function to compute a value - * @return the current value associated with - * the specified key, or {@code null} if the computation - * returned {@code null} and the value was not otherwise present - * @throws NullPointerException if the specified key or mappingFunction + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or remappingFunction * is null * @throws IllegalStateException if the computation detectably * attempts a recursive update to this map that would * otherwise never complete - * @throws RuntimeException or Error if the mappingFunction does so, + * @throws RuntimeException or Error if the remappingFunction does so, * in which case the mapping is unchanged */ - public V compute(K key, MappingFunction mappingFunction) { - if (key == null || mappingFunction == null) - throw new NullPointerException(); - return internalCompute(key, mappingFunction, true); + public V compute + (K key, BiFun remappingFunction) { + return internalCompute(key, false, remappingFunction); + } + + /** + * If the specified key is not already associated + * with a value, associate it with the given value. + * Otherwise, replace the value with the results of + * the given remapping function. This is equivalent to: + *
 {@code
+     *   if (!map.containsKey(key))
+     *     map.put(key, value);
+     *   else {
+     *     newValue = remappingFunction.apply(map.get(key), value);
+     *     if (newValue != null)
+     *       map.put(key, newValue);
+     *     else
+     *       map.remove(key);
+     *   }
+     * }
+ * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. + */ + public V merge + (K key, V value, + BiFun remappingFunction) { + return internalMerge(key, value, remappingFunction); } /** @@ -1176,11 +2821,8 @@ public class ConcurrentHashMapV8 * {@code null} if there was no mapping for {@code key} * @throws NullPointerException if the specified key is null */ - @SuppressWarnings("unchecked") public V remove(Object key) { - if (key == null) - throw new NullPointerException(); - return (V)internalReplace(key, null, null); + return internalReplace(key, null, null); } /** @@ -1189,11 +2831,7 @@ public class ConcurrentHashMapV8 * @throws NullPointerException if the specified key is null */ public boolean remove(Object key, Object value) { - if (key == null) - throw new NullPointerException(); - if (value == null) - return false; - return internalReplace(key, null, value) != null; + return value != null && internalReplace(key, null, value) != null; } /** @@ -1214,11 +2852,10 @@ public class ConcurrentHashMapV8 * or {@code null} if there was no mapping for the key * @throws NullPointerException if the specified key or value is null */ - @SuppressWarnings("unchecked") public V replace(K key, V value) { if (key == null || value == null) throw new NullPointerException(); - return (V)internalReplace(key, value, null); + return internalReplace(key, value, null); } /** @@ -1231,43 +2868,41 @@ public class ConcurrentHashMapV8 /** * Returns a {@link Set} view of the keys contained in this map. * The set is backed by the map, so changes to the map are - * reflected in the set, and vice-versa. The set supports element - * removal, which removes the corresponding mapping from this map, - * via the {@code Iterator.remove}, {@code Set.remove}, - * {@code removeAll}, {@code retainAll}, and {@code clear} - * operations. It does not support the {@code add} or - * {@code addAll} operations. + * reflected in the set, and vice-versa. * - *

The view's {@code iterator} is a "weakly consistent" iterator - * that will never throw {@link ConcurrentModificationException}, - * and guarantees to traverse elements as they existed upon - * construction of the iterator, and may (but is not guaranteed to) - * reflect any modifications subsequent to construction. + * @return the set view + */ + public KeySetView keySet() { + KeySetView ks = keySet; + return (ks != null) ? ks : (keySet = new KeySetView(this, null)); + } + + /** + * Returns a {@link Set} view of the keys in this map, using the + * given common mapped value for any additions (i.e., {@link + * Collection#add} and {@link Collection#addAll}). This is of + * course only appropriate if it is acceptable to use the same + * value for all additions from this view. + * + * @param mappedValue the mapped value to use for any + * additions. + * @return the set view + * @throws NullPointerException if the mappedValue is null */ - public Set keySet() { - KeySet ks = keySet; - return (ks != null) ? ks : (keySet = new KeySet(this)); + public KeySetView keySet(V mappedValue) { + if (mappedValue == null) + throw new NullPointerException(); + return new KeySetView(this, mappedValue); } /** * Returns a {@link Collection} view of the values contained in this map. * The collection is backed by the map, so changes to the map are - * reflected in the collection, and vice-versa. The collection - * supports element removal, which removes the corresponding - * mapping from this map, via the {@code Iterator.remove}, - * {@code Collection.remove}, {@code removeAll}, - * {@code retainAll}, and {@code clear} operations. It does not - * support the {@code add} or {@code addAll} operations. - * - *

The view's {@code iterator} is a "weakly consistent" iterator - * that will never throw {@link ConcurrentModificationException}, - * and guarantees to traverse elements as they existed upon - * construction of the iterator, and may (but is not guaranteed to) - * reflect any modifications subsequent to construction. + * reflected in the collection, and vice-versa. */ - public Collection values() { - Values vs = values; - return (vs != null) ? vs : (values = new Values(this)); + public ValuesView values() { + ValuesView vs = values; + return (vs != null) ? vs : (values = new ValuesView(this)); } /** @@ -1287,8 +2922,8 @@ public class ConcurrentHashMapV8 * reflect any modifications subsequent to construction. */ public Set> entrySet() { - EntrySet es = entrySet; - return (es != null) ? es : (entrySet = new EntrySet(this)); + EntrySetView es = entrySet; + return (es != null) ? es : (entrySet = new EntrySetView(this)); } /** @@ -1312,6 +2947,33 @@ public class ConcurrentHashMapV8 } /** + * Returns a partitionable iterator of the keys in this map. + * + * @return a partitionable iterator of the keys in this map + */ + public Spliterator keySpliterator() { + return new KeyIterator(this); + } + + /** + * Returns a partitionable iterator of the values in this map. + * + * @return a partitionable iterator of the values in this map + */ + public Spliterator valueSpliterator() { + return new ValueIterator(this); + } + + /** + * Returns a partitionable iterator of the entries in this map. + * + * @return a partitionable iterator of the entries in this map + */ + public Spliterator> entrySpliterator() { + return new EntryIterator(this); + } + + /** * Returns the hash code value for this {@link Map}, i.e., * the sum of, for each key-value pair in the map, * {@code key.hashCode() ^ value.hashCode()}. @@ -1320,10 +2982,10 @@ public class ConcurrentHashMapV8 */ public int hashCode() { int h = 0; - InternalIterator it = new InternalIterator(table); - while (it.next != null) { - h += it.nextKey.hashCode() ^ it.nextVal.hashCode(); - it.advance(); + Traverser it = new Traverser(this); + Object v; + while ((v = it.advance()) != null) { + h += it.nextKey.hashCode() ^ v.hashCode(); } return h; } @@ -1340,17 +3002,17 @@ public class ConcurrentHashMapV8 * @return a string representation of this map */ public String toString() { - InternalIterator it = new InternalIterator(table); + Traverser it = new Traverser(this); StringBuilder sb = new StringBuilder(); sb.append('{'); - if (it.next != null) { + Object v; + if ((v = it.advance()) != null) { for (;;) { - Object k = it.nextKey, v = it.nextVal; + Object k = it.nextKey; sb.append(k == this ? "(this Map)" : k); sb.append('='); sb.append(v == this ? "(this Map)" : v); - it.advance(); - if (it.next == null) + if ((v = it.advance()) == null) break; sb.append(',').append(' '); } @@ -1373,13 +3035,12 @@ public class ConcurrentHashMapV8 if (!(o instanceof Map)) return false; Map m = (Map) o; - InternalIterator it = new InternalIterator(table); - while (it.next != null) { - Object val = it.nextVal; + Traverser it = new Traverser(this); + Object val; + while ((val = it.advance()) != null) { Object v = m.get(it.nextKey); if (v == null || (v != val && !v.equals(val))) return false; - it.advance(); } for (Map.Entry e : m.entrySet()) { Object mk, mv, v; @@ -1395,87 +3056,88 @@ public class ConcurrentHashMapV8 /* ----------------Iterators -------------- */ - /** - * Base class for key, value, and entry iterators. 
Adds a map - * reference to InternalIterator to support Iterator.remove. - */ - static abstract class ViewIterator extends InternalIterator { - final ConcurrentHashMapV8 map; - ViewIterator(ConcurrentHashMapV8 map) { - super(map.table); - this.map = map; + @SuppressWarnings("serial") static final class KeyIterator + extends Traverser + implements Spliterator, Enumeration { + KeyIterator(ConcurrentHashMapV8 map) { super(map); } + KeyIterator(ConcurrentHashMapV8 map, Traverser it) { + super(map, it, -1); } - - public final void remove() { - if (last == null) + public KeyIterator split() { + if (nextKey != null) throw new IllegalStateException(); - map.remove(last.key); - last = null; + return new KeyIterator(map, this); } - - public final boolean hasNext() { return next != null; } - public final boolean hasMoreElements() { return next != null; } - } - - static final class KeyIterator extends ViewIterator - implements Iterator, Enumeration { - KeyIterator(ConcurrentHashMapV8 map) { super(map); } - - @SuppressWarnings("unchecked") - public final K next() { - if (next == null) + @SuppressWarnings("unchecked") public final K next() { + if (nextVal == null && advance() == null) throw new NoSuchElementException(); Object k = nextKey; - advance(); - return (K)k; + nextVal = null; + return (K) k; } public final K nextElement() { return next(); } } - static final class ValueIterator extends ViewIterator - implements Iterator, Enumeration { + @SuppressWarnings("serial") static final class ValueIterator + extends Traverser + implements Spliterator, Enumeration { ValueIterator(ConcurrentHashMapV8 map) { super(map); } + ValueIterator(ConcurrentHashMapV8 map, Traverser it) { + super(map, it, -1); + } + public ValueIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new ValueIterator(map, this); + } - @SuppressWarnings("unchecked") - public final V next() { - if (next == null) + @SuppressWarnings("unchecked") public final V next() { + Object v; + if ((v = nextVal) == null && (v = advance()) == null) throw new NoSuchElementException(); - Object v = nextVal; - advance(); - return (V)v; + nextVal = null; + return (V) v; } public final V nextElement() { return next(); } } - static final class EntryIterator extends ViewIterator - implements Iterator> { + @SuppressWarnings("serial") static final class EntryIterator + extends Traverser + implements Spliterator> { EntryIterator(ConcurrentHashMapV8 map) { super(map); } + EntryIterator(ConcurrentHashMapV8 map, Traverser it) { + super(map, it, -1); + } + public EntryIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new EntryIterator(map, this); + } - @SuppressWarnings("unchecked") - public final Map.Entry next() { - if (next == null) + @SuppressWarnings("unchecked") public final Map.Entry next() { + Object v; + if ((v = nextVal) == null && (v = advance()) == null) throw new NoSuchElementException(); Object k = nextKey; - Object v = nextVal; - advance(); - return new WriteThroughEntry(map, (K)k, (V)v); + nextVal = null; + return new MapEntry((K)k, (V)v, map); } } /** - * Custom Entry class used by EntryIterator.next(), that relays - * setValue changes to the underlying map. 
+ * Exported Entry for iterators */ - static final class WriteThroughEntry implements Map.Entry { - final ConcurrentHashMapV8 map; + static final class MapEntry implements Map.Entry { final K key; // non-null V val; // non-null - WriteThroughEntry(ConcurrentHashMapV8 map, K key, V val) { - this.map = map; this.key = key; this.val = val; + final ConcurrentHashMapV8 map; + MapEntry(K key, V val, ConcurrentHashMapV8 map) { + this.key = key; + this.val = val; + this.map = map; } - public final K getKey() { return key; } public final V getValue() { return val; } public final int hashCode() { return key.hashCode() ^ val.hashCode(); } @@ -1492,12 +3154,11 @@ public class ConcurrentHashMapV8 /** * Sets our entry's value and writes through to the map. The - * value to return is somewhat arbitrary here. Since a - * WriteThroughEntry does not necessarily track asynchronous - * changes, the most recent "previous" value could be - * different from what we return (or could even have been - * removed in which case the put will re-establish). We do not - * and cannot guarantee more. + * value to return is somewhat arbitrary here. Since we do not + * necessarily track asynchronous changes, the most recent + * "previous" value could be different from what we return (or + * could even have been removed in which case the put will + * re-establish). We do not and cannot guarantee more. */ public final V setValue(V value) { if (value == null) throw new NullPointerException(); @@ -1508,67 +3169,12 @@ public class ConcurrentHashMapV8 } } - /* ----------------Views -------------- */ - - /* - * These currently just extend java.util.AbstractX classes, but - * may need a new custom base to support partitioned traversal. + /** + * Returns exportable snapshot entry for the given key and value + * when write-through can't or shouldn't be used. 
*/ - - static final class KeySet extends AbstractSet { - final ConcurrentHashMapV8 map; - KeySet(ConcurrentHashMapV8 map) { this.map = map; } - - public final int size() { return map.size(); } - public final boolean isEmpty() { return map.isEmpty(); } - public final void clear() { map.clear(); } - public final boolean contains(Object o) { return map.containsKey(o); } - public final boolean remove(Object o) { return map.remove(o) != null; } - public final Iterator iterator() { - return new KeyIterator(map); - } - } - - static final class Values extends AbstractCollection { - final ConcurrentHashMapV8 map; - Values(ConcurrentHashMapV8 map) { this.map = map; } - - public final int size() { return map.size(); } - public final boolean isEmpty() { return map.isEmpty(); } - public final void clear() { map.clear(); } - public final boolean contains(Object o) { return map.containsValue(o); } - public final Iterator iterator() { - return new ValueIterator(map); - } - } - - static final class EntrySet extends AbstractSet> { - final ConcurrentHashMapV8 map; - EntrySet(ConcurrentHashMapV8 map) { this.map = map; } - - public final int size() { return map.size(); } - public final boolean isEmpty() { return map.isEmpty(); } - public final void clear() { map.clear(); } - public final Iterator> iterator() { - return new EntryIterator(map); - } - - public final boolean contains(Object o) { - Object k, v, r; Map.Entry e; - return ((o instanceof Map.Entry) && - (k = (e = (Map.Entry)o).getKey()) != null && - (r = map.get(k)) != null && - (v = e.getValue()) != null && - (v == r || v.equals(r))); - } - - public final boolean remove(Object o) { - Object k, v; Map.Entry e; - return ((o instanceof Map.Entry) && - (k = (e = (Map.Entry)o).getKey()) != null && - (v = e.getValue()) != null && - map.remove(k, v)); - } + static AbstractMap.SimpleEntry entryFor(K k, V v) { + return new AbstractMap.SimpleEntry(k, v); } /* ---------------- Serialization Support -------------- */ @@ -1592,9 +3198,9 @@ public class ConcurrentHashMapV8 * for each key-value mapping, followed by a null pair. * The key-value mappings are emitted in no particular order. */ - @SuppressWarnings("unchecked") - private void writeObject(java.io.ObjectOutputStream s) - throws java.io.IOException { + @SuppressWarnings("unchecked") private void writeObject + (java.io.ObjectOutputStream s) + throws java.io.IOException { if (segments == null) { // for serialization compatibility segments = (Segment[]) new Segment[DEFAULT_CONCURRENCY_LEVEL]; @@ -1602,11 +3208,11 @@ public class ConcurrentHashMapV8 segments[i] = new Segment(LOAD_FACTOR); } s.defaultWriteObject(); - InternalIterator it = new InternalIterator(table); - while (it.next != null) { + Traverser it = new Traverser(this); + Object v; + while ((v = it.advance()) != null) { s.writeObject(it.nextKey); - s.writeObject(it.nextVal); - it.advance(); + s.writeObject(v); } s.writeObject(null); s.writeObject(null); @@ -1617,14 +3223,11 @@ public class ConcurrentHashMapV8 * Reconstitutes the instance from a stream (that is, deserializes it). 
* @param s the stream */ - @SuppressWarnings("unchecked") - private void readObject(java.io.ObjectInputStream s) - throws java.io.IOException, ClassNotFoundException { + @SuppressWarnings("unchecked") private void readObject + (java.io.ObjectInputStream s) + throws java.io.IOException, ClassNotFoundException { s.defaultReadObject(); this.segments = null; // unneeded - // initalize transient final field - UNSAFE.putObjectVolatile(this, counterOffset, new LongAdder()); - this.targetCapacity = DEFAULT_CAPACITY; // Create all nodes, then place in table once size is known long size = 0L; @@ -1633,7 +3236,8 @@ public class ConcurrentHashMapV8 K k = (K) s.readObject(); V v = (V) s.readObject(); if (k != null && v != null) { - p = new Node(spread(k.hashCode()), k, v, p); + int h = spread(k.hashCode()); + p = new Node(h, k, v, p); ++size; } else @@ -1641,69 +3245,3237 @@ public class ConcurrentHashMapV8 } if (p != null) { boolean init = false; - if (resizing == 0 && - UNSAFE.compareAndSwapInt(this, resizingOffset, 0, 1)) { + int n; + if (size >= (long)(MAXIMUM_CAPACITY >>> 1)) + n = MAXIMUM_CAPACITY; + else { + int sz = (int)size; + n = tableSizeFor(sz + (sz >>> 1) + 1); + } + int sc = sizeCtl; + boolean collide = false; + if (n > sc && + U.compareAndSwapInt(this, SIZECTL, sc, -1)) { try { if (table == null) { init = true; - int n; - if (size >= (long)(MAXIMUM_CAPACITY >>> 1)) - n = MAXIMUM_CAPACITY; - else { - int sz = (int)size; - n = tableSizeFor(sz + (sz >>> 1) + 1); - } - threshold = n - (n >>> 2) - THRESHOLD_OFFSET; Node[] tab = new Node[n]; int mask = n - 1; while (p != null) { int j = p.hash & mask; Node next = p.next; - p.next = tabAt(tab, j); + Node q = p.next = tabAt(tab, j); setTabAt(tab, j, p); + if (!collide && q != null && q.hash == p.hash) + collide = true; p = next; } table = tab; - counter.add(size); + addCount(size, -1); + sc = n - (n >>> 2); } } finally { - resizing = 0; + sizeCtl = sc; + } + if (collide) { // rescan and convert to TreeBins + Node[] tab = table; + for (int i = 0; i < tab.length; ++i) { + int c = 0; + for (Node e = tabAt(tab, i); e != null; e = e.next) { + if (++c > TREE_THRESHOLD && + (e.key instanceof Comparable)) { + replaceWithTreeBin(tab, i, e.key); + break; + } + } + } } } if (!init) { // Can only happen if unsafely published. 
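                    // Fallback: the table was already initialized by another
                    // (unsafely published) instance, so re-insert the
                    // reconstructed node chain one mapping at a time below.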
while (p != null) { - internalPut(p.key, p.val, true); + internalPut((K)p.key, (V)p.val, false); p = p.next; } } } } + // ------------------------------------------------------- + + // Sams + /** Interface describing a void action of one argument */ + public interface Action { void apply(A a); } + /** Interface describing a void action of two arguments */ + public interface BiAction { void apply(A a, B b); } + /** Interface describing a function of one argument */ + public interface Fun { T apply(A a); } + /** Interface describing a function of two arguments */ + public interface BiFun { T apply(A a, B b); } + /** Interface describing a function of no arguments */ + public interface Generator { T apply(); } + /** Interface describing a function mapping its argument to a double */ + public interface ObjectToDouble { double apply(A a); } + /** Interface describing a function mapping its argument to a long */ + public interface ObjectToLong { long apply(A a); } + /** Interface describing a function mapping its argument to an int */ + public interface ObjectToInt {int apply(A a); } + /** Interface describing a function mapping two arguments to a double */ + public interface ObjectByObjectToDouble { double apply(A a, B b); } + /** Interface describing a function mapping two arguments to a long */ + public interface ObjectByObjectToLong { long apply(A a, B b); } + /** Interface describing a function mapping two arguments to an int */ + public interface ObjectByObjectToInt {int apply(A a, B b); } + /** Interface describing a function mapping a double to a double */ + public interface DoubleToDouble { double apply(double a); } + /** Interface describing a function mapping a long to a long */ + public interface LongToLong { long apply(long a); } + /** Interface describing a function mapping an int to an int */ + public interface IntToInt { int apply(int a); } + /** Interface describing a function mapping two doubles to a double */ + public interface DoubleByDoubleToDouble { double apply(double a, double b); } + /** Interface describing a function mapping two longs to a long */ + public interface LongByLongToLong { long apply(long a, long b); } + /** Interface describing a function mapping two ints to an int */ + public interface IntByIntToInt { int apply(int a, int b); } + + + // ------------------------------------------------------- + + /** + * Performs the given action for each (key, value). + * + * @param action the action + */ + public void forEach(BiAction action) { + ForkJoinTasks.forEach + (this, action).invoke(); + } + + /** + * Performs the given action for each non-null transformation + * of each (key, value). + * + * @param transformer a function returning the transformation + * for an element, or null of there is no transformation (in + * which case the action is not applied). + * @param action the action + */ + public void forEach(BiFun transformer, + Action action) { + ForkJoinTasks.forEach + (this, transformer, action).invoke(); + } + + /** + * Returns a non-null result from applying the given search + * function on each (key, value), or null if none. Upon + * success, further element processing is suppressed and the + * results of any other parallel invocations of the search + * function are ignored. 
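     * <p>A minimal sketch, assuming a {@code ConcurrentHashMapV8<String,Long>}
     * named {@code map} (the threshold is illustrative only):
     * <pre> {@code
     * String bigKey = map.search(new BiFun<String, Long, String>() {
     *   public String apply(String k, Long v) {
     *     return (v.longValue() > 1000L) ? k : null; // non-null result ends the search
     *   }});}</pre>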
+ * + * @param searchFunction a function returning a non-null + * result on success, else null + * @return a non-null result from applying the given search + * function on each (key, value), or null if none + */ + public U search(BiFun searchFunction) { + return ForkJoinTasks.search + (this, searchFunction).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all (key, value) pairs using the given reducer to + * combine values, or null if none. + * + * @param transformer a function returning the transformation + * for an element, or null of there is no transformation (in + * which case it is not combined). + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all (key, value) pairs + */ + public U reduce(BiFun transformer, + BiFun reducer) { + return ForkJoinTasks.reduce + (this, transformer, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all (key, value) pairs using the given reducer to + * combine values, and the given basis as an identity value. + * + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all (key, value) pairs + */ + public double reduceToDouble(ObjectByObjectToDouble transformer, + double basis, + DoubleByDoubleToDouble reducer) { + return ForkJoinTasks.reduceToDouble + (this, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all (key, value) pairs using the given reducer to + * combine values, and the given basis as an identity value. + * + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all (key, value) pairs + */ + public long reduceToLong(ObjectByObjectToLong transformer, + long basis, + LongByLongToLong reducer) { + return ForkJoinTasks.reduceToLong + (this, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all (key, value) pairs using the given reducer to + * combine values, and the given basis as an identity value. + * + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all (key, value) pairs + */ + public int reduceToInt(ObjectByObjectToInt transformer, + int basis, + IntByIntToInt reducer) { + return ForkJoinTasks.reduceToInt + (this, transformer, basis, reducer).invoke(); + } + + /** + * Performs the given action for each key. + * + * @param action the action + */ + public void forEachKey(Action action) { + ForkJoinTasks.forEachKey + (this, action).invoke(); + } + + /** + * Performs the given action for each non-null transformation + * of each key. + * + * @param transformer a function returning the transformation + * for an element, or null of there is no transformation (in + * which case the action is not applied). 
+ * @param action the action + */ + public void forEachKey(Fun transformer, + Action action) { + ForkJoinTasks.forEachKey + (this, transformer, action).invoke(); + } + + /** + * Returns a non-null result from applying the given search + * function on each key, or null if none. Upon success, + * further element processing is suppressed and the results of + * any other parallel invocations of the search function are + * ignored. + * + * @param searchFunction a function returning a non-null + * result on success, else null + * @return a non-null result from applying the given search + * function on each key, or null if none + */ + public U searchKeys(Fun searchFunction) { + return ForkJoinTasks.searchKeys + (this, searchFunction).invoke(); + } + + /** + * Returns the result of accumulating all keys using the given + * reducer to combine values, or null if none. + * + * @param reducer a commutative associative combining function + * @return the result of accumulating all keys using the given + * reducer to combine values, or null if none + */ + public K reduceKeys(BiFun reducer) { + return ForkJoinTasks.reduceKeys + (this, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all keys using the given reducer to combine values, or + * null if none. + * + * @param transformer a function returning the transformation + * for an element, or null of there is no transformation (in + * which case it is not combined). + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all keys + */ + public U reduceKeys(Fun transformer, + BiFun reducer) { + return ForkJoinTasks.reduceKeys + (this, transformer, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all keys using the given reducer to combine values, and + * the given basis as an identity value. + * + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all keys + */ + public double reduceKeysToDouble(ObjectToDouble transformer, + double basis, + DoubleByDoubleToDouble reducer) { + return ForkJoinTasks.reduceKeysToDouble + (this, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all keys using the given reducer to combine values, and + * the given basis as an identity value. + * + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all keys + */ + public long reduceKeysToLong(ObjectToLong transformer, + long basis, + LongByLongToLong reducer) { + return ForkJoinTasks.reduceKeysToLong + (this, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all keys using the given reducer to combine values, and + * the given basis as an identity value. 
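     * <p>A minimal sketch (total length of all keys, assuming {@code String} keys):
     * <pre> {@code
     * int totalKeyChars = map.reduceKeysToInt(
     *   new ObjectToInt<String>() { public int apply(String k) { return k.length(); } },
     *   0,                                      // identity for addition
     *   new IntByIntToInt() { public int apply(int a, int b) { return a + b; } });}</pre>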
+ * + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all keys + */ + public int reduceKeysToInt(ObjectToInt transformer, + int basis, + IntByIntToInt reducer) { + return ForkJoinTasks.reduceKeysToInt + (this, transformer, basis, reducer).invoke(); + } + + /** + * Performs the given action for each value. + * + * @param action the action + */ + public void forEachValue(Action action) { + ForkJoinTasks.forEachValue + (this, action).invoke(); + } + + /** + * Performs the given action for each non-null transformation + * of each value. + * + * @param transformer a function returning the transformation + * for an element, or null of there is no transformation (in + * which case the action is not applied). + */ + public void forEachValue(Fun transformer, + Action action) { + ForkJoinTasks.forEachValue + (this, transformer, action).invoke(); + } + + /** + * Returns a non-null result from applying the given search + * function on each value, or null if none. Upon success, + * further element processing is suppressed and the results of + * any other parallel invocations of the search function are + * ignored. + * + * @param searchFunction a function returning a non-null + * result on success, else null + * @return a non-null result from applying the given search + * function on each value, or null if none + * + */ + public U searchValues(Fun searchFunction) { + return ForkJoinTasks.searchValues + (this, searchFunction).invoke(); + } + + /** + * Returns the result of accumulating all values using the + * given reducer to combine values, or null if none. + * + * @param reducer a commutative associative combining function + * @return the result of accumulating all values + */ + public V reduceValues(BiFun reducer) { + return ForkJoinTasks.reduceValues + (this, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all values using the given reducer to combine values, or + * null if none. + * + * @param transformer a function returning the transformation + * for an element, or null of there is no transformation (in + * which case it is not combined). + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all values + */ + public U reduceValues(Fun transformer, + BiFun reducer) { + return ForkJoinTasks.reduceValues + (this, transformer, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all values using the given reducer to combine values, + * and the given basis as an identity value. + * + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all values + */ + public double reduceValuesToDouble(ObjectToDouble transformer, + double basis, + DoubleByDoubleToDouble reducer) { + return ForkJoinTasks.reduceValuesToDouble + (this, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all values using the given reducer to combine values, + * and the given basis as an identity value. 
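     * <p>A minimal sketch (sum of all values, assuming {@code Long} values):
     * <pre> {@code
     * long sum = map.reduceValuesToLong(
     *   new ObjectToLong<Long>() { public long apply(Long v) { return v.longValue(); } },
     *   0L,                                     // identity for addition
     *   new LongByLongToLong() { public long apply(long a, long b) { return a + b; } });}</pre>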
+ * + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all values + */ + public long reduceValuesToLong(ObjectToLong transformer, + long basis, + LongByLongToLong reducer) { + return ForkJoinTasks.reduceValuesToLong + (this, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all values using the given reducer to combine values, + * and the given basis as an identity value. + * + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all values + */ + public int reduceValuesToInt(ObjectToInt transformer, + int basis, + IntByIntToInt reducer) { + return ForkJoinTasks.reduceValuesToInt + (this, transformer, basis, reducer).invoke(); + } + + /** + * Performs the given action for each entry. + * + * @param action the action + */ + public void forEachEntry(Action> action) { + ForkJoinTasks.forEachEntry + (this, action).invoke(); + } + + /** + * Performs the given action for each non-null transformation + * of each entry. + * + * @param transformer a function returning the transformation + * for an element, or null of there is no transformation (in + * which case the action is not applied). + * @param action the action + */ + public void forEachEntry(Fun, ? extends U> transformer, + Action action) { + ForkJoinTasks.forEachEntry + (this, transformer, action).invoke(); + } + + /** + * Returns a non-null result from applying the given search + * function on each entry, or null if none. Upon success, + * further element processing is suppressed and the results of + * any other parallel invocations of the search function are + * ignored. + * + * @param searchFunction a function returning a non-null + * result on success, else null + * @return a non-null result from applying the given search + * function on each entry, or null if none + */ + public U searchEntries(Fun, ? extends U> searchFunction) { + return ForkJoinTasks.searchEntries + (this, searchFunction).invoke(); + } + + /** + * Returns the result of accumulating all entries using the + * given reducer to combine values, or null if none. + * + * @param reducer a commutative associative combining function + * @return the result of accumulating all entries + */ + public Map.Entry reduceEntries(BiFun, Map.Entry, ? extends Map.Entry> reducer) { + return ForkJoinTasks.reduceEntries + (this, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all entries using the given reducer to combine values, + * or null if none. + * + * @param transformer a function returning the transformation + * for an element, or null of there is no transformation (in + * which case it is not combined). + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all entries + */ + public U reduceEntries(Fun, ? 
extends U> transformer, + BiFun reducer) { + return ForkJoinTasks.reduceEntries + (this, transformer, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all entries using the given reducer to combine values, + * and the given basis as an identity value. + * + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all entries + */ + public double reduceEntriesToDouble(ObjectToDouble> transformer, + double basis, + DoubleByDoubleToDouble reducer) { + return ForkJoinTasks.reduceEntriesToDouble + (this, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all entries using the given reducer to combine values, + * and the given basis as an identity value. + * + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all entries + */ + public long reduceEntriesToLong(ObjectToLong> transformer, + long basis, + LongByLongToLong reducer) { + return ForkJoinTasks.reduceEntriesToLong + (this, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all entries using the given reducer to combine values, + * and the given basis as an identity value. + * + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all entries + */ + public int reduceEntriesToInt(ObjectToInt> transformer, + int basis, + IntByIntToInt reducer) { + return ForkJoinTasks.reduceEntriesToInt + (this, transformer, basis, reducer).invoke(); + } + + /* ----------------Views -------------- */ + + /** + * Base class for views. + */ + static abstract class CHMView { + final ConcurrentHashMapV8 map; + CHMView(ConcurrentHashMapV8 map) { this.map = map; } + + /** + * Returns the map backing this view. + * + * @return the map backing this view + */ + public ConcurrentHashMapV8 getMap() { return map; } + + public final int size() { return map.size(); } + public final boolean isEmpty() { return map.isEmpty(); } + public final void clear() { map.clear(); } + + // implementations below rely on concrete classes supplying these + abstract public Iterator iterator(); + abstract public boolean contains(Object o); + abstract public boolean remove(Object o); + + private static final String oomeMsg = "Required array size too large"; + + public final Object[] toArray() { + long sz = map.mappingCount(); + if (sz > (long)(MAX_ARRAY_SIZE)) + throw new OutOfMemoryError(oomeMsg); + int n = (int)sz; + Object[] r = new Object[n]; + int i = 0; + Iterator it = iterator(); + while (it.hasNext()) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = it.next(); + } + return (i == n) ? 
r : Arrays.copyOf(r, i); + } + + @SuppressWarnings("unchecked") public final T[] toArray(T[] a) { + long sz = map.mappingCount(); + if (sz > (long)(MAX_ARRAY_SIZE)) + throw new OutOfMemoryError(oomeMsg); + int m = (int)sz; + T[] r = (a.length >= m) ? a : + (T[])java.lang.reflect.Array + .newInstance(a.getClass().getComponentType(), m); + int n = r.length; + int i = 0; + Iterator it = iterator(); + while (it.hasNext()) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = (T)it.next(); + } + if (a == r && i < n) { + r[i] = null; // null-terminate + return r; + } + return (i == n) ? r : Arrays.copyOf(r, i); + } + + public final int hashCode() { + int h = 0; + for (Iterator it = iterator(); it.hasNext();) + h += it.next().hashCode(); + return h; + } + + public final String toString() { + StringBuilder sb = new StringBuilder(); + sb.append('['); + Iterator it = iterator(); + if (it.hasNext()) { + for (;;) { + Object e = it.next(); + sb.append(e == this ? "(this Collection)" : e); + if (!it.hasNext()) + break; + sb.append(',').append(' '); + } + } + return sb.append(']').toString(); + } + + public final boolean containsAll(Collection c) { + if (c != this) { + for (Iterator it = c.iterator(); it.hasNext();) { + Object e = it.next(); + if (e == null || !contains(e)) + return false; + } + } + return true; + } + + public final boolean removeAll(Collection c) { + boolean modified = false; + for (Iterator it = iterator(); it.hasNext();) { + if (c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + public final boolean retainAll(Collection c) { + boolean modified = false; + for (Iterator it = iterator(); it.hasNext();) { + if (!c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Set} of keys, in + * which additions may optionally be enabled by mapping to a + * common value. This class cannot be directly instantiated. See + * {@link #keySet}, {@link #keySet(Object)}, {@link #newKeySet()}, + * {@link #newKeySet(int)}. + */ + public static class KeySetView extends CHMView + implements Set, java.io.Serializable { + private static final long serialVersionUID = 7249069246763182397L; + private final V value; + KeySetView(ConcurrentHashMapV8 map, V value) { // non-public + super(map); + this.value = value; + } + + /** + * Returns the default mapped value for additions, + * or {@code null} if additions are not supported. + * + * @return the default mapped value for additions, or {@code null} + * if not supported. + */ + public V getMappedValue() { return value; } + + // implement Set API + + public boolean contains(Object o) { return map.containsKey(o); } + public boolean remove(Object o) { return map.remove(o) != null; } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. 
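         * <p>A minimal sketch, assuming a key-set view {@code keys} over a map
         * with {@code String} keys (safe to run while other threads update the map):
         * <pre> {@code
         * for (String k : keys)
         *   System.out.println(k);              // weakly consistent traversal}</pre>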
+ * + * @return an iterator over the keys of this map + */ + public Iterator iterator() { return new KeyIterator(map); } + public boolean add(K e) { + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + if (e == null) + throw new NullPointerException(); + return map.internalPut(e, v, true) == null; + } + public boolean addAll(Collection c) { + boolean added = false; + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + for (K e : c) { + if (e == null) + throw new NullPointerException(); + if (map.internalPut(e, v, true) == null) + added = true; + } + return added; + } + public boolean equals(Object o) { + Set c; + return ((o instanceof Set) && + ((c = (Set)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + + /** + * Performs the given action for each key. + * + * @param action the action + */ + public void forEach(Action action) { + ForkJoinTasks.forEachKey + (map, action).invoke(); + } + + /** + * Performs the given action for each non-null transformation + * of each key. + * + * @param transformer a function returning the transformation + * for an element, or null of there is no transformation (in + * which case the action is not applied). + * @param action the action + */ + public void forEach(Fun transformer, + Action action) { + ForkJoinTasks.forEachKey + (map, transformer, action).invoke(); + } + + /** + * Returns a non-null result from applying the given search + * function on each key, or null if none. Upon success, + * further element processing is suppressed and the results of + * any other parallel invocations of the search function are + * ignored. + * + * @param searchFunction a function returning a non-null + * result on success, else null + * @return a non-null result from applying the given search + * function on each key, or null if none + */ + public U search(Fun searchFunction) { + return ForkJoinTasks.searchKeys + (map, searchFunction).invoke(); + } + + /** + * Returns the result of accumulating all keys using the given + * reducer to combine values, or null if none. + * + * @param reducer a commutative associative combining function + * @return the result of accumulating all keys using the given + * reducer to combine values, or null if none + */ + public K reduce(BiFun reducer) { + return ForkJoinTasks.reduceKeys + (map, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all keys using the given reducer to combine values, and + * the given basis as an identity value. + * + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all keys + */ + public double reduceToDouble(ObjectToDouble transformer, + double basis, + DoubleByDoubleToDouble reducer) { + return ForkJoinTasks.reduceKeysToDouble + (map, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all keys using the given reducer to combine values, and + * the given basis as an identity value. 
+ * + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all keys + */ + public long reduceToLong(ObjectToLong transformer, + long basis, + LongByLongToLong reducer) { + return ForkJoinTasks.reduceKeysToLong + (map, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all keys using the given reducer to combine values, and + * the given basis as an identity value. + * + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all keys + */ + public int reduceToInt(ObjectToInt transformer, + int basis, + IntByIntToInt reducer) { + return ForkJoinTasks.reduceKeysToInt + (map, transformer, basis, reducer).invoke(); + } + + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Collection} of + * values, in which additions are disabled. This class cannot be + * directly instantiated. See {@link #values}, + * + *

The view's {@code iterator} is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + public static final class ValuesView extends CHMView + implements Collection { + ValuesView(ConcurrentHashMapV8 map) { super(map); } + public final boolean contains(Object o) { return map.containsValue(o); } + public final boolean remove(Object o) { + if (o != null) { + Iterator it = new ValueIterator(map); + while (it.hasNext()) { + if (o.equals(it.next())) { + it.remove(); + return true; + } + } + } + return false; + } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. + * + * @return an iterator over the values of this map + */ + public final Iterator iterator() { + return new ValueIterator(map); + } + public final boolean add(V e) { + throw new UnsupportedOperationException(); + } + public final boolean addAll(Collection c) { + throw new UnsupportedOperationException(); + } + + /** + * Performs the given action for each value. + * + * @param action the action + */ + public void forEach(Action action) { + ForkJoinTasks.forEachValue + (map, action).invoke(); + } + + /** + * Performs the given action for each non-null transformation + * of each value. + * + * @param transformer a function returning the transformation + * for an element, or null of there is no transformation (in + * which case the action is not applied). + */ + public void forEach(Fun transformer, + Action action) { + ForkJoinTasks.forEachValue + (map, transformer, action).invoke(); + } + + /** + * Returns a non-null result from applying the given search + * function on each value, or null if none. Upon success, + * further element processing is suppressed and the results of + * any other parallel invocations of the search function are + * ignored. + * + * @param searchFunction a function returning a non-null + * result on success, else null + * @return a non-null result from applying the given search + * function on each value, or null if none + * + */ + public U search(Fun searchFunction) { + return ForkJoinTasks.searchValues + (map, searchFunction).invoke(); + } + + /** + * Returns the result of accumulating all values using the + * given reducer to combine values, or null if none. + * + * @param reducer a commutative associative combining function + * @return the result of accumulating all values + */ + public V reduce(BiFun reducer) { + return ForkJoinTasks.reduceValues + (map, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all values using the given reducer to combine values, or + * null if none. + * + * @param transformer a function returning the transformation + * for an element, or null of there is no transformation (in + * which case it is not combined). 
+ * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all values + */ + public U reduce(Fun transformer, + BiFun reducer) { + return ForkJoinTasks.reduceValues + (map, transformer, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all values using the given reducer to combine values, + * and the given basis as an identity value. + * + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all values + */ + public double reduceToDouble(ObjectToDouble transformer, + double basis, + DoubleByDoubleToDouble reducer) { + return ForkJoinTasks.reduceValuesToDouble + (map, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all values using the given reducer to combine values, + * and the given basis as an identity value. + * + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all values + */ + public long reduceToLong(ObjectToLong transformer, + long basis, + LongByLongToLong reducer) { + return ForkJoinTasks.reduceValuesToLong + (map, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all values using the given reducer to combine values, + * and the given basis as an identity value. + * + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all values + */ + public int reduceToInt(ObjectToInt transformer, + int basis, + IntByIntToInt reducer) { + return ForkJoinTasks.reduceValuesToInt + (map, transformer, basis, reducer).invoke(); + } + + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Set} of (key, value) + * entries. This class cannot be directly instantiated. See + * {@link #entrySet}. + */ + public static final class EntrySetView extends CHMView + implements Set> { + EntrySetView(ConcurrentHashMapV8 map) { super(map); } + public final boolean contains(Object o) { + Object k, v, r; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (r = map.get(k)) != null && + (v = e.getValue()) != null && + (v == r || v.equals(r))); + } + public final boolean remove(Object o) { + Object k, v; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (v = e.getValue()) != null && + map.remove(k, v)); + } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. 
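         * <p>A minimal sketch, assuming an entry-set view {@code entries} over a
         * map with {@code String} keys and {@code Long} values:
         * <pre> {@code
         * for (Map.Entry<String,Long> e : entries) {
         *   if (e.getValue().longValue() < 0L)
         *     e.setValue(Long.valueOf(0L));     // writes through to the map
         * }}</pre>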
+ * + * @return an iterator over the entries of this map + */ + public final Iterator> iterator() { + return new EntryIterator(map); + } + + public final boolean add(Entry e) { + K key = e.getKey(); + V value = e.getValue(); + if (key == null || value == null) + throw new NullPointerException(); + return map.internalPut(key, value, false) == null; + } + public final boolean addAll(Collection> c) { + boolean added = false; + for (Entry e : c) { + if (add(e)) + added = true; + } + return added; + } + public boolean equals(Object o) { + Set c; + return ((o instanceof Set) && + ((c = (Set)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + + /** + * Performs the given action for each entry. + * + * @param action the action + */ + public void forEach(Action> action) { + ForkJoinTasks.forEachEntry + (map, action).invoke(); + } + + /** + * Performs the given action for each non-null transformation + * of each entry. + * + * @param transformer a function returning the transformation + * for an element, or null of there is no transformation (in + * which case the action is not applied). + * @param action the action + */ + public void forEach(Fun, ? extends U> transformer, + Action action) { + ForkJoinTasks.forEachEntry + (map, transformer, action).invoke(); + } + + /** + * Returns a non-null result from applying the given search + * function on each entry, or null if none. Upon success, + * further element processing is suppressed and the results of + * any other parallel invocations of the search function are + * ignored. + * + * @param searchFunction a function returning a non-null + * result on success, else null + * @return a non-null result from applying the given search + * function on each entry, or null if none + */ + public U search(Fun, ? extends U> searchFunction) { + return ForkJoinTasks.searchEntries + (map, searchFunction).invoke(); + } + + /** + * Returns the result of accumulating all entries using the + * given reducer to combine values, or null if none. + * + * @param reducer a commutative associative combining function + * @return the result of accumulating all entries + */ + public Map.Entry reduce(BiFun, Map.Entry, ? extends Map.Entry> reducer) { + return ForkJoinTasks.reduceEntries + (map, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all entries using the given reducer to combine values, + * or null if none. + * + * @param transformer a function returning the transformation + * for an element, or null of there is no transformation (in + * which case it is not combined). + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all entries + */ + public U reduce(Fun, ? extends U> transformer, + BiFun reducer) { + return ForkJoinTasks.reduceEntries + (map, transformer, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all entries using the given reducer to combine values, + * and the given basis as an identity value. 
+ * + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all entries + */ + public double reduceToDouble(ObjectToDouble> transformer, + double basis, + DoubleByDoubleToDouble reducer) { + return ForkJoinTasks.reduceEntriesToDouble + (map, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all entries using the given reducer to combine values, + * and the given basis as an identity value. + * + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all entries + */ + public long reduceToLong(ObjectToLong> transformer, + long basis, + LongByLongToLong reducer) { + return ForkJoinTasks.reduceEntriesToLong + (map, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all entries using the given reducer to combine values, + * and the given basis as an identity value. + * + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all entries + */ + public int reduceToInt(ObjectToInt> transformer, + int basis, + IntByIntToInt reducer) { + return ForkJoinTasks.reduceEntriesToInt + (map, transformer, basis, reducer).invoke(); + } + + } + + // --------------------------------------------------------------------- + + /** + * Predefined tasks for performing bulk parallel operations on + * ConcurrentHashMapV8s. These tasks follow the forms and rules used + * for bulk operations. Each method has the same name, but returns + * a task rather than invoking it. These methods may be useful in + * custom applications such as submitting a task without waiting + * for completion, using a custom pool, or combining with other + * tasks. 
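     * <p>A minimal sketch of the submit-without-waiting usage (assumes a
     * compatible {@code ForkJoinPool} named {@code pool} and a map with
     * {@code Long} values; both are illustrative only):
     * <pre> {@code
     * ForkJoinTask<Long> task = ForkJoinTasks.reduceValuesToLong(map,
     *   new ObjectToLong<Long>() { public long apply(Long v) { return v.longValue(); } },
     *   0L,
     *   new LongByLongToLong() { public long apply(long a, long b) { return a + b; } });
     * pool.submit(task);                        // runs asynchronously in the chosen pool
     * // ... do other work ...
     * long sum = task.join();}</pre>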
+ */ + public static class ForkJoinTasks { + private ForkJoinTasks() {} + + /** + * Returns a task that when invoked, performs the given + * action for each (key, value) + * + * @param map the map + * @param action the action + * @return the task + */ + public static ForkJoinTask forEach + (ConcurrentHashMapV8 map, + BiAction action) { + if (action == null) throw new NullPointerException(); + return new ForEachMappingTask(map, null, -1, action); + } + + /** + * Returns a task that when invoked, performs the given + * action for each non-null transformation of each (key, value) + * + * @param map the map + * @param transformer a function returning the transformation + * for an element, or null if there is no transformation (in + * which case the action is not applied) + * @param action the action + * @return the task + */ + public static ForkJoinTask forEach + (ConcurrentHashMapV8 map, + BiFun transformer, + Action action) { + if (transformer == null || action == null) + throw new NullPointerException(); + return new ForEachTransformedMappingTask + (map, null, -1, transformer, action); + } + + /** + * Returns a task that when invoked, returns a non-null result + * from applying the given search function on each (key, + * value), or null if none. Upon success, further element + * processing is suppressed and the results of any other + * parallel invocations of the search function are ignored. + * + * @param map the map + * @param searchFunction a function returning a non-null + * result on success, else null + * @return the task + */ + public static ForkJoinTask search + (ConcurrentHashMapV8 map, + BiFun searchFunction) { + if (searchFunction == null) throw new NullPointerException(); + return new SearchMappingsTask + (map, null, -1, searchFunction, + new AtomicReference()); + } + + /** + * Returns a task that when invoked, returns the result of + * accumulating the given transformation of all (key, value) pairs + * using the given reducer to combine values, or null if none. + * + * @param map the map + * @param transformer a function returning the transformation + * for an element, or null if there is no transformation (in + * which case it is not combined). + * @param reducer a commutative associative combining function + * @return the task + */ + public static ForkJoinTask reduce + (ConcurrentHashMapV8 map, + BiFun transformer, + BiFun reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceMappingsTask + (map, null, -1, null, transformer, reducer); + } + + /** + * Returns a task that when invoked, returns the result of + * accumulating the given transformation of all (key, value) pairs + * using the given reducer to combine values, and the given + * basis as an identity value. 
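     * <p>A minimal sketch (sum of values as doubles, assuming {@code String} keys
     * and {@code Long} values):
     * <pre> {@code
     * double sum = ForkJoinTasks.reduceToDouble(map,
     *   new ObjectByObjectToDouble<String,Long>() {
     *     public double apply(String k, Long v) { return v.doubleValue(); } },
     *   0.0,
     *   new DoubleByDoubleToDouble() {
     *     public double apply(double a, double b) { return a + b; } }).invoke();}</pre>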
+ * + * @param map the map + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the task + */ + public static ForkJoinTask reduceToDouble + (ConcurrentHashMapV8 map, + ObjectByObjectToDouble transformer, + double basis, + DoubleByDoubleToDouble reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceMappingsToDoubleTask + (map, null, -1, null, transformer, basis, reducer); + } + + /** + * Returns a task that when invoked, returns the result of + * accumulating the given transformation of all (key, value) pairs + * using the given reducer to combine values, and the given + * basis as an identity value. + * + * @param map the map + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the task + */ + public static ForkJoinTask reduceToLong + (ConcurrentHashMapV8 map, + ObjectByObjectToLong transformer, + long basis, + LongByLongToLong reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceMappingsToLongTask + (map, null, -1, null, transformer, basis, reducer); + } + + /** + * Returns a task that when invoked, returns the result of + * accumulating the given transformation of all (key, value) pairs + * using the given reducer to combine values, and the given + * basis as an identity value. + * + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the task + */ + public static ForkJoinTask reduceToInt + (ConcurrentHashMapV8 map, + ObjectByObjectToInt transformer, + int basis, + IntByIntToInt reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceMappingsToIntTask + (map, null, -1, null, transformer, basis, reducer); + } + + /** + * Returns a task that when invoked, performs the given action + * for each key. + * + * @param map the map + * @param action the action + * @return the task + */ + public static ForkJoinTask forEachKey + (ConcurrentHashMapV8 map, + Action action) { + if (action == null) throw new NullPointerException(); + return new ForEachKeyTask(map, null, -1, action); + } + + /** + * Returns a task that when invoked, performs the given action + * for each non-null transformation of each key. + * + * @param map the map + * @param transformer a function returning the transformation + * for an element, or null if there is no transformation (in + * which case the action is not applied) + * @param action the action + * @return the task + */ + public static ForkJoinTask forEachKey + (ConcurrentHashMapV8 map, + Fun transformer, + Action action) { + if (transformer == null || action == null) + throw new NullPointerException(); + return new ForEachTransformedKeyTask + (map, null, -1, transformer, action); + } + + /** + * Returns a task that when invoked, returns a non-null result + * from applying the given search function on each key, or + * null if none. 
Upon success, further element processing is + * suppressed and the results of any other parallel + * invocations of the search function are ignored. + * + * @param map the map + * @param searchFunction a function returning a non-null + * result on success, else null + * @return the task + */ + public static ForkJoinTask searchKeys + (ConcurrentHashMapV8 map, + Fun searchFunction) { + if (searchFunction == null) throw new NullPointerException(); + return new SearchKeysTask + (map, null, -1, searchFunction, + new AtomicReference()); + } + + /** + * Returns a task that when invoked, returns the result of + * accumulating all keys using the given reducer to combine + * values, or null if none. + * + * @param map the map + * @param reducer a commutative associative combining function + * @return the task + */ + public static ForkJoinTask reduceKeys + (ConcurrentHashMapV8 map, + BiFun reducer) { + if (reducer == null) throw new NullPointerException(); + return new ReduceKeysTask + (map, null, -1, null, reducer); + } + + /** + * Returns a task that when invoked, returns the result of + * accumulating the given transformation of all keys using the given + * reducer to combine values, or null if none. + * + * @param map the map + * @param transformer a function returning the transformation + * for an element, or null if there is no transformation (in + * which case it is not combined). + * @param reducer a commutative associative combining function + * @return the task + */ + public static ForkJoinTask reduceKeys + (ConcurrentHashMapV8 map, + Fun transformer, + BiFun reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceKeysTask + (map, null, -1, null, transformer, reducer); + } + + /** + * Returns a task that when invoked, returns the result of + * accumulating the given transformation of all keys using the given + * reducer to combine values, and the given basis as an + * identity value. + * + * @param map the map + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the task + */ + public static ForkJoinTask reduceKeysToDouble + (ConcurrentHashMapV8 map, + ObjectToDouble transformer, + double basis, + DoubleByDoubleToDouble reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceKeysToDoubleTask + (map, null, -1, null, transformer, basis, reducer); + } + + /** + * Returns a task that when invoked, returns the result of + * accumulating the given transformation of all keys using the given + * reducer to combine values, and the given basis as an + * identity value. 
+ * + * @param map the map + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the task + */ + public static ForkJoinTask reduceKeysToLong + (ConcurrentHashMapV8 map, + ObjectToLong transformer, + long basis, + LongByLongToLong reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceKeysToLongTask + (map, null, -1, null, transformer, basis, reducer); + } + + /** + * Returns a task that when invoked, returns the result of + * accumulating the given transformation of all keys using the given + * reducer to combine values, and the given basis as an + * identity value. + * + * @param map the map + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the task + */ + public static ForkJoinTask reduceKeysToInt + (ConcurrentHashMapV8 map, + ObjectToInt transformer, + int basis, + IntByIntToInt reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceKeysToIntTask + (map, null, -1, null, transformer, basis, reducer); + } + + /** + * Returns a task that when invoked, performs the given action + * for each value. + * + * @param map the map + * @param action the action + */ + public static ForkJoinTask forEachValue + (ConcurrentHashMapV8 map, + Action action) { + if (action == null) throw new NullPointerException(); + return new ForEachValueTask(map, null, -1, action); + } + + /** + * Returns a task that when invoked, performs the given action + * for each non-null transformation of each value. + * + * @param map the map + * @param transformer a function returning the transformation + * for an element, or null if there is no transformation (in + * which case the action is not applied) + * @param action the action + */ + public static ForkJoinTask forEachValue + (ConcurrentHashMapV8 map, + Fun transformer, + Action action) { + if (transformer == null || action == null) + throw new NullPointerException(); + return new ForEachTransformedValueTask + (map, null, -1, transformer, action); + } + + /** + * Returns a task that when invoked, returns a non-null result + * from applying the given search function on each value, or + * null if none. Upon success, further element processing is + * suppressed and the results of any other parallel + * invocations of the search function are ignored. + * + * @param map the map + * @param searchFunction a function returning a non-null + * result on success, else null + * @return the task + */ + public static ForkJoinTask searchValues + (ConcurrentHashMapV8 map, + Fun searchFunction) { + if (searchFunction == null) throw new NullPointerException(); + return new SearchValuesTask + (map, null, -1, searchFunction, + new AtomicReference()); + } + + /** + * Returns a task that when invoked, returns the result of + * accumulating all values using the given reducer to combine + * values, or null if none. 
+ * + * @param map the map + * @param reducer a commutative associative combining function + * @return the task + */ + public static ForkJoinTask reduceValues + (ConcurrentHashMapV8 map, + BiFun reducer) { + if (reducer == null) throw new NullPointerException(); + return new ReduceValuesTask + (map, null, -1, null, reducer); + } + + /** + * Returns a task that when invoked, returns the result of + * accumulating the given transformation of all values using the + * given reducer to combine values, or null if none. + * + * @param map the map + * @param transformer a function returning the transformation + * for an element, or null if there is no transformation (in + * which case it is not combined). + * @param reducer a commutative associative combining function + * @return the task + */ + public static ForkJoinTask reduceValues + (ConcurrentHashMapV8 map, + Fun transformer, + BiFun reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceValuesTask + (map, null, -1, null, transformer, reducer); + } + + /** + * Returns a task that when invoked, returns the result of + * accumulating the given transformation of all values using the + * given reducer to combine values, and the given basis as an + * identity value. + * + * @param map the map + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the task + */ + public static ForkJoinTask reduceValuesToDouble + (ConcurrentHashMapV8 map, + ObjectToDouble transformer, + double basis, + DoubleByDoubleToDouble reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceValuesToDoubleTask + (map, null, -1, null, transformer, basis, reducer); + } + + /** + * Returns a task that when invoked, returns the result of + * accumulating the given transformation of all values using the + * given reducer to combine values, and the given basis as an + * identity value. + * + * @param map the map + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the task + */ + public static ForkJoinTask reduceValuesToLong + (ConcurrentHashMapV8 map, + ObjectToLong transformer, + long basis, + LongByLongToLong reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceValuesToLongTask + (map, null, -1, null, transformer, basis, reducer); + } + + /** + * Returns a task that when invoked, returns the result of + * accumulating the given transformation of all values using the + * given reducer to combine values, and the given basis as an + * identity value. 
+ * + * @param map the map + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the task + */ + public static ForkJoinTask reduceValuesToInt + (ConcurrentHashMapV8 map, + ObjectToInt transformer, + int basis, + IntByIntToInt reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceValuesToIntTask + (map, null, -1, null, transformer, basis, reducer); + } + + /** + * Returns a task that when invoked, perform the given action + * for each entry. + * + * @param map the map + * @param action the action + */ + public static ForkJoinTask forEachEntry + (ConcurrentHashMapV8 map, + Action> action) { + if (action == null) throw new NullPointerException(); + return new ForEachEntryTask(map, null, -1, action); + } + + /** + * Returns a task that when invoked, perform the given action + * for each non-null transformation of each entry. + * + * @param map the map + * @param transformer a function returning the transformation + * for an element, or null if there is no transformation (in + * which case the action is not applied) + * @param action the action + */ + public static ForkJoinTask forEachEntry + (ConcurrentHashMapV8 map, + Fun, ? extends U> transformer, + Action action) { + if (transformer == null || action == null) + throw new NullPointerException(); + return new ForEachTransformedEntryTask + (map, null, -1, transformer, action); + } + + /** + * Returns a task that when invoked, returns a non-null result + * from applying the given search function on each entry, or + * null if none. Upon success, further element processing is + * suppressed and the results of any other parallel + * invocations of the search function are ignored. + * + * @param map the map + * @param searchFunction a function returning a non-null + * result on success, else null + * @return the task + */ + public static ForkJoinTask searchEntries + (ConcurrentHashMapV8 map, + Fun, ? extends U> searchFunction) { + if (searchFunction == null) throw new NullPointerException(); + return new SearchEntriesTask + (map, null, -1, searchFunction, + new AtomicReference()); + } + + /** + * Returns a task that when invoked, returns the result of + * accumulating all entries using the given reducer to combine + * values, or null if none. + * + * @param map the map + * @param reducer a commutative associative combining function + * @return the task + */ + public static ForkJoinTask> reduceEntries + (ConcurrentHashMapV8 map, + BiFun, Map.Entry, ? extends Map.Entry> reducer) { + if (reducer == null) throw new NullPointerException(); + return new ReduceEntriesTask + (map, null, -1, null, reducer); + } + + /** + * Returns a task that when invoked, returns the result of + * accumulating the given transformation of all entries using the + * given reducer to combine values, or null if none. + * + * @param map the map + * @param transformer a function returning the transformation + * for an element, or null if there is no transformation (in + * which case it is not combined). + * @param reducer a commutative associative combining function + * @return the task + */ + public static ForkJoinTask reduceEntries + (ConcurrentHashMapV8 map, + Fun, ? 
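The entry search factory returns whatever non-null result some invocation of the search function produces first, and further traversal is abandoned once a result is published. A sketch under the same assumptions about the enclosing ForkJoinTasks class and the Fun callback interface:

    import java.util.Map;

    import jsr166e.ConcurrentHashMapV8;

    public class SearchEntriesExample {
        public static void main(String[] args) {
            ConcurrentHashMapV8<String, Integer> stock =
                new ConcurrentHashMapV8<String, Integer>();
            stock.put("bolts", 120);
            stock.put("nuts", 4);

            // Returns the key of some entry whose value is below 10, or null if
            // none; once any worker finds a match, remaining traversal stops.
            String lowItem = ConcurrentHashMapV8.ForkJoinTasks.searchEntries(
                stock,
                new ConcurrentHashMapV8.Fun<Map.Entry<String, Integer>, String>() {
                    public String apply(Map.Entry<String, Integer> e) {
                        return e.getValue() < 10 ? e.getKey() : null;
                    }
                }).invoke();

            System.out.println(lowItem); // nuts
        }
    }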
extends U> transformer, + BiFun reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceEntriesTask + (map, null, -1, null, transformer, reducer); + } + + /** + * Returns a task that when invoked, returns the result of + * accumulating the given transformation of all entries using the + * given reducer to combine values, and the given basis as an + * identity value. + * + * @param map the map + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the task + */ + public static ForkJoinTask reduceEntriesToDouble + (ConcurrentHashMapV8 map, + ObjectToDouble> transformer, + double basis, + DoubleByDoubleToDouble reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceEntriesToDoubleTask + (map, null, -1, null, transformer, basis, reducer); + } + + /** + * Returns a task that when invoked, returns the result of + * accumulating the given transformation of all entries using the + * given reducer to combine values, and the given basis as an + * identity value. + * + * @param map the map + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the task + */ + public static ForkJoinTask reduceEntriesToLong + (ConcurrentHashMapV8 map, + ObjectToLong> transformer, + long basis, + LongByLongToLong reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceEntriesToLongTask + (map, null, -1, null, transformer, basis, reducer); + } + + /** + * Returns a task that when invoked, returns the result of + * accumulating the given transformation of all entries using the + * given reducer to combine values, and the given basis as an + * identity value. + * + * @param map the map + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the task + */ + public static ForkJoinTask reduceEntriesToInt + (ConcurrentHashMapV8 map, + ObjectToInt> transformer, + int basis, + IntByIntToInt reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceEntriesToIntTask + (map, null, -1, null, transformer, basis, reducer); + } + } + + // ------------------------------------------------------- + + /* + * Task classes. Coded in a regular but ugly format/style to + * simplify checks that each variant differs in the right way from + * others. The null screenings exist because compilers cannot tell + * that we've already null-checked task arguments, so we force + * simplest hoisted bypass to help avoid convoluted traps. 
+ */ + + @SuppressWarnings("serial") static final class ForEachKeyTask + extends Traverser { + final Action action; + ForEachKeyTask + (ConcurrentHashMapV8 m, Traverser p, int b, + Action action) { + super(m, p, b); + this.action = action; + } + @SuppressWarnings("unchecked") public final void compute() { + final Action action; + if ((action = this.action) != null) { + for (int b; (b = preSplit()) > 0;) + new ForEachKeyTask(map, this, b, action).fork(); + while (advance() != null) + action.apply((K)nextKey); + propagateCompletion(); + } + } + } + + @SuppressWarnings("serial") static final class ForEachValueTask + extends Traverser { + final Action action; + ForEachValueTask + (ConcurrentHashMapV8 m, Traverser p, int b, + Action action) { + super(m, p, b); + this.action = action; + } + @SuppressWarnings("unchecked") public final void compute() { + final Action action; + if ((action = this.action) != null) { + for (int b; (b = preSplit()) > 0;) + new ForEachValueTask(map, this, b, action).fork(); + Object v; + while ((v = advance()) != null) + action.apply((V)v); + propagateCompletion(); + } + } + } + + @SuppressWarnings("serial") static final class ForEachEntryTask + extends Traverser { + final Action> action; + ForEachEntryTask + (ConcurrentHashMapV8 m, Traverser p, int b, + Action> action) { + super(m, p, b); + this.action = action; + } + @SuppressWarnings("unchecked") public final void compute() { + final Action> action; + if ((action = this.action) != null) { + for (int b; (b = preSplit()) > 0;) + new ForEachEntryTask(map, this, b, action).fork(); + Object v; + while ((v = advance()) != null) + action.apply(entryFor((K)nextKey, (V)v)); + propagateCompletion(); + } + } + } + + @SuppressWarnings("serial") static final class ForEachMappingTask + extends Traverser { + final BiAction action; + ForEachMappingTask + (ConcurrentHashMapV8 m, Traverser p, int b, + BiAction action) { + super(m, p, b); + this.action = action; + } + @SuppressWarnings("unchecked") public final void compute() { + final BiAction action; + if ((action = this.action) != null) { + for (int b; (b = preSplit()) > 0;) + new ForEachMappingTask(map, this, b, action).fork(); + Object v; + while ((v = advance()) != null) + action.apply((K)nextKey, (V)v); + propagateCompletion(); + } + } + } + + @SuppressWarnings("serial") static final class ForEachTransformedKeyTask + extends Traverser { + final Fun transformer; + final Action action; + ForEachTransformedKeyTask + (ConcurrentHashMapV8 m, Traverser p, int b, + Fun transformer, Action action) { + super(m, p, b); + this.transformer = transformer; this.action = action; + } + @SuppressWarnings("unchecked") public final void compute() { + final Fun transformer; + final Action action; + if ((transformer = this.transformer) != null && + (action = this.action) != null) { + for (int b; (b = preSplit()) > 0;) + new ForEachTransformedKeyTask + (map, this, b, transformer, action).fork(); + U u; + while (advance() != null) { + if ((u = transformer.apply((K)nextKey)) != null) + action.apply(u); + } + propagateCompletion(); + } + } + } + + @SuppressWarnings("serial") static final class ForEachTransformedValueTask + extends Traverser { + final Fun transformer; + final Action action; + ForEachTransformedValueTask + (ConcurrentHashMapV8 m, Traverser p, int b, + Fun transformer, Action action) { + super(m, p, b); + this.transformer = transformer; this.action = action; + } + @SuppressWarnings("unchecked") public final void compute() { + final Fun transformer; + final Action action; + if 
((transformer = this.transformer) != null && + (action = this.action) != null) { + for (int b; (b = preSplit()) > 0;) + new ForEachTransformedValueTask + (map, this, b, transformer, action).fork(); + Object v; U u; + while ((v = advance()) != null) { + if ((u = transformer.apply((V)v)) != null) + action.apply(u); + } + propagateCompletion(); + } + } + } + + @SuppressWarnings("serial") static final class ForEachTransformedEntryTask + extends Traverser { + final Fun, ? extends U> transformer; + final Action action; + ForEachTransformedEntryTask + (ConcurrentHashMapV8 m, Traverser p, int b, + Fun, ? extends U> transformer, Action action) { + super(m, p, b); + this.transformer = transformer; this.action = action; + } + @SuppressWarnings("unchecked") public final void compute() { + final Fun, ? extends U> transformer; + final Action action; + if ((transformer = this.transformer) != null && + (action = this.action) != null) { + for (int b; (b = preSplit()) > 0;) + new ForEachTransformedEntryTask + (map, this, b, transformer, action).fork(); + Object v; U u; + while ((v = advance()) != null) { + if ((u = transformer.apply(entryFor((K)nextKey, + (V)v))) != null) + action.apply(u); + } + propagateCompletion(); + } + } + } + + @SuppressWarnings("serial") static final class ForEachTransformedMappingTask + extends Traverser { + final BiFun transformer; + final Action action; + ForEachTransformedMappingTask + (ConcurrentHashMapV8 m, Traverser p, int b, + BiFun transformer, + Action action) { + super(m, p, b); + this.transformer = transformer; this.action = action; + } + @SuppressWarnings("unchecked") public final void compute() { + final BiFun transformer; + final Action action; + if ((transformer = this.transformer) != null && + (action = this.action) != null) { + for (int b; (b = preSplit()) > 0;) + new ForEachTransformedMappingTask + (map, this, b, transformer, action).fork(); + Object v; U u; + while ((v = advance()) != null) { + if ((u = transformer.apply((K)nextKey, (V)v)) != null) + action.apply(u); + } + propagateCompletion(); + } + } + } + + @SuppressWarnings("serial") static final class SearchKeysTask + extends Traverser { + final Fun searchFunction; + final AtomicReference result; + SearchKeysTask + (ConcurrentHashMapV8 m, Traverser p, int b, + Fun searchFunction, + AtomicReference result) { + super(m, p, b); + this.searchFunction = searchFunction; this.result = result; + } + public final U getRawResult() { return result.get(); } + @SuppressWarnings("unchecked") public final void compute() { + final Fun searchFunction; + final AtomicReference result; + if ((searchFunction = this.searchFunction) != null && + (result = this.result) != null) { + for (int b;;) { + if (result.get() != null) + return; + if ((b = preSplit()) <= 0) + break; + new SearchKeysTask + (map, this, b, searchFunction, result).fork(); + } + while (result.get() == null) { + U u; + if (advance() == null) { + propagateCompletion(); + break; + } + if ((u = searchFunction.apply((K)nextKey)) != null) { + if (result.compareAndSet(null, u)) + quietlyCompleteRoot(); + break; + } + } + } + } + } + + @SuppressWarnings("serial") static final class SearchValuesTask + extends Traverser { + final Fun searchFunction; + final AtomicReference result; + SearchValuesTask + (ConcurrentHashMapV8 m, Traverser p, int b, + Fun searchFunction, + AtomicReference result) { + super(m, p, b); + this.searchFunction = searchFunction; this.result = result; + } + public final U getRawResult() { return result.get(); } + @SuppressWarnings("unchecked") public 
final void compute() { + final Fun searchFunction; + final AtomicReference result; + if ((searchFunction = this.searchFunction) != null && + (result = this.result) != null) { + for (int b;;) { + if (result.get() != null) + return; + if ((b = preSplit()) <= 0) + break; + new SearchValuesTask + (map, this, b, searchFunction, result).fork(); + } + while (result.get() == null) { + Object v; U u; + if ((v = advance()) == null) { + propagateCompletion(); + break; + } + if ((u = searchFunction.apply((V)v)) != null) { + if (result.compareAndSet(null, u)) + quietlyCompleteRoot(); + break; + } + } + } + } + } + + @SuppressWarnings("serial") static final class SearchEntriesTask + extends Traverser { + final Fun, ? extends U> searchFunction; + final AtomicReference result; + SearchEntriesTask + (ConcurrentHashMapV8 m, Traverser p, int b, + Fun, ? extends U> searchFunction, + AtomicReference result) { + super(m, p, b); + this.searchFunction = searchFunction; this.result = result; + } + public final U getRawResult() { return result.get(); } + @SuppressWarnings("unchecked") public final void compute() { + final Fun, ? extends U> searchFunction; + final AtomicReference result; + if ((searchFunction = this.searchFunction) != null && + (result = this.result) != null) { + for (int b;;) { + if (result.get() != null) + return; + if ((b = preSplit()) <= 0) + break; + new SearchEntriesTask + (map, this, b, searchFunction, result).fork(); + } + while (result.get() == null) { + Object v; U u; + if ((v = advance()) == null) { + propagateCompletion(); + break; + } + if ((u = searchFunction.apply(entryFor((K)nextKey, + (V)v))) != null) { + if (result.compareAndSet(null, u)) + quietlyCompleteRoot(); + return; + } + } + } + } + } + + @SuppressWarnings("serial") static final class SearchMappingsTask + extends Traverser { + final BiFun searchFunction; + final AtomicReference result; + SearchMappingsTask + (ConcurrentHashMapV8 m, Traverser p, int b, + BiFun searchFunction, + AtomicReference result) { + super(m, p, b); + this.searchFunction = searchFunction; this.result = result; + } + public final U getRawResult() { return result.get(); } + @SuppressWarnings("unchecked") public final void compute() { + final BiFun searchFunction; + final AtomicReference result; + if ((searchFunction = this.searchFunction) != null && + (result = this.result) != null) { + for (int b;;) { + if (result.get() != null) + return; + if ((b = preSplit()) <= 0) + break; + new SearchMappingsTask + (map, this, b, searchFunction, result).fork(); + } + while (result.get() == null) { + Object v; U u; + if ((v = advance()) == null) { + propagateCompletion(); + break; + } + if ((u = searchFunction.apply((K)nextKey, (V)v)) != null) { + if (result.compareAndSet(null, u)) + quietlyCompleteRoot(); + break; + } + } + } + } + } + + @SuppressWarnings("serial") static final class ReduceKeysTask + extends Traverser { + final BiFun reducer; + K result; + ReduceKeysTask rights, nextRight; + ReduceKeysTask + (ConcurrentHashMapV8 m, Traverser p, int b, + ReduceKeysTask nextRight, + BiFun reducer) { + super(m, p, b); this.nextRight = nextRight; + this.reducer = reducer; + } + public final K getRawResult() { return result; } + @SuppressWarnings("unchecked") public final void compute() { + final BiFun reducer; + if ((reducer = this.reducer) != null) { + for (int b; (b = preSplit()) > 0;) + (rights = new ReduceKeysTask + (map, this, b, rights, reducer)).fork(); + K r = null; + while (advance() != null) { + K u = (K)nextKey; + r = (r == null) ? 
u : reducer.apply(r, u); + } + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + ReduceKeysTask + t = (ReduceKeysTask)c, + s = t.rights; + while (s != null) { + K tr, sr; + if ((sr = s.result) != null) + t.result = (((tr = t.result) == null) ? sr : + reducer.apply(tr, sr)); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") static final class ReduceValuesTask + extends Traverser { + final BiFun reducer; + V result; + ReduceValuesTask rights, nextRight; + ReduceValuesTask + (ConcurrentHashMapV8 m, Traverser p, int b, + ReduceValuesTask nextRight, + BiFun reducer) { + super(m, p, b); this.nextRight = nextRight; + this.reducer = reducer; + } + public final V getRawResult() { return result; } + @SuppressWarnings("unchecked") public final void compute() { + final BiFun reducer; + if ((reducer = this.reducer) != null) { + for (int b; (b = preSplit()) > 0;) + (rights = new ReduceValuesTask + (map, this, b, rights, reducer)).fork(); + V r = null; + Object v; + while ((v = advance()) != null) { + V u = (V)v; + r = (r == null) ? u : reducer.apply(r, u); + } + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + ReduceValuesTask + t = (ReduceValuesTask)c, + s = t.rights; + while (s != null) { + V tr, sr; + if ((sr = s.result) != null) + t.result = (((tr = t.result) == null) ? sr : + reducer.apply(tr, sr)); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") static final class ReduceEntriesTask + extends Traverser> { + final BiFun, Map.Entry, ? extends Map.Entry> reducer; + Map.Entry result; + ReduceEntriesTask rights, nextRight; + ReduceEntriesTask + (ConcurrentHashMapV8 m, Traverser p, int b, + ReduceEntriesTask nextRight, + BiFun, Map.Entry, ? extends Map.Entry> reducer) { + super(m, p, b); this.nextRight = nextRight; + this.reducer = reducer; + } + public final Map.Entry getRawResult() { return result; } + @SuppressWarnings("unchecked") public final void compute() { + final BiFun, Map.Entry, ? extends Map.Entry> reducer; + if ((reducer = this.reducer) != null) { + for (int b; (b = preSplit()) > 0;) + (rights = new ReduceEntriesTask + (map, this, b, rights, reducer)).fork(); + Map.Entry r = null; + Object v; + while ((v = advance()) != null) { + Map.Entry u = entryFor((K)nextKey, (V)v); + r = (r == null) ? u : reducer.apply(r, u); + } + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + ReduceEntriesTask + t = (ReduceEntriesTask)c, + s = t.rights; + while (s != null) { + Map.Entry tr, sr; + if ((sr = s.result) != null) + t.result = (((tr = t.result) == null) ? 
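The object-reducing tasks keep a chain of the right-hand subtasks they forked (rights/nextRight) and, inside the firstComplete()/nextComplete() loop, fold each completed right subtask's result into their own on the way up the completion tree. The array-summing sketch below reproduces that merge discipline with java.util.concurrent.CountedCompleter; the data and split threshold are invented.

    import java.util.concurrent.CountedCompleter;
    import java.util.concurrent.ForkJoinPool;

    // Each task remembers the subtasks it forked to its right and folds their
    // results into its own in the completion loop, as ReduceKeysTask and
    // ReduceValuesTask do above.
    class ReduceSketch extends CountedCompleter<Long> {
        final long[] data; final int lo, hi;
        long result;
        ReduceSketch rights, nextRight;
        ReduceSketch(CountedCompleter<?> p, long[] data, int lo, int hi,
                     ReduceSketch nextRight) {
            super(p);
            this.data = data; this.lo = lo; this.hi = hi; this.nextRight = nextRight;
        }
        public Long getRawResult() { return result; }
        public void compute() {
            int l = lo, h = hi;
            while (h - l > 2) {
                int mid = (l + h) >>> 1;
                addToPendingCount(1);
                (rights = new ReduceSketch(this, data, mid, h, rights)).fork();
                h = mid;
            }
            long r = 0L;                              // local reduction
            for (int i = l; i < h; ++i)
                r += data[i];
            result = r;
            // Merge completed right-hand subtasks on the way up the tree.
            for (CountedCompleter<?> c = firstComplete(); c != null; c = c.nextComplete()) {
                ReduceSketch t = (ReduceSketch) c;
                for (ReduceSketch s = t.rights; s != null; s = t.rights = s.nextRight)
                    t.result += s.result;
            }
        }
        public static void main(String[] args) {
            long[] a = {1, 2, 3, 4, 5, 6, 7, 8};
            System.out.println(ForkJoinPool.commonPool().invoke(
                new ReduceSketch(null, a, 0, a.length, null))); // 36
        }
    }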
sr : + reducer.apply(tr, sr)); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") static final class MapReduceKeysTask + extends Traverser { + final Fun transformer; + final BiFun reducer; + U result; + MapReduceKeysTask rights, nextRight; + MapReduceKeysTask + (ConcurrentHashMapV8 m, Traverser p, int b, + MapReduceKeysTask nextRight, + Fun transformer, + BiFun reducer) { + super(m, p, b); this.nextRight = nextRight; + this.transformer = transformer; + this.reducer = reducer; + } + public final U getRawResult() { return result; } + @SuppressWarnings("unchecked") public final void compute() { + final Fun transformer; + final BiFun reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + for (int b; (b = preSplit()) > 0;) + (rights = new MapReduceKeysTask + (map, this, b, rights, transformer, reducer)).fork(); + U r = null, u; + while (advance() != null) { + if ((u = transformer.apply((K)nextKey)) != null) + r = (r == null) ? u : reducer.apply(r, u); + } + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceKeysTask + t = (MapReduceKeysTask)c, + s = t.rights; + while (s != null) { + U tr, sr; + if ((sr = s.result) != null) + t.result = (((tr = t.result) == null) ? sr : + reducer.apply(tr, sr)); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") static final class MapReduceValuesTask + extends Traverser { + final Fun transformer; + final BiFun reducer; + U result; + MapReduceValuesTask rights, nextRight; + MapReduceValuesTask + (ConcurrentHashMapV8 m, Traverser p, int b, + MapReduceValuesTask nextRight, + Fun transformer, + BiFun reducer) { + super(m, p, b); this.nextRight = nextRight; + this.transformer = transformer; + this.reducer = reducer; + } + public final U getRawResult() { return result; } + @SuppressWarnings("unchecked") public final void compute() { + final Fun transformer; + final BiFun reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + for (int b; (b = preSplit()) > 0;) + (rights = new MapReduceValuesTask + (map, this, b, rights, transformer, reducer)).fork(); + U r = null, u; + Object v; + while ((v = advance()) != null) { + if ((u = transformer.apply((V)v)) != null) + r = (r == null) ? u : reducer.apply(r, u); + } + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceValuesTask + t = (MapReduceValuesTask)c, + s = t.rights; + while (s != null) { + U tr, sr; + if ((sr = s.result) != null) + t.result = (((tr = t.result) == null) ? sr : + reducer.apply(tr, sr)); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") static final class MapReduceEntriesTask + extends Traverser { + final Fun, ? extends U> transformer; + final BiFun reducer; + U result; + MapReduceEntriesTask rights, nextRight; + MapReduceEntriesTask + (ConcurrentHashMapV8 m, Traverser p, int b, + MapReduceEntriesTask nextRight, + Fun, ? extends U> transformer, + BiFun reducer) { + super(m, p, b); this.nextRight = nextRight; + this.transformer = transformer; + this.reducer = reducer; + } + public final U getRawResult() { return result; } + @SuppressWarnings("unchecked") public final void compute() { + final Fun, ? 
extends U> transformer; + final BiFun reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + for (int b; (b = preSplit()) > 0;) + (rights = new MapReduceEntriesTask + (map, this, b, rights, transformer, reducer)).fork(); + U r = null, u; + Object v; + while ((v = advance()) != null) { + if ((u = transformer.apply(entryFor((K)nextKey, + (V)v))) != null) + r = (r == null) ? u : reducer.apply(r, u); + } + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceEntriesTask + t = (MapReduceEntriesTask)c, + s = t.rights; + while (s != null) { + U tr, sr; + if ((sr = s.result) != null) + t.result = (((tr = t.result) == null) ? sr : + reducer.apply(tr, sr)); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") static final class MapReduceMappingsTask + extends Traverser { + final BiFun transformer; + final BiFun reducer; + U result; + MapReduceMappingsTask rights, nextRight; + MapReduceMappingsTask + (ConcurrentHashMapV8 m, Traverser p, int b, + MapReduceMappingsTask nextRight, + BiFun transformer, + BiFun reducer) { + super(m, p, b); this.nextRight = nextRight; + this.transformer = transformer; + this.reducer = reducer; + } + public final U getRawResult() { return result; } + @SuppressWarnings("unchecked") public final void compute() { + final BiFun transformer; + final BiFun reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + for (int b; (b = preSplit()) > 0;) + (rights = new MapReduceMappingsTask + (map, this, b, rights, transformer, reducer)).fork(); + U r = null, u; + Object v; + while ((v = advance()) != null) { + if ((u = transformer.apply((K)nextKey, (V)v)) != null) + r = (r == null) ? u : reducer.apply(r, u); + } + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceMappingsTask + t = (MapReduceMappingsTask)c, + s = t.rights; + while (s != null) { + U tr, sr; + if ((sr = s.result) != null) + t.result = (((tr = t.result) == null) ? 
sr : + reducer.apply(tr, sr)); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") static final class MapReduceKeysToDoubleTask + extends Traverser { + final ObjectToDouble transformer; + final DoubleByDoubleToDouble reducer; + final double basis; + double result; + MapReduceKeysToDoubleTask rights, nextRight; + MapReduceKeysToDoubleTask + (ConcurrentHashMapV8 m, Traverser p, int b, + MapReduceKeysToDoubleTask nextRight, + ObjectToDouble transformer, + double basis, + DoubleByDoubleToDouble reducer) { + super(m, p, b); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Double getRawResult() { return result; } + @SuppressWarnings("unchecked") public final void compute() { + final ObjectToDouble transformer; + final DoubleByDoubleToDouble reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + double r = this.basis; + for (int b; (b = preSplit()) > 0;) + (rights = new MapReduceKeysToDoubleTask + (map, this, b, rights, transformer, r, reducer)).fork(); + while (advance() != null) + r = reducer.apply(r, transformer.apply((K)nextKey)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceKeysToDoubleTask + t = (MapReduceKeysToDoubleTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.apply(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") static final class MapReduceValuesToDoubleTask + extends Traverser { + final ObjectToDouble transformer; + final DoubleByDoubleToDouble reducer; + final double basis; + double result; + MapReduceValuesToDoubleTask rights, nextRight; + MapReduceValuesToDoubleTask + (ConcurrentHashMapV8 m, Traverser p, int b, + MapReduceValuesToDoubleTask nextRight, + ObjectToDouble transformer, + double basis, + DoubleByDoubleToDouble reducer) { + super(m, p, b); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Double getRawResult() { return result; } + @SuppressWarnings("unchecked") public final void compute() { + final ObjectToDouble transformer; + final DoubleByDoubleToDouble reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + double r = this.basis; + for (int b; (b = preSplit()) > 0;) + (rights = new MapReduceValuesToDoubleTask + (map, this, b, rights, transformer, r, reducer)).fork(); + Object v; + while ((v = advance()) != null) + r = reducer.apply(r, transformer.apply((V)v)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceValuesToDoubleTask + t = (MapReduceValuesToDoubleTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.apply(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") static final class MapReduceEntriesToDoubleTask + extends Traverser { + final ObjectToDouble> transformer; + final DoubleByDoubleToDouble reducer; + final double basis; + double result; + MapReduceEntriesToDoubleTask rights, nextRight; + MapReduceEntriesToDoubleTask + (ConcurrentHashMapV8 m, Traverser p, int b, + MapReduceEntriesToDoubleTask nextRight, + ObjectToDouble> transformer, + double basis, + DoubleByDoubleToDouble reducer) { + super(m, p, b); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final 
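For the primitive-valued reductions, note that every forked subtask starts from the supplied basis (the forks happen before any local accumulation), so the basis must be an identity for the reducer, for example 0 for addition; a non-identity basis would be folded in once per subtask. A usage sketch of the value-to-double variant, under the same assumptions about the ForkJoinTasks nested class and the callback interfaces as in the earlier examples:

    import jsr166e.ConcurrentHashMapV8;

    public class ReduceValuesToDoubleExample {
        public static void main(String[] args) {
            ConcurrentHashMapV8<String, Double> prices =
                new ConcurrentHashMapV8<String, Double>();
            prices.put("x", 1.25);
            prices.put("y", 2.50);

            // basis 0.0 is the identity for +; every parallel subtask starts
            // from the basis, so a non-identity basis would be over-counted.
            double total = ConcurrentHashMapV8.ForkJoinTasks.reduceValuesToDouble(
                prices,
                new ConcurrentHashMapV8.ObjectToDouble<Double>() {
                    public double apply(Double v) { return v.doubleValue(); }
                },
                0.0,
                new ConcurrentHashMapV8.DoubleByDoubleToDouble() {
                    public double apply(double a, double b) { return a + b; }
                }).invoke();

            System.out.println(total); // 3.75
        }
    }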
Double getRawResult() { return result; } + @SuppressWarnings("unchecked") public final void compute() { + final ObjectToDouble> transformer; + final DoubleByDoubleToDouble reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + double r = this.basis; + for (int b; (b = preSplit()) > 0;) + (rights = new MapReduceEntriesToDoubleTask + (map, this, b, rights, transformer, r, reducer)).fork(); + Object v; + while ((v = advance()) != null) + r = reducer.apply(r, transformer.apply(entryFor((K)nextKey, + (V)v))); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceEntriesToDoubleTask + t = (MapReduceEntriesToDoubleTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.apply(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") static final class MapReduceMappingsToDoubleTask + extends Traverser { + final ObjectByObjectToDouble transformer; + final DoubleByDoubleToDouble reducer; + final double basis; + double result; + MapReduceMappingsToDoubleTask rights, nextRight; + MapReduceMappingsToDoubleTask + (ConcurrentHashMapV8 m, Traverser p, int b, + MapReduceMappingsToDoubleTask nextRight, + ObjectByObjectToDouble transformer, + double basis, + DoubleByDoubleToDouble reducer) { + super(m, p, b); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Double getRawResult() { return result; } + @SuppressWarnings("unchecked") public final void compute() { + final ObjectByObjectToDouble transformer; + final DoubleByDoubleToDouble reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + double r = this.basis; + for (int b; (b = preSplit()) > 0;) + (rights = new MapReduceMappingsToDoubleTask + (map, this, b, rights, transformer, r, reducer)).fork(); + Object v; + while ((v = advance()) != null) + r = reducer.apply(r, transformer.apply((K)nextKey, (V)v)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceMappingsToDoubleTask + t = (MapReduceMappingsToDoubleTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.apply(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") static final class MapReduceKeysToLongTask + extends Traverser { + final ObjectToLong transformer; + final LongByLongToLong reducer; + final long basis; + long result; + MapReduceKeysToLongTask rights, nextRight; + MapReduceKeysToLongTask + (ConcurrentHashMapV8 m, Traverser p, int b, + MapReduceKeysToLongTask nextRight, + ObjectToLong transformer, + long basis, + LongByLongToLong reducer) { + super(m, p, b); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Long getRawResult() { return result; } + @SuppressWarnings("unchecked") public final void compute() { + final ObjectToLong transformer; + final LongByLongToLong reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + long r = this.basis; + for (int b; (b = preSplit()) > 0;) + (rights = new MapReduceKeysToLongTask + (map, this, b, rights, transformer, r, reducer)).fork(); + while (advance() != null) + r = reducer.apply(r, transformer.apply((K)nextKey)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceKeysToLongTask + t = 
(MapReduceKeysToLongTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.apply(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") static final class MapReduceValuesToLongTask + extends Traverser { + final ObjectToLong transformer; + final LongByLongToLong reducer; + final long basis; + long result; + MapReduceValuesToLongTask rights, nextRight; + MapReduceValuesToLongTask + (ConcurrentHashMapV8 m, Traverser p, int b, + MapReduceValuesToLongTask nextRight, + ObjectToLong transformer, + long basis, + LongByLongToLong reducer) { + super(m, p, b); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Long getRawResult() { return result; } + @SuppressWarnings("unchecked") public final void compute() { + final ObjectToLong transformer; + final LongByLongToLong reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + long r = this.basis; + for (int b; (b = preSplit()) > 0;) + (rights = new MapReduceValuesToLongTask + (map, this, b, rights, transformer, r, reducer)).fork(); + Object v; + while ((v = advance()) != null) + r = reducer.apply(r, transformer.apply((V)v)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceValuesToLongTask + t = (MapReduceValuesToLongTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.apply(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") static final class MapReduceEntriesToLongTask + extends Traverser { + final ObjectToLong> transformer; + final LongByLongToLong reducer; + final long basis; + long result; + MapReduceEntriesToLongTask rights, nextRight; + MapReduceEntriesToLongTask + (ConcurrentHashMapV8 m, Traverser p, int b, + MapReduceEntriesToLongTask nextRight, + ObjectToLong> transformer, + long basis, + LongByLongToLong reducer) { + super(m, p, b); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Long getRawResult() { return result; } + @SuppressWarnings("unchecked") public final void compute() { + final ObjectToLong> transformer; + final LongByLongToLong reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + long r = this.basis; + for (int b; (b = preSplit()) > 0;) + (rights = new MapReduceEntriesToLongTask + (map, this, b, rights, transformer, r, reducer)).fork(); + Object v; + while ((v = advance()) != null) + r = reducer.apply(r, transformer.apply(entryFor((K)nextKey, + (V)v))); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceEntriesToLongTask + t = (MapReduceEntriesToLongTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.apply(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") static final class MapReduceMappingsToLongTask + extends Traverser { + final ObjectByObjectToLong transformer; + final LongByLongToLong reducer; + final long basis; + long result; + MapReduceMappingsToLongTask rights, nextRight; + MapReduceMappingsToLongTask + (ConcurrentHashMapV8 m, Traverser p, int b, + MapReduceMappingsToLongTask nextRight, + ObjectByObjectToLong transformer, + long basis, + LongByLongToLong reducer) { + super(m, p, b); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer 
= reducer; + } + public final Long getRawResult() { return result; } + @SuppressWarnings("unchecked") public final void compute() { + final ObjectByObjectToLong transformer; + final LongByLongToLong reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + long r = this.basis; + for (int b; (b = preSplit()) > 0;) + (rights = new MapReduceMappingsToLongTask + (map, this, b, rights, transformer, r, reducer)).fork(); + Object v; + while ((v = advance()) != null) + r = reducer.apply(r, transformer.apply((K)nextKey, (V)v)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceMappingsToLongTask + t = (MapReduceMappingsToLongTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.apply(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") static final class MapReduceKeysToIntTask + extends Traverser { + final ObjectToInt transformer; + final IntByIntToInt reducer; + final int basis; + int result; + MapReduceKeysToIntTask rights, nextRight; + MapReduceKeysToIntTask + (ConcurrentHashMapV8 m, Traverser p, int b, + MapReduceKeysToIntTask nextRight, + ObjectToInt transformer, + int basis, + IntByIntToInt reducer) { + super(m, p, b); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Integer getRawResult() { return result; } + @SuppressWarnings("unchecked") public final void compute() { + final ObjectToInt transformer; + final IntByIntToInt reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + int r = this.basis; + for (int b; (b = preSplit()) > 0;) + (rights = new MapReduceKeysToIntTask + (map, this, b, rights, transformer, r, reducer)).fork(); + while (advance() != null) + r = reducer.apply(r, transformer.apply((K)nextKey)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceKeysToIntTask + t = (MapReduceKeysToIntTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.apply(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") static final class MapReduceValuesToIntTask + extends Traverser { + final ObjectToInt transformer; + final IntByIntToInt reducer; + final int basis; + int result; + MapReduceValuesToIntTask rights, nextRight; + MapReduceValuesToIntTask + (ConcurrentHashMapV8 m, Traverser p, int b, + MapReduceValuesToIntTask nextRight, + ObjectToInt transformer, + int basis, + IntByIntToInt reducer) { + super(m, p, b); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Integer getRawResult() { return result; } + @SuppressWarnings("unchecked") public final void compute() { + final ObjectToInt transformer; + final IntByIntToInt reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + int r = this.basis; + for (int b; (b = preSplit()) > 0;) + (rights = new MapReduceValuesToIntTask + (map, this, b, rights, transformer, r, reducer)).fork(); + Object v; + while ((v = advance()) != null) + r = reducer.apply(r, transformer.apply((V)v)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceValuesToIntTask + t = (MapReduceValuesToIntTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.apply(t.result, s.result); + s = t.rights = 
s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") static final class MapReduceEntriesToIntTask + extends Traverser { + final ObjectToInt> transformer; + final IntByIntToInt reducer; + final int basis; + int result; + MapReduceEntriesToIntTask rights, nextRight; + MapReduceEntriesToIntTask + (ConcurrentHashMapV8 m, Traverser p, int b, + MapReduceEntriesToIntTask nextRight, + ObjectToInt> transformer, + int basis, + IntByIntToInt reducer) { + super(m, p, b); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Integer getRawResult() { return result; } + @SuppressWarnings("unchecked") public final void compute() { + final ObjectToInt> transformer; + final IntByIntToInt reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + int r = this.basis; + for (int b; (b = preSplit()) > 0;) + (rights = new MapReduceEntriesToIntTask + (map, this, b, rights, transformer, r, reducer)).fork(); + Object v; + while ((v = advance()) != null) + r = reducer.apply(r, transformer.apply(entryFor((K)nextKey, + (V)v))); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceEntriesToIntTask + t = (MapReduceEntriesToIntTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.apply(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + @SuppressWarnings("serial") static final class MapReduceMappingsToIntTask + extends Traverser { + final ObjectByObjectToInt transformer; + final IntByIntToInt reducer; + final int basis; + int result; + MapReduceMappingsToIntTask rights, nextRight; + MapReduceMappingsToIntTask + (ConcurrentHashMapV8 m, Traverser p, int b, + MapReduceMappingsToIntTask nextRight, + ObjectByObjectToInt transformer, + int basis, + IntByIntToInt reducer) { + super(m, p, b); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Integer getRawResult() { return result; } + @SuppressWarnings("unchecked") public final void compute() { + final ObjectByObjectToInt transformer; + final IntByIntToInt reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + int r = this.basis; + for (int b; (b = preSplit()) > 0;) + (rights = new MapReduceMappingsToIntTask + (map, this, b, rights, transformer, r, reducer)).fork(); + Object v; + while ((v = advance()) != null) + r = reducer.apply(r, transformer.apply((K)nextKey, (V)v)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceMappingsToIntTask + t = (MapReduceMappingsToIntTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.apply(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + // Unsafe mechanics - private static final sun.misc.Unsafe UNSAFE; - private static final long counterOffset; - private static final long resizingOffset; + private static final sun.misc.Unsafe U; + private static final long SIZECTL; + private static final long TRANSFERINDEX; + private static final long TRANSFERORIGIN; + private static final long BASECOUNT; + private static final long COUNTERBUSY; + private static final long CELLVALUE; private static final long ABASE; private static final int ASHIFT; static { int ss; try { - UNSAFE = getUnsafe(); + U = getUnsafe(); Class k = ConcurrentHashMapV8.class; - counterOffset = UNSAFE.objectFieldOffset - (k.getDeclaredField("counter")); - 
resizingOffset = UNSAFE.objectFieldOffset - (k.getDeclaredField("resizing")); + SIZECTL = U.objectFieldOffset + (k.getDeclaredField("sizeCtl")); + TRANSFERINDEX = U.objectFieldOffset + (k.getDeclaredField("transferIndex")); + TRANSFERORIGIN = U.objectFieldOffset + (k.getDeclaredField("transferOrigin")); + BASECOUNT = U.objectFieldOffset + (k.getDeclaredField("baseCount")); + COUNTERBUSY = U.objectFieldOffset + (k.getDeclaredField("counterBusy")); + Class ck = CounterCell.class; + CELLVALUE = U.objectFieldOffset + (ck.getDeclaredField("value")); Class sc = Node[].class; - ABASE = UNSAFE.arrayBaseOffset(sc); - ss = UNSAFE.arrayIndexScale(sc); + ABASE = U.arrayBaseOffset(sc); + ss = U.arrayIndexScale(sc); + ASHIFT = 31 - Integer.numberOfLeadingZeros(ss); } catch (Exception e) { throw new Error(e); } if ((ss & (ss-1)) != 0) throw new Error("data type scale not a power of two"); - ASHIFT = 31 - Integer.numberOfLeadingZeros(ss); } /** @@ -1733,5 +6505,4 @@ public class ConcurrentHashMapV8 } } } - }
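The ABASE and ASHIFT constants computed in the static block above exist so that individual array slots can be read and CASed through Unsafe at offset ((long)i << ASHIFT) + ABASE. The sketch below shows that addressing pattern on a plain Object[] rather than this file's Node[]; it obtains Unsafe by reflecting on the theUnsafe field, which is a simplification of the getUnsafe() helper the file itself uses.

    import java.lang.reflect.Field;

    import sun.misc.Unsafe;

    // Illustrates how ABASE/ASHIFT-style constants give per-slot volatile and
    // CAS access to an array; plain Object[] is used instead of Node[].
    final class VolatileArraySketch {
        private static final Unsafe U;
        private static final long ABASE;
        private static final int ASHIFT;
        static {
            try {
                Field f = Unsafe.class.getDeclaredField("theUnsafe");
                f.setAccessible(true);
                U = (Unsafe) f.get(null);
                Class<?> ac = Object[].class;
                ABASE = U.arrayBaseOffset(ac);
                int scale = U.arrayIndexScale(ac);
                if ((scale & (scale - 1)) != 0)
                    throw new Error("data type scale not a power of two");
                ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
            } catch (Exception e) {
                throw new Error(e);
            }
        }
        static Object slotAtVolatile(Object[] a, int i) {
            return U.getObjectVolatile(a, ((long) i << ASHIFT) + ABASE);
        }
        static boolean casSlotAt(Object[] a, int i, Object expect, Object update) {
            return U.compareAndSwapObject(a, ((long) i << ASHIFT) + ABASE, expect, update);
        }
    }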