--- jsr166/src/jsr166e/ConcurrentHashMapV8.java 2011/08/29 17:06:20 1.2
+++ jsr166/src/jsr166e/ConcurrentHashMapV8.java 2012/07/03 23:25:57 1.41
@@ -6,6 +6,7 @@
package jsr166e;
import jsr166e.LongAdder;
+import java.util.Arrays;
import java.util.Map;
import java.util.Set;
import java.util.Collection;
@@ -19,6 +20,9 @@ import java.util.Enumeration;
import java.util.ConcurrentModificationException;
import java.util.NoSuchElementException;
import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.locks.LockSupport;
+import java.util.concurrent.locks.AbstractQueuedSynchronizer;
import java.io.Serializable;
/**
@@ -49,14 +53,28 @@ import java.io.Serializable;
* are typically useful only when a map is not undergoing concurrent
* updates in other threads. Otherwise the results of these methods
* reflect transient states that may be adequate for monitoring
- * purposes, but not for program control.
+ * or estimation purposes, but not for program control.
*
- * <p>Resizing this or any other kind of hash table is a relatively
- * slow operation, so, when possible, it is a good idea to provide
- * estimates of expected table sizes in constructors. Also, for
- * compatability with previous versions of this class, constructors
- * may optionally specify an expected {@code concurrencyLevel} as an
- * additional hint for internal sizing.
+ * <p>The table is dynamically expanded when there are too many
+ * collisions (i.e., keys that have distinct hash codes but fall into
+ * the same slot modulo the table size), with the expected average
+ * effect of maintaining roughly two bins per mapping (corresponding
+ * to a 0.75 load factor threshold for resizing). There may be much
+ * variance around this average as mappings are added and removed, but
+ * overall, this maintains a commonly accepted time/space tradeoff for
+ * hash tables. However, resizing this or any other kind of hash
+ * table may be a relatively slow operation. When possible, it is a
+ * good idea to provide a size estimate as an optional {@code
+ * initialCapacity} constructor argument. An additional optional
+ * {@code loadFactor} constructor argument provides a further means of
+ * customizing initial table capacity by specifying the table density
+ * to be used in calculating the amount of space to allocate for the
+ * given number of elements. Also, for compatibility with previous
+ * versions of this class, constructors may optionally specify an
+ * expected {@code concurrencyLevel} as an additional hint for
+ * internal sizing. Note that using many keys with exactly the same
+ * {@code hashCode()} is a sure way to slow down performance of any
+ * hash table.
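A minimal sketch of the sizing hints described above (illustration only; the map name and size estimate are hypothetical, and the optional loadFactor/concurrencyLevel arguments are simply passed through as hints):

    import jsr166e.ConcurrentHashMapV8;

    class SizingSketch {
        public static void main(String[] args) {
            int expectedMappings = 1000000;  // assumed application-level estimate
            // Optional initialCapacity, loadFactor and concurrencyLevel hints,
            // per the constructor description above.
            ConcurrentHashMapV8<String, Long> counts =
                new ConcurrentHashMapV8<String, Long>(expectedMappings, 0.75f, 16);
            counts.put("requests", 1L);
            System.out.println(counts.get("requests"));
        }
    }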
*
 * <p>This class and its views and iterators implement all of the
* optional methods of the {@link Map} and {@link Iterator}
@@ -82,216 +100,444 @@ public class ConcurrentHashMapV8
private static final long serialVersionUID = 7249069246763182397L;
/**
- * A function computing a mapping from the given key to a value,
- * or null if there is no mapping. This is a
- * place-holder for an upcoming JDK8 interface.
+ * A function computing a mapping from the given key to a value.
+ * This is a place-holder for an upcoming JDK8 interface.
*/
public static interface MappingFunction<K, V> {
/**
- * Returns a value for the given key, or null if there is no
- * mapping. If this function throws an (unchecked) exception,
- * the exception is rethrown to its caller, and no mapping is
- * recorded. Because this function is invoked within
- * atomicity control, the computation should be short and
- * simple. The most common usage is to construct a new object
- * serving as an initial mapped value.
+ * Returns a value for the given key, or null if there is no mapping
*
* @param key the (non-null) key
- * @return a value, or null if none
+ * @return a value for the key, or null if none
*/
V map(K key);
}
+ /**
+ * A function computing a new mapping given a key and its current
+ * mapped value (or {@code null} if there is no current
+ * mapping). This is a place-holder for an upcoming JDK8
+ * interface.
+ */
+ public static interface RemappingFunction<K, V> {
+ /**
+ * Returns a new value given a key and its current value.
+ *
+ * @param key the (non-null) key
+ * @param value the current value, or null if there is no mapping
+ * @return a value for the key, or null if none
+ */
+ V remap(K key, V value);
+ }
+
+ /**
+ * A partitionable iterator. A Spliterator can be traversed
+ * directly, but can also be partitioned (before traversal) by
+ * creating another Spliterator that covers a non-overlapping
+ * portion of the elements, and so may be amenable to parallel
+ * execution.
+ *
+ * This interface exports a subset of expected JDK8
+ * functionality.
+ *
+ * <p>Sample usage: Here is one (of the several) ways to compute
+ * the sum of the values held in a map using the ForkJoin
+ * framework. As illustrated here, Spliterators are well suited to
+ * designs in which a task repeatedly splits off half its work
+ * into forked subtasks until small enough to process directly,
+ * and then joins these subtasks. Variants of this style can
+ * also be used in completion-based designs.
+ *
+ * <pre>
+ * {@code ConcurrentHashMapV8<String, Long> m = ...
+ * // Uses parallel depth of log2 of size / (parallelism * slack of 8).
+ * int depth = 32 - Integer.numberOfLeadingZeros(m.size() / (aForkJoinPool.getParallelism() * 8));
+ * long sum = aForkJoinPool.invoke(new SumValues(m.valueSpliterator(), depth, null));
+ * // ...
+ * static class SumValues extends RecursiveTask<Long> {
+ * final Spliterator<Long> s;
+ * final int depth; // number of splits before processing
+ * final SumValues nextJoin; // records forked subtasks to join
+ * SumValues(Spliterator<Long> s, int depth, SumValues nextJoin) {
+ * this.s = s; this.depth = depth; this.nextJoin = nextJoin;
+ * }
+ * public Long compute() {
+ * long sum = 0;
+ * SumValues subtasks = null; // fork subtasks
+ * for (int d = depth - 1; d >= 0; --d)
+ * (subtasks = new SumValues(s.split(), d, subtasks)).fork();
+ * while (s.hasNext()) // directly process remaining elements
+ * sum += s.next();
+ * for (SumValues t = subtasks; t != null; t = t.nextJoin)
+ * sum += t.join(); // collect subtask results
+ * return sum;
+ * }
+ * }
+ * }</pre>
+ */
+ public static interface Spliterator<T> extends Iterator<T> {
+ /**
+ * Returns a Spliterator covering approximately half of the
+ * elements, guaranteed not to overlap with those subsequently
+ * returned by this Spliterator. After invoking this method,
+ * the current Spliterator will not produce any of
+ * the elements of the returned Spliterator, but the two
+ * Spliterators together will produce all of the elements that
+ * would have been produced by this Spliterator had this
+ * method not been called. The exact number of elements
+ * produced by the returned Spliterator is not guaranteed, and
+ * may be zero (i.e., with {@code hasNext()} reporting {@code
+ * false}) if this Spliterator cannot be further split.
+ *
+ * @return a Spliterator covering approximately half of the
+ * elements
+ * @throws IllegalStateException if this Spliterator has
+ * already commenced traversing elements.
+ */
+ Spliterator<T> split();
+
+ /**
+ * Returns a Spliterator producing the same elements as this
+ * Spliterator. This method may be used for example to create
+ * a second Spliterator before a traversal, in order to later
+ * perform a second traversal.
+ *
+ * @return a Spliterator covering the same range as this Spliterator.
+ * @throws IllegalStateException if this Spliterator has
+ * already commenced traversing elements.
+ */
+ Spliterator<T> clone();
+ }
+
/*
* Overview:
*
* The primary design goal of this hash table is to maintain
* concurrent readability (typically method get(), but also
* iterators and related methods) while minimizing update
- * contention.
+ * contention. Secondary goals are to keep space consumption about
+ * the same or better than java.util.HashMap, and to support high
+ * initial insertion rates on an empty table by many threads.
*
* Each key-value mapping is held in a Node. Because Node fields
* can contain special values, they are defined using plain Object
* types. Similarly in turn, all internal methods that use them
- * work off Object types. All public generic-typed methods relay
- * in/out of these internal methods, supplying casts as needed.
+ * work off Object types. And similarly, so do the internal
+ * methods of auxiliary iterator and view classes. All public
+ * generic typed methods relay in/out of these internal methods,
+ * supplying null-checks and casts as needed. This also allows
+ * many of the public methods to be factored into a smaller number
+ * of internal methods (although sadly not so for the five
+ * variants of put-related operations). The validation-based
+ * approach explained below leads to a lot of code sprawl because
+ * retry-control precludes factoring into smaller methods.
*
* The table is lazily initialized to a power-of-two size upon the
- * first insertion. Each bin in the table contains a (typically
- * short) list of Nodes. Table accesses require volatile/atomic
- * reads, writes, and CASes. Because there is no other way to
- * arrange this without adding further indirections, we use
- * intrinsics (sun.misc.Unsafe) operations. The lists of nodes
- * within bins are always accurately traversable under volatile
- * reads, so long as lookups check hash code and non-nullness of
- * key and value before checking key equality. (All valid hash
- * codes are nonnegative. Negative values are reserved for special
- * forwarding nodes; see below.)
- *
- * A bin may be locked during update (insert, delete, and replace)
- * operations. We do not want to waste the space required to
- * associate a distinct lock object with each bin, so instead use
- * the first node of a bin list itself as a lock, using builtin
- * "synchronized" locks. These save space and we can live with
- * only plain block-structured lock/unlock operations. Using the
- * first node of a list as a lock does not by itself suffice
- * though: When a node is locked, any update must first validate
- * that it is still the first node, and retry if not. (Because new
- * nodes are always appended to lists, once a node is first in a
- * bin, it remains first until deleted or the bin becomes
- * invalidated.) However, update operations can and usually do
- * still traverse the bin until the point of update, which helps
- * reduce cache misses on retries. This is a converse of sorts to
- * the lazy locking technique described by Herlihy & Shavit. If
- * there is no existing node during a put operation, then one can
- * be CAS'ed in (without need for lock except in computeIfAbsent);
- * the CAS serves as validation. This is on average the most
- * common case for put operations. The expected number of locks
- * covering different elements (i.e., bins with 2 or more nodes)
- * is approximately 10% at steady state under default settings.
- * Lock contention probability for two threads accessing arbitrary
- * distinct elements is thus less than 1% even for small tables.
- *
- * The table is resized when occupancy exceeds a threshold. Only
- * a single thread performs the resize (using field "resizing", to
- * arrange exclusion), but the table otherwise remains usable for
- * both reads and updates. Resizing proceeds by transferring bins,
- * one by one, from the table to the next table. Upon transfer,
- * the old table bin contains only a special forwarding node (with
- * negative hash code ("MOVED")) that contains the next table as
+ * first insertion. Each bin in the table normally contains a
+ * list of Nodes (most often, the list has only zero or one Node).
+ * Table accesses require volatile/atomic reads, writes, and
+ * CASes. Because there is no other way to arrange this without
+ * adding further indirections, we use intrinsics
+ * (sun.misc.Unsafe) operations. The lists of nodes within bins
+ * are always accurately traversable under volatile reads, so long
+ * as lookups check hash code and non-nullness of value before
+ * checking key equality.
+ *
+ * We use the top two bits of Node hash fields for control
+ * purposes -- they are available anyway because of addressing
+ * constraints. As explained further below, these top bits are
+ * used as follows:
+ * 00 - Normal
+ * 01 - Locked
+ * 11 - Locked and may have a thread waiting for lock
+ * 10 - Node is a forwarding node
+ *
+ * The lower 30 bits of each Node's hash field contain a
+ * transformation of the key's hash code, except for forwarding
+ * nodes, for which the lower bits are zero (and so always have
+ * hash field == MOVED).
+ *
+ * Insertion (via put or its variants) of the first node in an
+ * empty bin is performed by just CASing it to the bin. This is
+ * by far the most common case for put operations under most
+ * key/hash distributions. Other update operations (insert,
+ * delete, and replace) require locks. We do not want to waste
+ * the space required to associate a distinct lock object with
+ * each bin, so instead use the first node of a bin list itself as
+ * a lock. Blocking support for these locks relies on the builtin
+ * "synchronized" monitors. However, we also need a tryLock
+ * construction, so we overlay these by using bits of the Node
+ * hash field for lock control (see above), and so normally use
+ * builtin monitors only for blocking and signalling using
+ * wait/notifyAll constructions. See Node.tryAwaitLock.
+ *
+ * Using the first node of a list as a lock does not by itself
+ * suffice though: When a node is locked, any update must first
+ * validate that it is still the first node after locking it, and
+ * retry if not. Because new nodes are always appended to lists,
+ * once a node is first in a bin, it remains first until deleted
+ * or the bin becomes invalidated (upon resizing). However,
+ * operations that only conditionally update may inspect nodes
+ * until the point of update. This is a converse of sorts to the
+ * lazy locking technique described by Herlihy & Shavit.
+ *
+ * The main disadvantage of per-bin locks is that other update
+ * operations on other nodes in a bin list protected by the same
+ * lock can stall, for example when user equals() or mapping
+ * functions take a long time. However, statistically, under
+ * random hash codes, this is not a common problem. Ideally, the
+ * frequency of nodes in bins follows a Poisson distribution
+ * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
+ * parameter of about 0.5 on average, given the resizing threshold
+ * of 0.75, although with a large variance because of resizing
+ * granularity. Ignoring variance, the expected occurrences of
+ * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
+ * first values are:
+ *
+ * 0: 0.60653066
+ * 1: 0.30326533
+ * 2: 0.07581633
+ * 3: 0.01263606
+ * 4: 0.00157952
+ * 5: 0.00015795
+ * 6: 0.00001316
+ * 7: 0.00000094
+ * 8: 0.00000006
+ * more: less than 1 in ten million
+ *
+ * Lock contention probability for two threads accessing distinct
+ * elements is roughly 1 / (8 * #elements) under random hashes.
+ *
+ * Actual hash code distributions encountered in practice
+ * sometimes deviate significantly from uniform randomness. This
+ * includes the case when N > (1<<30), so some keys MUST collide.
+ * Similarly for dumb or hostile usages in which multiple keys are
+ * designed to have identical hash codes. Also, although we guard
+ * against the worst effects of this (see method spread), sets of
+ * hashes may differ only in bits that do not impact their bin
+ * index for a given power-of-two mask. So we use a secondary
+ * strategy that applies when the number of nodes in a bin exceeds
+ * a threshold, and at least one of the keys implements
+ * Comparable. These TreeBins use a balanced tree to hold nodes
+ * (a specialized form of red-black trees), bounding search time
+ * to O(log N). Each search step in a TreeBin is around twice as
+ * slow as in a regular list, but given that N cannot exceed
+ * (1<<64) (before running out of addresses) this bounds search
+ * steps, lock hold times, etc, to reasonable constants (roughly
+ * 100 nodes inspected per operation worst case) so long as keys
+ * are Comparable (which is very common -- String, Long, etc).
+ * TreeBin nodes (TreeNodes) also maintain the same "next"
+ * traversal pointers as regular nodes, so can be traversed in
+ * iterators in the same way.
+ *
+ * The table is resized when occupancy exceeds a percentage
+ * threshold (nominally, 0.75, but see below). Only a single
+ * thread performs the resize (using field "sizeCtl", to arrange
+ * exclusion), but the table otherwise remains usable for reads
+ * and updates. Resizing proceeds by transferring bins, one by
+ * one, from the table to the next table. Because we are using
+ * power-of-two expansion, the elements from each bin must either
+ * stay at same index, or move with a power of two offset. We
+ * eliminate unnecessary node creation by catching cases where old
+ * nodes can be reused because their next fields won't change. On
+ * average, only about one-sixth of them need cloning when a table
+ * doubles. The nodes they replace will be garbage collectable as
+ * soon as they are no longer referenced by any reader thread that
+ * may be in the midst of concurrently traversing table. Upon
+ * transfer, the old table bin contains only a special forwarding
+ * node (with hash field "MOVED") that contains the next table as
* its key. On encountering a forwarding node, access and update
- * operations restart, using the new table. To ensure concurrent
- * readability of traversals, transfers must proceed from the last
- * bin (table.length - 1) up towards the first. Any traversal
- * starting from the first bin can then arrange to move to the new
- * table for the rest of the traversal without revisiting nodes.
- * This constrains bin transfers to a particular order, and so can
- * block indefinitely waiting for the next lock, and other threads
- * cannot help with the transfer. However, expected stalls are
- * infrequent enough to not warrant the additional overhead and
- * complexity of access and iteration schemes that could admit
- * out-of-order or concurrent bin transfers.
- *
- * A similar traversal scheme (not yet implemented) can apply to
- * partial traversals during partitioned aggregate operations.
- * Also, read-only operations give up if ever forwarded to a null
- * table, which provides support for shutdown-style clearing,
- * which is also not currently implemented.
+ * operations restart, using the new table.
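A sketch of the power-of-two split rule mentioned above (illustration only; the sample hash value is arbitrary): when the table doubles from n to 2n, each node either keeps its bin index or moves by exactly n, depending on one additional hash bit.

    class TransferIndexSketch {
        public static void main(String[] args) {
            int n = 16;                     // old capacity (power of two)
            int h = 0x3A;                   // some already-spread hash value
            int oldIndex = h & (n - 1);     // bin index before the doubling
            int newIndex = h & (2 * n - 1); // bin index after the doubling
            // newIndex equals oldIndex when (h & n) == 0, else oldIndex + n
            System.out.println(oldIndex + " -> " + newIndex
                               + " (moved: " + ((h & n) != 0) + ")");
        }
    }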
+ *
+ * Each bin transfer requires its bin lock. However, unlike other
+ * cases, a transfer can skip a bin if it fails to acquire its
+ * lock, and revisit it later (unless it is a TreeBin). Method
+ * rebuild maintains a buffer of TRANSFER_BUFFER_SIZE bins that
+ * have been skipped because of failure to acquire a lock, and
+ * blocks only if none are available (i.e., only very rarely).
+ * The transfer operation must also ensure that all accessible
+ * bins in both the old and new table are usable by any traversal.
+ * When there are no lock acquisition failures, this is arranged
+ * simply by proceeding from the last bin (table.length - 1) up
+ * towards the first. Upon seeing a forwarding node, traversals
+ * (see class InternalIterator) arrange to move to the new table
+ * without revisiting nodes. However, when any node is skipped
+ * during a transfer, all earlier table bins may have become
+ * visible, so are initialized with a reverse-forwarding node back
+ * to the old table until the new ones are established. (This
+ * sometimes requires transiently locking a forwarding node, which
+ * is possible under the above encoding.) These more expensive
+ * mechanics trigger only when necessary.
+ *
+ * The traversal scheme also applies to partial traversals of
+ * ranges of bins (via an alternate InternalIterator constructor)
+ * to support partitioned aggregate operations. Also, read-only
+ * operations give up if ever forwarded to a null table, which
+ * provides support for shutdown-style clearing, which is also not
+ * currently implemented.
+ *
+ * Lazy table initialization minimizes footprint until first use,
+ * and also avoids resizings when the first operation is from a
+ * putAll, constructor with map argument, or deserialization.
+ * These cases attempt to override the initial capacity settings,
+ * but harmlessly fail to take effect in cases of races.
*
* The element count is maintained using a LongAdder, which avoids
* contention on updates but can encounter cache thrashing if read
- * too frequently during concurrent updates. To avoid reading so
- * often, resizing is normally attempted only upon adding to a bin
- * already holding two or more nodes. Under the default threshold
- * (0.75), and uniform hash distributions, the probability of this
- * occurring at threshold is around 13%, meaning that only about 1
- * in 8 puts check threshold (and after resizing, many fewer do
- * so). But this approximation has high variance for small table
- * sizes, so we check on any collision for sizes <= 64. Further,
- * to increase the probablity that a resize occurs soon enough, we
- * offset the threshold (see THRESHOLD_OFFSET) by the expected
- * number of puts between checks. This is currently set to 8, in
- * accord with the default load factor. In practice, this is
- * rarely overridden, and in any case is close enough to other
- * plausible values not to waste dynamic probablity computation
- * for more precision.
+ * too frequently during concurrent access. To avoid reading so
+ * often, resizing is attempted either when a bin lock is
+ * contended, or upon adding to a bin already holding two or more
+ * nodes (checked before adding in the xIfAbsent methods, after
+ * adding in others). Under uniform hash distributions, the
+ * probability of this occurring at threshold is around 13%,
+ * meaning that only about 1 in 8 puts check threshold (and after
+ * resizing, many fewer do so). But this approximation has high
+ * variance for small table sizes, so we check on any collision
+ * for sizes <= 64. The bulk putAll operation further reduces
+ * contention by only committing count updates upon these size
+ * checks.
+ *
+ * Maintaining API and serialization compatibility with previous
+ * versions of this class introduces several oddities. Mainly: We
+ * leave untouched but unused constructor arguments referring to
+ * concurrencyLevel. We accept a loadFactor constructor argument,
+ * but apply it only to initial table capacity (which is the only
+ * time that we can guarantee to honor it.) We also declare an
+ * unused "Segment" class that is instantiated in minimal form
+ * only when serializing.
*/
/* ---------------- Constants -------------- */
/**
- * The smallest allowed table capacity. Must be a power of 2, at
- * least 2.
+ * The largest possible table capacity. This value must be
+ * exactly 1<<30 to stay within Java array allocation and indexing
+ * bounds for power of two table sizes, and is further required
+ * because the top two bits of 32bit hash fields are used for
+ * control purposes.
*/
- static final int MINIMUM_CAPACITY = 2;
+ private static final int MAXIMUM_CAPACITY = 1 << 30;
/**
- * The largest allowed table capacity. Must be a power of 2, at
- * most 1<<30.
+ * The default initial table capacity. Must be a power of 2
+ * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
*/
- static final int MAXIMUM_CAPACITY = 1 << 30;
+ private static final int DEFAULT_CAPACITY = 16;
/**
- * The default initial table capacity. Must be a power of 2, at
- * least MINIMUM_CAPACITY and at most MAXIMUM_CAPACITY
+ * The largest possible (non-power of two) array size.
+ * Needed by toArray and related methods.
*/
- static final int DEFAULT_CAPACITY = 16;
+ static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
/**
- * The default load factor for this table, used when not otherwise
- * specified in a constructor.
+ * The default concurrency level for this table. Unused but
+ * defined for compatibility with previous versions of this class.
*/
- static final float DEFAULT_LOAD_FACTOR = 0.75f;
+ private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
/**
- * The default concurrency level for this table. Unused, but
- * defined for compatibility with previous versions of this class.
+ * The load factor for this table. Overrides of this value in
+ * constructors affect only the initial table capacity. The
+ * actual floating point value isn't normally used -- it is
+ * simpler to use expressions such as {@code n - (n >>> 2)} for
+ * the associated resizing threshold.
*/
- static final int DEFAULT_CONCURRENCY_LEVEL = 16;
+ private static final float LOAD_FACTOR = 0.75f;
/**
- * The count value to offset thesholds to compensate for checking
- * for resizing only when inserting into bins with two or more
- * elements. See above for explanation.
+ * The buffer size for skipped bins during transfers. The
+ * value is arbitrary but should be large enough to avoid
+ * most locking stalls during resizes.
*/
- static final int THRESHOLD_OFFSET = 8;
+ private static final int TRANSFER_BUFFER_SIZE = 32;
/**
- * Special node hash value indicating to use table in node.key
- * Must be negative.
+ * The bin count threshold for using a tree rather than list for a
+ * bin. The value reflects the approximate break-even point for
+ * using tree-based operations.
+ */
+ private static final int TREE_THRESHOLD = 8;
+
+ /*
+ * Encodings for special uses of Node hash fields. See above for
+ * explanation.
*/
- static final int MOVED = -1;
+ static final int MOVED = 0x80000000; // hash field for forwarding nodes
+ static final int LOCKED = 0x40000000; // set/tested only as a bit
+ static final int WAITING = 0xc0000000; // both bits set/tested together
+ static final int HASH_BITS = 0x3fffffff; // usable bits of normal node hash
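A sketch of how these control bits partition a node's hash field (illustration only; the constants are copied from above, the key is arbitrary):

    class HashBitsSketch {
        static final int MOVED     = 0x80000000;
        static final int LOCKED    = 0x40000000;
        static final int WAITING   = 0xc0000000;
        static final int HASH_BITS = 0x3fffffff;

        public static void main(String[] args) {
            int h = "key".hashCode() & HASH_BITS;           // normal node: top two bits clear
            int locked  = h | LOCKED;                       // lock bit set, hash bits preserved
            int waiting = h | WAITING;                      // locked with a thread waiting
            System.out.println((locked  & HASH_BITS) == h); // true
            System.out.println((waiting & LOCKED) != 0);    // true: still locked
            System.out.println((MOVED & HASH_BITS) == 0);   // forwarding nodes carry no hash bits
        }
    }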
/* ---------------- Fields -------------- */
/**
* The array of bins. Lazily initialized upon first insertion.
- * Size is always a power of two. Accessed directly by inner
- * classes.
+ * Size is always a power of two. Accessed directly by iterators.
*/
transient volatile Node[] table;
- /** The counter maintaining number of elements. */
+ /**
+ * The counter maintaining number of elements.
+ */
private transient final LongAdder counter;
- /** Nonzero when table is being initialized or resized. Updated via CAS. */
- private transient volatile int resizing;
- /** The target load factor for the table. */
- private transient float loadFactor;
- /** The next element count value upon which to resize the table. */
- private transient int threshold;
- /** The initial capacity of the table. */
- private transient int initCap;
+
+ /**
+ * Table initialization and resizing control. When negative, the
+ * table is being initialized or resized. Otherwise, when table is
+ * null, holds the initial table size to use upon creation, or 0
+ * for default. After initialization, holds the next element count
+ * value upon which to resize the table.
+ */
+ private transient volatile int sizeCtl;
// views
- transient Set<K> keySet;
- transient Set<Map.Entry<K,V>> entrySet;
- transient Collection<V> values;
-
- /** For serialization compatability. Null unless serialized; see below */
- Segment<K,V>[] segments;
+ private transient KeySet<K,V> keySet;
+ private transient Values<K,V> values;
+ private transient EntrySet<K,V> entrySet;
- /**
- * Applies a supplemental hash function to a given hashCode, which
- * defends against poor quality hash functions. The result must
- * be non-negative, and for reasonable performance must have good
- * avalanche properties; i.e., that each bit of the argument
- * affects each bit (except sign bit) of the result.
+ /** For serialization compatibility. Null unless serialized; see below */
+ private Segment<K,V>[] segments;
+
+ /* ---------------- Table element access -------------- */
+
+ /*
+ * Volatile access methods are used for table elements as well as
+ * elements of in-progress next table while resizing. Uses are
+ * null checked by callers, and implicitly bounds-checked, relying
+ * on the invariants that tab arrays have non-zero size, and all
+ * indices are masked with (tab.length - 1) which is never
+ * negative and always less than length. Note that, to be correct
+ * wrt arbitrary concurrency errors by users, bounds checks must
+ * operate on local variables, which accounts for some odd-looking
+ * inline assignments below.
*/
- private static final int spread(int h) {
- // Apply base step of MurmurHash; see http://code.google.com/p/smhasher/
- h ^= h >>> 16;
- h *= 0x85ebca6b;
- h ^= h >>> 13;
- h *= 0xc2b2ae35;
- return (h >>> 16) ^ (h & 0x7fffffff); // mask out sign bit
+
+ static final Node tabAt(Node[] tab, int i) { // used by InternalIterator
+ return (Node)UNSAFE.getObjectVolatile(tab, ((long)i<<ASHIFT)+ABASE);
+ }
this.val = val;
this.next = next;
}
- }
- /*
- * Volatile access nethods are used for table elements as well as
- * elements of in-progress next table while resizing. Uses in
- * access and update methods are null checked by callers, and
- * implicitly bounds-checked, relying on the invariants that tab
- * arrays have non-zero size, and all indices are masked with
- * (tab.length - 1) which is never negative and always less than
- * length. The "relaxed" non-volatile forms are used only during
- * table initialization. The only other usage is in
- * HashIterator.advance, which performs explicit checks.
- */
+ /** CompareAndSet the hash field */
+ final boolean casHash(int cmp, int val) {
+ return UNSAFE.compareAndSwapInt(this, hashOffset, cmp, val);
+ }
- static final Node tabAt(Node[] tab, int i) { // used in HashIterator
- return (Node)UNSAFE.getObjectVolatile(tab, ((long)i<<ASHIFT)+ABASE);
- }
+ static final int MAX_SPINS =
+ Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1;
- private static final boolean casTabAt(Node[] tab, int i, Node c, Node v) {
- return UNSAFE.compareAndSwapObject(tab, ((long)i<<ASHIFT)+ABASE, c, v);
- }
+ final void tryAwaitLock(Node[] tab, int i) {
+ if (tab != null && i >= 0 && i < tab.length) { // bounds check
+ int r = ThreadLocalRandom.current().nextInt(); // randomize spins
+ int spins = MAX_SPINS, h;
+ while (tabAt(tab, i) == this && ((h = hash) & LOCKED) != 0) {
+ if (spins >= 0) {
+ r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift
+ if (r >= 0 && --spins == 0)
+ Thread.yield(); // yield before block
+ }
+ else if (casHash(h, h | WAITING)) {
+ synchronized (this) {
+ if (tabAt(tab, i) == this &&
+ (hash & WAITING) == WAITING) {
+ try {
+ wait();
+ } catch (InterruptedException ie) {
+ Thread.currentThread().interrupt();
+ }
+ }
+ else
+ notifyAll(); // possibly won race vs signaller
+ }
+ break;
+ }
+ }
+ }
+ }
- private static final void setTabAt(Node[] tab, int i, Node v) {
- UNSAFE.putObjectVolatile(tab, ((long)i<<ASHIFT)+ABASE, v);
- }
+ Class<?> k = Node.class;
+ hashOffset = UNSAFE.objectFieldOffset
+ (k.getDeclaredField("hash"));
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ }
}
- private static final void relaxedSetTabAt(Node[] tab, int i, Node v) {
- UNSAFE.putObject(tab, ((long)i<<ASHIFT)+ABASE, v);
- }
+ * elements that are Comparable but not necessarily Comparable<T>
+ * for the same T, so we cannot invoke compareTo among them. To
+ * handle this, the tree is ordered primarily by hash value, then
+ * by getClass().getName() order, and then by Comparator order
+ * among elements of the same class. On lookup at a node, if
+ * elements are not comparable or compare as 0, both left and
+ * right children may need to be searched in the case of tied hash
+ * values. (This corresponds to the full list search that would be
+ * necessary if all elements were non-Comparable and had tied
+ * hashes.) The red-black balancing code is updated from
+ * pre-jdk-collections
+ * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
+ * based in turn on Cormen, Leiserson, and Rivest "Introduction to
+ * Algorithms" (CLR).
+ *
+ * TreeBins also maintain a separate locking discipline than
+ * regular bins. Because they are forwarded via special MOVED
+ * nodes at bin heads (which can never change once established),
+ * we cannot use those nodes as locks. Instead, TreeBin
+ * extends AbstractQueuedSynchronizer to support a simple form of
+ * read-write lock. For update operations and table validation,
+ * the exclusive form of lock behaves in the same way as bin-head
+ * locks. However, lookups use shared read-lock mechanics to allow
+ * multiple readers in the absence of writers. Additionally,
+ * these lookups do not ever block: While the lock is not
+ * available, they proceed along the slow traversal path (via
+ * next-pointers) until the lock becomes available or the list is
+ * exhausted, whichever comes first. (These cases are not fast,
+ * but maximize aggregate expected throughput.) The AQS mechanics
+ * for doing this are straightforward. The lock state is held as
+ * AQS getState(). Read counts are negative; the write count (1)
+ * is positive. There are no signalling preferences among readers
+ * and writers. Since we don't need to export full Lock API, we
+ * just override the minimal AQS methods and use them directly.
+ */
+ static final class TreeBin extends AbstractQueuedSynchronizer {
+ private static final long serialVersionUID = 2249069246763182397L;
+ transient TreeNode root; // root of tree
+ transient TreeNode first; // head of next-pointer list
- /** Implementation for get and containsKey **/
- private final Object internalGet(Object k) {
- int h = spread(k.hashCode());
- Node[] tab = table;
- retry: while (tab != null) {
- Node e = tabAt(tab, (tab.length - 1) & h);
- while (e != null) {
- int eh = e.hash;
- if (eh == h) {
- Object ek = e.key, ev = e.val;
- if (ev != null && ek != null && (k == ek || k.equals(ek)))
- return ev;
- }
- else if (eh < 0) { // bin was moved during resize
- tab = (Node[])e.key;
- continue retry;
+ /* AQS overrides */
+ public final boolean isHeldExclusively() { return getState() > 0; }
+ public final boolean tryAcquire(int ignore) {
+ if (compareAndSetState(0, 1)) {
+ setExclusiveOwnerThread(Thread.currentThread());
+ return true;
+ }
+ return false;
+ }
+ public final boolean tryRelease(int ignore) {
+ setExclusiveOwnerThread(null);
+ setState(0);
+ return true;
+ }
+ public final int tryAcquireShared(int ignore) {
+ for (int c;;) {
+ if ((c = getState()) > 0)
+ return -1;
+ if (compareAndSetState(c, c -1))
+ return 1;
+ }
+ }
+ public final boolean tryReleaseShared(int ignore) {
+ int c;
+ do {} while (!compareAndSetState(c = getState(), c + 1));
+ return c == -1;
+ }
+
+ /** From CLR */
+ private void rotateLeft(TreeNode p) {
+ if (p != null) {
+ TreeNode r = p.right, pp, rl;
+ if ((rl = p.right = r.left) != null)
+ rl.parent = p;
+ if ((pp = r.parent = p.parent) == null)
+ root = r;
+ else if (pp.left == p)
+ pp.left = r;
+ else
+ pp.right = r;
+ r.left = p;
+ p.parent = r;
+ }
+ }
+
+ /** From CLR */
+ private void rotateRight(TreeNode p) {
+ if (p != null) {
+ TreeNode l = p.left, pp, lr;
+ if ((lr = p.left = l.right) != null)
+ lr.parent = p;
+ if ((pp = l.parent = p.parent) == null)
+ root = l;
+ else if (pp.right == p)
+ pp.right = l;
+ else
+ pp.left = l;
+ l.right = p;
+ p.parent = l;
+ }
+ }
+
+ /**
+ * Return the TreeNode (or null if not found) for the given key
+ * starting at given root.
+ */
+ @SuppressWarnings("unchecked") // suppress Comparable cast warning
+ final TreeNode getTreeNode(int h, Object k, TreeNode p) {
+ Class<?> c = k.getClass();
+ while (p != null) {
+ int dir, ph; Object pk; Class<?> pc;
+ if ((ph = p.hash) == h) {
+ if ((pk = p.key) == k || k.equals(pk))
+ return p;
+ if (c != (pc = pk.getClass()) ||
+ !(k instanceof Comparable) ||
+ (dir = ((Comparable)k).compareTo((Comparable)pk)) == 0) {
+ dir = (c == pc)? 0 : c.getName().compareTo(pc.getName());
+ TreeNode r = null, s = null, pl, pr;
+ if (dir >= 0) {
+ if ((pl = p.left) != null && h <= pl.hash)
+ s = pl;
+ }
+ else if ((pr = p.right) != null && h >= pr.hash)
+ s = pr;
+ if (s != null && (r = getTreeNode(h, k, s)) != null)
+ return r;
+ }
}
- e = e.next;
+ else
+ dir = (h < ph) ? -1 : 1;
+ p = (dir > 0) ? p.right : p.left;
}
- break;
+ return null;
}
- return null;
- }
- /** Implementation for put and putIfAbsent **/
- private final Object internalPut(Object k, Object v, boolean replace) {
- int h = spread(k.hashCode());
- Object oldVal = null; // the previous value or null if none
- Node[] tab = table;
- for (;;) {
- Node e; int i;
- if (tab == null)
- tab = grow(0);
- else if ((e = tabAt(tab, i = (tab.length - 1) & h)) == null) {
- if (casTabAt(tab, i, null, new Node(h, k, v, null)))
+ /**
+ * Wrapper for getTreeNode used by CHM.get. Tries to obtain
+ * read-lock to call getTreeNode, but during failure to get
+ * lock, searches along next links.
+ */
+ final Object getValue(int h, Object k) {
+ Node r = null;
+ int c = getState(); // Must read lock state first
+ for (Node e = first; e != null; e = e.next) {
+ if (c <= 0 && compareAndSetState(c, c - 1)) {
+ try {
+ r = getTreeNode(h, k, root);
+ } finally {
+ releaseShared(0);
+ }
break;
+ }
+ else if ((e.hash & HASH_BITS) == h && k.equals(e.key)) {
+ r = e;
+ break;
+ }
+ else
+ c = getState();
+ }
+ return r == null ? null : r.val;
+ }
+
+ /**
+ * Find or add a node
+ * @return null if added
+ */
+ @SuppressWarnings("unchecked") // suppress Comparable cast warning
+ final TreeNode putTreeNode(int h, Object k, Object v) {
+ Class<?> c = k.getClass();
+ TreeNode pp = root, p = null;
+ int dir = 0;
+ while (pp != null) { // find existing node or leaf to insert at
+ int ph; Object pk; Class<?> pc;
+ p = pp;
+ if ((ph = p.hash) == h) {
+ if ((pk = p.key) == k || k.equals(pk))
+ return p;
+ if (c != (pc = pk.getClass()) ||
+ !(k instanceof Comparable) ||
+ (dir = ((Comparable)k).compareTo((Comparable)pk)) == 0) {
+ dir = (c == pc)? 0 : c.getName().compareTo(pc.getName());
+ TreeNode r = null, s = null, pl, pr;
+ if (dir >= 0) {
+ if ((pl = p.left) != null && h <= pl.hash)
+ s = pl;
+ }
+ else if ((pr = p.right) != null && h >= pr.hash)
+ s = pr;
+ if (s != null && (r = getTreeNode(h, k, s)) != null)
+ return r;
+ }
+ }
+ else
+ dir = (h < ph) ? -1 : 1;
+ pp = (dir > 0) ? p.right : p.left;
+ }
+
+ TreeNode f = first;
+ TreeNode x = first = new TreeNode(h, k, v, f, p);
+ if (p == null)
+ root = x;
+ else { // attach and rebalance; adapted from CLR
+ TreeNode xp, xpp;
+ if (f != null)
+ f.prev = x;
+ if (dir <= 0)
+ p.left = x;
+ else
+ p.right = x;
+ x.red = true;
+ while (x != null && (xp = x.parent) != null && xp.red &&
+ (xpp = xp.parent) != null) {
+ TreeNode xppl = xpp.left;
+ if (xp == xppl) {
+ TreeNode y = xpp.right;
+ if (y != null && y.red) {
+ y.red = false;
+ xp.red = false;
+ xpp.red = true;
+ x = xpp;
+ }
+ else {
+ if (x == xp.right) {
+ rotateLeft(x = xp);
+ xpp = (xp = x.parent) == null ? null : xp.parent;
+ }
+ if (xp != null) {
+ xp.red = false;
+ if (xpp != null) {
+ xpp.red = true;
+ rotateRight(xpp);
+ }
+ }
+ }
+ }
+ else {
+ TreeNode y = xppl;
+ if (y != null && y.red) {
+ y.red = false;
+ xp.red = false;
+ xpp.red = true;
+ x = xpp;
+ }
+ else {
+ if (x == xp.left) {
+ rotateRight(x = xp);
+ xpp = (xp = x.parent) == null ? null : xp.parent;
+ }
+ if (xp != null) {
+ xp.red = false;
+ if (xpp != null) {
+ xpp.red = true;
+ rotateLeft(xpp);
+ }
+ }
+ }
+ }
+ }
+ TreeNode r = root;
+ if (r != null && r.red)
+ r.red = false;
+ }
+ return null;
+ }
+
+ /**
+ * Removes the given node, that must be present before this
+ * call. This is messier than typical red-black deletion code
+ * because we cannot swap the contents of an interior node
+ * with a leaf successor that is pinned by "next" pointers
+ * that are accessible independently of lock. So instead we
+ * swap the tree linkages.
+ */
+ final void deleteTreeNode(TreeNode p) {
+ TreeNode next = (TreeNode)p.next; // unlink traversal pointers
+ TreeNode pred = p.prev;
+ if (pred == null)
+ first = next;
+ else
+ pred.next = next;
+ if (next != null)
+ next.prev = pred;
+ TreeNode replacement;
+ TreeNode pl = p.left;
+ TreeNode pr = p.right;
+ if (pl != null && pr != null) {
+ TreeNode s = pr, sl;
+ while ((sl = s.left) != null) // find successor
+ s = sl;
+ boolean c = s.red; s.red = p.red; p.red = c; // swap colors
+ TreeNode sr = s.right;
+ TreeNode pp = p.parent;
+ if (s == pr) { // p was s's direct parent
+ p.parent = s;
+ s.right = p;
+ }
+ else {
+ TreeNode sp = s.parent;
+ if ((p.parent = sp) != null) {
+ if (s == sp.left)
+ sp.left = p;
+ else
+ sp.right = p;
+ }
+ if ((s.right = pr) != null)
+ pr.parent = s;
+ }
+ p.left = null;
+ if ((p.right = sr) != null)
+ sr.parent = p;
+ if ((s.left = pl) != null)
+ pl.parent = s;
+ if ((s.parent = pp) == null)
+ root = s;
+ else if (p == pp.left)
+ pp.left = s;
+ else
+ pp.right = s;
+ replacement = sr;
+ }
+ else
+ replacement = (pl != null) ? pl : pr;
+ TreeNode pp = p.parent;
+ if (replacement == null) {
+ if (pp == null) {
+ root = null;
+ return;
+ }
+ replacement = p;
}
- else if (e.hash < 0)
- tab = (Node[])e.key;
else {
- boolean validated = false;
- boolean checkSize = false;
- synchronized(e) {
- Node first = e;
- for (;;) {
- Object ek, ev;
- if ((ev = e.val) == null)
- break;
- if (e.hash == h && (ek = e.key) != null &&
- (k == ek || k.equals(ek))) {
- if (tabAt(tab, i) == first) {
- validated = true;
- oldVal = ev;
- if (replace)
- e.val = v;
+ replacement.parent = pp;
+ if (pp == null)
+ root = replacement;
+ else if (p == pp.left)
+ pp.left = replacement;
+ else
+ pp.right = replacement;
+ p.left = p.right = p.parent = null;
+ }
+ if (!p.red) { // rebalance, from CLR
+ TreeNode x = replacement;
+ while (x != null) {
+ TreeNode xp, xpl;
+ if (x.red || (xp = x.parent) == null) {
+ x.red = false;
+ break;
+ }
+ if (x == (xpl = xp.left)) {
+ TreeNode sib = xp.right;
+ if (sib != null && sib.red) {
+ sib.red = false;
+ xp.red = true;
+ rotateLeft(xp);
+ sib = (xp = x.parent) == null ? null : xp.right;
+ }
+ if (sib == null)
+ x = xp;
+ else {
+ TreeNode sl = sib.left, sr = sib.right;
+ if ((sr == null || !sr.red) &&
+ (sl == null || !sl.red)) {
+ sib.red = true;
+ x = xp;
+ }
+ else {
+ if (sr == null || !sr.red) {
+ if (sl != null)
+ sl.red = false;
+ sib.red = true;
+ rotateRight(sib);
+ sib = (xp = x.parent) == null ? null : xp.right;
+ }
+ if (sib != null) {
+ sib.red = (xp == null)? false : xp.red;
+ if ((sr = sib.right) != null)
+ sr.red = false;
+ }
+ if (xp != null) {
+ xp.red = false;
+ rotateLeft(xp);
+ }
+ x = root;
}
- break;
}
- Node last = e;
- if ((e = e.next) == null) {
- if (tabAt(tab, i) == first) {
- validated = true;
- last.next = new Node(h, k, v, null);
- if (last != first || tab.length <= 64)
- checkSize = true;
+ }
+ else { // symmetric
+ TreeNode sib = xpl;
+ if (sib != null && sib.red) {
+ sib.red = false;
+ xp.red = true;
+ rotateRight(xp);
+ sib = (xp = x.parent) == null ? null : xp.left;
+ }
+ if (sib == null)
+ x = xp;
+ else {
+ TreeNode sl = sib.left, sr = sib.right;
+ if ((sl == null || !sl.red) &&
+ (sr == null || !sr.red)) {
+ sib.red = true;
+ x = xp;
+ }
+ else {
+ if (sl == null || !sl.red) {
+ if (sr != null)
+ sr.red = false;
+ sib.red = true;
+ rotateLeft(sib);
+ sib = (xp = x.parent) == null ? null : xp.left;
+ }
+ if (sib != null) {
+ sib.red = (xp == null)? false : xp.red;
+ if ((sl = sib.left) != null)
+ sl.red = false;
+ }
+ if (xp != null) {
+ xp.red = false;
+ rotateRight(xp);
+ }
+ x = root;
}
- break;
}
}
}
- if (validated) {
- if (checkSize && tab.length < MAXIMUM_CAPACITY &&
- resizing == 0 && counter.sum() >= threshold)
- grow(0);
- break;
+ }
+ if (p == replacement && (pp = p.parent) != null) {
+ if (p == pp.left) // detach pointers
+ pp.left = null;
+ else if (p == pp.right)
+ pp.right = null;
+ p.parent = null;
+ }
+ }
+ }
+
+ /* ---------------- Collision reduction methods -------------- */
+
+ /**
+ * Spreads higher bits to lower, and also forces top 2 bits to 0.
+ * Because the table uses power-of-two masking, sets of hashes
+ * that vary only in bits above the current mask will always
+ * collide. (Among known examples are sets of Float keys holding
+ * consecutive whole numbers in small tables.) To counter this,
+ * we apply a transform that spreads the impact of higher bits
+ * downward. There is a tradeoff between speed, utility, and
+ * quality of bit-spreading. Because many common sets of hashes
+ * are already reasonably distributed across bits (so don't benefit
+ * from spreading), and because we use trees to handle large sets
+ * of collisions in bins, we don't need excessively high quality.
+ */
+ private static final int spread(int h) {
+ h ^= (h >>> 18) ^ (h >>> 12);
+ return (h ^ (h >>> 10)) & HASH_BITS;
+ }
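A sketch of the effect described above (illustration only; the transform is copied from spread, and the Float example mirrors the one in the comment): two raw hashes that differ only above a small table's mask collide, while their spread forms need not.

    class SpreadSketch {
        static final int HASH_BITS = 0x3fffffff;

        static int spread(int h) {                  // same transform as above
            h ^= (h >>> 18) ^ (h >>> 12);
            return (h ^ (h >>> 10)) & HASH_BITS;
        }

        public static void main(String[] args) {
            int mask = 15;                          // a 16-bin table
            int h1 = Float.floatToIntBits(1.0f);    // consecutive whole-number Floats
            int h2 = Float.floatToIntBits(2.0f);    // differ only in high bits
            System.out.println((h1 & mask) + " vs " + (h2 & mask));                 // 0 vs 0
            System.out.println((spread(h1) & mask) + " vs " + (spread(h2) & mask)); // distinct here
        }
    }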
+
+ /**
+ * Replaces a list bin with a tree bin. Call only when locked.
+ * Fails to replace if the given key is non-comparable or table
+ * is, or needs, resizing.
+ */
+ private final void replaceWithTreeBin(Node[] tab, int index, Object key) {
+ if ((key instanceof Comparable) &&
+ (tab.length >= MAXIMUM_CAPACITY || counter.sum() < (long)sizeCtl)) {
+ TreeBin t = new TreeBin();
+ for (Node e = tabAt(tab, index); e != null; e = e.next)
+ t.putTreeNode(e.hash & HASH_BITS, e.key, e.val);
+ setTabAt(tab, index, new Node(MOVED, t, null, null));
+ }
+ }
+
+ /* ---------------- Internal access and update methods -------------- */
+
+ /** Implementation for get and containsKey */
+ private final Object internalGet(Object k) {
+ int h = spread(k.hashCode());
+ retry: for (Node[] tab = table; tab != null;) {
+ Node e, p; Object ek, ev; int eh; // locals to read fields once
+ for (e = tabAt(tab, (tab.length - 1) & h); e != null; e = e.next) {
+ if ((eh = e.hash) == MOVED) {
+ if ((ek = e.key) instanceof TreeBin) // search TreeBin
+ return ((TreeBin)ek).getValue(h, k);
+ else { // restart with new table
+ tab = (Node[])ek;
+ continue retry;
+ }
}
+ else if ((eh & HASH_BITS) == h && (ev = e.val) != null &&
+ ((ek = e.key) == k || k.equals(ek)))
+ return ev;
}
+ break;
}
- if (oldVal == null)
- counter.increment();
- return oldVal;
+ return null;
}
/**
- * Covers the four public remove/replace methods: Replaces node
- * value with v, conditional upon match of cv if non-null. If
- * resulting value is null, delete.
+ * Implementation for the four public remove/replace methods:
+ * Replaces node value with v, conditional upon match of cv if
+ * non-null. If resulting value is null, delete.
*/
private final Object internalReplace(Object k, Object v, Object cv) {
int h = spread(k.hashCode());
Object oldVal = null;
- Node e; int i;
- Node[] tab = table;
- while (tab != null &&
- (e = tabAt(tab, i = (tab.length - 1) & h)) != null) {
- if (e.hash < 0)
- tab = (Node[])e.key;
- else {
+ for (Node[] tab = table;;) {
+ Node f; int i, fh; Object fk;
+ if (tab == null ||
+ (f = tabAt(tab, i = (tab.length - 1) & h)) == null)
+ break;
+ else if ((fh = f.hash) == MOVED) {
+ if ((fk = f.key) instanceof TreeBin) {
+ TreeBin t = (TreeBin)fk;
+ boolean validated = false;
+ boolean deleted = false;
+ t.acquire(0);
+ try {
+ if (tabAt(tab, i) == f) {
+ validated = true;
+ TreeNode p = t.getTreeNode(h, k, t.root);
+ if (p != null) {
+ Object pv = p.val;
+ if (cv == null || cv == pv || cv.equals(pv)) {
+ oldVal = pv;
+ if ((p.val = v) == null) {
+ deleted = true;
+ t.deleteTreeNode(p);
+ }
+ }
+ }
+ }
+ } finally {
+ t.release(0);
+ }
+ if (validated) {
+ if (deleted)
+ counter.add(-1L);
+ break;
+ }
+ }
+ else
+ tab = (Node[])fk;
+ }
+ else if ((fh & HASH_BITS) != h && f.next == null) // precheck
+ break; // rules out possible existence
+ else if ((fh & LOCKED) != 0) {
+ checkForResize(); // try resizing if can't get lock
+ f.tryAwaitLock(tab, i);
+ }
+ else if (f.casHash(fh, fh | LOCKED)) {
boolean validated = false;
boolean deleted = false;
- synchronized(e) {
- Node pred = null;
- Node first = e;
- for (;;) {
- Object ek, ev;
- if ((ev = e.val) == null)
- break;
- if (e.hash == h && (ek = e.key) != null &&
- (k == ek || k.equals(ek))) {
- if (tabAt(tab, i) == first) {
- validated = true;
+ try {
+ if (tabAt(tab, i) == f) {
+ validated = true;
+ for (Node e = f, pred = null;;) {
+ Object ek, ev;
+ if ((e.hash & HASH_BITS) == h &&
+ ((ev = e.val) != null) &&
+ ((ek = e.key) == k || k.equals(ek))) {
if (cv == null || cv == ev || cv.equals(ev)) {
oldVal = ev;
if ((e.val = v) == null) {
@@ -460,20 +1213,22 @@ public class ConcurrentHashMapV8
setTabAt(tab, i, en);
}
}
+ break;
}
- break;
- }
- pred = e;
- if ((e = e.next) == null) {
- if (tabAt(tab, i) == first)
- validated = true;
- break;
+ pred = e;
+ if ((e = e.next) == null)
+ break;
}
}
+ } finally {
+ if (!f.casHash(fh | LOCKED, fh)) {
+ f.hash = fh;
+ synchronized (f) { f.notifyAll(); };
+ }
}
if (validated) {
if (deleted)
- counter.decrement();
+ counter.add(-1L);
break;
}
}
@@ -481,509 +1236,1138 @@ public class ConcurrentHashMapV8
return oldVal;
}
- /** Implementation for computeIfAbsent and compute */
- @SuppressWarnings("unchecked")
- private final V internalCompute(K k,
- MappingFunction<? super K, ? extends V> f,
- boolean replace) {
+ /*
+ * Internal versions of the five insertion methods, each a
+ * little more complicated than the last. All have
+ * the same basic structure as the first (internalPut):
+ * 1. If table uninitialized, create
+ * 2. If bin empty, try to CAS new node
+ * 3. If bin stale, use new table
+ * 4. if bin converted to TreeBin, validate and relay to TreeBin methods
+ * 5. Lock and validate; if valid, scan and add or update
+ *
+ * The others interweave other checks and/or alternative actions:
+ * * Plain put checks for and performs resize after insertion.
+ * * putIfAbsent prescans for mapping without lock (and fails to add
+ * if present), which also makes pre-emptive resize checks worthwhile.
+ * * computeIfAbsent extends form used in putIfAbsent with additional
+ * mechanics to deal with calls, potential exceptions, and null
+ * returns from the function call.
+ * * compute uses the same function-call mechanics, but without
+ * the prescans
+ * * putAll attempts to pre-allocate enough table space
+ * and more lazily performs count updates and checks.
+ *
+ * Someday when details settle down a bit more, it might be worth
+ * some factoring to reduce sprawl.
+ */
+
+ /** Implementation for put */
+ private final Object internalPut(Object k, Object v) {
int h = spread(k.hashCode());
- V val = null;
- boolean added = false;
- boolean validated = false;
- Node[] tab = table;
- do {
- Node e; int i;
+ int count = 0;
+ for (Node[] tab = table;;) {
+ int i; Node f; int fh; Object fk;
if (tab == null)
- tab = grow(0);
- else if ((e = tabAt(tab, i = (tab.length - 1) & h)) == null) {
- Node node = new Node(h, k, null, null);
- synchronized(node) {
- if (casTabAt(tab, i, null, node)) {
- validated = true;
- try {
- val = f.map(k);
- if (val != null) {
- node.val = val;
- added = true;
+ tab = initTable();
+ else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
+ if (casTabAt(tab, i, null, new Node(h, k, v, null)))
+ break; // no lock when adding to empty bin
+ }
+ else if ((fh = f.hash) == MOVED) {
+ if ((fk = f.key) instanceof TreeBin) {
+ TreeBin t = (TreeBin)fk;
+ Object oldVal = null;
+ t.acquire(0);
+ try {
+ if (tabAt(tab, i) == f) {
+ count = 2;
+ TreeNode p = t.putTreeNode(h, k, v);
+ if (p != null) {
+ oldVal = p.val;
+ p.val = v;
+ }
+ }
+ } finally {
+ t.release(0);
+ }
+ if (count != 0) {
+ if (oldVal != null)
+ return oldVal;
+ break;
+ }
+ }
+ else
+ tab = (Node[])fk;
+ }
+ else if ((fh & LOCKED) != 0) {
+ checkForResize();
+ f.tryAwaitLock(tab, i);
+ }
+ else if (f.casHash(fh, fh | LOCKED)) {
+ Object oldVal = null;
+ try { // needed in case equals() throws
+ if (tabAt(tab, i) == f) {
+ count = 1;
+ for (Node e = f;; ++count) {
+ Object ek, ev;
+ if ((e.hash & HASH_BITS) == h &&
+ (ev = e.val) != null &&
+ ((ek = e.key) == k || k.equals(ek))) {
+ oldVal = ev;
+ e.val = v;
+ break;
+ }
+ Node last = e;
+ if ((e = e.next) == null) {
+ last.next = new Node(h, k, v, null);
+ if (count >= TREE_THRESHOLD)
+ replaceWithTreeBin(tab, i, k);
+ break;
}
- } finally {
- if (!added)
- setTabAt(tab, i, null);
}
}
+ } finally { // unlock and signal if needed
+ if (!f.casHash(fh | LOCKED, fh)) {
+ f.hash = fh;
+ synchronized (f) { f.notifyAll(); };
+ }
+ }
+ if (count != 0) {
+ if (oldVal != null)
+ return oldVal;
+ if (tab.length <= 64)
+ count = 2;
+ break;
}
}
- else if (e.hash < 0)
- tab = (Node[])e.key;
+ }
+ counter.add(1L);
+ if (count > 1)
+ checkForResize();
+ return null;
+ }
+
+ /** Implementation for putIfAbsent */
+ private final Object internalPutIfAbsent(Object k, Object v) {
+ int h = spread(k.hashCode());
+ int count = 0;
+ for (Node[] tab = table;;) {
+ int i; Node f; int fh; Object fk, fv;
+ if (tab == null)
+ tab = initTable();
+ else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
+ if (casTabAt(tab, i, null, new Node(h, k, v, null)))
+ break;
+ }
+ else if ((fh = f.hash) == MOVED) {
+ if ((fk = f.key) instanceof TreeBin) {
+ TreeBin t = (TreeBin)fk;
+ Object oldVal = null;
+ t.acquire(0);
+ try {
+ if (tabAt(tab, i) == f) {
+ count = 2;
+ TreeNode p = t.putTreeNode(h, k, v);
+ if (p != null)
+ oldVal = p.val;
+ }
+ } finally {
+ t.release(0);
+ }
+ if (count != 0) {
+ if (oldVal != null)
+ return oldVal;
+ break;
+ }
+ }
+ else
+ tab = (Node[])fk;
+ }
+ else if ((fh & HASH_BITS) == h && (fv = f.val) != null &&
+ ((fk = f.key) == k || k.equals(fk)))
+ return fv;
else {
- boolean checkSize = false;
- synchronized(e) {
- Node first = e;
- for (;;) {
+ Node g = f.next;
+ if (g != null) { // at least 2 nodes -- search and maybe resize
+ for (Node e = g;;) {
Object ek, ev;
- if ((ev = e.val) == null)
+ if ((e.hash & HASH_BITS) == h && (ev = e.val) != null &&
+ ((ek = e.key) == k || k.equals(ek)))
+ return ev;
+ if ((e = e.next) == null) {
+ checkForResize();
break;
- if (e.hash == h && (ek = e.key) != null &&
- (k == ek || k.equals(ek))) {
- if (tabAt(tab, i) == first) {
- validated = true;
- if (replace && (ev = f.map(k)) != null)
- e.val = ev;
- val = (V)ev;
+ }
+ }
+ }
+ if (((fh = f.hash) & LOCKED) != 0) {
+ checkForResize();
+ f.tryAwaitLock(tab, i);
+ }
+ else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) {
+ Object oldVal = null;
+ try {
+ if (tabAt(tab, i) == f) {
+ count = 1;
+ for (Node e = f;; ++count) {
+ Object ek, ev;
+ if ((e.hash & HASH_BITS) == h &&
+ (ev = e.val) != null &&
+ ((ek = e.key) == k || k.equals(ek))) {
+ oldVal = ev;
+ break;
+ }
+ Node last = e;
+ if ((e = e.next) == null) {
+ last.next = new Node(h, k, v, null);
+ if (count >= TREE_THRESHOLD)
+ replaceWithTreeBin(tab, i, k);
+ break;
+ }
+ }
+ }
+ } finally {
+ if (!f.casHash(fh | LOCKED, fh)) {
+ f.hash = fh;
+ synchronized (f) { f.notifyAll(); };
+ }
+ }
+ if (count != 0) {
+ if (oldVal != null)
+ return oldVal;
+ if (tab.length <= 64)
+ count = 2;
+ break;
+ }
+ }
+ }
+ }
+ counter.add(1L);
+ if (count > 1)
+ checkForResize();
+ return null;
+ }
+
+ /** Implementation for computeIfAbsent */
+ private final Object internalComputeIfAbsent(K k,
+ MappingFunction<? super K, ?> mf) {
+ int h = spread(k.hashCode());
+ Object val = null;
+ int count = 0;
+ for (Node[] tab = table;;) {
+ Node f; int i, fh; Object fk, fv;
+ if (tab == null)
+ tab = initTable();
+ else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
+ Node node = new Node(fh = h | LOCKED, k, null, null);
+ if (casTabAt(tab, i, null, node)) {
+ count = 1;
+ try {
+ if ((val = mf.map(k)) != null)
+ node.val = val;
+ } finally {
+ if (val == null)
+ setTabAt(tab, i, null);
+ if (!node.casHash(fh, h)) {
+ node.hash = h;
+ synchronized (node) { node.notifyAll(); };
+ }
+ }
+ }
+ if (count != 0)
+ break;
+ }
+ else if ((fh = f.hash) == MOVED) {
+ if ((fk = f.key) instanceof TreeBin) {
+ TreeBin t = (TreeBin)fk;
+ boolean added = false;
+ t.acquire(0);
+ try {
+ if (tabAt(tab, i) == f) {
+ count = 1;
+ TreeNode p = t.getTreeNode(h, k, t.root);
+ if (p != null)
+ val = p.val;
+ else if ((val = mf.map(k)) != null) {
+ added = true;
+ count = 2;
+ t.putTreeNode(h, k, val);
}
- break;
}
- Node last = e;
+ } finally {
+ t.release(0);
+ }
+ if (count != 0) {
+ if (!added)
+ return val;
+ break;
+ }
+ }
+ else
+ tab = (Node[])fk;
+ }
+ else if ((fh & HASH_BITS) == h && (fv = f.val) != null &&
+ ((fk = f.key) == k || k.equals(fk)))
+ return fv;
+ else {
+ Node g = f.next;
+ if (g != null) {
+ for (Node e = g;;) {
+ Object ek, ev;
+ if ((e.hash & HASH_BITS) == h && (ev = e.val) != null &&
+ ((ek = e.key) == k || k.equals(ek)))
+ return ev;
if ((e = e.next) == null) {
- if (tabAt(tab, i) == first) {
- validated = true;
- if ((val = f.map(k)) != null) {
- last.next = new Node(h, k, val, null);
- added = true;
- if (last != first || tab.length <= 64)
- checkSize = true;
+ checkForResize();
+ break;
+ }
+ }
+ }
+ if (((fh = f.hash) & LOCKED) != 0) {
+ checkForResize();
+ f.tryAwaitLock(tab, i);
+ }
+ else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) {
+ boolean added = false;
+ try {
+ if (tabAt(tab, i) == f) {
+ count = 1;
+ for (Node e = f;; ++count) {
+ Object ek, ev;
+ if ((e.hash & HASH_BITS) == h &&
+ (ev = e.val) != null &&
+ ((ek = e.key) == k || k.equals(ek))) {
+ val = ev;
+ break;
+ }
+ Node last = e;
+ if ((e = e.next) == null) {
+ if ((val = mf.map(k)) != null) {
+ added = true;
+ last.next = new Node(h, k, val, null);
+ if (count >= TREE_THRESHOLD)
+ replaceWithTreeBin(tab, i, k);
+ }
+ break;
}
}
- break;
+ }
+ } finally {
+ if (!f.casHash(fh | LOCKED, fh)) {
+ f.hash = fh;
+ synchronized (f) { f.notifyAll(); };
}
}
+ if (count != 0) {
+ if (!added)
+ return val;
+ if (tab.length <= 64)
+ count = 2;
+ break;
+ }
}
- if (checkSize && tab.length < MAXIMUM_CAPACITY &&
- resizing == 0 && counter.sum() >= threshold)
- grow(0);
- }
- } while (!validated);
- if (added)
- counter.increment();
+ }
+ }
+ if (val != null) {
+ counter.add(1L);
+ if (count > 1)
+ checkForResize();
+ }
return val;
}
- /*
- * Reclassifies nodes in each bin to new table. Because we are
- * using power-of-two expansion, the elements from each bin must
- * either stay at same index, or move with a power of two
- * offset. We eliminate unnecessary node creation by catching
- * cases where old nodes can be reused because their next fields
- * won't change. Statistically, at the default threshold, only
- * about one-sixth of them need cloning when a table doubles. The
- * nodes they replace will be garbage collectable as soon as they
- * are no longer referenced by any reader thread that may be in
- * the midst of concurrently traversing table.
- *
- * Transfers are done from the bottom up to preserve iterator
- * traversability. On each step, the old bin is locked,
- * moved/copied, and then replaced with a forwarding node.
- */
- private static final void transfer(Node[] tab, Node[] nextTab) {
- int n = tab.length;
- int mask = nextTab.length - 1;
- Node fwd = new Node(MOVED, nextTab, null, null);
- for (int i = n - 1; i >= 0; --i) {
- for (Node e;;) {
- if ((e = tabAt(tab, i)) == null) {
- if (casTabAt(tab, i, e, fwd))
+ /** Implementation for compute */
+ @SuppressWarnings("unchecked")
+ private final Object internalCompute(K k,
+ RemappingFunction<? super K, V> mf) {
+ int h = spread(k.hashCode());
+ Object val = null;
+ int delta = 0;
+ int count = 0;
+ for (Node[] tab = table;;) {
+ Node f; int i, fh; Object fk;
+ if (tab == null)
+ tab = initTable();
+ else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
+ Node node = new Node(fh = h | LOCKED, k, null, null);
+ if (casTabAt(tab, i, null, node)) {
+ try {
+ count = 1;
+ if ((val = mf.remap(k, null)) != null) {
+ node.val = val;
+ delta = 1;
+ }
+ } finally {
+ if (delta == 0)
+ setTabAt(tab, i, null);
+ if (!node.casHash(fh, h)) {
+ node.hash = h;
+ synchronized (node) { node.notifyAll(); };
+ }
+ }
+ }
+ if (count != 0)
+ break;
+ }
+ else if ((fh = f.hash) == MOVED) {
+ if ((fk = f.key) instanceof TreeBin) {
+ TreeBin t = (TreeBin)fk;
+ t.acquire(0);
+ try {
+ if (tabAt(tab, i) == f) {
+ count = 1;
+ TreeNode p = t.getTreeNode(h, k, t.root);
+ Object pv = (p == null) ? null : p.val;
+ if ((val = mf.remap(k, (V)pv)) != null) {
+ if (p != null)
+ p.val = val;
+ else {
+ count = 2;
+ delta = 1;
+ t.putTreeNode(h, k, val);
+ }
+ }
+ else if (p != null) {
+ delta = -1;
+ t.deleteTreeNode(p);
+ }
+ }
+ } finally {
+ t.release(0);
+ }
+ if (count != 0)
break;
}
- else {
- boolean validated = false;
- synchronized(e) {
- int idx = e.hash & mask;
- Node lastRun = e;
- for (Node p = e.next; p != null; p = p.next) {
- int j = p.hash & mask;
- if (j != idx) {
- idx = j;
- lastRun = p;
+ else
+ tab = (Node[])fk;
+ }
+ else if ((fh & LOCKED) != 0) {
+ checkForResize();
+ f.tryAwaitLock(tab, i);
+ }
+ else if (f.casHash(fh, fh | LOCKED)) {
+ try {
+ if (tabAt(tab, i) == f) {
+ count = 1;
+ for (Node e = f, pred = null;; ++count) {
+ Object ek, ev;
+ if ((e.hash & HASH_BITS) == h &&
+ (ev = e.val) != null &&
+ ((ek = e.key) == k || k.equals(ek))) {
+ val = mf.remap(k, (V)ev);
+ if (val != null)
+ e.val = val;
+ else {
+ delta = -1;
+ Node en = e.next;
+ if (pred != null)
+ pred.next = en;
+ else
+ setTabAt(tab, i, en);
+ }
+ break;
+ }
+ pred = e;
+ if ((e = e.next) == null) {
+ if ((val = mf.remap(k, null)) != null) {
+ pred.next = new Node(h, k, val, null);
+ delta = 1;
+ if (count >= TREE_THRESHOLD)
+ replaceWithTreeBin(tab, i, k);
+ }
+ break;
}
}
- if (tabAt(tab, i) == e) {
- validated = true;
- relaxedSetTabAt(nextTab, idx, lastRun);
- for (Node p = e; p != lastRun; p = p.next) {
- int h = p.hash;
- int j = h & mask;
- Node r = relaxedTabAt(nextTab, j);
- relaxedSetTabAt(nextTab, j,
- new Node(h, p.key, p.val, r));
+ }
+ } finally {
+ if (!f.casHash(fh | LOCKED, fh)) {
+ f.hash = fh;
+ synchronized (f) { f.notifyAll(); };
+ }
+ }
+ if (count != 0) {
+ if (tab.length <= 64)
+ count = 2;
+ break;
+ }
+ }
+ }
+ if (delta != 0) {
+ counter.add((long)delta);
+ if (count > 1)
+ checkForResize();
+ }
+ return val;
+ }
+
+ /** Implementation for putAll */
+ private final void internalPutAll(Map<?, ?> m) {
+ tryPresize(m.size());
+ long delta = 0L; // number of uncommitted additions
+ boolean npe = false; // to throw exception on exit for nulls
+ try { // to clean up counts on other exceptions
+ for (Map.Entry<?, ?> entry : m.entrySet()) {
+ Object k, v;
+ if (entry == null || (k = entry.getKey()) == null ||
+ (v = entry.getValue()) == null) {
+ npe = true;
+ break;
+ }
+ int h = spread(k.hashCode());
+ for (Node[] tab = table;;) {
+ int i; Node f; int fh; Object fk;
+ if (tab == null)
+ tab = initTable();
+ else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null){
+ if (casTabAt(tab, i, null, new Node(h, k, v, null))) {
+ ++delta;
+ break;
+ }
+ }
+ else if ((fh = f.hash) == MOVED) {
+ if ((fk = f.key) instanceof TreeBin) {
+ TreeBin t = (TreeBin)fk;
+ boolean validated = false;
+ t.acquire(0);
+ try {
+ if (tabAt(tab, i) == f) {
+ validated = true;
+ TreeNode p = t.getTreeNode(h, k, t.root);
+ if (p != null)
+ p.val = v;
+ else {
+ t.putTreeNode(h, k, v);
+ ++delta;
+ }
+ }
+ } finally {
+ t.release(0);
}
- setTabAt(tab, i, fwd);
+ if (validated)
+ break;
+ }
+ else
+ tab = (Node[])fk;
+ }
+ else if ((fh & LOCKED) != 0) {
+ counter.add(delta);
+ delta = 0L;
+ checkForResize();
+ f.tryAwaitLock(tab, i);
+ }
+ else if (f.casHash(fh, fh | LOCKED)) {
+ int count = 0;
+ try {
+ if (tabAt(tab, i) == f) {
+ count = 1;
+ for (Node e = f;; ++count) {
+ Object ek, ev;
+ if ((e.hash & HASH_BITS) == h &&
+ (ev = e.val) != null &&
+ ((ek = e.key) == k || k.equals(ek))) {
+ e.val = v;
+ break;
+ }
+ Node last = e;
+ if ((e = e.next) == null) {
+ ++delta;
+ last.next = new Node(h, k, v, null);
+ if (count >= TREE_THRESHOLD)
+ replaceWithTreeBin(tab, i, k);
+ break;
+ }
+ }
+ }
+ } finally {
+ if (!f.casHash(fh | LOCKED, fh)) {
+ f.hash = fh;
+ synchronized (f) { f.notifyAll(); };
+ }
+ }
+ if (count != 0) {
+ if (count > 1) {
+ counter.add(delta);
+ delta = 0L;
+ checkForResize();
+ }
+ break;
}
}
- if (validated)
- break;
}
}
+ } finally {
+ if (delta != 0)
+ counter.add(delta);
}
+ if (npe)
+ throw new NullPointerException();
+ }
+
+ /* ---------------- Table Initialization and Resizing -------------- */
+
+ /**
+ * Returns a power of two table size for the given desired capacity.
+ * See Hacker's Delight, sec 3.2
+ */
+ private static final int tableSizeFor(int c) {
+ int n = c - 1;
+ n |= n >>> 1;
+ n |= n >>> 2;
+ n |= n >>> 4;
+ n |= n >>> 8;
+ n |= n >>> 16;
+ return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
}
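
For illustration, a minimal standalone sketch of the rounding behavior this helper provides; the demo class, its main method, and the sample inputs are invented for this note, and the constant simply mirrors MAXIMUM_CAPACITY (1 << 30).

    // Hypothetical demo, not part of the patch: rounds a requested capacity up
    // to the next power of two by smearing the highest set bit downwards.
    class TableSizeForDemo {
        static final int MAXIMUM_CAPACITY = 1 << 30;

        static int tableSizeFor(int c) {
            int n = c - 1;
            n |= n >>> 1;
            n |= n >>> 2;
            n |= n >>> 4;
            n |= n >>> 8;
            n |= n >>> 16;
            return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
        }

        public static void main(String[] args) {
            System.out.println(tableSizeFor(1));    // 1
            System.out.println(tableSizeFor(17));   // 32
            System.out.println(tableSizeFor(1024)); // 1024
        }
    }
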
/**
- * If not already resizing, initializes or creates next table and
- * transfers bins. Rechecks occupancy after a transfer to see if
- * another resize is already needed because resizings are lagging
- * additions.
- *
- * @param sizeHint overridden capacity target (nonzero only from putAll)
- * @return current table
- */
- private final Node[] grow(int sizeHint) {
- if (resizing == 0 &&
- UNSAFE.compareAndSwapInt(this, resizingOffset, 0, 1)) {
- try {
- for (;;) {
- int cap, n;
- Node[] tab = table;
- if (tab == null) {
- int c = initCap;
- if (c < sizeHint)
- c = sizeHint;
- if (c == DEFAULT_CAPACITY)
- cap = c;
- else if (c >= MAXIMUM_CAPACITY)
- cap = MAXIMUM_CAPACITY;
- else {
- cap = MINIMUM_CAPACITY;
- while (cap < c)
- cap <<= 1;
- }
+ * Initializes table, using the size recorded in sizeCtl.
+ */
+ private final Node[] initTable() {
+ Node[] tab; int sc;
+ while ((tab = table) == null) {
+ if ((sc = sizeCtl) < 0)
+ Thread.yield(); // lost initialization race; just spin
+ else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
+ try {
+ if ((tab = table) == null) {
+ int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
+ tab = table = new Node[n];
+ sc = n - (n >>> 2);
}
- else if ((n = tab.length) < MAXIMUM_CAPACITY &&
- (sizeHint <= 0 || n < sizeHint))
- cap = n << 1;
- else
- break;
- threshold = (int)(cap * loadFactor) - THRESHOLD_OFFSET;
- Node[] nextTab = new Node[cap];
- if (tab != null)
- transfer(tab, nextTab);
- table = nextTab;
- if (tab == null || cap >= MAXIMUM_CAPACITY ||
- (sizeHint > 0 && cap >= sizeHint) ||
- counter.sum() < threshold)
- break;
+ } finally {
+ sizeCtl = sc;
}
- } finally {
- resizing = 0;
+ break;
}
}
- else if (table == null)
- Thread.yield(); // lost initialization race; just spin
- return table;
+ return tab;
}
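
The initialization handshake above can be read as a general idiom: claim the right to initialize by CAS-ing a control word to -1, publish the table, then restore the control word to the next resize threshold. A hedged sketch of that idiom using AtomicInteger in place of the Unsafe-based sizeCtl field (all names here are illustrative only):

    import java.util.concurrent.atomic.AtomicInteger;

    class LazyInitDemo {
        private final AtomicInteger ctl = new AtomicInteger(16); // requested capacity
        private volatile Object[] table;

        Object[] initTable() {
            Object[] tab;
            while ((tab = table) == null) {
                int sc = ctl.get();
                if (sc < 0)
                    Thread.yield();               // another thread is initializing
                else if (ctl.compareAndSet(sc, -1)) {
                    try {
                        if ((tab = table) == null) {
                            tab = table = new Object[sc];
                            sc = sc - (sc >>> 2); // 0.75 * n, the next threshold
                        }
                    } finally {
                        ctl.set(sc);
                    }
                    break;
                }
            }
            return tab;
        }
    }
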
/**
- * Implementation for putAll and constructor with Map
- * argument. Tries to first override initial capacity or grow
- * based on map size to pre-allocate table space.
+ * If table is too small and not already resizing, creates next
+ * table and transfers bins. Rechecks occupancy after a transfer
+ * to see if another resize is already needed because resizings
+ * are lagging additions.
*/
- private final void internalPutAll(Map<? extends K, ? extends V> m) {
- int s = m.size();
- grow((s >= (MAXIMUM_CAPACITY >>> 1))? s : s + (s >>> 1));
- for (Map.Entry<? extends K, ? extends V> e : m.entrySet()) {
- Object k = e.getKey();
- Object v = e.getValue();
- if (k == null || v == null)
- throw new NullPointerException();
- internalPut(k, v, true);
+ private final void checkForResize() {
+ Node[] tab; int n, sc;
+ while ((tab = table) != null &&
+ (n = tab.length) < MAXIMUM_CAPACITY &&
+ (sc = sizeCtl) >= 0 && counter.sum() >= (long)sc &&
+ UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
+ try {
+ if (tab == table) {
+ table = rebuild(tab);
+ sc = (n << 1) - (n >>> 1);
+ }
+ } finally {
+ sizeCtl = sc;
+ }
}
}
/**
- * Implementation for clear. Steps through each bin, removing all nodes.
+ * Tries to presize table to accommodate the given number of elements.
+ *
+ * @param size number of elements (doesn't need to be perfectly accurate)
*/
- private final void internalClear() {
- long deletions = 0L;
- int i = 0;
- Node[] tab = table;
- while (tab != null && i < tab.length) {
- Node e = tabAt(tab, i);
- if (e == null)
- ++i;
- else if (e.hash < 0)
- tab = (Node[])e.key;
- else {
+ private final void tryPresize(int size) {
+ int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
+ tableSizeFor(size + (size >>> 1) + 1);
+ int sc;
+ while ((sc = sizeCtl) >= 0) {
+ Node[] tab = table; int n;
+ if (tab == null || (n = tab.length) == 0) {
+ n = (sc > c) ? sc : c;
+ if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
+ try {
+ if (table == tab) {
+ table = new Node[n];
+ sc = n - (n >>> 2);
+ }
+ } finally {
+ sizeCtl = sc;
+ }
+ }
+ }
+ else if (c <= sc || n >= MAXIMUM_CAPACITY)
+ break;
+ else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
+ try {
+ if (table == tab) {
+ table = rebuild(tab);
+ sc = (n << 1) - (n >>> 1);
+ }
+ } finally {
+ sizeCtl = sc;
+ }
+ }
+ }
+ }
+
+ /*
+ * Moves and/or copies the nodes in each bin to new table. See
+ * above for explanation.
+ *
+ * @return the new table
+ */
+ private static final Node[] rebuild(Node[] tab) {
+ int n = tab.length;
+ Node[] nextTab = new Node[n << 1];
+ Node fwd = new Node(MOVED, nextTab, null, null);
+ int[] buffer = null; // holds bins to revisit; null until needed
+ Node rev = null; // reverse forwarder; null until needed
+ int nbuffered = 0; // the number of bins in buffer list
+ int bufferIndex = 0; // buffer index of current buffered bin
+ int bin = n - 1; // current non-buffered bin or -1 if none
+
+ for (int i = bin;;) { // start upwards sweep
+ int fh; Node f;
+ if ((f = tabAt(tab, i)) == null) {
+ if (bin >= 0) { // no lock needed (or available)
+ if (!casTabAt(tab, i, f, fwd))
+ continue;
+ }
+ else { // transiently use a locked forwarding node
+ Node g = new Node(MOVED|LOCKED, nextTab, null, null);
+ if (!casTabAt(tab, i, f, g))
+ continue;
+ setTabAt(nextTab, i, null);
+ setTabAt(nextTab, i + n, null);
+ setTabAt(tab, i, fwd);
+ if (!g.casHash(MOVED|LOCKED, MOVED)) {
+ g.hash = MOVED;
+ synchronized (g) { g.notifyAll(); }
+ }
+ }
+ }
+ else if ((fh = f.hash) == MOVED) {
+ Object fk = f.key;
+ if (fk instanceof TreeBin) {
+ TreeBin t = (TreeBin)fk;
+ boolean validated = false;
+ t.acquire(0);
+ try {
+ if (tabAt(tab, i) == f) {
+ validated = true;
+ splitTreeBin(nextTab, i, t);
+ setTabAt(tab, i, fwd);
+ }
+ } finally {
+ t.release(0);
+ }
+ if (!validated)
+ continue;
+ }
+ }
+ else if ((fh & LOCKED) == 0 && f.casHash(fh, fh|LOCKED)) {
boolean validated = false;
- synchronized(e) {
- if (tabAt(tab, i) == e) {
+ try { // split to lo and hi lists; copying as needed
+ if (tabAt(tab, i) == f) {
validated = true;
- do {
- if (e.val != null) {
- e.val = null;
- ++deletions;
- }
- } while ((e = e.next) != null);
- setTabAt(tab, i, null);
+ splitBin(nextTab, i, f);
+ setTabAt(tab, i, fwd);
}
- }
- if (validated) {
- ++i;
- if (deletions > THRESHOLD_OFFSET) { // bound lag in counts
- counter.add(-deletions);
- deletions = 0L;
+ } finally {
+ if (!f.casHash(fh | LOCKED, fh)) {
+ f.hash = fh;
+ synchronized (f) { f.notifyAll(); };
}
}
+ if (!validated)
+ continue;
}
+ else {
+ if (buffer == null) // initialize buffer for revisits
+ buffer = new int[TRANSFER_BUFFER_SIZE];
+ if (bin < 0 && bufferIndex > 0) {
+ int j = buffer[--bufferIndex];
+ buffer[bufferIndex] = i;
+ i = j; // swap with another bin
+ continue;
+ }
+ if (bin < 0 || nbuffered >= TRANSFER_BUFFER_SIZE) {
+ f.tryAwaitLock(tab, i);
+ continue; // no other options -- block
+ }
+ if (rev == null) // initialize reverse-forwarder
+ rev = new Node(MOVED, tab, null, null);
+ if (tabAt(tab, i) != f || (f.hash & LOCKED) == 0)
+ continue; // recheck before adding to list
+ buffer[nbuffered++] = i;
+ setTabAt(nextTab, i, rev); // install place-holders
+ setTabAt(nextTab, i + n, rev);
+ }
+
+ if (bin > 0)
+ i = --bin;
+ else if (buffer != null && nbuffered > 0) {
+ bin = -1;
+ i = buffer[bufferIndex = --nbuffered];
+ }
+ else
+ return nextTab;
}
- if (deletions != 0L)
- counter.add(-deletions);
}
/**
- * Base class for key, value, and entry iterators, plus internal
- * implementations of public traversal-based methods, to avoid
- * duplicating traversal code.
+ * Split a normal bin with list headed by e into lo and hi parts;
+ * install in given table
*/
- class HashIterator {
- private Node next; // the next entry to return
- private Node[] tab; // current table; updated if resized
- private Node lastReturned; // the last entry returned, for remove
- private Object nextVal; // cached value of next
- private int index; // index of bin to use next
- private int baseIndex; // current index of initial table
- private final int baseSize; // initial table size
+ private static void splitBin(Node[] nextTab, int i, Node e) {
+ int bit = nextTab.length >>> 1; // bit to split on
+ int runBit = e.hash & bit;
+ Node lastRun = e, lo = null, hi = null;
+ for (Node p = e.next; p != null; p = p.next) {
+ int b = p.hash & bit;
+ if (b != runBit) {
+ runBit = b;
+ lastRun = p;
+ }
+ }
+ if (runBit == 0)
+ lo = lastRun;
+ else
+ hi = lastRun;
+ for (Node p = e; p != lastRun; p = p.next) {
+ int ph = p.hash & HASH_BITS;
+ Object pk = p.key, pv = p.val;
+ if ((ph & bit) == 0)
+ lo = new Node(ph, pk, pv, lo);
+ else
+ hi = new Node(ph, pk, pv, hi);
+ }
+ setTabAt(nextTab, i, lo);
+ setTabAt(nextTab, i + bit, hi);
+ }
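
As a hedged illustration of the index arithmetic splitBin relies on (the concrete numbers below are invented): when a table of length n doubles, every node either keeps its index or moves to index + n, decided solely by the single hash bit selected by "bit".

    // Illustration only: where a few hashes land before and after one doubling.
    class SplitIndexDemo {
        public static void main(String[] args) {
            int oldCap = 8;            // old table length n
            int bit = oldCap;          // the bit splitBin tests (nextTab.length >>> 1)
            int[] hashes = {3, 11, 19, 27};
            for (int h : hashes) {
                int oldIndex = h & (oldCap - 1);
                int newIndex = ((h & bit) == 0) ? oldIndex : oldIndex + bit;
                System.out.println("hash " + h + ": bin " + oldIndex + " -> " + newIndex);
            }
            // hashes 3 and 19 stay in bin 3 (lo list); 11 and 27 move to bin 11 (hi list)
        }
    }
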
- HashIterator() {
- Node[] t = tab = table;
- if (t == null)
- baseSize = 0;
+ /**
+ * Split a tree bin into lo and hi parts; install in given table
+ */
+ private static void splitTreeBin(Node[] nextTab, int i, TreeBin t) {
+ int bit = nextTab.length >>> 1;
+ TreeBin lt = new TreeBin();
+ TreeBin ht = new TreeBin();
+ int lc = 0, hc = 0;
+ for (Node e = t.first; e != null; e = e.next) {
+ int h = e.hash & HASH_BITS;
+ Object k = e.key, v = e.val;
+ if ((h & bit) == 0) {
+ ++lc;
+ lt.putTreeNode(h, k, v);
+ }
else {
- baseSize = t.length;
- advance(null);
+ ++hc;
+ ht.putTreeNode(h, k, v);
}
}
+ Node ln, hn; // throw away trees if too small
+ if (lc <= (TREE_THRESHOLD >>> 1)) {
+ ln = null;
+ for (Node p = lt.first; p != null; p = p.next)
+ ln = new Node(p.hash, p.key, p.val, ln);
+ }
+ else
+ ln = new Node(MOVED, lt, null, null);
+ setTabAt(nextTab, i, ln);
+ if (hc <= (TREE_THRESHOLD >>> 1)) {
+ hn = null;
+ for (Node p = ht.first; p != null; p = p.next)
+ hn = new Node(p.hash, p.key, p.val, hn);
+ }
+ else
+ hn = new Node(MOVED, ht, null, null);
+ setTabAt(nextTab, i + bit, hn);
+ }
- public final boolean hasNext() { return next != null; }
- public final boolean hasMoreElements() { return next != null; }
-
- /**
- * Advances next. Normally, iteration proceeds bin-by-bin
- * traversing lists. However, if the table has been resized,
- * then all future steps must traverse both the bin at the
- * current index as well as at (index + baseSize); and so on
- * for further resizings. To paranoically cope with potential
- * (improper) sharing of iterators across threads, table reads
- * are bounds-checked.
- */
- final void advance(Node e) {
- for (;;) {
- Node[] t; int i; // for bounds checks
- if (e != null) {
- Object ek = e.key, ev = e.val;
- if (ev != null && ek != null) {
- nextVal = ev;
- next = e;
- break;
+ /**
+ * Implementation for clear. Steps through each bin, removing all
+ * nodes.
+ */
+ private final void internalClear() {
+ long delta = 0L; // negative number of deletions
+ int i = 0;
+ Node[] tab = table;
+ while (tab != null && i < tab.length) {
+ int fh; Object fk;
+ Node f = tabAt(tab, i);
+ if (f == null)
+ ++i;
+ else if ((fh = f.hash) == MOVED) {
+ if ((fk = f.key) instanceof TreeBin) {
+ TreeBin t = (TreeBin)fk;
+ t.acquire(0);
+ try {
+ if (tabAt(tab, i) == f) {
+ for (Node p = t.first; p != null; p = p.next) {
+ p.val = null;
+ --delta;
+ }
+ t.first = null;
+ t.root = null;
+ ++i;
+ }
+ } finally {
+ t.release(0);
}
- e = e.next;
}
- else if (baseIndex < baseSize && (t = tab) != null &&
- t.length > (i = index) && i >= 0) {
- if ((e = tabAt(t, i)) != null && e.hash < 0) {
- tab = (Node[])e.key;
- e = null;
+ else
+ tab = (Node[])fk;
+ }
+ else if ((fh & LOCKED) != 0) {
+ counter.add(delta); // opportunistically update count
+ delta = 0L;
+ f.tryAwaitLock(tab, i);
+ }
+ else if (f.casHash(fh, fh | LOCKED)) {
+ try {
+ if (tabAt(tab, i) == f) {
+ for (Node e = f; e != null; e = e.next) {
+ e.val = null;
+ --delta;
+ }
+ setTabAt(tab, i, null);
+ ++i;
+ }
+ } finally {
+ if (!f.casHash(fh | LOCKED, fh)) {
+ f.hash = fh;
+ synchronized (f) { f.notifyAll(); };
}
- else if (i + baseSize < t.length)
- index += baseSize; // visit forwarded upper slots
- else
- index = ++baseIndex;
- }
- else {
- next = null;
- break;
}
}
}
+ if (delta != 0)
+ counter.add(delta);
+ }
- final Object nextKey() {
- Node e = next;
- if (e == null)
- throw new NoSuchElementException();
- Object k = e.key;
- advance((lastReturned = e).next);
- return k;
- }
+ /* ----------------Table Traversal -------------- */
- final Object nextValue() {
- Node e = next;
- if (e == null)
- throw new NoSuchElementException();
- Object v = nextVal;
- advance((lastReturned = e).next);
- return v;
+ /**
+ * Encapsulates traversal for methods such as containsValue; also
+ * serves as a base class for other iterators.
+ *
+ * At each step, the iterator snapshots the key ("nextKey") and
+ * value ("nextVal") of a valid node (i.e., one that, at point of
+ * snapshot, has a non-null user value). Because val fields can
+ * change (including to null, indicating deletion), field nextVal
+ * might not be accurate at point of use, but still maintains the
+ * weak consistency property of holding a value that was once
+ * valid.
+ *
+ * Internal traversals directly access these fields, as in:
+ * {@code while (it.advance() != null) { process(it.nextKey); }}
+ *
+ * Exported iterators must track whether the iterator has advanced
+ * (in hasNext vs next) (by setting/checking/nulling field
+ * nextVal), and then extract key, value, or key-value pairs as
+ * return values of next().
+ *
+ * The iterator visits once each still-valid node that was
+ * reachable upon iterator construction. It might miss some that
+ * were added to a bin after the bin was visited, which is OK wrt
+ * consistency guarantees. Maintaining this property in the face
+ * of possible ongoing resizes requires a fair amount of
+ * bookkeeping state that is difficult to optimize away amidst
+ * volatile accesses. Even so, traversal maintains reasonable
+ * throughput.
+ *
+ * Normally, iteration proceeds bin-by-bin traversing lists.
+ * However, if the table has been resized, then all future steps
+ * must traverse both the bin at the current index as well as at
+ * (index + baseSize); and so on for further resizings. To
+ * paranoically cope with potential sharing by users of iterators
+ * across threads, iteration terminates if a bounds check fails
+ * for a table read.
+ */
+ static class InternalIterator {
+ final ConcurrentHashMapV8 map;
+ Node next; // the next entry to use
+ Node last; // the last entry used
+ Object nextKey; // cached key field of next
+ Object nextVal; // cached val field of next
+ Node[] tab; // current table; updated if resized
+ int index; // index of bin to use next
+ int baseIndex; // current index of initial table
+ int baseLimit; // index bound for initial table
+ final int baseSize; // initial table size
+
+ /** Creates iterator for all entries in the table. */
+ InternalIterator(ConcurrentHashMapV8 map) {
+ this.tab = (this.map = map).table;
+ baseLimit = baseSize = (tab == null) ? 0 : tab.length;
+ }
+
+ /** Creates iterator for clone() and split() methods */
+ InternalIterator(InternalIterator it, boolean split) {
+ this.map = it.map;
+ this.tab = it.tab;
+ this.baseSize = it.baseSize;
+ int lo = it.baseIndex;
+ int hi = this.baseLimit = it.baseLimit;
+ this.index = this.baseIndex =
+ (split) ? (it.baseLimit = (lo + hi + 1) >>> 1) : lo;
}
- final WriteThroughEntry nextEntry() {
- Node e = next;
- if (e == null)
- throw new NoSuchElementException();
- WriteThroughEntry entry =
- new WriteThroughEntry(e.key, nextVal);
- advance((lastReturned = e).next);
- return entry;
+ /**
+ * Advances next; returns nextVal or null if terminated.
+ * See above for explanation.
+ */
+ final Object advance() {
+ Node e = last = next;
+ Object ev = null;
+ outer: do {
+ if (e != null) // advance past used/skipped node
+ e = e.next;
+ while (e == null) { // get to next non-null bin
+ Node[] t; int b, i, n; Object ek; // checks must use locals
+ if ((b = baseIndex) >= baseLimit || (i = index) < 0 ||
+ (t = tab) == null || i >= (n = t.length))
+ break outer;
+ else if ((e = tabAt(t, i)) != null && e.hash == MOVED) {
+ if ((ek = e.key) instanceof TreeBin)
+ e = ((TreeBin)ek).first;
+ else {
+ tab = (Node[])ek;
+ continue; // restarts due to null val
+ }
+ } // visit upper slots if present
+ index = (i += baseSize) < n ? i : (baseIndex = b + 1);
+ }
+ nextKey = e.key;
+ } while ((ev = e.val) == null); // skip deleted or special nodes
+ next = e;
+ return nextVal = ev;
}
public final void remove() {
- if (lastReturned == null)
+ if (nextVal == null)
+ advance();
+ Node e = last;
+ if (e == null)
throw new IllegalStateException();
- ConcurrentHashMapV8.this.remove(lastReturned.key);
- lastReturned = null;
- }
-
- /** Helper for serialization */
- final void writeEntries(java.io.ObjectOutputStream s)
- throws java.io.IOException {
- Node e;
- while ((e = next) != null) {
- s.writeObject(e.key);
- s.writeObject(nextVal);
- advance(e.next);
- }
+ last = null;
+ map.remove(e.key);
}
- /** Helper for containsValue */
- final boolean containsVal(Object value) {
- if (value != null) {
- Node e;
- while ((e = next) != null) {
- Object v = nextVal;
- if (value == v || value.equals(v))
- return true;
- advance(e.next);
- }
- }
- return false;
+ public final boolean hasNext() {
+ return nextVal != null || advance() != null;
}
- /** Helper for Map.hashCode */
- final int mapHashCode() {
- int h = 0;
- Node e;
- while ((e = next) != null) {
- h += e.key.hashCode() ^ nextVal.hashCode();
- advance(e.next);
- }
- return h;
- }
-
- /** Helper for Map.toString */
- final String mapToString() {
- Node e = next;
- if (e == null)
- return "{}";
- StringBuilder sb = new StringBuilder();
- sb.append('{');
- for (;;) {
- sb.append(e.key == this ? "(this Map)" : e.key);
- sb.append('=');
- sb.append(nextVal == this ? "(this Map)" : nextVal);
- advance(e.next);
- if ((e = next) != null)
- sb.append(',').append(' ');
- else
- return sb.append('}').toString();
- }
- }
+ public final boolean hasMoreElements() { return hasNext(); }
}
/* ---------------- Public operations -------------- */
/**
- * Creates a new, empty map with the specified initial
- * capacity, load factor and concurrency level.
- *
- * @param initialCapacity the initial capacity. The implementation
- * performs internal sizing to accommodate this many elements.
- * @param loadFactor the load factor threshold, used to control resizing.
- * Resizing may be performed when the average number of elements per
- * bin exceeds this threshold.
- * @param concurrencyLevel the estimated number of concurrently
- * updating threads. The implementation may use this value as
- * a sizing hint.
- * @throws IllegalArgumentException if the initial capacity is
- * negative or the load factor or concurrencyLevel are
- * nonpositive.
+ * Creates a new, empty map with the default initial table size (16).
*/
- public ConcurrentHashMapV8(int initialCapacity,
- float loadFactor, int concurrencyLevel) {
- if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0)
- throw new IllegalArgumentException();
- this.initCap = initialCapacity;
- this.loadFactor = loadFactor;
+ public ConcurrentHashMapV8() {
this.counter = new LongAdder();
}
/**
- * Creates a new, empty map with the specified initial capacity
- * and load factor and with the default concurrencyLevel (16).
+ * Creates a new, empty map with an initial table size
+ * accommodating the specified number of elements without the need
+ * to dynamically resize.
*
* @param initialCapacity The implementation performs internal
* sizing to accommodate this many elements.
- * @param loadFactor the load factor threshold, used to control resizing.
- * Resizing may be performed when the average number of elements per
- * bin exceeds this threshold.
* @throws IllegalArgumentException if the initial capacity of
- * elements is negative or the load factor is nonpositive
- *
- * @since 1.6
+ * elements is negative
*/
- public ConcurrentHashMapV8(int initialCapacity, float loadFactor) {
- this(initialCapacity, loadFactor, DEFAULT_CONCURRENCY_LEVEL);
+ public ConcurrentHashMapV8(int initialCapacity) {
+ if (initialCapacity < 0)
+ throw new IllegalArgumentException();
+ int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
+ MAXIMUM_CAPACITY :
+ tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
+ this.counter = new LongAdder();
+ this.sizeCtl = cap;
}
/**
- * Creates a new, empty map with the specified initial capacity,
- * and with default load factor (0.75) and concurrencyLevel (16).
+ * Creates a new map with the same mappings as the given map.
*
- * @param initialCapacity the initial capacity. The implementation
- * performs internal sizing to accommodate this many elements.
- * @throws IllegalArgumentException if the initial capacity of
- * elements is negative.
+ * @param m the map
*/
- public ConcurrentHashMapV8(int initialCapacity) {
- this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
+ public ConcurrentHashMapV8(Map<? extends K, ? extends V> m) {
+ this.counter = new LongAdder();
+ this.sizeCtl = DEFAULT_CAPACITY;
+ internalPutAll(m);
}
/**
- * Creates a new, empty map with a default initial capacity (16),
- * load factor (0.75) and concurrencyLevel (16).
+ * Creates a new, empty map with an initial table size based on
+ * the given number of elements ({@code initialCapacity}) and
+ * initial table density ({@code loadFactor}).
+ *
+ * @param initialCapacity the initial capacity. The implementation
+ * performs internal sizing to accommodate this many elements,
+ * given the specified load factor.
+ * @param loadFactor the load factor (table density) for
+ * establishing the initial table size
+ * @throws IllegalArgumentException if the initial capacity of
+ * elements is negative or the load factor is nonpositive
+ *
+ * @since 1.6
*/
- public ConcurrentHashMapV8() {
- this(DEFAULT_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
+ public ConcurrentHashMapV8(int initialCapacity, float loadFactor) {
+ this(initialCapacity, loadFactor, 1);
}
/**
- * Creates a new map with the same mappings as the given map.
- * The map is created with a capacity of 1.5 times the number
- * of mappings in the given map or 16 (whichever is greater),
- * and a default load factor (0.75) and concurrencyLevel (16).
+ * Creates a new, empty map with an initial table size based on
+ * the given number of elements ({@code initialCapacity}), table
+ * density ({@code loadFactor}), and number of concurrently
+ * updating threads ({@code concurrencyLevel}).
*
- * @param m the map
+ * @param initialCapacity the initial capacity. The implementation
+ * performs internal sizing to accommodate this many elements,
+ * given the specified load factor.
+ * @param loadFactor the load factor (table density) for
+ * establishing the initial table size
+ * @param concurrencyLevel the estimated number of concurrently
+ * updating threads. The implementation may use this value as
+ * a sizing hint.
+ * @throws IllegalArgumentException if the initial capacity is
+ * negative or the load factor or concurrencyLevel are
+ * nonpositive
*/
- public ConcurrentHashMapV8(Map<? extends K, ? extends V> m) {
- this(DEFAULT_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
- if (m == null)
- throw new NullPointerException();
- internalPutAll(m);
+ public ConcurrentHashMapV8(int initialCapacity,
+ float loadFactor, int concurrencyLevel) {
+ if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
+ throw new IllegalArgumentException();
+ if (initialCapacity < concurrencyLevel) // Use at least as many bins
+ initialCapacity = concurrencyLevel; // as estimated threads
+ long size = (long)(1.0 + (long)initialCapacity / loadFactor);
+ int cap = ((size >= (long)MAXIMUM_CAPACITY) ?
+ MAXIMUM_CAPACITY: tableSizeFor((int)size));
+ this.counter = new LongAdder();
+ this.sizeCtl = cap;
}
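
A worked sketch of the sizing arithmetic above (the concrete numbers are only an example): with initialCapacity 16, loadFactor 0.75f and concurrencyLevel 4, the capacity stays at 16 (already >= 4), the estimated element count becomes (long)(1.0 + 16 / 0.75f) = 22, and tableSizeFor rounds that up to 32, which is what sizeCtl is seeded with.

    // Hypothetical walk-through of the constructor's sizing computation.
    class ConstructorSizingDemo {
        public static void main(String[] args) {
            int initialCapacity = 16, concurrencyLevel = 4;
            float loadFactor = 0.75f;
            if (initialCapacity < concurrencyLevel)
                initialCapacity = concurrencyLevel;
            long size = (long)(1.0 + (long)initialCapacity / loadFactor);
            System.out.println(size);   // 22, rounded up by tableSizeFor to 32
        }
    }
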
/**
- * Returns {@code true} if this map contains no key-value mappings.
- *
- * @return {@code true} if this map contains no key-value mappings
+ * {@inheritDoc}
*/
public boolean isEmpty() {
return counter.sum() <= 0L; // ignore transient negative values
}
/**
- * Returns the number of key-value mappings in this map. If the
- * map contains more than {@code Integer.MAX_VALUE} elements, returns
- * {@code Integer.MAX_VALUE}.
- *
- * @return the number of key-value mappings in this map
+ * {@inheritDoc}
*/
public int size() {
long n = counter.sum();
- return n <= 0L? 0 : n >= Integer.MAX_VALUE ? Integer.MAX_VALUE : (int)n;
+ return ((n < 0L) ? 0 :
+ (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
+ (int)n);
+ }
+
+ final long longSize() { // accurate version of size needed for views
+ long n = counter.sum();
+ return (n < 0L) ? 0L : n;
}
/**
@@ -1010,7 +2394,7 @@ public class ConcurrentHashMapV8
* @param key possible key
* @return {@code true} if and only if the specified object
* is a key in this table, as determined by the
- * {@code equals} method; {@code false} otherwise.
+ * {@code equals} method; {@code false} otherwise
* @throws NullPointerException if the specified key is null
*/
public boolean containsKey(Object key) {
@@ -1021,9 +2405,8 @@ public class ConcurrentHashMapV8
/**
* Returns {@code true} if this map maps one or more keys to the
- * specified value. Note: This method requires a full internal
- * traversal of the hash table, and so is much slower than
- * method {@code containsKey}.
+ * specified value. Note: This method may require a full traversal
+ * of the map, and is much slower than method {@code containsKey}.
*
* @param value value whose presence in this map is to be tested
* @return {@code true} if this map maps one or more keys to the
@@ -1033,7 +2416,13 @@ public class ConcurrentHashMapV8
public boolean containsValue(Object value) {
if (value == null)
throw new NullPointerException();
- return new HashIterator().containsVal(value);
+ Object v;
+ InternalIterator it = new InternalIterator(this);
+ while ((v = it.advance()) != null) {
+ if (v == value || value.equals(v))
+ return true;
+ }
+ return false;
}
/**
@@ -1072,7 +2461,7 @@ public class ConcurrentHashMapV8
public V put(K key, V value) {
if (key == null || value == null)
throw new NullPointerException();
- return (V)internalPut(key, value, true);
+ return (V)internalPut(key, value);
}
/**
@@ -1086,7 +2475,7 @@ public class ConcurrentHashMapV8
public V putIfAbsent(K key, V value) {
if (key == null || value == null)
throw new NullPointerException();
- return (V)internalPut(key, value, false);
+ return (V)internalPutIfAbsent(key, value);
}
/**
@@ -1097,81 +2486,101 @@ public class ConcurrentHashMapV8
* @param m mappings to be stored in this map
*/
public void putAll(Map<? extends K, ? extends V> m) {
- if (m == null)
- throw new NullPointerException();
internalPutAll(m);
}
/**
* If the specified key is not already associated with a value,
- * computes its value using the given mappingFunction, and if
- * non-null, enters it into the map. This is equivalent to
- *
- *
- * if (map.containsKey(key))
- * return map.get(key);
- * value = mappingFunction.map(key);
- * if (value != null)
- * map.put(key, value);
- * return value;
- *
- *
- * except that the action is performed atomically. Some attempted
- * operations on this map by other threads may be blocked while
- * computation is in progress, so the computation should be short
- * and simple, and must not attempt to update any other mappings
- * of this Map. The most common usage is to construct a new object
- * serving as an initial mapped value, or memoized result.
+ * computes its value using the given mappingFunction and enters
+ * it into the map unless null. This is equivalent to
+ * {@code
+ * if (map.containsKey(key))
+ * return map.get(key);
+ * value = mappingFunction.map(key);
+ * if (value != null)
+ * map.put(key, value);
+ * return value;}
+ *
+ * except that the action is performed atomically. If the
+ * function returns {@code null} no mapping is recorded. If the
+ * function itself throws an (unchecked) exception, the exception
+ * is rethrown to its caller, and no mapping is recorded. Some
+ * attempted update operations on this map by other threads may be
+ * blocked while computation is in progress, so the computation
+ * should be short and simple, and must not attempt to update any
+ * other mappings of this Map. The most appropriate usage is to
+ * construct a new object serving as an initial mapped value, or
+ * memoized result, as in:
+ *
+ * {@code
+ * map.computeIfAbsent(key, new MappingFunction<K, V>() {
+ * public V map(K k) { return new Value(f(k)); }});}
*
* @param key key with which the specified value is to be associated
* @param mappingFunction the function to compute a value
* @return the current (existing or computed) value associated with
- * the specified key, or {@code null} if the computation
- * returned {@code null}.
+ * the specified key, or null if the computed value is null.
* @throws NullPointerException if the specified key or mappingFunction
- * is null,
+ * is null
+ * @throws IllegalStateException if the computation detectably
+ * attempts a recursive update to this map that would
+ * otherwise never complete
* @throws RuntimeException or Error if the mappingFunction does so,
- * in which case the mapping is left unestablished.
+ * in which case the mapping is left unestablished
*/
+ @SuppressWarnings("unchecked")
public V computeIfAbsent(K key, MappingFunction<? super K, ? extends V> mappingFunction) {
if (key == null || mappingFunction == null)
throw new NullPointerException();
- return internalCompute(key, mappingFunction, false);
+ return (V)internalComputeIfAbsent(key, mappingFunction);
}
/**
- * Computes the value associated with he given key using the given
- * mappingFunction, and if non-null, enters it into the map. This
- * is equivalent to
- *
- *
- * value = mappingFunction.map(key);
+ * Computes a new mapping value given a key and
+ * its current mapped value (or {@code null} if there is no current
+ * mapping). This is equivalent to
+ * {@code
+ * value = remappingFunction.remap(key, map.get(key));
* if (value != null)
- * map.put(key, value);
+ * map.put(key, value);
* else
- * return map.get(key);
- *
+ * map.remove(key);
+ * }
*
- * except that the action is performed atomically. Some attempted
- * operations on this map by other threads may be blocked while
- * computation is in progress, so the computation should be short
- * and simple, and must not attempt to update any other mappings
- * of this Map.
+ * except that the action is performed atomically. If the
+ * function returns {@code null}, the mapping is removed. If the
+ * function itself throws an (unchecked) exception, the exception
+ * is rethrown to its caller, and the current mapping is left
+ * unchanged. Some attempted update operations on this map by
+ * other threads may be blocked while computation is in progress,
+ * so the computation should be short and simple, and must not
+ * attempt to update any other mappings of this Map. For example,
+ * to either create or append new messages to a value mapping:
+ *
+ * {@code
+ * Map<Key, String> map = ...;
+ * final String msg = ...;
+ * map.compute(key, new RemappingFunction<Key, String>() {
+ *   public String remap(Key k, String v) {
+ *     return (v == null) ? msg : v + msg; }});}
*
* @param key key with which the specified value is to be associated
- * @param mappingFunction the function to compute a value
- * @return the current value associated with
- * the specified key, or {@code null} if the computation
- * returned {@code null} and the value was not otherwise present.
- * @throws NullPointerException if the specified key or mappingFunction
- * is null,
- * @throws RuntimeException or Error if the mappingFunction does so,
- * in which case the mapping is unchanged.
+ * @param remappingFunction the function to compute a value
+ * @return the new value associated with
+ * the specified key, or null if none.
+ * @throws NullPointerException if the specified key or remappingFunction
+ * is null
+ * @throws IllegalStateException if the computation detectably
+ * attempts a recursive update to this map that would
+ * otherwise never complete
+ * @throws RuntimeException or Error if the remappingFunction does so,
+ * in which case the mapping is unchanged
*/
- public V compute(K key, MappingFunction<? super K, ? extends V> mappingFunction) {
- if (key == null || mappingFunction == null)
+ @SuppressWarnings("unchecked")
+ public V compute(K key, RemappingFunction<? super K, V> remappingFunction) {
+ if (key == null || remappingFunction == null)
throw new NullPointerException();
- return internalCompute(key, mappingFunction, true);
+ return (V)internalCompute(key, remappingFunction);
}
/**
@@ -1187,7 +2596,7 @@ public class ConcurrentHashMapV8
public V remove(Object key) {
if (key == null)
throw new NullPointerException();
- return (V)internalReplace(key, null, null);
+ return (V)internalReplace(key, null, null);
}
/**
@@ -1211,7 +2620,7 @@ public class ConcurrentHashMapV8
public boolean replace(K key, V oldValue, V newValue) {
if (key == null || oldValue == null || newValue == null)
throw new NullPointerException();
- return internalReplace(key, newValue, oldValue) != null;
+ return internalReplace(key, newValue, oldValue) != null;
}
/**
@@ -1225,7 +2634,7 @@ public class ConcurrentHashMapV8
public V replace(K key, V value) {
if (key == null || value == null)
throw new NullPointerException();
- return (V)internalReplace(key, value, null);
+ return (V)internalReplace(key, value, null);
}
/**
@@ -1252,8 +2661,8 @@ public class ConcurrentHashMapV8
* reflect any modifications subsequent to construction.
*/
public Set<K> keySet() {
- Set<K> ks = keySet;
- return (ks != null) ? ks : (keySet = new KeySet());
+ KeySet ks = keySet;
+ return (ks != null) ? ks : (keySet = new KeySet(this));
}
/**
@@ -1273,8 +2682,8 @@ public class ConcurrentHashMapV8
* reflect any modifications subsequent to construction.
*/
public Collection<V> values() {
- Collection<V> vs = values;
- return (vs != null) ? vs : (values = new Values());
+ Values vs = values;
+ return (vs != null) ? vs : (values = new Values(this));
}
/**
@@ -1294,8 +2703,8 @@ public class ConcurrentHashMapV8
* reflect any modifications subsequent to construction.
*/
public Set<Map.Entry<K,V>> entrySet() {
- Set<Map.Entry<K,V>> es = entrySet;
- return (es != null) ? es : (entrySet = new EntrySet());
+ EntrySet es = entrySet;
+ return (es != null) ? es : (entrySet = new EntrySet(this));
}
/**
@@ -1305,7 +2714,7 @@ public class ConcurrentHashMapV8
* @see #keySet()
*/
public Enumeration<K> keys() {
- return new KeyIterator();
+ return new KeyIterator(this);
}
/**
@@ -1315,7 +2724,34 @@ public class ConcurrentHashMapV8
* @see #values()
*/
public Enumeration<V> elements() {
- return new ValueIterator();
+ return new ValueIterator(this);
+ }
+
+ /**
+ * Returns a partitionable iterator of the keys in this map.
+ *
+ * @return a partitionable iterator of the keys in this map
+ */
+ public Spliterator<K> keySpliterator() {
+ return new KeyIterator(this);
+ }
+
+ /**
+ * Returns a partitionable iterator of the values in this map.
+ *
+ * @return a partitionable iterator of the values in this map
+ */
+ public Spliterator<V> valueSpliterator() {
+ return new ValueIterator(this);
+ }
+
+ /**
+ * Returns a partitionable iterator of the entries in this map.
+ *
+ * @return a partitionable iterator of the entries in this map
+ */
+ public Spliterator<Map.Entry<K,V>> entrySpliterator() {
+ return new EntryIterator(this);
}
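
A sketch of how these spliterators might be used to hand half of the keys to a second thread. It assumes only what the surrounding code shows: Spliterator is taken to be the nested interface this class declares, extending Iterator (so hasNext()/next() are available), and split() is called before iteration begins; the demo class and its data are invented.

    import jsr166e.ConcurrentHashMapV8;

    class SpliteratorDemo {
        public static void main(String[] args) throws InterruptedException {
            ConcurrentHashMapV8<String, Integer> map =
                new ConcurrentHashMapV8<String, Integer>();
            for (int i = 0; i < 100; ++i)
                map.put("k" + i, i);
            ConcurrentHashMapV8.Spliterator<String> s1 = map.keySpliterator();
            final ConcurrentHashMapV8.Spliterator<String> s2 = s1.split(); // ~half
            Thread worker = new Thread(new Runnable() {
                public void run() {                  // process one half elsewhere
                    while (s2.hasNext()) System.out.println(s2.next());
                }
            });
            worker.start();
            while (s1.hasNext())                     // process the remaining half here
                System.out.println(s1.next());
            worker.join();
        }
    }
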
/**
@@ -1326,7 +2762,13 @@ public class ConcurrentHashMapV8
* @return the hash code value for this map
*/
public int hashCode() {
- return new HashIterator().mapHashCode();
+ int h = 0;
+ InternalIterator it = new InternalIterator(this);
+ Object v;
+ while ((v = it.advance()) != null) {
+ h += it.nextKey.hashCode() ^ v.hashCode();
+ }
+ return h;
}
/**
@@ -1341,7 +2783,22 @@ public class ConcurrentHashMapV8
* @return a string representation of this map
*/
public String toString() {
- return new HashIterator().mapToString();
+ InternalIterator it = new InternalIterator(this);
+ StringBuilder sb = new StringBuilder();
+ sb.append('{');
+ Object v;
+ if ((v = it.advance()) != null) {
+ for (;;) {
+ Object k = it.nextKey;
+ sb.append(k == this ? "(this Map)" : k);
+ sb.append('=');
+ sb.append(v == this ? "(this Map)" : v);
+ if ((v = it.advance()) == null)
+ break;
+ sb.append(',').append(' ');
+ }
+ }
+ return sb.append('}').toString();
}
/**
@@ -1355,152 +2812,382 @@ public class ConcurrentHashMapV8
* @return {@code true} if the specified object is equal to this map
*/
public boolean equals(Object o) {
- if (o == this)
- return true;
- if (!(o instanceof Map))
- return false;
- Map<?,?> m = (Map<?,?>) o;
- try {
- for (Map.Entry e : this.entrySet())
- if (! e.getValue().equals(m.get(e.getKey())))
+ if (o != this) {
+ if (!(o instanceof Map))
+ return false;
+ Map<?,?> m = (Map<?,?>) o;
+ InternalIterator it = new InternalIterator(this);
+ Object val;
+ while ((val = it.advance()) != null) {
+ Object v = m.get(it.nextKey);
+ if (v == null || (v != val && !v.equals(val)))
return false;
+ }
for (Map.Entry<?,?> e : m.entrySet()) {
- Object k = e.getKey();
- Object v = e.getValue();
- if (k == null || v == null || !v.equals(get(k)))
+ Object mk, mv, v;
+ if ((mk = e.getKey()) == null ||
+ (mv = e.getValue()) == null ||
+ (v = internalGet(mk)) == null ||
+ (mv != v && !mv.equals(v)))
return false;
}
- return true;
- } catch (ClassCastException unused) {
- return false;
- } catch (NullPointerException unused) {
- return false;
+ }
+ return true;
+ }
+
+ /* ----------------Iterators -------------- */
+
+ static final class KeyIterator<K,V> extends InternalIterator
+ implements Spliterator<K>, Enumeration<K> {
+ KeyIterator(ConcurrentHashMapV8 map) { super(map); }
+ KeyIterator(InternalIterator it, boolean split) {
+ super(it, split);
+ }
+ public KeyIterator split() {
+ if (last != null || (next != null && nextVal == null))
+ throw new IllegalStateException();
+ return new KeyIterator(this, true);
+ }
+ public KeyIterator clone() {
+ if (last != null || (next != null && nextVal == null))
+ throw new IllegalStateException();
+ return new KeyIterator(this, false);
+ }
+
+ @SuppressWarnings("unchecked")
+ public final K next() {
+ if (nextVal == null && advance() == null)
+ throw new NoSuchElementException();
+ Object k = nextKey;
+ nextVal = null;
+ return (K) k;
+ }
+
+ public final K nextElement() { return next(); }
+ }
+
+ static final class ValueIterator<K,V> extends InternalIterator
+ implements Spliterator<V>, Enumeration<V> {
+ ValueIterator(ConcurrentHashMapV8 map) { super(map); }
+ ValueIterator(InternalIterator it, boolean split) {
+ super(it, split);
+ }
+ public ValueIterator split() {
+ if (last != null || (next != null && nextVal == null))
+ throw new IllegalStateException();
+ return new ValueIterator(this, true);
+ }
+
+ public ValueIterator clone() {
+ if (last != null || (next != null && nextVal == null))
+ throw new IllegalStateException();
+ return new ValueIterator(this, false);
+ }
+
+ @SuppressWarnings("unchecked")
+ public final V next() {
+ Object v;
+ if ((v = nextVal) == null && (v = advance()) == null)
+ throw new NoSuchElementException();
+ nextVal = null;
+ return (V) v;
+ }
+
+ public final V nextElement() { return next(); }
+ }
+
+ static final class EntryIterator<K,V> extends InternalIterator
+ implements Spliterator<Map.Entry<K,V>> {
+ EntryIterator(ConcurrentHashMapV8 map) { super(map); }
+ EntryIterator(InternalIterator it, boolean split) {
+ super(it, split);
+ }
+ public EntryIterator split() {
+ if (last != null || (next != null && nextVal == null))
+ throw new IllegalStateException();
+ return new EntryIterator(this, true);
+ }
+ public EntryIterator clone() {
+ if (last != null || (next != null && nextVal == null))
+ throw new IllegalStateException();
+ return new EntryIterator(this, false);
+ }
+
+ @SuppressWarnings("unchecked")
+ public final Map.Entry<K,V> next() {
+ Object v;
+ if ((v = nextVal) == null && (v = advance()) == null)
+ throw new NoSuchElementException();
+ Object k = nextKey;
+ nextVal = null;
+ return new MapEntry<K,V>((K)k, (V)v, map);
}
}
/**
- * Custom Entry class used by EntryIterator.next(), that relays
- * setValue changes to the underlying map.
+ * Exported Entry for iterators
*/
- final class WriteThroughEntry extends AbstractMap.SimpleEntry {
- @SuppressWarnings("unchecked")
- WriteThroughEntry(Object k, Object v) {
- super((K)k, (V)v);
+ static final class MapEntry<K,V> implements Map.Entry<K,V> {
+ final K key; // non-null
+ V val; // non-null
+ final ConcurrentHashMapV8 map;
+ MapEntry(K key, V val, ConcurrentHashMapV8 map) {
+ this.key = key;
+ this.val = val;
+ this.map = map;
+ }
+ public final K getKey() { return key; }
+ public final V getValue() { return val; }
+ public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
+ public final String toString(){ return key + "=" + val; }
+
+ public final boolean equals(Object o) {
+ Object k, v; Map.Entry<?,?> e;
+ return ((o instanceof Map.Entry) &&
+ (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
+ (v = e.getValue()) != null &&
+ (k == key || k.equals(key)) &&
+ (v == val || v.equals(val)));
}
/**
* Sets our entry's value and writes through to the map. The
- * value to return is somewhat arbitrary here. Since a
- * WriteThroughEntry does not necessarily track asynchronous
- * changes, the most recent "previous" value could be
- * different from what we return (or could even have been
- * removed in which case the put will re-establish). We do not
- * and cannot guarantee more.
+ * value to return is somewhat arbitrary here. Since we do
+ * not necessarily track asynchronous changes, the most recent
+ * "previous" value could be different from what we return (or
+ * could even have been removed in which case the put will
+ * re-establish). We do not and cannot guarantee more.
*/
- public V setValue(V value) {
+ public final V setValue(V value) {
if (value == null) throw new NullPointerException();
- V v = super.setValue(value);
- ConcurrentHashMapV8.this.put(getKey(), value);
+ V v = val;
+ val = value;
+ map.put(key, value);
return v;
}
}
- final class KeyIterator extends HashIterator
- implements Iterator, Enumeration {
- @SuppressWarnings("unchecked")
- public final K next() { return (K)super.nextKey(); }
- @SuppressWarnings("unchecked")
- public final K nextElement() { return (K)super.nextKey(); }
- }
-
- final class ValueIterator extends HashIterator
- implements Iterator, Enumeration {
- @SuppressWarnings("unchecked")
- public final V next() { return (V)super.nextValue(); }
- @SuppressWarnings("unchecked")
- public final V nextElement() { return (V)super.nextValue(); }
- }
+ /* ----------------Views -------------- */
- final class EntryIterator extends HashIterator
- implements Iterator> {
- public final Map.Entry next() { return super.nextEntry(); }
- }
+ /**
+ * Base class for views.
+ */
+ static abstract class MapView {
+ final ConcurrentHashMapV8 map;
+ MapView(ConcurrentHashMapV8 map) { this.map = map; }
+ public final int size() { return map.size(); }
+ public final boolean isEmpty() { return map.isEmpty(); }
+ public final void clear() { map.clear(); }
+
+ // implementations below rely on concrete classes supplying these
+ abstract public Iterator<?> iterator();
+ abstract public boolean contains(Object o);
+ abstract public boolean remove(Object o);
+
+ private static final String oomeMsg = "Required array size too large";
+
+ public final Object[] toArray() {
+ long sz = map.longSize();
+ if (sz > (long)(MAX_ARRAY_SIZE))
+ throw new OutOfMemoryError(oomeMsg);
+ int n = (int)sz;
+ Object[] r = new Object[n];
+ int i = 0;
+ Iterator<?> it = iterator();
+ while (it.hasNext()) {
+ if (i == n) {
+ if (n >= MAX_ARRAY_SIZE)
+ throw new OutOfMemoryError(oomeMsg);
+ if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
+ n = MAX_ARRAY_SIZE;
+ else
+ n += (n >>> 1) + 1;
+ r = Arrays.copyOf(r, n);
+ }
+ r[i++] = it.next();
+ }
+ return (i == n) ? r : Arrays.copyOf(r, i);
+ }
- final class KeySet extends AbstractSet {
- public int size() {
- return ConcurrentHashMapV8.this.size();
+ @SuppressWarnings("unchecked")
+ public final <T> T[] toArray(T[] a) {
+ long sz = map.longSize();
+ if (sz > (long)(MAX_ARRAY_SIZE))
+ throw new OutOfMemoryError(oomeMsg);
+ int m = (int)sz;
+ T[] r = (a.length >= m) ? a :
+ (T[])java.lang.reflect.Array
+ .newInstance(a.getClass().getComponentType(), m);
+ int n = r.length;
+ int i = 0;
+ Iterator<?> it = iterator();
+ while (it.hasNext()) {
+ if (i == n) {
+ if (n >= MAX_ARRAY_SIZE)
+ throw new OutOfMemoryError(oomeMsg);
+ if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
+ n = MAX_ARRAY_SIZE;
+ else
+ n += (n >>> 1) + 1;
+ r = Arrays.copyOf(r, n);
+ }
+ r[i++] = (T)it.next();
+ }
+ if (a == r && i < n) {
+ r[i] = null; // null-terminate
+ return r;
+ }
+ return (i == n) ? r : Arrays.copyOf(r, i);
}
- public boolean isEmpty() {
- return ConcurrentHashMapV8.this.isEmpty();
+
+ public final int hashCode() {
+ int h = 0;
+ for (Iterator<?> it = iterator(); it.hasNext();)
+ h += it.next().hashCode();
+ return h;
}
- public void clear() {
- ConcurrentHashMapV8.this.clear();
+
+ public final String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append('[');
+ Iterator<?> it = iterator();
+ if (it.hasNext()) {
+ for (;;) {
+ Object e = it.next();
+ sb.append(e == this ? "(this Collection)" : e);
+ if (!it.hasNext())
+ break;
+ sb.append(',').append(' ');
+ }
+ }
+ return sb.append(']').toString();
}
- public Iterator iterator() {
- return new KeyIterator();
+
+ public final boolean containsAll(Collection<?> c) {
+ if (c != this) {
+ for (Iterator<?> it = c.iterator(); it.hasNext();) {
+ Object e = it.next();
+ if (e == null || !contains(e))
+ return false;
+ }
+ }
+ return true;
}
- public boolean contains(Object o) {
- return ConcurrentHashMapV8.this.containsKey(o);
+
+ public final boolean removeAll(Collection<?> c) {
+ boolean modified = false;
+ for (Iterator> it = iterator(); it.hasNext();) {
+ if (c.contains(it.next())) {
+ it.remove();
+ modified = true;
+ }
+ }
+ return modified;
}
- public boolean remove(Object o) {
- return ConcurrentHashMapV8.this.remove(o) != null;
+
+ public final boolean retainAll(Collection<?> c) {
+ boolean modified = false;
+ for (Iterator> it = iterator(); it.hasNext();) {
+ if (!c.contains(it.next())) {
+ it.remove();
+ modified = true;
+ }
+ }
+ return modified;
}
+
}
- final class Values extends AbstractCollection {
- public int size() {
- return ConcurrentHashMapV8.this.size();
- }
- public boolean isEmpty() {
- return ConcurrentHashMapV8.this.isEmpty();
+ static final class KeySet<K,V> extends MapView implements Set<K> {
+ KeySet(ConcurrentHashMapV8 map) { super(map); }
+ public final boolean contains(Object o) { return map.containsKey(o); }
+ public final boolean remove(Object o) { return map.remove(o) != null; }
+ public final Iterator<K> iterator() {
+ return new KeyIterator(map);
}
- public void clear() {
- ConcurrentHashMapV8.this.clear();
+ public final boolean add(K e) {
+ throw new UnsupportedOperationException();
}
- public Iterator iterator() {
- return new ValueIterator();
+ public final boolean addAll(Collection<? extends K> c) {
+ throw new UnsupportedOperationException();
}
- public boolean contains(Object o) {
- return ConcurrentHashMapV8.this.containsValue(o);
+ public boolean equals(Object o) {
+ Set<?> c;
+ return ((o instanceof Set) &&
+ ((c = (Set<?>)o) == this ||
+ (containsAll(c) && c.containsAll(this))));
}
}
- final class EntrySet extends AbstractSet> {
- public int size() {
- return ConcurrentHashMapV8.this.size();
+ static final class Values<K,V> extends MapView
+ implements Collection<V> {
+ Values(ConcurrentHashMapV8 map) { super(map); }
+ public final boolean contains(Object o) { return map.containsValue(o); }
+ public final boolean remove(Object o) {
+ if (o != null) {
+ Iterator it = new ValueIterator(map);
+ while (it.hasNext()) {
+ if (o.equals(it.next())) {
+ it.remove();
+ return true;
+ }
+ }
+ }
+ return false;
}
- public boolean isEmpty() {
- return ConcurrentHashMapV8.this.isEmpty();
+ public final Iterator<V> iterator() {
+ return new ValueIterator(map);
}
- public void clear() {
- ConcurrentHashMapV8.this.clear();
+ public final boolean add(V e) {
+ throw new UnsupportedOperationException();
}
- public Iterator> iterator() {
- return new EntryIterator();
+ public final boolean addAll(Collection<? extends V> c) {
+ throw new UnsupportedOperationException();
}
- public boolean contains(Object o) {
- if (!(o instanceof Map.Entry))
- return false;
- Map.Entry,?> e = (Map.Entry,?>)o;
- V v = ConcurrentHashMapV8.this.get(e.getKey());
- return v != null && v.equals(e.getValue());
- }
- public boolean remove(Object o) {
- if (!(o instanceof Map.Entry))
- return false;
- Map.Entry,?> e = (Map.Entry,?>)o;
- return ConcurrentHashMapV8.this.remove(e.getKey(), e.getValue());
+ }
+
+ static final class EntrySet<K,V> extends MapView
+ implements Set<Map.Entry<K,V>> {
+ EntrySet(ConcurrentHashMapV8 map) { super(map); }
+ public final boolean contains(Object o) {
+ Object k, v, r; Map.Entry,?> e;
+ return ((o instanceof Map.Entry) &&
+ (k = (e = (Map.Entry,?>)o).getKey()) != null &&
+ (r = map.get(k)) != null &&
+ (v = e.getValue()) != null &&
+ (v == r || v.equals(r)));
+ }
+ public final boolean remove(Object o) {
+ Object k, v; Map.Entry,?> e;
+ return ((o instanceof Map.Entry) &&
+ (k = (e = (Map.Entry,?>)o).getKey()) != null &&
+ (v = e.getValue()) != null &&
+ map.remove(k, v));
+ }
+ public final Iterator<Map.Entry<K,V>> iterator() {
+ return new EntryIterator(map);
+ }
+ public final boolean add(Entry<K,V> e) {
+ throw new UnsupportedOperationException();
+ }
+ public final boolean addAll(Collection<? extends Entry<K,V>> c) {
+ throw new UnsupportedOperationException();
+ }
+ public boolean equals(Object o) {
+ Set<?> c;
+ return ((o instanceof Set) &&
+ ((c = (Set<?>)o) == this ||
+ (containsAll(c) && c.containsAll(this))));
}
}
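The KeySet, Values, and EntrySet views above delegate contains/remove to the backing map and reject add/addAll outright; entry removal succeeds only when both the key and its current value match. As a standalone sketch (not part of this patch), the following exercises the same view semantics against java.util.concurrent.ConcurrentHashMap, whose views behave equivalently for these operations; the class and variable names here are illustrative only.

    import java.util.AbstractMap;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public class ViewSemanticsDemo {
        public static void main(String[] args) {
            Map<String, Integer> map = new ConcurrentHashMap<>();
            map.put("a", 1);
            map.put("b", 2);

            // Entry removal succeeds only when both key and value match.
            boolean wrongValue = map.entrySet()
                .remove(new AbstractMap.SimpleEntry<>("a", 99)); // false: value differs
            boolean exactMatch = map.entrySet()
                .remove(new AbstractMap.SimpleEntry<>("a", 1));  // true: mapping removed
            System.out.println(wrongValue + " " + exactMatch);   // false true

            // The values view removes one mapping with an equal value.
            System.out.println(map.values().remove(2));          // true: "b" -> 2 removed

            // The key view supports removal but not addition.
            try {
                map.keySet().add("c");
            } catch (UnsupportedOperationException expected) {
                System.out.println("add is unsupported on the key view");
            }
        }
    }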
/* ---------------- Serialization Support -------------- */
/**
- * Helper class used in previous version, declared for the sake of
- * serialization compatibility
+ * Stripped-down version of helper class used in previous version,
+ * declared for the sake of serialization compatibility
*/
- static class Segment<K,V> extends java.util.concurrent.locks.ReentrantLock
- implements Serializable {
+ static class Segment<K,V> implements Serializable {
private static final long serialVersionUID = 2249069246763182397L;
final float loadFactor;
Segment(float lf) { this.loadFactor = lf; }
@@ -1522,48 +3209,107 @@ public class ConcurrentHashMapV8<K, V>
segments = (Segment<K,V>[])
new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
for (int i = 0; i < segments.length; ++i)
- segments[i] = new Segment<K,V>(loadFactor);
+ segments[i] = new Segment<K,V>(LOAD_FACTOR);
}
s.defaultWriteObject();
- new HashIterator().writeEntries(s);
+ InternalIterator it = new InternalIterator(this);
+ Object v;
+ while ((v = it.advance()) != null) {
+ s.writeObject(it.nextKey);
+ s.writeObject(v);
+ }
s.writeObject(null);
s.writeObject(null);
segments = null; // throw away
}
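The writeObject method above first emits the dummy segments for compatibility with the old serialized form, then streams the mappings as alternating key/value objects terminated by a pair of nulls. The standalone sketch below (not part of this patch, with a plain HashMap standing in for the real map and all names chosen for illustration) writes and reads back that alternating-pairs-plus-null-terminator layout.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;
    import java.util.HashMap;
    import java.util.Map;

    public class PairStreamSketch {
        static void writeEntries(ObjectOutputStream out, Map<String, Integer> m)
                throws IOException {
            for (Map.Entry<String, Integer> e : m.entrySet()) {
                out.writeObject(e.getKey());
                out.writeObject(e.getValue());
            }
            out.writeObject(null); // two trailing nulls mark the end of the entries
            out.writeObject(null);
        }

        static Map<String, Integer> readEntries(ObjectInputStream in)
                throws IOException, ClassNotFoundException {
            Map<String, Integer> m = new HashMap<>();
            for (;;) {
                Object k = in.readObject();
                Object v = in.readObject();
                if (k == null || v == null)
                    break; // terminator reached
                m.put((String) k, (Integer) v);
            }
            return m;
        }

        public static void main(String[] args) throws Exception {
            Map<String, Integer> src = new HashMap<>();
            src.put("x", 1);
            src.put("y", 2);
            ByteArrayOutputStream buf = new ByteArrayOutputStream();
            try (ObjectOutputStream out = new ObjectOutputStream(buf)) {
                writeEntries(out, src);
            }
            try (ObjectInputStream in = new ObjectInputStream(
                    new ByteArrayInputStream(buf.toByteArray()))) {
                System.out.println(readEntries(in)); // both mappings restored
            }
        }
    }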
/**
- * Reconstitutes the instance from a
- * stream (i.e., deserializes it).
+ * Reconstitutes the instance from a stream (that is, deserializes it).
* @param s the stream
*/
@SuppressWarnings("unchecked")
private void readObject(java.io.ObjectInputStream s)
throws java.io.IOException, ClassNotFoundException {
s.defaultReadObject();
- // find load factor in a segment, if one exists
- if (segments != null && segments.length != 0)
- this.loadFactor = segments[0].loadFactor;
- else
- this.loadFactor = DEFAULT_LOAD_FACTOR;
- this.initCap = DEFAULT_CAPACITY;
- LongAdder ct = new LongAdder(); // force final field write
- UNSAFE.putObjectVolatile(this, counterOffset, ct);
this.segments = null; // unneeded
+ // initialize transient final field
+ UNSAFE.putObjectVolatile(this, counterOffset, new LongAdder());
- // Read the keys and values, and put the mappings in the table
+ // Create all nodes, then place in table once size is known
+ long size = 0L;
+ Node p = null;
for (;;) {
- K key = (K) s.readObject();
- V value = (V) s.readObject();
- if (key == null)
+ K k = (K) s.readObject();
+ V v = (V) s.readObject();
+ if (k != null && v != null) {
+ int h = spread(k.hashCode());
+ p = new Node(h, k, v, p);
+ ++size;
+ }
+ else
break;
- put(key, value);
+ }
+ if (p != null) {
+ boolean init = false;
+ int n;
+ if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
+ n = MAXIMUM_CAPACITY;
+ else {
+ int sz = (int)size;
+ n = tableSizeFor(sz + (sz >>> 1) + 1);
+ }
+ int sc = sizeCtl;
+ boolean collide = false;
+ if (n > sc &&
+ UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
+ try {
+ if (table == null) {
+ init = true;
+ Node[] tab = new Node[n];
+ int mask = n - 1;
+ while (p != null) {
+ int j = p.hash & mask;
+ Node next = p.next;
+ Node q = p.next = tabAt(tab, j);
+ setTabAt(tab, j, p);
+ if (!collide && q != null && q.hash == p.hash)
+ collide = true;
+ p = next;
+ }
+ table = tab;
+ counter.add(size);
+ sc = n - (n >>> 2);
+ }
+ } finally {
+ sizeCtl = sc;
+ }
+ if (collide) { // rescan and convert to TreeBins
+ Node[] tab = table;
+ for (int i = 0; i < tab.length; ++i) {
+ int c = 0;
+ for (Node e = tabAt(tab, i); e != null; e = e.next) {
+ if (++c > TREE_THRESHOLD &&
+ (e.key instanceof Comparable)) {
+ replaceWithTreeBin(tab, i, e.key);
+ break;
+ }
+ }
+ }
+ }
+ }
+ if (!init) { // Can only happen if unsafely published.
+ while (p != null) {
+ internalPut(p.key, p.val);
+ p = p.next;
+ }
+ }
}
}
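In readObject above, once all nodes have been read the table capacity is chosen as tableSizeFor(sz + (sz >>> 1) + 1): the next power of two at or above 1.5 × size + 1, which keeps the rebuilt table below the 0.75 resize threshold. The standalone sketch below (not part of this patch) restates the familiar power-of-two rounding helper and prints the capacity that rule yields for a few sizes; the constant and method are copied here only for illustration.

    public class DeserializationSizing {
        static final int MAXIMUM_CAPACITY = 1 << 30;

        // Round up to the next power of two, capped at MAXIMUM_CAPACITY.
        static int tableSizeFor(int c) {
            int n = c - 1;
            n |= n >>> 1;
            n |= n >>> 2;
            n |= n >>> 4;
            n |= n >>> 8;
            n |= n >>> 16;
            return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
        }

        public static void main(String[] args) {
            for (int size : new int[] { 10, 12, 100, 1_000_000 }) {
                int n = tableSizeFor(size + (size >>> 1) + 1);
                System.out.printf("size=%d -> capacity=%d (load %.2f)%n",
                                  size, n, (double) size / n);
            }
            // size=10      -> capacity=16      (load ~0.63)
            // size=12      -> capacity=32      (load ~0.38)
            // size=100     -> capacity=256     (load ~0.39)
            // size=1000000 -> capacity=2097152 (load ~0.48)
        }
    }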
// Unsafe mechanics
private static final sun.misc.Unsafe UNSAFE;
private static final long counterOffset;
- private static final long resizingOffset;
+ private static final long sizeCtlOffset;
private static final long ABASE;
private static final int ASHIFT;
@@ -1574,8 +3320,8 @@ public class ConcurrentHashMapV8<K, V>
Class<?> k = ConcurrentHashMapV8.class;
counterOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("counter"));
- resizingOffset = UNSAFE.objectFieldOffset
- (k.getDeclaredField("resizing"));
+ sizeCtlOffset = UNSAFE.objectFieldOffset
+ (k.getDeclaredField("sizeCtl"));
Class<?> sc = Node[].class;
ABASE = UNSAFE.arrayBaseOffset(sc);
ss = UNSAFE.arrayIndexScale(sc);