--- jsr166/src/jsr166e/ConcurrentHashMapV8.java 2011/09/10 19:33:14 1.22
+++ jsr166/src/jsr166e/ConcurrentHashMapV8.java 2012/01/02 23:16:22 1.35
@@ -6,6 +6,7 @@
package jsr166e;
import jsr166e.LongAdder;
+import java.util.Arrays;
import java.util.Map;
import java.util.Set;
import java.util.Collection;
@@ -19,6 +20,7 @@ import java.util.Enumeration;
import java.util.ConcurrentModificationException;
import java.util.NoSuchElementException;
import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.locks.LockSupport;
import java.io.Serializable;
/**
@@ -54,12 +56,13 @@ import java.io.Serializable;
*
 * <p>The table is dynamically expanded when there are too many
* collisions (i.e., keys that have distinct hash codes but fall into
* the same slot modulo the table size), with the expected average
- * effect of maintaining roughly two bins per mapping. There may be
- * much variance around this average as mappings are added and
- * removed, but overall, this maintains a commonly accepted time/space
- * tradeoff for hash tables. However, resizing this or any other kind
- * of hash table may be a relatively slow operation. When possible, it
- * is a good idea to provide a size estimate as an optional {@code
+ * effect of maintaining roughly two bins per mapping (corresponding
+ * to a 0.75 load factor threshold for resizing). There may be much
+ * variance around this average as mappings are added and removed, but
+ * overall, this maintains a commonly accepted time/space tradeoff for
+ * hash tables. However, resizing this or any other kind of hash
+ * table may be a relatively slow operation. When possible, it is a
+ * good idea to provide a size estimate as an optional {@code
* initialCapacity} constructor argument. An additional optional
* {@code loadFactor} constructor argument provides a further means of
* customizing initial table capacity by specifying the table density
@@ -68,7 +71,7 @@ import java.io.Serializable;
* versions of this class, constructors may optionally specify an
* expected {@code concurrencyLevel} as an additional hint for
* internal sizing. Note that using many keys with exactly the same
- * {@code hashCode{}} is a sure way to slow down performance of any
+ * {@code hashCode()} is a sure way to slow down performance of any
* hash table.
*
*
 * <p>This class and its views and iterators implement all of the
@@ -95,33 +98,45 @@ public class ConcurrentHashMapV8
private static final long serialVersionUID = 7249069246763182397L;
/**
- * A function computing a mapping from the given key to a value,
- * or {@code null} if there is no mapping. This is a place-holder
- * for an upcoming JDK8 interface.
+ * A function computing a mapping from the given key to a value.
+ * This is a place-holder for an upcoming JDK8 interface.
*/
public static interface MappingFunction {
/**
- * Returns a value for the given key, or null if there is no
- * mapping. If this function throws an (unchecked) exception,
- * the exception is rethrown to its caller, and no mapping is
- * recorded. Because this function is invoked within
- * atomicity control, the computation should be short and
- * simple. The most common usage is to construct a new object
- * serving as an initial mapped value.
+ * Returns a non-null value for the given key.
*
* @param key the (non-null) key
- * @return a value, or null if none
+ * @return a non-null value
*/
V map(K key);
}
+ /**
+ * A function computing a new mapping given a key and its current
+ * mapped value (or {@code null} if there is no current
+ * mapping). This is a place-holder for an upcoming JDK8
+ * interface.
+ */
+ public static interface RemappingFunction {
+ /**
+ * Returns a new value given a key and its current value.
+ *
+ * @param key the (non-null) key
+ * @param value the current value, or null if there is no mapping
+ * @return a non-null value
+ */
+ V remap(K key, V value);
+ }
+
/*
* Overview:
*
* The primary design goal of this hash table is to maintain
* concurrent readability (typically method get(), but also
* iterators and related methods) while minimizing update
- * contention.
+ * contention. Secondary goals are to keep space consumption about
+ * the same or better than java.util.HashMap, and to support high
+ * initial insertion rates on an empty table by many threads.
*
* Each key-value mapping is held in a Node. Because Node fields
* can contain special values, they are defined using plain Object
@@ -129,39 +144,61 @@ public class ConcurrentHashMapV8
* work off Object types. And similarly, so do the internal
* methods of auxiliary iterator and view classes. All public
* generic typed methods relay in/out of these internal methods,
- * supplying null-checks and casts as needed.
+ * supplying null-checks and casts as needed. This also allows
+ * many of the public methods to be factored into a smaller number
+ * of internal methods (although sadly not so for the five
+ * sprawling variants of put-related operations).
*
* The table is lazily initialized to a power-of-two size upon the
* first insertion. Each bin in the table contains a list of
- * Nodes (most often, zero or one Node). Table accesses require
- * volatile/atomic reads, writes, and CASes. Because there is no
- * other way to arrange this without adding further indirections,
- * we use intrinsics (sun.misc.Unsafe) operations. The lists of
- * nodes within bins are always accurately traversable under
- * volatile reads, so long as lookups check hash code and
- * non-nullness of value before checking key equality. (All valid
- * hash codes are nonnegative. Negative values are reserved for
- * special forwarding nodes; see below.)
+ * Nodes (most often, the list has only zero or one Node). Table
+ * accesses require volatile/atomic reads, writes, and CASes.
+ * Because there is no other way to arrange this without adding
+ * further indirections, we use intrinsics (sun.misc.Unsafe)
+ * operations. The lists of nodes within bins are always
+ * accurately traversable under volatile reads, so long as lookups
+ * check hash code and non-nullness of value before checking key
+ * equality.
+ *
+ * We use the top two bits of Node hash fields for control
+ * purposes -- they are available anyway because of addressing
+ * constraints. As explained further below, these top bits are
+ * used as follows:
+ * 00 - Normal
+ * 01 - Locked
+ * 11 - Locked and may have a thread waiting for lock
+ * 10 - Node is a forwarding node
+ *
+ * The lower 30 bits of each Node's hash field contain a
+ * transformation (for better randomization -- method "spread") of
+ * the key's hash code, except for forwarding nodes, for which the
+ * lower bits are zero (and so always have hash field == MOVED).
*
- * Insertion (via put or putIfAbsent) of the first node in an
+ * Insertion (via put or its variants) of the first node in an
* empty bin is performed by just CASing it to the bin. This is
- * on average by far the most common case for put operations.
- * Other update operations (insert, delete, and replace) require
- * locks. We do not want to waste the space required to associate
- * a distinct lock object with each bin, so instead use the first
- * node of a bin list itself as a lock, using plain "synchronized"
- * locks. These save space and we can live with block-structured
- * lock/unlock operations. Using the first node of a list as a
- * lock does not by itself suffice though: When a node is locked,
- * any update must first validate that it is still the first node,
- * and retry if not. Because new nodes are always appended to
- * lists, once a node is first in a bin, it remains first until
- * deleted or the bin becomes invalidated. However, operations
- * that only conditionally update can and sometimes do inspect
- * nodes until the point of update. This is a converse of sorts to
- * the lazy locking technique described by Herlihy & Shavit.
+ * by far the most common case for put operations. Other update
+ * operations (insert, delete, and replace) require locks. We do
+ * not want to waste the space required to associate a distinct
+ * lock object with each bin, so instead use the first node of a
+ * bin list itself as a lock. Blocking support for these locks
+ * relies on the builtin "synchronized" monitors. However, we
+ * also need a tryLock construction, so we overlay these by using
+ * bits of the Node hash field for lock control (see above), and
+ * so normally use builtin monitors only for blocking and
+ * signalling using wait/notifyAll constructions. See
+ * Node.tryAwaitLock.
+ *
+ * Using the first node of a list as a lock does not by itself
+ * suffice though: When a node is locked, any update must first
+ * validate that it is still the first node after locking it, and
+ * retry if not. Because new nodes are always appended to lists,
+ * once a node is first in a bin, it remains first until deleted
+ * or the bin becomes invalidated (upon resizing). However,
+ * operations that only conditionally update may inspect nodes
+ * until the point of update. This is a converse of sorts to the
+ * lazy locking technique described by Herlihy & Shavit.
*
- * The main disadvantage of this approach is that most update
+ * The main disadvantage of per-bin locks is that other update
* operations on other nodes in a bin list protected by the same
* lock can stall, for example when user equals() or mapping
* functions take a long time. However, statistically, this is
@@ -187,27 +224,46 @@ public class ConcurrentHashMapV8
* that these assumptions hold unless users define exactly the
* same value for too many hashCodes.
*
- * The table is resized when occupancy exceeds a threshold. Only
- * a single thread performs the resize (using field "resizing", to
- * arrange exclusion), but the table otherwise remains usable for
- * reads and updates. Resizing proceeds by transferring bins, one
- * by one, from the table to the next table. Upon transfer, the
- * old table bin contains only a special forwarding node (with
- * negative hash field) that contains the next table as its
- * key. On encountering a forwarding node, access and update
- * operations restart, using the new table. To ensure concurrent
- * readability of traversals, transfers must proceed from the last
- * bin (table.length - 1) up towards the first. Upon seeing a
- * forwarding node, traversals (see class InternalIterator)
- * arrange to move to the new table for the rest of the traversal
- * without revisiting nodes. This constrains bin transfers to a
- * particular order, and so can block indefinitely waiting for the
- * next lock, and other threads cannot help with the transfer.
- * However, expected stalls are infrequent enough to not warrant
- * the additional overhead of access and iteration schemes that
- * could admit out-of-order or concurrent bin transfers.
+ * The table is resized when occupancy exceeds an occupancy
+ * threshold (nominally, 0.75, but see below). Only a single
+ * thread performs the resize (using field "sizeCtl", to arrange
+ * exclusion), but the table otherwise remains usable for reads
+ * and updates. Resizing proceeds by transferring bins, one by
+ * one, from the table to the next table. Because we are using
+ * power-of-two expansion, the elements from each bin must either
+ * stay at same index, or move with a power of two offset. We
+ * eliminate unnecessary node creation by catching cases where old
+ * nodes can be reused because their next fields won't change. On
+ * average, only about one-sixth of them need cloning when a table
+ * doubles. The nodes they replace will be garbage collectable as
+ * soon as they are no longer referenced by any reader thread that
+ * may be in the midst of concurrently traversing table. Upon
+ * transfer, the old table bin contains only a special forwarding
+ * node (with hash field "MOVED") that contains the next table as
+ * its key. On encountering a forwarding node, access and update
+ * operations restart, using the new table.
+ *
+ * Each bin transfer requires its bin lock. However, unlike other
+ * cases, a transfer can skip a bin if it fails to acquire its
+ * lock, and revisit it later. Method rebuild maintains a buffer
+ * of TRANSFER_BUFFER_SIZE bins that have been skipped because of
+ * failure to acquire a lock, and blocks only if none are
+ * available (i.e., only very rarely). The transfer operation
+ * must also ensure that all accessible bins in both the old and
+ * new table are usable by any traversal. When there are no lock
+ * acquisition failures, this is arranged simply by proceeding
+ * from the last bin (table.length - 1) up towards the first.
+ * Upon seeing a forwarding node, traversals (see class
+ * InternalIterator) arrange to move to the new table without
+ * revisiting nodes. However, when any node is skipped during a
+ * transfer, all earlier table bins may have become visible, so
+ * are initialized with a reverse-forwarding node back to the old
+ * table until the new ones are established. (This sometimes
+ * requires transiently locking a forwarding node, which is
+ * possible under the above encoding.) These more expensive
+ * mechanics trigger only when necessary.
*
- * This traversal scheme also applies to partial traversals of
+ * The traversal scheme also applies to partial traversals of
* ranges of bins (via an alternate InternalIterator constructor)
* to support partitioned aggregate operations (that are not
* otherwise implemented yet). Also, read-only operations give up
@@ -218,31 +274,32 @@ public class ConcurrentHashMapV8
* Lazy table initialization minimizes footprint until first use,
* and also avoids resizings when the first operation is from a
* putAll, constructor with map argument, or deserialization.
- * These cases attempt to override the targetCapacity used in
- * growTable. These harmlessly fail to take effect in cases of
- * races with other ongoing resizings. Uses of the threshold and
- * targetCapacity during attempted initializations or resizings
- * are racy but fall back on checks to preserve correctness.
+ * These cases attempt to override the initial capacity settings,
+ * but harmlessly fail to take effect in cases of races.
*
* The element count is maintained using a LongAdder, which avoids
* contention on updates but can encounter cache thrashing if read
* too frequently during concurrent access. To avoid reading so
- * often, resizing is normally attempted only upon adding to a bin
- * already holding two or more nodes. Under uniform hash
- * distributions, the probability of this occurring at threshold
- * is around 13%, meaning that only about 1 in 8 puts check
- * threshold (and after resizing, many fewer do so). But this
- * approximation has high variance for small table sizes, so we
- * check on any collision for sizes <= 64. Further, to increase
- * the probability that a resize occurs soon enough, we offset the
- * threshold (see THRESHOLD_OFFSET) by the expected number of puts
- * between checks.
+ * often, resizing is attempted either when a bin lock is
+ * contended, or upon adding to a bin already holding two or more
+ * nodes (checked before adding in the xIfAbsent methods, after
+ * adding in others). Under uniform hash distributions, the
+ * probability of this occurring at threshold is around 13%,
+ * meaning that only about 1 in 8 puts check threshold (and after
+ * resizing, many fewer do so). But this approximation has high
+ * variance for small table sizes, so we check on any collision
+ * for sizes <= 64. The bulk putAll operation further reduces
+ * contention by only committing count updates upon these size
+ * checks.
*
* Maintaining API and serialization compatibility with previous
* versions of this class introduces several oddities. Mainly: We
 * leave untouched but unused constructor arguments referring to
- * concurrencyLevel. We also declare an unused "Segment" class
- * that is instantiated in minimal form only when serializing.
+ * concurrencyLevel. We accept a loadFactor constructor argument,
+ * but apply it only to initial table capacity (which is the only
+ * time that we can guarantee to honor it.) We also declare an
+ * unused "Segment" class that is instantiated in minimal form
+ * only when serializing.
*/
/* ---------------- Constants -------------- */
@@ -250,7 +307,9 @@ public class ConcurrentHashMapV8
/**
* The largest possible table capacity. This value must be
* exactly 1<<30 to stay within Java array allocation and indexing
- * bounds for power of two table sizes.
+ * bounds for power of two table sizes, and is further required
+ * because the top two bits of 32bit hash fields are used for
+ * control purposes.
*/
private static final int MAXIMUM_CAPACITY = 1 << 30;
@@ -261,42 +320,86 @@ public class ConcurrentHashMapV8
private static final int DEFAULT_CAPACITY = 16;
/**
+ * The largest possible (non-power of two) array size.
+ * Needed by toArray and related methods.
+ */
+ static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
+
+ /**
+ * The default concurrency level for this table. Unused but
+ * defined for compatibility with previous versions of this class.
+ */
+ private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
+
+ /**
* The load factor for this table. Overrides of this value in
* constructors affect only the initial table capacity. The
- * actual floating point value isn't normally used, because it is
- * simpler to rely on the expression {@code n - (n >>> 2)} for the
- * associated resizing threshold.
+ * actual floating point value isn't normally used -- it is
+ * simpler to use expressions such as {@code n - (n >>> 2)} for
+ * the associated resizing threshold.
*/
private static final float LOAD_FACTOR = 0.75f;
/**
- * The count value to offset thresholds to compensate for checking
- * for the need to resize only when inserting into bins with two
- * or more elements. See above for explanation.
+ * The buffer size for skipped bins during transfers. The
+ * value is arbitrary but should be large enough to avoid
+ * most locking stalls during resizes.
+ */
+ private static final int TRANSFER_BUFFER_SIZE = 32;
+
+ /*
+ * Encodings for special uses of Node hash fields. See above for
+ * explanation.
+ */
+ static final int MOVED = 0x80000000; // hash field for forwarding nodes
+ static final int LOCKED = 0x40000000; // set/tested only as a bit
+ static final int WAITING = 0xc0000000; // both bits set/tested together
+ static final int HASH_BITS = 0x3fffffff; // usable bits of normal node hash
+
+ /* ---------------- Fields -------------- */
+
+ /**
+ * The array of bins. Lazily initialized upon first insertion.
+ * Size is always a power of two. Accessed directly by iterators.
+ */
+ transient volatile Node[] table;
+
+ /**
+ * The counter maintaining number of elements.
*/
- private static final int THRESHOLD_OFFSET = 8;
+ private transient final LongAdder counter;
/**
- * The default concurrency level for this table. Unused except as
- * a sizing hint, but defined for compatibility with previous
- * versions of this class.
+ * Table initialization and resizing control. When negative, the
+ * table is being initialized or resized. Otherwise, when table is
+ * null, holds the initial table size to use upon creation, or 0
+ * for default. After initialization, holds the next element count
+ * value upon which to resize the table.
*/
- private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
+ private transient volatile int sizeCtl;
+
+ // views
+ private transient KeySet keySet;
+ private transient Values values;
+ private transient EntrySet entrySet;
+
+ /** For serialization compatibility. Null unless serialized; see below */
+ private Segment[] segments;
/* ---------------- Nodes -------------- */
/**
* Key-value entry. Note that this is never exported out as a
- * user-visible Map.Entry. Nodes with a negative hash field are
- * special, and do not contain user keys or values. Otherwise,
- * keys are never null, and null val fields indicate that a node
- * is in the process of being deleted or created. For purposes of
- * read-only, access, a key may be read before a val, but can only
- * be used after checking val. (For an update operation, when a
- * lock is held on a node, order doesn't matter.)
+ * user-visible Map.Entry (see WriteThroughEntry and SnapshotEntry
+ * below). Nodes with a hash field of MOVED are special, and do
+ * not contain user keys or values. Otherwise, keys are never
+ * null, and null val fields indicate that a node is in the
+ * process of being deleted or created. For purposes of read-only
+ * access, a key may be read before a val, but can only be used
+ * after checking val to be non-null.
*/
static final class Node {
- final int hash;
+ volatile int hash;
final Object key;
volatile Object val;
volatile Node next;
@@ -307,37 +410,71 @@ public class ConcurrentHashMapV8
this.val = val;
this.next = next;
}
- }
- /**
- * Sign bit of node hash value indicating to use table in node.key.
- */
- private static final int SIGN_BIT = 0x80000000;
-
- /* ---------------- Fields -------------- */
+ /** CompareAndSet the hash field */
+ final boolean casHash(int cmp, int val) {
+ return UNSAFE.compareAndSwapInt(this, hashOffset, cmp, val);
+ }
- /**
- * The array of bins. Lazily initialized upon first insertion.
- * Size is always a power of two. Accessed directly by iterators.
- */
- transient volatile Node[] table;
+ /** The number of spins before blocking for a lock */
+ static final int MAX_SPINS =
+ Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1;
- /** The counter maintaining number of elements. */
- private transient final LongAdder counter;
- /** Nonzero when table is being initialized or resized. Updated via CAS. */
- private transient volatile int resizing;
- /** The next element count value upon which to resize the table. */
- private transient int threshold;
- /** The target capacity; volatile to cover initialization races. */
- private transient volatile int targetCapacity;
+ /**
+ * Spins a while if LOCKED bit set and this node is the first
+ * of its bin, and then sets WAITING bits on hash field and
+ * blocks (once) if they are still set. It is OK for this
+ * method to return even if lock is not available upon exit,
+ * which enables these simple single-wait mechanics.
+ *
+ * The corresponding signalling operation is performed within
+ * callers: Upon detecting that WAITING has been set when
+ * unlocking lock (via a failed CAS from non-waiting LOCKED
+ * state), unlockers acquire the sync lock and perform a
+ * notifyAll.
+ */
+ final void tryAwaitLock(Node[] tab, int i) {
+ if (tab != null && i >= 0 && i < tab.length) { // bounds check
+ int spins = MAX_SPINS, h;
+ while (tabAt(tab, i) == this && ((h = hash) & LOCKED) != 0) {
+ if (spins >= 0) {
+ if (--spins == MAX_SPINS >>> 1)
+ Thread.yield(); // heuristically yield mid-way
+ }
+ else if (casHash(h, h | WAITING)) {
+ synchronized (this) {
+ if (tabAt(tab, i) == this &&
+ (hash & WAITING) == WAITING) {
+ try {
+ wait();
+ } catch (InterruptedException ie) {
+ Thread.currentThread().interrupt();
+ }
+ }
+ else
+ notifyAll(); // possibly won race vs signaller
+ }
+ break;
+ }
+ }
+ }
+ }
- // views
- private transient KeySet keySet;
- private transient Values values;
- private transient EntrySet entrySet;
+ // Unsafe mechanics for casHash
+ private static final sun.misc.Unsafe UNSAFE;
+ private static final long hashOffset;
- /** For serialization compatibility. Null unless serialized; see below */
- private Segment[] segments;
+ static {
+ try {
+ UNSAFE = getUnsafe();
+                Class<?> k = Node.class;
+ hashOffset = UNSAFE.objectFieldOffset
+ (k.getDeclaredField("hash"));
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ }
+ }
/* ---------------- Table element access -------------- */
@@ -365,397 +502,787 @@ public class ConcurrentHashMapV8
UNSAFE.putObjectVolatile(tab, ((long)i<>> 1;
- n |= n >>> 2;
- n |= n >>> 4;
- n |= n >>> 8;
- n |= n >>> 16;
- return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
- }
-
- /**
- * If not already resizing, initializes or creates next table and
- * transfers bins. Initial table size uses the capacity recorded
- * in targetCapacity. Rechecks occupancy after a transfer to see
- * if another resize is already needed because resizings are
- * lagging additions.
- *
- * @return current table
- */
- private final Node[] growTable() {
- if (resizing == 0 &&
- UNSAFE.compareAndSwapInt(this, resizingOffset, 0, 1)) {
- try {
- for (;;) {
- Node[] tab = table;
- int n, c, m;
- if (tab == null)
- n = (c = targetCapacity) > 0 ? c : DEFAULT_CAPACITY;
- else if ((m = tab.length) < MAXIMUM_CAPACITY &&
- counter.sum() >= (long)threshold)
- n = m << 1;
- else
- break;
- threshold = n - (n >>> 2) - THRESHOLD_OFFSET;
- Node[] nextTab = new Node[n];
- if (tab != null)
- transfer(tab, nextTab,
- new Node(SIGN_BIT, nextTab, null, null));
- table = nextTab;
- if (tab == null)
- break;
- }
- } finally {
- resizing = 0;
- }
- }
- else if (table == null)
- Thread.yield(); // lost initialization race; just spin
- return table;
- }
-
- /**
- * Reclassifies nodes in each bin to new table. Because we are
- * using power-of-two expansion, the elements from each bin must
- * either stay at same index, or move with a power of two
- * offset. We eliminate unnecessary node creation by catching
- * cases where old nodes can be reused because their next fields
- * won't change. Statistically, only about one-sixth of them need
- * cloning when a table doubles. The nodes they replace will be
- * garbage collectable as soon as they are no longer referenced by
- * any reader thread that may be in the midst of concurrently
- * traversing table.
- *
- * Transfers are done from the bottom up to preserve iterator
- * traversability. On each step, the old bin is locked,
- * moved/copied, and then replaced with a forwarding node.
- */
- private static final void transfer(Node[] tab, Node[] nextTab, Node fwd) {
- int n = tab.length;
- Node ignore = nextTab[n + n - 1]; // force bounds check
- for (int i = n - 1; i >= 0; --i) {
- for (Node e;;) {
- if ((e = tabAt(tab, i)) != null) {
- boolean validated = false;
- synchronized (e) {
- if (tabAt(tab, i) == e) {
- validated = true;
- Node lo = null, hi = null, lastRun = e;
- int runBit = e.hash & n;
- for (Node p = e.next; p != null; p = p.next) {
- int b = p.hash & n;
- if (b != runBit) {
- runBit = b;
- lastRun = p;
- }
- }
- if (runBit == 0)
- lo = lastRun;
- else
- hi = lastRun;
- for (Node p = e; p != lastRun; p = p.next) {
- int ph = p.hash;
- Object pk = p.key, pv = p.val;
- if ((ph & n) == 0)
- lo = new Node(ph, pk, pv, lo);
- else
- hi = new Node(ph, pk, pv, hi);
- }
- setTabAt(nextTab, i, lo);
- setTabAt(nextTab, i + n, hi);
- setTabAt(tab, i, fwd);
- }
- }
- if (validated)
- break;
- }
- else if (casTabAt(tab, i, e, fwd))
- break;
- }
- }
- }
-
/* ---------------- Internal access and update methods -------------- */
/**
* Applies a supplemental hash function to a given hashCode, which
* defends against poor quality hash functions. The result must
- * be non-negative, and for reasonable performance must have good
- * avalanche properties; i.e., that each bit of the argument
- * affects each bit (except sign bit) of the result.
+     * have the top 2 bits clear. For reasonable performance, this
+ * function must have good avalanche properties; i.e., that each
+ * bit of the argument affects each bit of the result. (Although
+ * we don't care about the unused top 2 bits.)
*/
private static final int spread(int h) {
// Apply base step of MurmurHash; see http://code.google.com/p/smhasher/
+ // Despite two multiplies, this is often faster than others
+ // with comparable bit-spread properties.
h ^= h >>> 16;
h *= 0x85ebca6b;
h ^= h >>> 13;
h *= 0xc2b2ae35;
- return (h >>> 16) ^ (h & 0x7fffffff); // mask out sign bit
+ return ((h >>> 16) ^ h) & HASH_BITS; // mask out top bits
}
/** Implementation for get and containsKey */
private final Object internalGet(Object k) {
int h = spread(k.hashCode());
retry: for (Node[] tab = table; tab != null;) {
- Node e; Object ek, ev; int eh; // locals to read fields once
+ Node e; Object ek, ev; int eh; // locals to read fields once
for (e = tabAt(tab, (tab.length - 1) & h); e != null; e = e.next) {
- if ((eh = e.hash) == h) {
- if ((ev = e.val) != null &&
- ((ek = e.key) == k || k.equals(ek)))
- return ev;
- }
- else if (eh < 0) { // sign bit set
- tab = (Node[])e.key; // bin was moved during resize
+ if ((eh = e.hash) == MOVED) {
+ tab = (Node[])e.key; // restart with new table
continue retry;
}
+ if ((eh & HASH_BITS) == h && (ev = e.val) != null &&
+ ((ek = e.key) == k || k.equals(ek)))
+ return ev;
}
break;
}
return null;
}
- /** Implementation for put and putIfAbsent */
- private final Object internalPut(Object k, Object v, boolean replace) {
+ /**
+ * Implementation for the four public remove/replace methods:
+ * Replaces node value with v, conditional upon match of cv if
+ * non-null. If resulting value is null, delete.
+ */
+ private final Object internalReplace(Object k, Object v, Object cv) {
+ int h = spread(k.hashCode());
+ Object oldVal = null;
+ for (Node[] tab = table;;) {
+ Node f; int i, fh;
+ if (tab == null ||
+ (f = tabAt(tab, i = (tab.length - 1) & h)) == null)
+ break;
+ else if ((fh = f.hash) == MOVED)
+ tab = (Node[])f.key;
+ else if ((fh & HASH_BITS) != h && f.next == null) // precheck
+ break; // rules out possible existence
+ else if ((fh & LOCKED) != 0) {
+ checkForResize(); // try resizing if can't get lock
+ f.tryAwaitLock(tab, i);
+ }
+ else if (f.casHash(fh, fh | LOCKED)) {
+ boolean validated = false;
+ boolean deleted = false;
+ try {
+ if (tabAt(tab, i) == f) {
+ validated = true;
+ for (Node e = f, pred = null;;) {
+ Object ek, ev;
+ if ((e.hash & HASH_BITS) == h &&
+ ((ev = e.val) != null) &&
+ ((ek = e.key) == k || k.equals(ek))) {
+ if (cv == null || cv == ev || cv.equals(ev)) {
+ oldVal = ev;
+ if ((e.val = v) == null) {
+ deleted = true;
+ Node en = e.next;
+ if (pred != null)
+ pred.next = en;
+ else
+ setTabAt(tab, i, en);
+ }
+ }
+ break;
+ }
+ pred = e;
+ if ((e = e.next) == null)
+ break;
+ }
+ }
+ } finally {
+ if (!f.casHash(fh | LOCKED, fh)) {
+ f.hash = fh;
+ synchronized (f) { f.notifyAll(); };
+ }
+ }
+ if (validated) {
+ if (deleted)
+ counter.add(-1L);
+ break;
+ }
+ }
+ }
+ return oldVal;
+ }
+
+ /*
+ * Internal versions of the five insertion methods, each a
+ * little more complicated than the last. All have
+ * the same basic structure as the first (internalPut):
+ * 1. If table uninitialized, create
+ * 2. If bin empty, try to CAS new node
+ * 3. If bin stale, use new table
+ * 4. Lock and validate; if valid, scan and add or update
+ *
+ * The others interweave other checks and/or alternative actions:
+ * * Plain put checks for and performs resize after insertion.
+ * * putIfAbsent prescans for mapping without lock (and fails to add
+ * if present), which also makes pre-emptive resize checks worthwhile.
+ * * computeIfAbsent extends form used in putIfAbsent with additional
+     * mechanics to deal with calls, potential exceptions, and null
+ * returns from function call.
+ * * compute uses the same function-call mechanics, but without
+ * the prescans
+ * * putAll attempts to pre-allocate enough table space
+ * and more lazily performs count updates and checks.
+ *
+ * Someday when details settle down a bit more, it might be worth
+ * some factoring to reduce sprawl.
+ */
+
+ /** Implementation for put */
+ private final Object internalPut(Object k, Object v) {
int h = spread(k.hashCode());
- Object oldVal = null; // previous value or null if none
+ boolean checkSize = false;
for (Node[] tab = table;;) {
- Node e; int i; Object ek, ev;
+ int i; Node f; int fh;
if (tab == null)
- tab = growTable();
- else if ((e = tabAt(tab, i = (tab.length - 1) & h)) == null) {
+ tab = initTable();
+ else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
if (casTabAt(tab, i, null, new Node(h, k, v, null)))
break; // no lock when adding to empty bin
}
- else if (e.hash < 0) // resized -- restart with new table
- tab = (Node[])e.key;
- else if (!replace && e.hash == h && (ev = e.val) != null &&
- ((ek = e.key) == k || k.equals(ek))) {
- if (tabAt(tab, i) == e) { // inspect and validate 1st node
- oldVal = ev; // without lock for putIfAbsent
- break;
- }
+ else if ((fh = f.hash) == MOVED)
+ tab = (Node[])f.key;
+ else if ((fh & LOCKED) != 0) {
+ checkForResize();
+ f.tryAwaitLock(tab, i);
}
- else {
+ else if (f.casHash(fh, fh | LOCKED)) {
+ Object oldVal = null;
boolean validated = false;
- boolean checkSize = false;
- synchronized (e) { // lock the 1st node of bin list
- if (tabAt(tab, i) == e) {
+ try { // needed in case equals() throws
+ if (tabAt(tab, i) == f) {
validated = true; // retry if 1st already deleted
- for (Node first = e;;) {
- if (e.hash == h &&
- ((ek = e.key) == k || k.equals(ek)) &&
- (ev = e.val) != null) {
+ for (Node e = f;;) {
+ Object ek, ev;
+ if ((e.hash & HASH_BITS) == h &&
+ (ev = e.val) != null &&
+ ((ek = e.key) == k || k.equals(ek))) {
oldVal = ev;
- if (replace)
- e.val = v;
+ e.val = v;
break;
}
Node last = e;
if ((e = e.next) == null) {
last.next = new Node(h, k, v, null);
- if (last != first || tab.length <= 64)
+ if (last != f || tab.length <= 64)
checkSize = true;
break;
}
}
}
+ } finally { // unlock and signal if needed
+ if (!f.casHash(fh | LOCKED, fh)) {
+ f.hash = fh;
+ synchronized (f) { f.notifyAll(); };
+ }
}
if (validated) {
- if (checkSize && tab.length < MAXIMUM_CAPACITY &&
- resizing == 0 && counter.sum() >= (long)threshold)
- growTable();
+ if (oldVal != null)
+ return oldVal;
break;
}
}
}
- if (oldVal == null)
- counter.increment(); // update counter outside of locks
- return oldVal;
+ counter.add(1L);
+ if (checkSize)
+ checkForResize();
+ return null;
}
- /**
- * Implementation for the four public remove/replace methods:
- * Replaces node value with v, conditional upon match of cv if
- * non-null. If resulting value is null, delete.
- */
- private final Object internalReplace(Object k, Object v, Object cv) {
+ /** Implementation for putIfAbsent */
+ private final Object internalPutIfAbsent(Object k, Object v) {
int h = spread(k.hashCode());
for (Node[] tab = table;;) {
- Node e; int i;
- if (tab == null ||
- (e = tabAt(tab, i = (tab.length - 1) & h)) == null)
- return null;
- else if (e.hash < 0)
- tab = (Node[])e.key;
+ int i; Node f; int fh; Object fk, fv;
+ if (tab == null)
+ tab = initTable();
+ else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
+ if (casTabAt(tab, i, null, new Node(h, k, v, null)))
+ break;
+ }
+ else if ((fh = f.hash) == MOVED)
+ tab = (Node[])f.key;
+ else if ((fh & HASH_BITS) == h && (fv = f.val) != null &&
+ ((fk = f.key) == k || k.equals(fk)))
+ return fv;
else {
- Object oldVal = null;
- boolean validated = false;
- boolean deleted = false;
- synchronized (e) {
- if (tabAt(tab, i) == e) {
- validated = true;
- Node pred = null;
- do {
- Object ek, ev;
- if (e.hash == h &&
- ((ek = e.key) == k || k.equals(ek)) &&
- ((ev = e.val) != null)) {
- if (cv == null || cv == ev || cv.equals(ev)) {
+ Node g = f.next;
+ if (g != null) { // at least 2 nodes -- search and maybe resize
+ for (Node e = g;;) {
+ Object ek, ev;
+ if ((e.hash & HASH_BITS) == h && (ev = e.val) != null &&
+ ((ek = e.key) == k || k.equals(ek)))
+ return ev;
+ if ((e = e.next) == null) {
+ checkForResize();
+ break;
+ }
+ }
+ }
+ if (((fh = f.hash) & LOCKED) != 0) {
+ checkForResize();
+ f.tryAwaitLock(tab, i);
+ }
+ else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) {
+ Object oldVal = null;
+ boolean validated = false;
+ try {
+ if (tabAt(tab, i) == f) {
+ validated = true;
+ for (Node e = f;;) {
+ Object ek, ev;
+ if ((e.hash & HASH_BITS) == h &&
+ (ev = e.val) != null &&
+ ((ek = e.key) == k || k.equals(ek))) {
oldVal = ev;
- if ((e.val = v) == null) {
- deleted = true;
- Node en = e.next;
- if (pred != null)
- pred.next = en;
- else
- setTabAt(tab, i, en);
- }
+ break;
+ }
+ Node last = e;
+ if ((e = e.next) == null) {
+ last.next = new Node(h, k, v, null);
+ break;
}
- break;
}
- } while ((e = (pred = e).next) != null);
+ }
+ } finally {
+ if (!f.casHash(fh | LOCKED, fh)) {
+ f.hash = fh;
+ synchronized (f) { f.notifyAll(); };
+ }
+ }
+ if (validated) {
+ if (oldVal != null)
+ return oldVal;
+ break;
}
}
- if (validated) {
- if (deleted)
- counter.decrement();
- return oldVal;
+ }
+ }
+ counter.add(1L);
+ return null;
+ }
+
+ /** Implementation for computeIfAbsent */
+ private final Object internalComputeIfAbsent(K k,
+ MappingFunction<? super K, ?> mf) {
+ int h = spread(k.hashCode());
+ Object val = null;
+ for (Node[] tab = table;;) {
+ Node f; int i, fh; Object fk, fv;
+ if (tab == null)
+ tab = initTable();
+ else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
+ Node node = new Node(fh = h | LOCKED, k, null, null);
+ boolean validated = false;
+ if (casTabAt(tab, i, null, node)) {
+ validated = true;
+ try {
+ if ((val = mf.map(k)) != null)
+ node.val = val;
+ } finally {
+ if (val == null)
+ setTabAt(tab, i, null);
+ if (!node.casHash(fh, h)) {
+ node.hash = h;
+ synchronized (node) { node.notifyAll(); };
+ }
+ }
+ }
+ if (validated)
+ break;
+ }
+ else if ((fh = f.hash) == MOVED)
+ tab = (Node[])f.key;
+ else if ((fh & HASH_BITS) == h && (fv = f.val) != null &&
+ ((fk = f.key) == k || k.equals(fk)))
+ return fv;
+ else {
+ Node g = f.next;
+ if (g != null) {
+ for (Node e = g;;) {
+ Object ek, ev;
+ if ((e.hash & HASH_BITS) == h && (ev = e.val) != null &&
+ ((ek = e.key) == k || k.equals(ek)))
+ return ev;
+ if ((e = e.next) == null) {
+ checkForResize();
+ break;
+ }
+ }
+ }
+ if (((fh = f.hash) & LOCKED) != 0) {
+ checkForResize();
+ f.tryAwaitLock(tab, i);
+ }
+ else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) {
+ boolean validated = false;
+ try {
+ if (tabAt(tab, i) == f) {
+ validated = true;
+ for (Node e = f;;) {
+ Object ek, ev;
+ if ((e.hash & HASH_BITS) == h &&
+ (ev = e.val) != null &&
+ ((ek = e.key) == k || k.equals(ek))) {
+ val = ev;
+ break;
+ }
+ Node last = e;
+ if ((e = e.next) == null) {
+ if ((val = mf.map(k)) != null)
+ last.next = new Node(h, k, val, null);
+ break;
+ }
+ }
+ }
+ } finally {
+ if (!f.casHash(fh | LOCKED, fh)) {
+ f.hash = fh;
+ synchronized (f) { f.notifyAll(); };
+ }
+ }
+ if (validated)
+ break;
}
}
}
+ if (val == null)
+ throw new NullPointerException();
+ counter.add(1L);
+ return val;
}
- /** Implementation for computeIfAbsent and compute. Like put, but messier. */
+ /** Implementation for compute */
@SuppressWarnings("unchecked")
- private final V internalCompute(K k,
- MappingFunction<? super K, ? extends V> f,
- boolean replace) {
+ private final Object internalCompute(K k,
+ RemappingFunction<? super K, V> mf) {
int h = spread(k.hashCode());
- V val = null;
+ Object val = null;
boolean added = false;
- Node[] tab = table;
- outer:for (;;) {
- Node e; int i; Object ek, ev;
+ boolean checkSize = false;
+ for (Node[] tab = table;;) {
+ Node f; int i, fh;
if (tab == null)
- tab = growTable();
- else if ((e = tabAt(tab, i = (tab.length - 1) & h)) == null) {
- Node node = new Node(h, k, null, null);
+ tab = initTable();
+ else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
+ Node node = new Node(fh = h | LOCKED, k, null, null);
boolean validated = false;
- synchronized (node) { // must lock while computing value
- if (casTabAt(tab, i, null, node)) {
- validated = true;
- try {
- val = f.map(k);
- if (val != null) {
- node.val = val;
- added = true;
- }
- } finally {
- if (!added)
- setTabAt(tab, i, null);
+ if (casTabAt(tab, i, null, node)) {
+ validated = true;
+ try {
+ if ((val = mf.remap(k, null)) != null) {
+ node.val = val;
+ added = true;
+ }
+ } finally {
+ if (!added)
+ setTabAt(tab, i, null);
+ if (!node.casHash(fh, h)) {
+ node.hash = h;
+ synchronized (node) { node.notifyAll(); };
}
}
}
if (validated)
break;
}
- else if (e.hash < 0)
- tab = (Node[])e.key;
- else if (!replace && e.hash == h && (ev = e.val) != null &&
- ((ek = e.key) == k || k.equals(ek))) {
- if (tabAt(tab, i) == e) {
- val = (V)ev;
- break;
- }
+ else if ((fh = f.hash) == MOVED)
+ tab = (Node[])f.key;
+ else if ((fh & LOCKED) != 0) {
+ checkForResize();
+ f.tryAwaitLock(tab, i);
}
- else if (Thread.holdsLock(e))
- throw new IllegalStateException("Recursive map computation");
- else {
+ else if (f.casHash(fh, fh | LOCKED)) {
boolean validated = false;
- boolean checkSize = false;
- synchronized (e) {
- if (tabAt(tab, i) == e) {
+ try {
+ if (tabAt(tab, i) == f) {
validated = true;
- for (Node first = e;;) {
- if (e.hash == h &&
- ((ek = e.key) == k || k.equals(ek)) &&
- ((ev = e.val) != null)) {
- Object fv;
- if (replace && (fv = f.map(k)) != null)
- ev = e.val = fv;
- val = (V)ev;
+ for (Node e = f;;) {
+ Object ek, ev;
+ if ((e.hash & HASH_BITS) == h &&
+ (ev = e.val) != null &&
+ ((ek = e.key) == k || k.equals(ek))) {
+ val = mf.remap(k, (V)ev);
+ if (val != null)
+ e.val = val;
break;
}
Node last = e;
if ((e = e.next) == null) {
- if ((val = f.map(k)) != null) {
+ if ((val = mf.remap(k, null)) != null) {
last.next = new Node(h, k, val, null);
added = true;
- if (last != first || tab.length <= 64)
+ if (last != f || tab.length <= 64)
checkSize = true;
}
break;
}
}
}
+ } finally {
+ if (!f.casHash(fh | LOCKED, fh)) {
+ f.hash = fh;
+ synchronized (f) { f.notifyAll(); };
+ }
}
- if (validated) {
- if (checkSize && tab.length < MAXIMUM_CAPACITY &&
- resizing == 0 && counter.sum() >= (long)threshold)
- growTable();
+ if (validated)
break;
- }
}
}
- if (added)
- counter.increment();
+ if (val == null)
+ throw new NullPointerException();
+ if (added) {
+ counter.add(1L);
+ if (checkSize)
+ checkForResize();
+ }
return val;
}
+ /** Implementation for putAll */
+ private final void internalPutAll(Map<?, ?> m) {
+ tryPresize(m.size());
+ long delta = 0L; // number of uncommitted additions
+ boolean npe = false; // to throw exception on exit for nulls
+ try { // to clean up counts on other exceptions
+ for (Map.Entry<?, ?> entry : m.entrySet()) {
+ Object k, v;
+ if (entry == null || (k = entry.getKey()) == null ||
+ (v = entry.getValue()) == null) {
+ npe = true;
+ break;
+ }
+ int h = spread(k.hashCode());
+ for (Node[] tab = table;;) {
+ int i; Node f; int fh;
+ if (tab == null)
+ tab = initTable();
+ else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null){
+ if (casTabAt(tab, i, null, new Node(h, k, v, null))) {
+ ++delta;
+ break;
+ }
+ }
+ else if ((fh = f.hash) == MOVED)
+ tab = (Node[])f.key;
+ else if ((fh & LOCKED) != 0) {
+ counter.add(delta);
+ delta = 0L;
+ checkForResize();
+ f.tryAwaitLock(tab, i);
+ }
+ else if (f.casHash(fh, fh | LOCKED)) {
+ boolean validated = false;
+ boolean tooLong = false;
+ try {
+ if (tabAt(tab, i) == f) {
+ validated = true;
+ for (Node e = f;;) {
+ Object ek, ev;
+ if ((e.hash & HASH_BITS) == h &&
+ (ev = e.val) != null &&
+ ((ek = e.key) == k || k.equals(ek))) {
+ e.val = v;
+ break;
+ }
+ Node last = e;
+ if ((e = e.next) == null) {
+ ++delta;
+ last.next = new Node(h, k, v, null);
+ break;
+ }
+ tooLong = true;
+ }
+ }
+ } finally {
+ if (!f.casHash(fh | LOCKED, fh)) {
+ f.hash = fh;
+ synchronized (f) { f.notifyAll(); };
+ }
+ }
+ if (validated) {
+ if (tooLong) {
+ counter.add(delta);
+ delta = 0L;
+ checkForResize();
+ }
+ break;
+ }
+ }
+ }
+ }
+ } finally {
+ if (delta != 0)
+ counter.add(delta);
+ }
+ if (npe)
+ throw new NullPointerException();
+ }
+
+ /* ---------------- Table Initialization and Resizing -------------- */
+
+ /**
+ * Returns a power of two table size for the given desired capacity.
+ * See Hackers Delight, sec 3.2
+ */
+ private static final int tableSizeFor(int c) {
+ int n = c - 1;
+ n |= n >>> 1;
+ n |= n >>> 2;
+ n |= n >>> 4;
+ n |= n >>> 8;
+ n |= n >>> 16;
+ return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
+ }
+
+ /**
+ * Initializes table, using the size recorded in sizeCtl.
+ */
+ private final Node[] initTable() {
+ Node[] tab; int sc;
+ while ((tab = table) == null) {
+ if ((sc = sizeCtl) < 0)
+ Thread.yield(); // lost initialization race; just spin
+ else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
+ try {
+ if ((tab = table) == null) {
+ int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
+ tab = table = new Node[n];
+ sc = n - (n >>> 2);
+ }
+ } finally {
+ sizeCtl = sc;
+ }
+ break;
+ }
+ }
+ return tab;
+ }
+
+ /**
+ * If table is too small and not already resizing, creates next
+ * table and transfers bins. Rechecks occupancy after a transfer
+ * to see if another resize is already needed because resizings
+ * are lagging additions.
+ */
+ private final void checkForResize() {
+ Node[] tab; int n, sc;
+ while ((tab = table) != null &&
+ (n = tab.length) < MAXIMUM_CAPACITY &&
+ (sc = sizeCtl) >= 0 && counter.sum() >= (long)sc &&
+ UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
+ try {
+ if (tab == table) {
+ table = rebuild(tab);
+ sc = (n << 1) - (n >>> 1);
+ }
+ } finally {
+ sizeCtl = sc;
+ }
+ }
+ }
+
+ /**
+ * Tries to presize table to accommodate the given number of elements.
+ *
+ * @param size number of elements (doesn't need to be perfectly accurate)
+ */
+ private final void tryPresize(int size) {
+ int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
+ tableSizeFor(size + (size >>> 1) + 1);
+ int sc;
+ while ((sc = sizeCtl) >= 0) {
+ Node[] tab = table; int n;
+ if (tab == null || (n = tab.length) == 0) {
+ n = (sc > c) ? sc : c;
+ if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
+ try {
+ if (table == tab) {
+ table = new Node[n];
+ sc = n - (n >>> 2);
+ }
+ } finally {
+ sizeCtl = sc;
+ }
+ }
+ }
+ else if (c <= sc || n >= MAXIMUM_CAPACITY)
+ break;
+ else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
+ try {
+ if (table == tab) {
+ table = rebuild(tab);
+ sc = (n << 1) - (n >>> 1);
+ }
+ } finally {
+ sizeCtl = sc;
+ }
+ }
+ }
+ }
+
+ /*
+ * Moves and/or copies the nodes in each bin to new table. See
+ * above for explanation.
+ *
+ * @return the new table
+ */
+ private static final Node[] rebuild(Node[] tab) {
+ int n = tab.length;
+ Node[] nextTab = new Node[n << 1];
+ Node fwd = new Node(MOVED, nextTab, null, null);
+ int[] buffer = null; // holds bins to revisit; null until needed
+ Node rev = null; // reverse forwarder; null until needed
+ int nbuffered = 0; // the number of bins in buffer list
+ int bufferIndex = 0; // buffer index of current buffered bin
+ int bin = n - 1; // current non-buffered bin or -1 if none
+
+ for (int i = bin;;) { // start upwards sweep
+ int fh; Node f;
+ if ((f = tabAt(tab, i)) == null) {
+ if (bin >= 0) { // no lock needed (or available)
+ if (!casTabAt(tab, i, f, fwd))
+ continue;
+ }
+ else { // transiently use a locked forwarding node
+ Node g = new Node(MOVED|LOCKED, nextTab, null, null);
+ if (!casTabAt(tab, i, f, g))
+ continue;
+ setTabAt(nextTab, i, null);
+ setTabAt(nextTab, i + n, null);
+ setTabAt(tab, i, fwd);
+ if (!g.casHash(MOVED|LOCKED, MOVED)) {
+ g.hash = MOVED;
+ synchronized (g) { g.notifyAll(); }
+ }
+ }
+ }
+ else if (((fh = f.hash) & LOCKED) == 0 && f.casHash(fh, fh|LOCKED)) {
+ boolean validated = false;
+ try { // split to lo and hi lists; copying as needed
+ if (tabAt(tab, i) == f) {
+ validated = true;
+ Node e = f, lastRun = f;
+ Node lo = null, hi = null;
+ int runBit = e.hash & n;
+ for (Node p = e.next; p != null; p = p.next) {
+ int b = p.hash & n;
+ if (b != runBit) {
+ runBit = b;
+ lastRun = p;
+ }
+ }
+ if (runBit == 0)
+ lo = lastRun;
+ else
+ hi = lastRun;
+ for (Node p = e; p != lastRun; p = p.next) {
+ int ph = p.hash & HASH_BITS;
+ Object pk = p.key, pv = p.val;
+ if ((ph & n) == 0)
+ lo = new Node(ph, pk, pv, lo);
+ else
+ hi = new Node(ph, pk, pv, hi);
+ }
+ setTabAt(nextTab, i, lo);
+ setTabAt(nextTab, i + n, hi);
+ setTabAt(tab, i, fwd);
+ }
+ } finally {
+ if (!f.casHash(fh | LOCKED, fh)) {
+ f.hash = fh;
+ synchronized (f) { f.notifyAll(); };
+ }
+ }
+ if (!validated)
+ continue;
+ }
+ else {
+ if (buffer == null) // initialize buffer for revisits
+ buffer = new int[TRANSFER_BUFFER_SIZE];
+ if (bin < 0 && bufferIndex > 0) {
+ int j = buffer[--bufferIndex];
+ buffer[bufferIndex] = i;
+ i = j; // swap with another bin
+ continue;
+ }
+ if (bin < 0 || nbuffered >= TRANSFER_BUFFER_SIZE) {
+ f.tryAwaitLock(tab, i);
+ continue; // no other options -- block
+ }
+ if (rev == null) // initialize reverse-forwarder
+ rev = new Node(MOVED, tab, null, null);
+ if (tabAt(tab, i) != f || (f.hash & LOCKED) == 0)
+ continue; // recheck before adding to list
+ buffer[nbuffered++] = i;
+ setTabAt(nextTab, i, rev); // install place-holders
+ setTabAt(nextTab, i + n, rev);
+ }
+
+ if (bin > 0)
+ i = --bin;
+ else if (buffer != null && nbuffered > 0) {
+ bin = -1;
+ i = buffer[bufferIndex = --nbuffered];
+ }
+ else
+ return nextTab;
+ }
+ }
+
/**
- * Implementation for clear. Steps through each bin, removing all nodes.
+ * Implementation for clear. Steps through each bin, removing all
+ * nodes.
*/
private final void internalClear() {
long delta = 0L; // negative number of deletions
int i = 0;
Node[] tab = table;
while (tab != null && i < tab.length) {
- Node e = tabAt(tab, i);
- if (e == null)
+ int fh;
+ Node f = tabAt(tab, i);
+ if (f == null)
++i;
- else if (e.hash < 0)
- tab = (Node[])e.key;
- else {
+ else if ((fh = f.hash) == MOVED)
+ tab = (Node[])f.key;
+ else if ((fh & LOCKED) != 0) {
+ counter.add(delta); // opportunistically update count
+ delta = 0L;
+ f.tryAwaitLock(tab, i);
+ }
+ else if (f.casHash(fh, fh | LOCKED)) {
boolean validated = false;
- synchronized (e) {
- if (tabAt(tab, i) == e) {
+ try {
+ if (tabAt(tab, i) == f) {
validated = true;
- Node en;
- do {
- en = e.next;
+ for (Node e = f; e != null; e = e.next) {
if (e.val != null) { // currently always true
e.val = null;
--delta;
}
- } while ((e = en) != null);
+ }
setTabAt(tab, i, null);
}
+ } finally {
+ if (!f.casHash(fh | LOCKED, fh)) {
+ f.hash = fh;
+ synchronized (f) { f.notifyAll(); };
+ }
}
if (validated)
++i;
}
}
- counter.add(delta);
+ if (delta != 0)
+ counter.add(delta);
}
+
/* ----------------Table Traversal -------------- */
/**
@@ -771,19 +1298,20 @@ public class ConcurrentHashMapV8
* valid.
*
* Internal traversals directly access these fields, as in:
- * {@code while (it.next != null) { process(nextKey); it.advance(); }}
+ * {@code while (it.next != null) { process(it.nextKey); it.advance(); }}
*
* Exported iterators (subclasses of ViewIterator) extract key,
* value, or key-value pairs as return values of Iterator.next(),
* and encapsulate the it.next check as hasNext();
*
- * The iterator visits each valid node that was reachable upon
- * iterator construction once. It might miss some that were added
- * to a bin after the bin was visited, which is OK wrt consistency
- * guarantees. Maintaining this property in the face of possible
- * ongoing resizes requires a fair amount of bookkeeping state
- * that is difficult to optimize away amidst volatile accesses.
- * Even so, traversal maintains reasonable throughput.
+ * The iterator visits once each still-valid node that was
+ * reachable upon iterator construction. It might miss some that
+ * were added to a bin after the bin was visited, which is OK wrt
+ * consistency guarantees. Maintaining this property in the face
+ * of possible ongoing resizes requires a fair amount of
+ * bookkeeping state that is difficult to optimize away amidst
+ * volatile accesses. Even so, traversal maintains reasonable
+ * throughput.
*
* Normally, iteration proceeds bin-by-bin traversing lists.
* However, if the table has been resized, then all future steps
@@ -821,7 +1349,7 @@ public class ConcurrentHashMapV8
this.tab = tab;
baseSize = (tab == null) ? 0 : tab.length;
baseLimit = (hi <= baseSize) ? hi : baseSize;
- index = baseIndex = lo;
+ index = baseIndex = (lo >= 0) ? lo : 0;
next = null;
advance();
}
@@ -830,20 +1358,20 @@ public class ConcurrentHashMapV8
final void advance() {
Node e = last = next;
outer: do {
- if (e != null) // pass used or skipped node
+ if (e != null) // advance past used/skipped node
e = e.next;
- while (e == null) { // get to next non-null bin
- Node[] t; int b, i, n; // checks must use locals
+ while (e == null) { // get to next non-null bin
+ Node[] t; int b, i, n; // checks must use locals
if ((b = baseIndex) >= baseLimit || (i = index) < 0 ||
(t = tab) == null || i >= (n = t.length))
break outer;
- else if ((e = tabAt(t, i)) != null && e.hash < 0)
- tab = (Node[])e.key; // restarts due to null val
- else // visit upper slots if present
+ else if ((e = tabAt(t, i)) != null && e.hash == MOVED)
+ tab = (Node[])e.key; // restarts due to null val
+ else // visit upper slots if present
index = (i += baseSize) < n ? i : (baseIndex = b + 1);
}
nextKey = e.key;
- } while ((nextVal = e.val) == null); // skip deleted or special nodes
+ } while ((nextVal = e.val) == null);// skip deleted or special nodes
next = e;
}
}
@@ -855,7 +1383,6 @@ public class ConcurrentHashMapV8
*/
public ConcurrentHashMapV8() {
this.counter = new LongAdder();
- this.targetCapacity = DEFAULT_CAPACITY;
}
/**
@@ -875,7 +1402,7 @@ public class ConcurrentHashMapV8
MAXIMUM_CAPACITY :
tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
this.counter = new LongAdder();
- this.targetCapacity = cap;
+ this.sizeCtl = cap;
}
/**
@@ -885,8 +1412,8 @@ public class ConcurrentHashMapV8
*/
public ConcurrentHashMapV8(Map<? extends K, ? extends V> m) {
this.counter = new LongAdder();
- this.targetCapacity = DEFAULT_CAPACITY;
- putAll(m);
+ this.sizeCtl = DEFAULT_CAPACITY;
+ internalPutAll(m);
}
/**
@@ -933,10 +1460,10 @@ public class ConcurrentHashMapV8
if (initialCapacity < concurrencyLevel) // Use at least as many bins
initialCapacity = concurrencyLevel; // as estimated threads
long size = (long)(1.0 + (long)initialCapacity / loadFactor);
- int cap = ((size >= (long)MAXIMUM_CAPACITY) ?
- MAXIMUM_CAPACITY: tableSizeFor((int)size));
+ int cap = ((size >= (long)MAXIMUM_CAPACITY) ?
+ MAXIMUM_CAPACITY: tableSizeFor((int)size));
this.counter = new LongAdder();
- this.targetCapacity = cap;
+ this.sizeCtl = cap;
}
/**
@@ -956,6 +1483,11 @@ public class ConcurrentHashMapV8
(int)n);
}
+ final long longSize() { // accurate version of size needed for views
+ long n = counter.sum();
+ return (n < 0L) ? 0L : n;
+ }
+
/**
* Returns the value to which the specified key is mapped,
* or {@code null} if this map contains no mapping for the key.
@@ -1048,7 +1580,7 @@ public class ConcurrentHashMapV8
public V put(K key, V value) {
if (key == null || value == null)
throw new NullPointerException();
- return (V)internalPut(key, value, true);
+ return (V)internalPut(key, value);
}
/**
@@ -1062,7 +1594,7 @@ public class ConcurrentHashMapV8
public V putIfAbsent(K key, V value) {
if (key == null || value == null)
throw new NullPointerException();
- return (V)internalPut(key, value, false);
+ return (V)internalPutIfAbsent(key, value);
}
/**
@@ -1073,42 +1605,32 @@ public class ConcurrentHashMapV8
* @param m mappings to be stored in this map
*/
public void putAll(Map<? extends K, ? extends V> m) {
- if (m == null)
- throw new NullPointerException();
- /*
- * If uninitialized, try to adjust targetCapacity to
- * accommodate the given number of elements.
- */
- if (table == null) {
- int size = m.size();
- int cap = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
- tableSizeFor(size + (size >>> 1) + 1);
- if (cap > targetCapacity)
- targetCapacity = cap;
- }
- for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
- put(e.getKey(), e.getValue());
+ internalPutAll(m);
}
/**
* If the specified key is not already associated with a value,
- * computes its value using the given mappingFunction, and if
- * non-null, enters it into the map. This is equivalent to
- * {@code
+ * computes its value using the given mappingFunction and
+ * enters it into the map. This is equivalent to
+ * {@code
* if (map.containsKey(key))
* return map.get(key);
* value = mappingFunction.map(key);
- * if (value != null)
- * map.put(key, value);
+ * map.put(key, value);
* return value;}
*
- * except that the action is performed atomically. Some attempted
- * update operations on this map by other threads may be blocked
- * while computation is in progress, so the computation should be
- * short and simple, and must not attempt to update any other
- * mappings of this Map. The most appropriate usage is to
- * construct a new object serving as an initial mapped value, or
- * memoized result, as in:
+ * except that the action is performed atomically. If the
+ * function returns {@code null} (in which case a {@code
+ * NullPointerException} is thrown), or the function itself throws
+ * an (unchecked) exception, the exception is rethrown to its
+ * caller, and no mapping is recorded. Some attempted update
+ * operations on this map by other threads may be blocked while
+ * computation is in progress, so the computation should be short
+ * and simple, and must not attempt to update any other mappings
+ * of this Map. The most appropriate usage is to construct a new
+ * object serving as an initial mapped value, or memoized result,
+ * as in:
+ *
* {@code
* map.computeIfAbsent(key, new MappingFunction() {
* public V map(K k) { return new Value(f(k)); }});}
@@ -1116,57 +1638,65 @@ public class ConcurrentHashMapV8
* @param key key with which the specified value is to be associated
* @param mappingFunction the function to compute a value
* @return the current (existing or computed) value associated with
- * the specified key, or {@code null} if the computation
- * returned {@code null}
- * @throws NullPointerException if the specified key or mappingFunction
- * is null
+ * the specified key.
+ * @throws NullPointerException if the specified key, mappingFunction,
+ * or computed value is null
* @throws IllegalStateException if the computation detectably
* attempts a recursive update to this map that would
* otherwise never complete
* @throws RuntimeException or Error if the mappingFunction does so,
* in which case the mapping is left unestablished
*/
+ @SuppressWarnings("unchecked")
public V computeIfAbsent(K key, MappingFunction<? super K, ? extends V> mappingFunction) {
if (key == null || mappingFunction == null)
throw new NullPointerException();
- return internalCompute(key, mappingFunction, false);
+ return (V)internalComputeIfAbsent(key, mappingFunction);
}
/**
- * Computes the value associated with the given key using the given
- * mappingFunction, and if non-null, enters it into the map. This
- * is equivalent to
+ * Computes and enters a new mapping value given a key and
+ * its current mapped value (or {@code null} if there is no current
+ * mapping). This is equivalent to
* {@code
- * value = mappingFunction.map(key);
- * if (value != null)
- * map.put(key, value);
- * else
- * value = map.get(key);
- * return value;}
+ * map.put(key, remappingFunction.remap(key, map.get(key));
+ * }
*
- * except that the action is performed atomically. Some attempted
+ * except that the action is performed atomically. If the
+ * function returns {@code null} (in which case a {@code
+ * NullPointerException} is thrown), or the function itself throws
+ * an (unchecked) exception, the exception is rethrown to its
+ * caller, and current mapping is left unchanged. Some attempted
* update operations on this map by other threads may be blocked
* while computation is in progress, so the computation should be
* short and simple, and must not attempt to update any other
- * mappings of this Map.
+ * mappings of this Map. For example, to either create or
+ * append new messages to a value mapping:
+ *
+ * {@code
+ * Map<Key, String> map = ...;
+ * final String msg = ...;
+ * map.compute(key, new RemappingFunction<Key, String>() {
+ * public String remap(Key k, String v) {
+ * return (v == null) ? msg : v + msg;});}}
*
* @param key key with which the specified value is to be associated
- * @param mappingFunction the function to compute a value
- * @return the current value associated with
- * the specified key, or {@code null} if the computation
- * returned {@code null} and the value was not otherwise present
- * @throws NullPointerException if the specified key or mappingFunction
- * is null
+ * @param remappingFunction the function to compute a value
+ * @return the new value associated with
+ * the specified key.
+ * @throws NullPointerException if the specified key or remappingFunction
+ * or computed value is null
* @throws IllegalStateException if the computation detectably
* attempts a recursive update to this map that would
* otherwise never complete
- * @throws RuntimeException or Error if the mappingFunction does so,
+ * @throws RuntimeException or Error if the remappingFunction does so,
* in which case the mapping is unchanged
*/
- public V compute(K key, MappingFunction<? super K, ? extends V> mappingFunction) {
- if (key == null || mappingFunction == null)
+ @SuppressWarnings("unchecked")
+ public V compute(K key, RemappingFunction<? super K, V> remappingFunction) {
+ if (key == null || remappingFunction == null)
throw new NullPointerException();
- return internalCompute(key, mappingFunction, true);
+ return (V)internalCompute(key, remappingFunction);
}
/**
@@ -1462,22 +1992,32 @@ public class ConcurrentHashMapV8
Object k = nextKey;
Object v = nextVal;
advance();
- return new WriteThroughEntry(map, (K)k, (V)v);
+ return new WriteThroughEntry<K,V>((K)k, (V)v, map);
+ }
+ }
+
+ static final class SnapshotEntryIterator<K,V> extends ViewIterator<K,V>
+ implements Iterator<Map.Entry<K,V>> {
+ SnapshotEntryIterator(ConcurrentHashMapV8 map) { super(map); }
+
+ @SuppressWarnings("unchecked")
+ public final Map.Entry<K,V> next() {
+ if (next == null)
+ throw new NoSuchElementException();
+ Object k = nextKey;
+ Object v = nextVal;
+ advance();
+ return new SnapshotEntry<K,V>((K)k, (V)v);
}
}
/**
- * Custom Entry class used by EntryIterator.next(), that relays
- * setValue changes to the underlying map.
+ * Base of writeThrough and Snapshot entry classes
*/
- static final class WriteThroughEntry<K,V> implements Map.Entry<K,V> {
- final ConcurrentHashMapV8<K, V> map;
+ static abstract class MapEntry<K,V> implements Map.Entry<K,V> {
final K key; // non-null
V val; // non-null
- WriteThroughEntry(ConcurrentHashMapV8<K, V> map, K key, V val) {
- this.map = map; this.key = key; this.val = val;
- }
-
+ MapEntry(K key, V val) { this.key = key; this.val = val; }
public final K getKey() { return key; }
public final V getValue() { return val; }
public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
@@ -1492,6 +2032,21 @@ public class ConcurrentHashMapV8
(v == val || v.equals(val)));
}
+ public abstract V setValue(V value);
+ }
+
+ /**
+ * Entry used by EntryIterator.next(), that relays setValue
+ * changes to the underlying map.
+ */
+ static final class WriteThroughEntry<K,V> extends MapEntry<K,V>
+ implements Map.Entry<K,V> {
+ final ConcurrentHashMapV8<K, V> map;
+ WriteThroughEntry(K key, V val, ConcurrentHashMapV8<K, V> map) {
+ super(key, val);
+ this.map = map;
+ }
+
/**
* Sets our entry's value and writes through to the map. The
* value to return is somewhat arbitrary here. Since a
@@ -1510,50 +2065,211 @@ public class ConcurrentHashMapV8
}
}
+ /**
+ * Internal version of entry, that doesn't write though changes
+ */
+ static final class SnapshotEntry<K,V> extends MapEntry<K,V>
+ implements Map.Entry<K,V> {
+ SnapshotEntry(K key, V val) { super(key, val); }
+ public final V setValue(V value) { // only locally update
+ if (value == null) throw new NullPointerException();
+ V v = val;
+ val = value;
+ return v;
+ }
+ }
+
/* ----------------Views -------------- */
- /*
- * These currently just extend java.util.AbstractX classes, but
- * may need a new custom base to support partitioned traversal.
+ /**
+ * Base class for views. This is done mainly to allow adding
+ * customized parallel traversals (not yet implemented.)
*/
-
- static final class KeySet<K,V> extends AbstractSet<K> {
+ static abstract class MapView<K,V> {
final ConcurrentHashMapV8<K, V> map;
- KeySet(ConcurrentHashMapV8<K, V> map) { this.map = map; }
-
+ MapView(ConcurrentHashMapV8<K, V> map) { this.map = map; }
public final int size() { return map.size(); }
public final boolean isEmpty() { return map.isEmpty(); }
public final void clear() { map.clear(); }
+
+ // implementations below rely on concrete classes supplying these
+ abstract Iterator<?> iter();
+ abstract public boolean contains(Object o);
+ abstract public boolean remove(Object o);
+
+ private static final String oomeMsg = "Required array size too large";
+
+ public final Object[] toArray() {
+ long sz = map.longSize();
+ if (sz > (long)(MAX_ARRAY_SIZE))
+ throw new OutOfMemoryError(oomeMsg);
+ int n = (int)sz;
+ Object[] r = new Object[n];
+ int i = 0;
+ Iterator<?> it = iter();
+ while (it.hasNext()) {
+ if (i == n) {
+ if (n >= MAX_ARRAY_SIZE)
+ throw new OutOfMemoryError(oomeMsg);
+ if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
+ n = MAX_ARRAY_SIZE;
+ else
+ n += (n >>> 1) + 1;
+ r = Arrays.copyOf(r, n);
+ }
+ r[i++] = it.next();
+ }
+ return (i == n) ? r : Arrays.copyOf(r, i);
+ }
+
+ @SuppressWarnings("unchecked")
+ public final <T> T[] toArray(T[] a) {
+ long sz = map.longSize();
+ if (sz > (long)(MAX_ARRAY_SIZE))
+ throw new OutOfMemoryError(oomeMsg);
+ int m = (int)sz;
+ T[] r = (a.length >= m) ? a :
+ (T[])java.lang.reflect.Array
+ .newInstance(a.getClass().getComponentType(), m);
+ int n = r.length;
+ int i = 0;
+ Iterator<?> it = iter();
+ while (it.hasNext()) {
+ if (i == n) {
+ if (n >= MAX_ARRAY_SIZE)
+ throw new OutOfMemoryError(oomeMsg);
+ if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
+ n = MAX_ARRAY_SIZE;
+ else
+ n += (n >>> 1) + 1;
+ r = Arrays.copyOf(r, n);
+ }
+ r[i++] = (T)it.next();
+ }
+ if (a == r && i < n) {
+ r[i] = null; // null-terminate
+ return r;
+ }
+ return (i == n) ? r : Arrays.copyOf(r, i);
+ }
+
+ public final int hashCode() {
+ int h = 0;
+ for (Iterator<?> it = iter(); it.hasNext();)
+ h += it.next().hashCode();
+ return h;
+ }
+
+ public final String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append('[');
+ Iterator<?> it = iter();
+ if (it.hasNext()) {
+ for (;;) {
+ Object e = it.next();
+ sb.append(e == this ? "(this Collection)" : e);
+ if (!it.hasNext())
+ break;
+ sb.append(',').append(' ');
+ }
+ }
+ return sb.append(']').toString();
+ }
+
+ public final boolean containsAll(Collection<?> c) {
+ if (c != this) {
+ for (Iterator<?> it = c.iterator(); it.hasNext();) {
+ Object e = it.next();
+ if (e == null || !contains(e))
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public final boolean removeAll(Collection<?> c) {
+ boolean modified = false;
+ for (Iterator<?> it = iter(); it.hasNext();) {
+ if (c.contains(it.next())) {
+ it.remove();
+ modified = true;
+ }
+ }
+ return modified;
+ }
+
+ public final boolean retainAll(Collection<?> c) {
+ boolean modified = false;
+ for (Iterator<?> it = iter(); it.hasNext();) {
+ if (!c.contains(it.next())) {
+ it.remove();
+ modified = true;
+ }
+ }
+ return modified;
+ }
+
+ }
+
+ static final class KeySet<K,V> extends MapView<K,V> implements Set<K> {
+ KeySet(ConcurrentHashMapV8<K, V> map) { super(map); }
public final boolean contains(Object o) { return map.containsKey(o); }
public final boolean remove(Object o) { return map.remove(o) != null; }
+
public final Iterator<K> iterator() {
return new KeyIterator<K,V>(map);
}
+ final Iterator<?> iter() {
+ return new KeyIterator<K,V>(map);
+ }
+ public final boolean add(K e) {
+ throw new UnsupportedOperationException();
+ }
+ public final boolean addAll(Collection<? extends K> c) {
+ throw new UnsupportedOperationException();
+ }
+ public boolean equals(Object o) {
+ Set<?> c;
+ return ((o instanceof Set) &&
+ ((c = (Set<?>)o) == this ||
+ (containsAll(c) && c.containsAll(this))));
+ }
}
- static final class Values<K,V> extends AbstractCollection<V> {
- final ConcurrentHashMapV8<K, V> map;
- Values(ConcurrentHashMapV8<K, V> map) { this.map = map; }
-
- public final int size() { return map.size(); }
- public final boolean isEmpty() { return map.isEmpty(); }
- public final void clear() { map.clear(); }
+ static final class Values<K,V> extends MapView<K,V>
+ implements Collection<V> {
+ Values(ConcurrentHashMapV8<K, V> map) { super(map); }
public final boolean contains(Object o) { return map.containsValue(o); }
+
+ public final boolean remove(Object o) {
+ if (o != null) {
+ Iterator<V> it = new ValueIterator<K,V>(map);
+ while (it.hasNext()) {
+ if (o.equals(it.next())) {
+ it.remove();
+ return true;
+ }
+ }
+ }
+ return false;
+ }
public final Iterator<V> iterator() {
return new ValueIterator<K,V>(map);
}
+ final Iterator<?> iter() {
+ return new ValueIterator<K,V>(map);
+ }
+ public final boolean add(V e) {
+ throw new UnsupportedOperationException();
+ }
+ public final boolean addAll(Collection<? extends V> c) {
+ throw new UnsupportedOperationException();
+ }
}
- static final class EntrySet<K,V> extends AbstractSet<Map.Entry<K,V>> {
- final ConcurrentHashMapV8<K, V> map;
- EntrySet(ConcurrentHashMapV8<K, V> map) { this.map = map; }
-
- public final int size() { return map.size(); }
- public final boolean isEmpty() { return map.isEmpty(); }
- public final void clear() { map.clear(); }
- public final Iterator<Map.Entry<K,V>> iterator() {
- return new EntryIterator<K,V>(map);
- }
+ static final class EntrySet<K,V> extends MapView<K,V>
+ implements Set<Map.Entry<K,V>> {
+ EntrySet(ConcurrentHashMapV8<K, V> map) { super(map); }
public final boolean contains(Object o) {
Object k, v, r; Map.Entry<?,?> e;
@@ -1571,6 +2287,25 @@ public class ConcurrentHashMapV8
(v = e.getValue()) != null &&
map.remove(k, v));
}
+
+ public final Iterator<Map.Entry<K,V>> iterator() {
+ return new EntryIterator<K,V>(map);
+ }
+ final Iterator<?> iter() {
+ return new SnapshotEntryIterator<K,V>(map);
+ }
+ public final boolean add(Entry<K,V> e) {
+ throw new UnsupportedOperationException();
+ }
+ public final boolean addAll(Collection<? extends Entry<K,V>> c) {
+ throw new UnsupportedOperationException();
+ }
+ public boolean equals(Object o) {
+ Set<?> c;
+ return ((o instanceof Set) &&
+ ((c = (Set<?>)o) == this ||
+ (containsAll(c) && c.containsAll(this))));
+ }
}
/* ---------------- Serialization Support -------------- */
@@ -1626,7 +2361,6 @@ public class ConcurrentHashMapV8
this.segments = null; // unneeded
// initialize transient final field
UNSAFE.putObjectVolatile(this, counterOffset, new LongAdder());
- this.targetCapacity = DEFAULT_CAPACITY;
// Create all nodes, then place in table once size is known
long size = 0L;
@@ -1643,19 +2377,19 @@ public class ConcurrentHashMapV8
}
if (p != null) {
boolean init = false;
- if (resizing == 0 &&
- UNSAFE.compareAndSwapInt(this, resizingOffset, 0, 1)) {
+ int n;
+ if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
+ n = MAXIMUM_CAPACITY;
+ else {
+ int sz = (int)size;
+ n = tableSizeFor(sz + (sz >>> 1) + 1);
+ }
+ int sc = sizeCtl;
+ if (n > sc &&
+ UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
try {
if (table == null) {
init = true;
- int n;
- if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
- n = MAXIMUM_CAPACITY;
- else {
- int sz = (int)size;
- n = tableSizeFor(sz + (sz >>> 1) + 1);
- }
- threshold = n - (n >>> 2) - THRESHOLD_OFFSET;
Node[] tab = new Node[n];
int mask = n - 1;
while (p != null) {
@@ -1667,14 +2401,15 @@ public class ConcurrentHashMapV8
}
table = tab;
counter.add(size);
+ sc = n - (n >>> 2);
}
} finally {
- resizing = 0;
+ sizeCtl = sc;
}
}
if (!init) { // Can only happen if unsafely published.
while (p != null) {
- internalPut(p.key, p.val, true);
+ internalPut(p.key, p.val);
p = p.next;
}
}
@@ -1684,7 +2419,7 @@ public class ConcurrentHashMapV8
// Unsafe mechanics
private static final sun.misc.Unsafe UNSAFE;
private static final long counterOffset;
- private static final long resizingOffset;
+ private static final long sizeCtlOffset;
private static final long ABASE;
private static final int ASHIFT;
@@ -1695,8 +2430,8 @@ public class ConcurrentHashMapV8
Class<?> k = ConcurrentHashMapV8.class;
counterOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("counter"));
- resizingOffset = UNSAFE.objectFieldOffset
- (k.getDeclaredField("resizing"));
+ sizeCtlOffset = UNSAFE.objectFieldOffset
+ (k.getDeclaredField("sizeCtl"));
Class<?> sc = Node[].class;
ABASE = UNSAFE.arrayBaseOffset(sc);
ss = UNSAFE.arrayIndexScale(sc);