--- jsr166/src/jsr166e/ConcurrentHashMapV8.java 2011/10/03 11:20:47 1.29
+++ jsr166/src/jsr166e/ConcurrentHashMapV8.java 2012/06/09 16:54:12 1.39
@@ -4,6 +4,8 @@
* http://creativecommons.org/publicdomain/zero/1.0/
*/
+// Snapshot Tue Jun 5 14:56:09 2012 Doug Lea (dl at altair)
+
package jsr166e;
import jsr166e.LongAdder;
import java.util.Arrays;
@@ -20,7 +22,9 @@ import java.util.Enumeration;
import java.util.ConcurrentModificationException;
import java.util.NoSuchElementException;
import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.locks.LockSupport;
+import java.util.concurrent.locks.AbstractQueuedSynchronizer;
import java.io.Serializable;
/**
@@ -71,7 +75,7 @@ import java.io.Serializable;
* versions of this class, constructors may optionally specify an
* expected {@code concurrencyLevel} as an additional hint for
* internal sizing. Note that using many keys with exactly the same
- * {@code hashCode{}} is a sure way to slow down performance of any
+ * {@code hashCode()} is a sure way to slow down performance of any
* hash table.
*
*
 * <p>This class and its views and iterators implement all of the
@@ -147,18 +151,20 @@ public class ConcurrentHashMapV8
* supplying null-checks and casts as needed. This also allows
* many of the public methods to be factored into a smaller number
* of internal methods (although sadly not so for the five
- * sprawling variants of put-related operations).
+ * variants of put-related operations). The validation-based
+ * approach explained below leads to a lot of code sprawl because
+ * retry-control precludes factoring into smaller methods.
*
* The table is lazily initialized to a power-of-two size upon the
- * first insertion. Each bin in the table contains a list of
- * Nodes (most often, the list has only zero or one Node). Table
- * accesses require volatile/atomic reads, writes, and CASes.
- * Because there is no other way to arrange this without adding
- * further indirections, we use intrinsics (sun.misc.Unsafe)
- * operations. The lists of nodes within bins are always
- * accurately traversable under volatile reads, so long as lookups
- * check hash code and non-nullness of value before checking key
- * equality.
+ * first insertion. Each bin in the table normally contains a
+ * list of Nodes (most often, the list has only zero or one Node).
+ * Table accesses require volatile/atomic reads, writes, and
+ * CASes. Because there is no other way to arrange this without
+ * adding further indirections, we use intrinsics
+ * (sun.misc.Unsafe) operations. The lists of nodes within bins
+ * are always accurately traversable under volatile reads, so long
+ * as lookups check hash code and non-nullness of value before
+ * checking key equality.
*
* We use the top two bits of Node hash fields for control
* purposes -- they are available anyway because of addressing
@@ -170,23 +176,23 @@ public class ConcurrentHashMapV8
* 10 - Node is a forwarding node
*
* The lower 30 bits of each Node's hash field contain a
- * transformation (for better randomization -- method "spread") of
- * the key's hash code, except for forwarding nodes, for which the
- * lower bits are zero (and so always have hash field == MOVED).
+ * transformation of the key's hash code, except for forwarding
+ * nodes, for which the lower bits are zero (and so always have
+ * hash field == MOVED).
*
* Insertion (via put or its variants) of the first node in an
* empty bin is performed by just CASing it to the bin. This is
- * by far the most common case for put operations. Other update
- * operations (insert, delete, and replace) require locks. We do
- * not want to waste the space required to associate a distinct
- * lock object with each bin, so instead use the first node of a
- * bin list itself as a lock. Blocking support for these locks
- * relies on the builtin "synchronized" monitors. However, we
- * also need a tryLock construction, so we overlay these by using
- * bits of the Node hash field for lock control (see above), and
- * so normally use builtin monitors only for blocking and
- * signalling using wait/notifyAll constructions. See
- * Node.tryAwaitLock.
+ * by far the most common case for put operations under most
+ * key/hash distributions. Other update operations (insert,
+ * delete, and replace) require locks. We do not want to waste
+ * the space required to associate a distinct lock object with
+ * each bin, so instead use the first node of a bin list itself as
+ * a lock. Blocking support for these locks relies on the builtin
+ * "synchronized" monitors. However, we also need a tryLock
+ * construction, so we overlay these by using bits of the Node
+ * hash field for lock control (see above), and so normally use
+ * builtin monitors only for blocking and signalling using
+ * wait/notifyAll constructions. See Node.tryAwaitLock.
*
* Using the first node of a list as a lock does not by itself
* suffice though: When a node is locked, any update must first
@@ -201,30 +207,53 @@ public class ConcurrentHashMapV8
* The main disadvantage of per-bin locks is that other update
* operations on other nodes in a bin list protected by the same
* lock can stall, for example when user equals() or mapping
- * functions take a long time. However, statistically, this is
- * not a common enough problem to outweigh the time/space overhead
- * of alternatives: Under random hash codes, the frequency of
- * nodes in bins follows a Poisson distribution
+ * functions take a long time. However, statistically, under
+ * random hash codes, this is not a common problem. Ideally, the
+ * frequency of nodes in bins follows a Poisson distribution
* (http://en.wikipedia.org/wiki/Poisson_distribution) with a
* parameter of about 0.5 on average, given the resizing threshold
* of 0.75, although with a large variance because of resizing
* granularity. Ignoring variance, the expected occurrences of
* list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
- * first few values are:
+ * first values are:
*
- * 0: 0.607
- * 1: 0.303
- * 2: 0.076
- * 3: 0.012
- * more: 0.002
+ * 0: 0.60653066
+ * 1: 0.30326533
+ * 2: 0.07581633
+ * 3: 0.01263606
+ * 4: 0.00157952
+ * 5: 0.00015795
+ * 6: 0.00001316
+ * 7: 0.00000094
+ * 8: 0.00000006
+ * more: less than 1 in ten million
*
* Lock contention probability for two threads accessing distinct
- * elements is roughly 1 / (8 * #elements). Function "spread"
- * performs hashCode randomization that improves the likelihood
- * that these assumptions hold unless users define exactly the
- * same value for too many hashCodes.
+ * elements is roughly 1 / (8 * #elements) under random hashes.
+ *
+ * Actual hash code distributions encountered in practice
+ * sometimes deviate significantly from uniform randomness. This
+ * includes the case when N > (1<<30), so some keys MUST collide.
+ * Similarly for dumb or hostile usages in which multiple keys are
+ * designed to have identical hash codes. Also, although we guard
+ * against the worst effects of this (see method spread), sets of
+ * hashes may differ only in bits that do not impact their bin
+ * index for a given power-of-two mask. So we use a secondary
+ * strategy that applies when the number of nodes in a bin exceeds
+ * a threshold, and at least one of the keys implements
+ * Comparable. These TreeBins use a balanced tree to hold nodes
+ * (a specialized form of red-black trees), bounding search time
+ * to O(log N). Each search step in a TreeBin is around twice as
+ * slow as in a regular list, but given that N cannot exceed
+ * (1<<64) (before running out of addresses) this bounds search
+ * steps, lock hold times, etc, to reasonable constants (roughly
+ * 100 nodes inspected per operation worst case) so long as keys
+ * are Comparable (which is very common -- String, Long, etc).
+ * TreeBin nodes (TreeNodes) also maintain the same "next"
+ * traversal pointers as regular nodes, so can be traversed in
+ * iterators in the same way.
*
- * The table is resized when occupancy exceeds an occupancy
+ * The table is resized when occupancy exceeds a percentage
* threshold (nominally, 0.75, but see below). Only a single
* thread performs the resize (using field "sizeCtl", to arrange
* exclusion), but the table otherwise remains usable for reads
@@ -245,22 +274,22 @@ public class ConcurrentHashMapV8
*
* Each bin transfer requires its bin lock. However, unlike other
* cases, a transfer can skip a bin if it fails to acquire its
- * lock, and revisit it later. Method rebuild maintains a buffer
- * of TRANSFER_BUFFER_SIZE bins that have been skipped because of
- * failure to acquire a lock, and blocks only if none are
- * available (i.e., only very rarely). The transfer operation
- * must also ensure that all accessible bins in both the old and
- * new table are usable by any traversal. When there are no lock
- * acquisition failures, this is arranged simply by proceeding
- * from the last bin (table.length - 1) up towards the first.
- * Upon seeing a forwarding node, traversals (see class
- * InternalIterator) arrange to move to the new table without
- * revisiting nodes. However, when any node is skipped during a
- * transfer, all earlier table bins may have become visible, so
- * are initialized with a reverse-forwarding node back to the old
- * table until the new ones are established. (This sometimes
- * requires transiently locking a forwarding node, which is
- * possible under the above encoding.) These more expensive
+ * lock, and revisit it later (unless it is a TreeBin). Method
+ * rebuild maintains a buffer of TRANSFER_BUFFER_SIZE bins that
+ * have been skipped because of failure to acquire a lock, and
+ * blocks only if none are available (i.e., only very rarely).
+ * The transfer operation must also ensure that all accessible
+ * bins in both the old and new table are usable by any traversal.
+ * When there are no lock acquisition failures, this is arranged
+ * simply by proceeding from the last bin (table.length - 1) up
+ * towards the first. Upon seeing a forwarding node, traversals
+ * (see class InternalIterator) arrange to move to the new table
+ * without revisiting nodes. However, when any node is skipped
+ * during a transfer, all earlier table bins may have become
+ * visible, so are initialized with a reverse-forwarding node back
+ * to the old table until the new ones are established. (This
+ * sometimes requires transiently locking a forwarding node, which
+ * is possible under the above encoding.) These more expensive
* mechanics trigger only when necessary.
*
* The traversal scheme also applies to partial traversals of
@@ -347,11 +376,18 @@ public class ConcurrentHashMapV8
*/
private static final int TRANSFER_BUFFER_SIZE = 32;
+ /**
+ * The bin count threshold for using a tree rather than list for a
+ * bin. The value reflects the approximate break-even point for
+ * using tree-based operations.
+ */
+ private static final int TREE_THRESHOLD = 8;
+
/*
* Encodings for special uses of Node hash fields. See above for
* explanation.
*/
- static final int MOVED = 0x80000000; // hash field for fowarding nodes
+ static final int MOVED = 0x80000000; // hash field for forwarding nodes
static final int LOCKED = 0x40000000; // set/tested only as a bit
static final int WAITING = 0xc0000000; // both bits set/tested together
static final int HASH_BITS = 0x3fffffff; // usable bits of normal node hash
@@ -386,6 +422,32 @@ public class ConcurrentHashMapV8
/** For serialization compatibility. Null unless serialized; see below */
private Segment[] segments;
+ /* ---------------- Table element access -------------- */
+
+ /*
+ * Volatile access methods are used for table elements as well as
+ * elements of in-progress next table while resizing. Uses are
+ * null checked by callers, and implicitly bounds-checked, relying
+ * on the invariants that tab arrays have non-zero size, and all
+ * indices are masked with (tab.length - 1) which is never
+ * negative and always less than length. Note that, to be correct
+ * wrt arbitrary concurrency errors by users, bounds checks must
+ * operate on local variables, which accounts for some odd-looking
+ * inline assignments below.
+ */
+
+ static final Node tabAt(Node[] tab, int i) { // used by InternalIterator
+ return (Node)UNSAFE.getObjectVolatile(tab, ((long)i<<ASHIFT)+ABASE);
+ }
* access, a key may be read before a val, but can only be used
* after checking val to be non-null.
*/
- static final class Node {
+ static class Node {
volatile int hash;
final Object key;
volatile Object val;
@@ -435,11 +497,13 @@ public class ConcurrentHashMapV8
*/
final void tryAwaitLock(Node[] tab, int i) {
if (tab != null && i >= 0 && i < tab.length) { // bounds check
+ int r = ThreadLocalRandom.current().nextInt(); // randomize spins
int spins = MAX_SPINS, h;
while (tabAt(tab, i) == this && ((h = hash) & LOCKED) != 0) {
if (spins >= 0) {
- if (--spins == MAX_SPINS >>> 1)
- Thread.yield(); // heuristically yield mid-way
+ r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift
+ if (r >= 0 && --spins == 0)
+ Thread.yield(); // yield before block
}
else if (casHash(h, h | WAITING)) {
synchronized (this) {
@@ -476,65 +540,533 @@ public class ConcurrentHashMapV8
}
}
- /* ---------------- Table element access -------------- */
+ /* ---------------- TreeBins -------------- */
- /*
- * Volatile access methods are used for table elements as well as
- * elements of in-progress next table while resizing. Uses are
- * null checked by callers, and implicitly bounds-checked, relying
- * on the invariants that tab arrays have non-zero size, and all
- * indices are masked with (tab.length - 1) which is never
- * negative and always less than length. Note that, to be correct
- * wrt arbitrary concurrency errors by users, bounds checks must
- * operate on local variables, which accounts for some odd-looking
- * inline assignments below.
+ /**
+ * Nodes for use in TreeBins
*/
-
- static final Node tabAt(Node[] tab, int i) { // used by InternalIterator
- return (Node)UNSAFE.getObjectVolatile(tab, ((long)i<<ASHIFT)+ABASE);
- }
+ * for the same T, so we cannot invoke compareTo among them. To
+ * handle this, the tree is ordered primarily by hash value, then
+ * by getClass().getName() order, and then by Comparator order
+ * among elements of the same class. On lookup at a node, if
+ * non-Comparable, both left and right children may need to be
+ * searched in the case of tied hash values. (This corresponds to
+ * the full list search that would be necessary if all elements
+ * were non-Comparable and had tied hashes.)
+ *
+ * TreeBins also maintain a separate locking discipline than
+ * regular bins. Because they are forwarded via special MOVED
+ * nodes at bin heads (which can never change once established),
+ * we cannot use those nodes as locks. Instead, TreeBin
+ * extends AbstractQueuedSynchronizer to support a simple form of
+ * read-write lock. For update operations and table validation,
+ * the exclusive form of lock behaves in the same way as bin-head
+ * locks. However, lookups use shared read-lock mechanics to allow
+ * multiple readers in the absence of writers. Additionally,
+ * these lookups do not ever block: While the lock is not
+ * available, they proceed along the slow traversal path (via
+ * next-pointers) until the lock becomes available or the list is
+ * exhausted, whichever comes first. (These cases are not fast,
+ * but maximize aggregate expected throughput.) The AQS mechanics
+ * for doing this are straightforward. The lock state is held as
+ * AQS getState(). Read counts are negative; the write count (1)
+ * is positive. There are no signalling preferences among readers
+ * and writers. Since we don't need to export full Lock API, we
+ * just override the minimal AQS methods and use them directly.
+ */
+ static final class TreeBin extends AbstractQueuedSynchronizer {
+ private static final long serialVersionUID = 2249069246763182397L;
+ TreeNode root; // root of tree
+ TreeNode first; // head of next-pointer list
- private static final void setTabAt(Node[] tab, int i, Node v) {
- UNSAFE.putObjectVolatile(tab, ((long)i<<ASHIFT)+ABASE, v);
- }
+ public final boolean isHeldExclusively() { return getState() > 0; }
+ public final boolean tryAcquire(int ignore) {
+ if (compareAndSetState(0, 1)) {
+ setExclusiveOwnerThread(Thread.currentThread());
+ return true;
+ }
+ return false;
+ }
+ public final boolean tryRelease(int ignore) {
+ setExclusiveOwnerThread(null);
+ setState(0);
+ return true;
+ }
+ public final int tryAcquireShared(int ignore) {
+ for (int c;;) {
+ if ((c = getState()) > 0)
+ return -1;
+ if (compareAndSetState(c, c -1))
+ return 1;
+ }
+ }
+ public final boolean tryReleaseShared(int ignore) {
+ int c;
+ do {} while (!compareAndSetState(c = getState(), c + 1));
+ return c == -1;
+ }
+
+ /**
+ * Return the TreeNode (or null if not found) for the given key
+ * starting at given root.
+ */
+ @SuppressWarnings("unchecked") // suppress Comparable cast warning
+ final TreeNode getTreeNode(int h, Object k, TreeNode p) {
+ Class<?> c = k.getClass();
+ while (p != null) {
+ int dir, ph; Object pk; Class<?> pc; TreeNode r;
+ if (h < (ph = p.hash))
+ dir = -1;
+ else if (h > ph)
+ dir = 1;
+ else if ((pk = p.key) == k || k.equals(pk))
+ return p;
+ else if (c != (pc = pk.getClass()))
+ dir = c.getName().compareTo(pc.getName());
+ else if (k instanceof Comparable)
+ dir = ((Comparable)k).compareTo((Comparable)pk);
+ else
+ dir = 0;
+ TreeNode pr = p.right;
+ if (dir > 0)
+ p = pr;
+ else if (dir == 0 && pr != null && h >= pr.hash &&
+ (r = getTreeNode(h, k, pr)) != null)
+ return r;
+ else
+ p = p.left;
+ }
+ return null;
+ }
+
+ /**
+ * Wrapper for getTreeNode used by CHM.get. Tries to obtain
+ * read-lock to call getTreeNode, but during failure to get
+ * lock, searches along next links.
+ */
+ final Object getValue(int h, Object k) {
+ Node r = null;
+ int c = getState(); // Must read lock state first
+ for (Node e = first; e != null; e = e.next) {
+ if (c <= 0 && compareAndSetState(c, c - 1)) {
+ try {
+ r = getTreeNode(h, k, root);
+ } finally {
+ releaseShared(0);
+ }
+ break;
+ }
+ else if ((e.hash & HASH_BITS) == h && k.equals(e.key)) {
+ r = e;
+ break;
+ }
+ else
+ c = getState();
+ }
+ return r == null ? null : r.val;
+ }
+
+ /**
+ * Find or add a node
+ * @return null if added
+ */
+ @SuppressWarnings("unchecked") // suppress Comparable cast warning
+ final TreeNode putTreeNode(int h, Object k, Object v) {
+ Class<?> c = k.getClass();
+ TreeNode p = root;
+ int dir = 0;
+ if (p != null) {
+ for (;;) {
+ int ph; Object pk; Class<?> pc; TreeNode r;
+ if (h < (ph = p.hash))
+ dir = -1;
+ else if (h > ph)
+ dir = 1;
+ else if ((pk = p.key) == k || k.equals(pk))
+ return p;
+ else if (c != (pc = (pk = p.key).getClass()))
+ dir = c.getName().compareTo(pc.getName());
+ else if (k instanceof Comparable)
+ dir = ((Comparable)k).compareTo((Comparable)pk);
+ else
+ dir = 0;
+ TreeNode pr = p.right, pl;
+ if (dir > 0) {
+ if (pr == null)
+ break;
+ p = pr;
+ }
+ else if (dir == 0 && pr != null && h >= pr.hash &&
+ (r = getTreeNode(h, k, pr)) != null)
+ return r;
+ else if ((pl = p.left) == null)
+ break;
+ else
+ p = pl;
+ }
+ }
+ TreeNode f = first;
+ TreeNode r = first = new TreeNode(h, k, v, f, p);
+ if (p == null)
+ root = r;
+ else {
+ if (dir <= 0)
+ p.left = r;
+ else
+ p.right = r;
+ if (f != null)
+ f.prev = r;
+ fixAfterInsertion(r);
+ }
+ return null;
+ }
+
+ /**
+ * Removes the given node, that must be present before this
+ * call. This is messier than typical red-black deletion code
+ * because we cannot swap the contents of an interior node
+ * with a leaf successor that is pinned by "next" pointers
+ * that are accessible independently of lock. So instead we
+ * swap the tree linkages.
+ */
+ final void deleteTreeNode(TreeNode p) {
+ TreeNode next = (TreeNode)p.next; // unlink traversal pointers
+ TreeNode pred = p.prev;
+ if (pred == null)
+ first = next;
+ else
+ pred.next = next;
+ if (next != null)
+ next.prev = pred;
+ TreeNode replacement;
+ TreeNode pl = p.left;
+ TreeNode pr = p.right;
+ if (pl != null && pr != null) {
+ TreeNode s = pr;
+ while (s.left != null) // find successor
+ s = s.left;
+ boolean c = s.red; s.red = p.red; p.red = c; // swap colors
+ TreeNode sr = s.right;
+ TreeNode pp = p.parent;
+ if (s == pr) { // p was s's direct parent
+ p.parent = s;
+ s.right = p;
+ }
+ else {
+ TreeNode sp = s.parent;
+ if ((p.parent = sp) != null) {
+ if (s == sp.left)
+ sp.left = p;
+ else
+ sp.right = p;
+ }
+ if ((s.right = pr) != null)
+ pr.parent = s;
+ }
+ p.left = null;
+ if ((p.right = sr) != null)
+ sr.parent = p;
+ if ((s.left = pl) != null)
+ pl.parent = s;
+ if ((s.parent = pp) == null)
+ root = s;
+ else if (p == pp.left)
+ pp.left = s;
+ else
+ pp.right = s;
+ replacement = sr;
+ }
+ else
+ replacement = (pl != null) ? pl : pr;
+ TreeNode pp = p.parent;
+ if (replacement == null) {
+ if (pp == null) {
+ root = null;
+ return;
+ }
+ replacement = p;
+ }
+ else {
+ replacement.parent = pp;
+ if (pp == null)
+ root = replacement;
+ else if (p == pp.left)
+ pp.left = replacement;
+ else
+ pp.right = replacement;
+ p.left = p.right = p.parent = null;
+ }
+ if (!p.red)
+ fixAfterDeletion(replacement);
+ if (p == replacement && (pp = p.parent) != null) {
+ if (p == pp.left) // detach pointers
+ pp.left = null;
+ else if (p == pp.right)
+ pp.right = null;
+ p.parent = null;
+ }
+ }
+
+ // CLR code updated from pre-jdk-collections version at
+ // http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java
+
+ /** From CLR */
+ private void rotateLeft(TreeNode p) {
+ if (p != null) {
+ TreeNode r = p.right, pp, rl;
+ if ((rl = p.right = r.left) != null)
+ rl.parent = p;
+ if ((pp = r.parent = p.parent) == null)
+ root = r;
+ else if (pp.left == p)
+ pp.left = r;
+ else
+ pp.right = r;
+ r.left = p;
+ p.parent = r;
+ }
+ }
+
+ /** From CLR */
+ private void rotateRight(TreeNode p) {
+ if (p != null) {
+ TreeNode l = p.left, pp, lr;
+ if ((lr = p.left = l.right) != null)
+ lr.parent = p;
+ if ((pp = l.parent = p.parent) == null)
+ root = l;
+ else if (pp.right == p)
+ pp.right = l;
+ else
+ pp.left = l;
+ l.right = p;
+ p.parent = l;
+ }
+ }
+
+ /** From CLR */
+ private void fixAfterInsertion(TreeNode x) {
+ x.red = true;
+ TreeNode xp, xpp;
+ while (x != null && (xp = x.parent) != null && xp.red &&
+ (xpp = xp.parent) != null) {
+ TreeNode xppl = xpp.left;
+ if (xp == xppl) {
+ TreeNode y = xpp.right;
+ if (y != null && y.red) {
+ y.red = false;
+ xp.red = false;
+ xpp.red = true;
+ x = xpp;
+ }
+ else {
+ if (x == xp.right) {
+ x = xp;
+ rotateLeft(x);
+ xpp = (xp = x.parent) == null ? null : xp.parent;
+ }
+ if (xp != null) {
+ xp.red = false;
+ if (xpp != null) {
+ xpp.red = true;
+ rotateRight(xpp);
+ }
+ }
+ }
+ }
+ else {
+ TreeNode y = xppl;
+ if (y != null && y.red) {
+ y.red = false;
+ xp.red = false;
+ xpp.red = true;
+ x = xpp;
+ }
+ else {
+ if (x == xp.left) {
+ x = xp;
+ rotateRight(x);
+ xpp = (xp = x.parent) == null ? null : xp.parent;
+ }
+ if (xp != null) {
+ xp.red = false;
+ if (xpp != null) {
+ xpp.red = true;
+ rotateLeft(xpp);
+ }
+ }
+ }
+ }
+ }
+ TreeNode r = root;
+ if (r != null && r.red)
+ r.red = false;
+ }
+
+ /** From CLR */
+ private void fixAfterDeletion(TreeNode x) {
+ while (x != null) {
+ TreeNode xp, xpl;
+ if (x.red || (xp = x.parent) == null) {
+ x.red = false;
+ break;
+ }
+ if (x == (xpl = xp.left)) {
+ TreeNode sib = xp.right;
+ if (sib != null && sib.red) {
+ sib.red = false;
+ xp.red = true;
+ rotateLeft(xp);
+ sib = (xp = x.parent) == null ? null : xp.right;
+ }
+ if (sib == null)
+ x = xp;
+ else {
+ TreeNode sl = sib.left, sr = sib.right;
+ if ((sr == null || !sr.red) &&
+ (sl == null || !sl.red)) {
+ sib.red = true;
+ x = xp;
+ }
+ else {
+ if (sr == null || !sr.red) {
+ if (sl != null)
+ sl.red = false;
+ sib.red = true;
+ rotateRight(sib);
+ sib = (xp = x.parent) == null ? null : xp.right;
+ }
+ if (sib != null) {
+ sib.red = (xp == null) ? false : xp.red;
+ if ((sr = sib.right) != null)
+ sr.red = false;
+ }
+ if (xp != null) {
+ xp.red = false;
+ rotateLeft(xp);
+ }
+ x = root;
+ }
+ }
+ }
+ else { // symmetric
+ TreeNode sib = xpl;
+ if (sib != null && sib.red) {
+ sib.red = false;
+ xp.red = true;
+ rotateRight(xp);
+ sib = (xp = x.parent) == null ? null : xp.left;
+ }
+ if (sib == null)
+ x = xp;
+ else {
+ TreeNode sl = sib.left, sr = sib.right;
+ if ((sl == null || !sl.red) &&
+ (sr == null || !sr.red)) {
+ sib.red = true;
+ x = xp;
+ }
+ else {
+ if (sl == null || !sl.red) {
+ if (sr != null)
+ sr.red = false;
+ sib.red = true;
+ rotateLeft(sib);
+ sib = (xp = x.parent) == null ? null : xp.left;
+ }
+ if (sib != null) {
+ sib.red = (xp == null) ? false : xp.red;
+ if ((sl = sib.left) != null)
+ sl.red = false;
+ }
+ if (xp != null) {
+ xp.red = false;
+ rotateRight(xp);
+ }
+ x = root;
+ }
+ }
+ }
+ }
+ }
}
- /* ---------------- Internal access and update methods -------------- */
+ /* ---------------- Collision reduction methods -------------- */
/**
- * Applies a supplemental hash function to a given hashCode, which
- * defends against poor quality hash functions. The result must
- * be have the top 2 bits clear. For reasonable performance, this
- * function must have good avalanche properties; i.e., that each
- * bit of the argument affects each bit of the result. (Although
- * we don't care about the unused top 2 bits.)
+ * Spreads higher bits to lower, and also forces top 2 bits to 0.
+ * Because the table uses power-of-two masking, sets of hashes
+ * that vary only in bits above the current mask will always
+ * collide. (Among known examples are sets of Float keys holding
+ * consecutive whole numbers in small tables.) To counter this,
+ * we apply a transform that spreads the impact of higher bits
+ * downward. There is a tradeoff between speed, utility, and
+ * quality of bit-spreading. Because many common sets of hashes
+ * are already reasonably distributed across bits (so don't benefit
+ * from spreading), and because we use trees to handle large sets
+ * of collisions in bins, we don't need excessively high quality.
*/
private static final int spread(int h) {
- // Apply base step of MurmurHash; see http://code.google.com/p/smhasher/
- // Despite two multiplies, this is often faster than others
- // with comparable bit-spread properties.
- h ^= h >>> 16;
- h *= 0x85ebca6b;
- h ^= h >>> 13;
- h *= 0xc2b2ae35;
- return ((h >>> 16) ^ h) & HASH_BITS; // mask out top bits
+ h ^= (h >>> 18) ^ (h >>> 12);
+ return (h ^ (h >>> 10)) & HASH_BITS;
+ }
+
+ /**
+ * Replaces a list bin with a tree bin. Call only when locked.
+ * Fails to replace if the given key is non-comparable or table
+ * is, or needs, resizing.
+ */
+ private final void replaceWithTreeBin(Node[] tab, int index, Object key) {
+ if ((key instanceof Comparable) &&
+ (tab.length >= MAXIMUM_CAPACITY || counter.sum() < (long)sizeCtl)) {
+ TreeBin t = new TreeBin();
+ for (Node e = tabAt(tab, index); e != null; e = e.next)
+ t.putTreeNode(e.hash & HASH_BITS, e.key, e.val);
+ setTabAt(tab, index, new Node(MOVED, t, null, null));
+ }
}
+ /* ---------------- Internal access and update methods -------------- */
+
/** Implementation for get and containsKey */
private final Object internalGet(Object k) {
int h = spread(k.hashCode());
retry: for (Node[] tab = table; tab != null;) {
- Node e; Object ek, ev; int eh; // locals to read fields once
+ Node e, p; Object ek, ev; int eh; // locals to read fields once
for (e = tabAt(tab, (tab.length - 1) & h); e != null; e = e.next) {
if ((eh = e.hash) == MOVED) {
- tab = (Node[])e.key; // restart with new table
- continue retry;
+ if ((ek = e.key) instanceof TreeBin) // search TreeBin
+ return ((TreeBin)ek).getValue(h, k);
+ else { // restart with new table
+ tab = (Node[])ek;
+ continue retry;
+ }
}
- if ((eh & HASH_BITS) == h && (ev = e.val) != null &&
- ((ek = e.key) == k || k.equals(ek)))
+ else if ((eh & HASH_BITS) == h && (ev = e.val) != null &&
+ ((ek = e.key) == k || k.equals(ek)))
return ev;
}
break;
@@ -551,12 +1083,43 @@ public class ConcurrentHashMapV8
int h = spread(k.hashCode());
Object oldVal = null;
for (Node[] tab = table;;) {
- Node f; int i, fh;
+ Node f; int i, fh; Object fk;
if (tab == null ||
(f = tabAt(tab, i = (tab.length - 1) & h)) == null)
break;
- else if ((fh = f.hash) == MOVED)
- tab = (Node[])f.key;
+ else if ((fh = f.hash) == MOVED) {
+ if ((fk = f.key) instanceof TreeBin) {
+ TreeBin t = (TreeBin)fk;
+ boolean validated = false;
+ boolean deleted = false;
+ t.acquire(0);
+ try {
+ if (tabAt(tab, i) == f) {
+ validated = true;
+ TreeNode p = t.getTreeNode(h, k, t.root);
+ if (p != null) {
+ Object pv = p.val;
+ if (cv == null || cv == pv || cv.equals(pv)) {
+ oldVal = pv;
+ if ((p.val = v) == null) {
+ deleted = true;
+ t.deleteTreeNode(p);
+ }
+ }
+ }
+ }
+ } finally {
+ t.release(0);
+ }
+ if (validated) {
+ if (deleted)
+ counter.add(-1L);
+ break;
+ }
+ }
+ else
+ tab = (Node[])fk;
+ }
else if ((fh & HASH_BITS) != h && f.next == null) // precheck
break; // rules out possible existence
else if ((fh & LOCKED) != 0) {
@@ -595,7 +1158,7 @@ public class ConcurrentHashMapV8
} finally {
if (!f.casHash(fh | LOCKED, fh)) {
f.hash = fh;
- synchronized(f) { f.notifyAll(); };
+ synchronized (f) { f.notifyAll(); };
}
}
if (validated) {
@@ -615,7 +1178,8 @@ public class ConcurrentHashMapV8
* 1. If table uninitialized, create
* 2. If bin empty, try to CAS new node
* 3. If bin stale, use new table
- * 4. Lock and validate; if valid, scan and add or update
+ * 4. if bin converted to TreeBin, validate and relay to TreeBin methods
+ * 5. Lock and validate; if valid, scan and add or update
*
* The others interweave other checks and/or alternative actions:
* * Plain put checks for and performs resize after insertion.
@@ -636,28 +1200,51 @@ public class ConcurrentHashMapV8
/** Implementation for put */
private final Object internalPut(Object k, Object v) {
int h = spread(k.hashCode());
- boolean checkSize = false;
+ int count = 0;
for (Node[] tab = table;;) {
- int i; Node f; int fh;
+ int i; Node f; int fh; Object fk;
if (tab == null)
tab = initTable();
else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
if (casTabAt(tab, i, null, new Node(h, k, v, null)))
break; // no lock when adding to empty bin
}
- else if ((fh = f.hash) == MOVED)
- tab = (Node[])f.key;
+ else if ((fh = f.hash) == MOVED) {
+ if ((fk = f.key) instanceof TreeBin) {
+ TreeBin t = (TreeBin)fk;
+ Object oldVal = null;
+ t.acquire(0);
+ try {
+ if (tabAt(tab, i) == f) {
+ count = 2;
+ TreeNode p = t.putTreeNode(h, k, v);
+ if (p != null) {
+ oldVal = p.val;
+ p.val = v;
+ }
+ }
+ } finally {
+ t.release(0);
+ }
+ if (count != 0) {
+ if (oldVal != null)
+ return oldVal;
+ break;
+ }
+ }
+ else
+ tab = (Node[])fk;
+ }
else if ((fh & LOCKED) != 0) {
checkForResize();
f.tryAwaitLock(tab, i);
}
else if (f.casHash(fh, fh | LOCKED)) {
Object oldVal = null;
- boolean validated = false;
try { // needed in case equals() throws
if (tabAt(tab, i) == f) {
- validated = true; // retry if 1st already deleted
- for (Node e = f;;) {
+ count = 1;
+ for (Node e = f;; ++count) {
Object ek, ev;
if ((e.hash & HASH_BITS) == h &&
(ev = e.val) != null &&
@@ -669,8 +1256,8 @@ public class ConcurrentHashMapV8
Node last = e;
if ((e = e.next) == null) {
last.next = new Node(h, k, v, null);
- if (last != f || tab.length <= 64)
- checkSize = true;
+ if (count >= TREE_THRESHOLD)
+ replaceWithTreeBin(tab, i, k);
break;
}
}
@@ -681,15 +1268,17 @@ public class ConcurrentHashMapV8
synchronized (f) { f.notifyAll(); };
}
}
- if (validated) {
+ if (count != 0) {
if (oldVal != null)
return oldVal;
+ if (tab.length <= 64)
+ count = 2;
break;
}
}
}
counter.add(1L);
- if (checkSize)
+ if (count > 1)
checkForResize();
return null;
}
@@ -697,6 +1286,7 @@ public class ConcurrentHashMapV8
/** Implementation for putIfAbsent */
private final Object internalPutIfAbsent(Object k, Object v) {
int h = spread(k.hashCode());
+ int count = 0;
for (Node[] tab = table;;) {
int i; Node f; int fh; Object fk, fv;
if (tab == null)
@@ -705,8 +1295,30 @@ public class ConcurrentHashMapV8
if (casTabAt(tab, i, null, new Node(h, k, v, null)))
break;
}
- else if ((fh = f.hash) == MOVED)
- tab = (Node[])f.key;
+ else if ((fh = f.hash) == MOVED) {
+ if ((fk = f.key) instanceof TreeBin) {
+ TreeBin t = (TreeBin)fk;
+ Object oldVal = null;
+ t.acquire(0);
+ try {
+ if (tabAt(tab, i) == f) {
+ count = 2;
+ TreeNode p = t.putTreeNode(h, k, v);
+ if (p != null)
+ oldVal = p.val;
+ }
+ } finally {
+ t.release(0);
+ }
+ if (count != 0) {
+ if (oldVal != null)
+ return oldVal;
+ break;
+ }
+ }
+ else
+ tab = (Node[])fk;
+ }
else if ((fh & HASH_BITS) == h && (fv = f.val) != null &&
((fk = f.key) == k || k.equals(fk)))
return fv;
@@ -730,11 +1342,10 @@ public class ConcurrentHashMapV8
}
else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) {
Object oldVal = null;
- boolean validated = false;
try {
if (tabAt(tab, i) == f) {
- validated = true;
- for (Node e = f;;) {
+ count = 1;
+ for (Node e = f;; ++count) {
Object ek, ev;
if ((e.hash & HASH_BITS) == h &&
(ev = e.val) != null &&
@@ -745,6 +1356,8 @@ public class ConcurrentHashMapV8
Node last = e;
if ((e = e.next) == null) {
last.next = new Node(h, k, v, null);
+ if (count >= TREE_THRESHOLD)
+ replaceWithTreeBin(tab, i, k);
break;
}
}
@@ -752,18 +1365,22 @@ public class ConcurrentHashMapV8
} finally {
if (!f.casHash(fh | LOCKED, fh)) {
f.hash = fh;
- synchronized(f) { f.notifyAll(); };
+ synchronized (f) { f.notifyAll(); };
}
}
- if (validated) {
+ if (count != 0) {
if (oldVal != null)
return oldVal;
+ if (tab.length <= 64)
+ count = 2;
break;
}
}
}
}
counter.add(1L);
+ if (count > 1)
+ checkForResize();
return null;
}
@@ -772,15 +1389,15 @@ public class ConcurrentHashMapV8
MappingFunction<? super K, ?> mf) {
int h = spread(k.hashCode());
Object val = null;
+ int count = 0;
for (Node[] tab = table;;) {
Node f; int i, fh; Object fk, fv;
if (tab == null)
tab = initTable();
else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
Node node = new Node(fh = h | LOCKED, k, null, null);
- boolean validated = false;
if (casTabAt(tab, i, null, node)) {
- validated = true;
+ count = 1;
try {
if ((val = mf.map(k)) != null)
node.val = val;
@@ -789,15 +1406,42 @@ public class ConcurrentHashMapV8
setTabAt(tab, i, null);
if (!node.casHash(fh, h)) {
node.hash = h;
- synchronized(node) { node.notifyAll(); };
+ synchronized (node) { node.notifyAll(); };
}
}
}
- if (validated)
+ if (count != 0)
break;
}
- else if ((fh = f.hash) == MOVED)
- tab = (Node[])f.key;
+ else if ((fh = f.hash) == MOVED) {
+ if ((fk = f.key) instanceof TreeBin) {
+ TreeBin t = (TreeBin)fk;
+ boolean added = false;
+ t.acquire(0);
+ try {
+ if (tabAt(tab, i) == f) {
+ count = 1;
+ TreeNode p = t.getTreeNode(h, k, t.root);
+ if (p != null)
+ val = p.val;
+ else if ((val = mf.map(k)) != null) {
+ added = true;
+ count = 2;
+ t.putTreeNode(h, k, val);
+ }
+ }
+ } finally {
+ t.release(0);
+ }
+ if (count != 0) {
+ if (!added)
+ return val;
+ break;
+ }
+ }
+ else
+ tab = (Node[])fk;
+ }
else if ((fh & HASH_BITS) == h && (fv = f.val) != null &&
((fk = f.key) == k || k.equals(fk)))
return fv;
@@ -820,11 +1464,11 @@ public class ConcurrentHashMapV8
f.tryAwaitLock(tab, i);
}
else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) {
- boolean validated = false;
+ boolean added = false;
try {
if (tabAt(tab, i) == f) {
- validated = true;
- for (Node e = f;;) {
+ count = 1;
+ for (Node e = f;; ++count) {
Object ek, ev;
if ((e.hash & HASH_BITS) == h &&
(ev = e.val) != null &&
@@ -834,8 +1478,12 @@ public class ConcurrentHashMapV8
}
Node last = e;
if ((e = e.next) == null) {
- if ((val = mf.map(k)) != null)
+ if ((val = mf.map(k)) != null) {
+ added = true;
last.next = new Node(h, k, val, null);
+ if (count >= TREE_THRESHOLD)
+ replaceWithTreeBin(tab, i, k);
+ }
break;
}
}
@@ -843,17 +1491,24 @@ public class ConcurrentHashMapV8
} finally {
if (!f.casHash(fh | LOCKED, fh)) {
f.hash = fh;
- synchronized(f) { f.notifyAll(); };
+ synchronized (f) { f.notifyAll(); };
}
}
- if (validated)
+ if (count != 0) {
+ if (!added)
+ return val;
+ if (tab.length <= 64)
+ count = 2;
break;
+ }
}
}
}
if (val == null)
throw new NullPointerException();
counter.add(1L);
+ if (count > 1)
+ checkForResize();
return val;
}
@@ -864,17 +1519,16 @@ public class ConcurrentHashMapV8
int h = spread(k.hashCode());
Object val = null;
boolean added = false;
- boolean checkSize = false;
+ int count = 0;
for (Node[] tab = table;;) {
- Node f; int i, fh;
+ Node f; int i, fh; Object fk;
if (tab == null)
tab = initTable();
else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
Node node = new Node(fh = h | LOCKED, k, null, null);
- boolean validated = false;
if (casTabAt(tab, i, null, node)) {
- validated = true;
try {
+ count = 1;
if ((val = mf.remap(k, null)) != null) {
node.val = val;
added = true;
@@ -888,21 +1542,46 @@ public class ConcurrentHashMapV8
}
}
}
- if (validated)
+ if (count != 0)
break;
}
- else if ((fh = f.hash) == MOVED)
- tab = (Node[])f.key;
+ else if ((fh = f.hash) == MOVED) {
+ if ((fk = f.key) instanceof TreeBin) {
+ TreeBin t = (TreeBin)fk;
+ t.acquire(0);
+ try {
+ if (tabAt(tab, i) == f) {
+ count = 1;
+ TreeNode p = t.getTreeNode(h, k, t.root);
+ Object pv = (p == null) ? null : p.val;
+ if ((val = mf.remap(k, (V)pv)) != null) {
+ if (p != null)
+ p.val = val;
+ else {
+ count = 2;
+ added = true;
+ t.putTreeNode(h, k, val);
+ }
+ }
+ }
+ } finally {
+ t.release(0);
+ }
+ if (count != 0)
+ break;
+ }
+ else
+ tab = (Node[])fk;
+ }
else if ((fh & LOCKED) != 0) {
checkForResize();
f.tryAwaitLock(tab, i);
}
else if (f.casHash(fh, fh | LOCKED)) {
- boolean validated = false;
try {
if (tabAt(tab, i) == f) {
- validated = true;
- for (Node e = f;;) {
+ count = 1;
+ for (Node e = f;; ++count) {
Object ek, ev;
if ((e.hash & HASH_BITS) == h &&
(ev = e.val) != null &&
@@ -917,8 +1596,8 @@ public class ConcurrentHashMapV8
if ((val = mf.remap(k, null)) != null) {
last.next = new Node(h, k, val, null);
added = true;
- if (last != f || tab.length <= 64)
- checkSize = true;
+ if (count >= TREE_THRESHOLD)
+ replaceWithTreeBin(tab, i, k);
}
break;
}
@@ -930,15 +1609,18 @@ public class ConcurrentHashMapV8
synchronized (f) { f.notifyAll(); };
}
}
- if (validated)
+ if (count != 0) {
+ if (tab.length <= 64)
+ count = 2;
break;
+ }
}
}
if (val == null)
throw new NullPointerException();
if (added) {
counter.add(1L);
- if (checkSize)
+ if (count > 1)
checkForResize();
}
return val;
@@ -959,7 +1641,7 @@ public class ConcurrentHashMapV8
}
int h = spread(k.hashCode());
for (Node[] tab = table;;) {
- int i; Node f; int fh;
+ int i; Node f; int fh; Object fk;
if (tab == null)
tab = initTable();
else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null){
@@ -968,8 +1650,31 @@ public class ConcurrentHashMapV8
break;
}
}
- else if ((fh = f.hash) == MOVED)
- tab = (Node[])f.key;
+ else if ((fh = f.hash) == MOVED) {
+ if ((fk = f.key) instanceof TreeBin) {
+ TreeBin t = (TreeBin)fk;
+ boolean validated = false;
+ t.acquire(0);
+ try {
+ if (tabAt(tab, i) == f) {
+ validated = true;
+ TreeNode p = t.getTreeNode(h, k, t.root);
+ if (p != null)
+ p.val = v;
+ else {
+ t.putTreeNode(h, k, v);
+ ++delta;
+ }
+ }
+ } finally {
+ t.release(0);
+ }
+ if (validated)
+ break;
+ }
+ else
+ tab = (Node[])fk;
+ }
else if ((fh & LOCKED) != 0) {
counter.add(delta);
delta = 0L;
@@ -977,12 +1682,11 @@ public class ConcurrentHashMapV8
f.tryAwaitLock(tab, i);
}
else if (f.casHash(fh, fh | LOCKED)) {
- boolean validated = false;
- boolean tooLong = false;
+ int count = 0;
try {
if (tabAt(tab, i) == f) {
- validated = true;
- for (Node e = f;;) {
+ count = 1;
+ for (Node e = f;; ++count) {
Object ek, ev;
if ((e.hash & HASH_BITS) == h &&
(ev = e.val) != null &&
@@ -994,19 +1698,20 @@ public class ConcurrentHashMapV8
if ((e = e.next) == null) {
++delta;
last.next = new Node(h, k, v, null);
+ if (count >= TREE_THRESHOLD)
+ replaceWithTreeBin(tab, i, k);
break;
}
- tooLong = true;
}
}
} finally {
if (!f.casHash(fh | LOCKED, fh)) {
f.hash = fh;
- synchronized(f) { f.notifyAll(); };
+ synchronized (f) { f.notifyAll(); };
}
}
- if (validated) {
- if (tooLong) {
+ if (count != 0) {
+ if (count > 1) {
counter.add(delta);
delta = 0L;
checkForResize();
@@ -1099,7 +1804,7 @@ public class ConcurrentHashMapV8
while ((sc = sizeCtl) >= 0) {
Node[] tab = table; int n;
if (tab == null || (n = tab.length) == 0) {
- n = (sc > c)? sc : c;
+ n = (sc > c) ? sc : c;
if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
try {
if (table == tab) {
@@ -1150,7 +1855,7 @@ public class ConcurrentHashMapV8
continue;
}
else { // transiently use a locked forwarding node
- Node g = new Node(MOVED|LOCKED, nextTab, null, null);
+ Node g = new Node(MOVED|LOCKED, nextTab, null, null);
if (!casTabAt(tab, i, f, g))
continue;
setTabAt(nextTab, i, null);
@@ -1162,35 +1867,31 @@ public class ConcurrentHashMapV8
}
}
}
- else if (((fh = f.hash) & LOCKED) == 0 && f.casHash(fh, fh|LOCKED)) {
+ else if ((fh = f.hash) == MOVED) {
+ Object fk = f.key;
+ if (fk instanceof TreeBin) {
+ TreeBin t = (TreeBin)fk;
+ boolean validated = false;
+ t.acquire(0);
+ try {
+ if (tabAt(tab, i) == f) {
+ validated = true;
+ splitTreeBin(nextTab, i, t);
+ setTabAt(tab, i, fwd);
+ }
+ } finally {
+ t.release(0);
+ }
+ if (!validated)
+ continue;
+ }
+ }
+ else if ((fh & LOCKED) == 0 && f.casHash(fh, fh|LOCKED)) {
boolean validated = false;
try { // split to lo and hi lists; copying as needed
if (tabAt(tab, i) == f) {
validated = true;
- Node e = f, lastRun = f;
- Node lo = null, hi = null;
- int runBit = e.hash & n;
- for (Node p = e.next; p != null; p = p.next) {
- int b = p.hash & n;
- if (b != runBit) {
- runBit = b;
- lastRun = p;
- }
- }
- if (runBit == 0)
- lo = lastRun;
- else
- hi = lastRun;
- for (Node p = e; p != lastRun; p = p.next) {
- int ph = p.hash & HASH_BITS;
- Object pk = p.key, pv = p.val;
- if ((ph & n) == 0)
- lo = new Node(ph, pk, pv, lo);
- else
- hi = new Node(ph, pk, pv, hi);
- }
- setTabAt(nextTab, i, lo);
- setTabAt(nextTab, i + n, hi);
+ splitBin(nextTab, i, f);
setTabAt(tab, i, fwd);
}
} finally {
@@ -1236,6 +1937,76 @@ public class ConcurrentHashMapV8
}
/**
+ * Splits a normal bin (linked list headed by e) into lo and hi parts
+ * and installs them at slots i and i + bit of the given (doubled) table.
+ */
+ private static void splitBin(Node[] nextTab, int i, Node e) {
+ int bit = nextTab.length >>> 1; // the single hash bit deciding lo vs. hi
+ int runBit = e.hash & bit;
+ Node lastRun = e, lo = null, hi = null;
+ for (Node p = e.next; p != null; p = p.next) { // find longest trailing run with same bit
+ int b = p.hash & bit;
+ if (b != runBit) {
+ runBit = b;
+ lastRun = p;
+ }
+ }
+ if (runBit == 0) // reuse trailing run unchanged in whichever part it belongs to
+ lo = lastRun;
+ else
+ hi = lastRun;
+ for (Node p = e; p != lastRun; p = p.next) { // copy nodes ahead of the reused run
+ int ph = p.hash & HASH_BITS;
+ Object pk = p.key, pv = p.val;
+ if ((ph & bit) == 0)
+ lo = new Node(ph, pk, pv, lo);
+ else
+ hi = new Node(ph, pk, pv, hi);
+ }
+ setTabAt(nextTab, i, lo);
+ setTabAt(nextTab, i + bit, hi);
+ }
+
+ /**
+ * Splits tree bin t into lo and hi parts installed at slots i and i + bit; a part at or below half TREE_THRESHOLD is demoted to a plain list
+ */
+ private static void splitTreeBin(Node[] nextTab, int i, TreeBin t) {
+ int bit = nextTab.length >>> 1;
+ TreeBin lt = new TreeBin();
+ TreeBin ht = new TreeBin();
+ int lc = 0, hc = 0; // node counts for lo and hi parts
+ for (Node e = t.first; e != null; e = e.next) { // redistribute by the new bit
+ int h = e.hash & HASH_BITS;
+ Object k = e.key, v = e.val;
+ if ((h & bit) == 0) {
+ ++lc;
+ lt.putTreeNode(h, k, v);
+ }
+ else {
+ ++hc;
+ ht.putTreeNode(h, k, v);
+ }
+ }
+ Node ln, hn; // throw away trees if too small
+ if (lc <= (TREE_THRESHOLD >>> 1)) {
+ ln = null;
+ for (Node p = lt.first; p != null; p = p.next) // rebuild as plain list (reversed order)
+ ln = new Node(p.hash, p.key, p.val, ln);
+ }
+ else
+ ln = new Node(MOVED, lt, null, null); // MOVED node whose key holds the TreeBin
+ setTabAt(nextTab, i, ln);
+ if (hc <= (TREE_THRESHOLD >>> 1)) {
+ hn = null;
+ for (Node p = ht.first; p != null; p = p.next)
+ hn = new Node(p.hash, p.key, p.val, hn);
+ }
+ else
+ hn = new Node(MOVED, ht, null, null);
+ setTabAt(nextTab, i + bit, hn);
+ }
+
+ /**
* Implementation for clear. Steps through each bin, removing all
* nodes.
*/
@@ -1244,45 +2015,58 @@ public class ConcurrentHashMapV8
int i = 0;
Node[] tab = table;
while (tab != null && i < tab.length) {
- int fh;
+ int fh; Object fk;
Node f = tabAt(tab, i);
if (f == null)
++i;
- else if ((fh = f.hash) == MOVED)
- tab = (Node[])f.key;
+ else if ((fh = f.hash) == MOVED) {
+ if ((fk = f.key) instanceof TreeBin) {
+ TreeBin t = (TreeBin)fk;
+ t.acquire(0);
+ try {
+ if (tabAt(tab, i) == f) {
+ for (Node p = t.first; p != null; p = p.next) {
+ p.val = null;
+ --delta;
+ }
+ t.first = null;
+ t.root = null;
+ ++i;
+ }
+ } finally {
+ t.release(0);
+ }
+ }
+ else
+ tab = (Node[])fk;
+ }
else if ((fh & LOCKED) != 0) {
counter.add(delta); // opportunistically update count
delta = 0L;
f.tryAwaitLock(tab, i);
}
else if (f.casHash(fh, fh | LOCKED)) {
- boolean validated = false;
try {
if (tabAt(tab, i) == f) {
- validated = true;
for (Node e = f; e != null; e = e.next) {
- if (e.val != null) { // currently always true
- e.val = null;
- --delta;
- }
+ e.val = null;
+ --delta;
}
setTabAt(tab, i, null);
+ ++i;
}
} finally {
if (!f.casHash(fh | LOCKED, fh)) {
f.hash = fh;
- synchronized(f) { f.notifyAll(); };
+ synchronized (f) { f.notifyAll(); };
}
}
- if (validated)
- ++i;
}
}
if (delta != 0)
counter.add(delta);
}
-
/* ----------------Table Traversal -------------- */
/**
@@ -1291,7 +2075,7 @@ public class ConcurrentHashMapV8
*
* At each step, the iterator snapshots the key ("nextKey") and
* value ("nextVal") of a valid node (i.e., one that, at point of
- * snapshot, has a nonnull user value). Because val fields can
+ * snapshot, has a non-null user value). Because val fields can
* change (including to null, indicating deletion), field nextVal
* might not be accurate at point of use, but still maintains the
* weak consistency property of holding a value that was once
@@ -1361,14 +2145,19 @@ public class ConcurrentHashMapV8
if (e != null) // advance past used/skipped node
e = e.next;
while (e == null) { // get to next non-null bin
- Node[] t; int b, i, n; // checks must use locals
+ Node[] t; int b, i, n; Object ek; // checks must use locals
if ((b = baseIndex) >= baseLimit || (i = index) < 0 ||
(t = tab) == null || i >= (n = t.length))
break outer;
- else if ((e = tabAt(t, i)) != null && e.hash == MOVED)
- tab = (Node[])e.key; // restarts due to null val
- else // visit upper slots if present
- index = (i += baseSize) < n ? i : (baseIndex = b + 1);
+ else if ((e = tabAt(t, i)) != null && e.hash == MOVED) {
+ if ((ek = e.key) instanceof TreeBin)
+ e = ((TreeBin)ek).first;
+ else {
+ tab = (Node[])ek;
+ continue; // restarts due to null val
+ }
+ } // visit upper slots if present
+ index = (i += baseSize) < n ? i : (baseIndex = b + 1);
}
nextKey = e.key;
} while ((nextVal = e.val) == null);// skip deleted or special nodes
@@ -1460,8 +2249,8 @@ public class ConcurrentHashMapV8
if (initialCapacity < concurrencyLevel) // Use at least as many bins
initialCapacity = concurrencyLevel; // as estimated threads
long size = (long)(1.0 + (long)initialCapacity / loadFactor);
- int cap = ((size >= (long)MAXIMUM_CAPACITY) ?
- MAXIMUM_CAPACITY: tableSizeFor((int)size));
+ int cap = ((size >= (long)MAXIMUM_CAPACITY) ?
+ MAXIMUM_CAPACITY: tableSizeFor((int)size));
this.counter = new LongAdder();
this.sizeCtl = cap;
}
@@ -2187,7 +2976,7 @@ public class ConcurrentHashMapV8
return true;
}
- public final boolean removeAll(Collection c) {
+ public final boolean removeAll(Collection<?> c) {
boolean modified = false;
for (Iterator> it = iter(); it.hasNext();) {
if (c.contains(it.next())) {
@@ -2237,7 +3026,7 @@ public class ConcurrentHashMapV8
}
static final class Values extends MapView
- implements Collection {
+ implements Collection {
Values(ConcurrentHashMapV8 map) { super(map); }
public final boolean contains(Object o) { return map.containsValue(o); }
@@ -2267,7 +3056,7 @@ public class ConcurrentHashMapV8
}
}
- static final class EntrySet extends MapView
+ static final class EntrySet extends MapView
implements Set> {
EntrySet(ConcurrentHashMapV8 map) { super(map); }
@@ -2369,7 +3158,8 @@ public class ConcurrentHashMapV8
K k = (K) s.readObject();
V v = (V) s.readObject();
if (k != null && v != null) {
- p = new Node(spread(k.hashCode()), k, v, p);
+ int h = spread(k.hashCode());
+ p = new Node(h, k, v, p);
++size;
}
else
@@ -2385,6 +3175,7 @@ public class ConcurrentHashMapV8
n = tableSizeFor(sz + (sz >>> 1) + 1);
}
int sc = sizeCtl;
+ boolean collide = false;
if (n > sc &&
UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) {
try {
@@ -2395,8 +3186,10 @@ public class ConcurrentHashMapV8
while (p != null) {
int j = p.hash & mask;
Node next = p.next;
- p.next = tabAt(tab, j);
+ Node q = p.next = tabAt(tab, j);
setTabAt(tab, j, p);
+ if (!collide && q != null && q.hash == p.hash)
+ collide = true;
p = next;
}
table = tab;
@@ -2406,6 +3199,19 @@ public class ConcurrentHashMapV8
} finally {
sizeCtl = sc;
}
+ if (collide) { // rescan and convert to TreeBins
+ Node[] tab = table;
+ for (int i = 0; i < tab.length; ++i) {
+ int c = 0;
+ for (Node e = tabAt(tab, i); e != null; e = e.next) {
+ if (++c > TREE_THRESHOLD &&
+ (e.key instanceof Comparable)) {
+ replaceWithTreeBin(tab, i, e.key);
+ break;
+ }
+ }
+ }
+ }
}
if (!init) { // Can only happen if unsafely published.
while (p != null) {
@@ -2413,6 +3219,7 @@ public class ConcurrentHashMapV8
p = p.next;
}
}
+
}
}