--- jsr166/src/jsr166e/ConcurrentHashMapV8.java 2013/09/11 14:53:38 1.116
+++ jsr166/src/jsr166e/ConcurrentHashMapV8.java 2015/09/13 16:28:14 1.125
@@ -15,7 +15,6 @@ import java.lang.reflect.Type;
import java.util.AbstractMap;
import java.util.Arrays;
import java.util.Collection;
-import java.util.Comparator;
import java.util.ConcurrentModificationException;
import java.util.Enumeration;
import java.util.HashMap;
@@ -115,32 +114,31 @@ import java.util.concurrent.locks.Reentr
* objects do not support method {@code setValue}.
*
*
- * <li> forEach: Perform a given action on each element.
+ * <li>forEach: Perform a given action on each element.
* A variant form applies a given transformation on each element
- * before performing the action.
+ * before performing the action.</li>
*
- * <li> search: Return the first available non-null result of
+ * <li>search: Return the first available non-null result of
* applying a given function on each element; skipping further
- * search when a result is found.
+ * search when a result is found.</li>
*
- * <li> reduce: Accumulate each element. The supplied reduction
+ * <li>reduce: Accumulate each element. The supplied reduction
* function cannot rely on ordering (more formally, it should be
* both associative and commutative). There are five variants:
*
*
*
- * <li> Plain reductions. (There is not a form of this method for
+ * <li>Plain reductions. (There is not a form of this method for
* (key, value) function arguments since there is no corresponding
- * return type.)
+ * return type.)</li>
*
- * <li> Mapped reductions that accumulate the results of a given
- * function applied to each element.
+ * <li>Mapped reductions that accumulate the results of a given
+ * function applied to each element.</li>
*
- * <li> Reductions to scalar doubles, longs, and ints, using a
- * given basis value.
+ * <li>Reductions to scalar doubles, longs, and ints, using a
+ * given basis value.</li>
*
*
- *
*
*
* These bulk operations accept a {@code parallelismThreshold}
@@ -389,19 +387,21 @@ public class ConcurrentHashMapV8<K,V> ex
* progress. Resizing proceeds by transferring bins, one by one,
* from the table to the next table. However, threads claim small
* blocks of indices to transfer (via field transferIndex) before
- * doing so, reducing contention. Because we are using
- * power-of-two expansion, the elements from each bin must either
- * stay at same index, or move with a power of two offset. We
- * eliminate unnecessary node creation by catching cases where old
- * nodes can be reused because their next fields won't change. On
- * average, only about one-sixth of them need cloning when a table
- * doubles. The nodes they replace will be garbage collectable as
- * soon as they are no longer referenced by any reader thread that
- * may be in the midst of concurrently traversing table. Upon
- * transfer, the old table bin contains only a special forwarding
- * node (with hash field "MOVED") that contains the next table as
- * its key. On encountering a forwarding node, access and update
- * operations restart, using the new table.
+ * doing so, reducing contention. A generation stamp in field
+ * sizeCtl ensures that resizings do not overlap. Because we are
+ * using power-of-two expansion, the elements from each bin must
+ * either stay at same index, or move with a power of two
+ * offset. We eliminate unnecessary node creation by catching
+ * cases where old nodes can be reused because their next fields
+ * won't change. On average, only about one-sixth of them need
+ * cloning when a table doubles. The nodes they replace will be
+ * garbage collectable as soon as they are no longer referenced by
+ * any reader thread that may be in the midst of concurrently
+ * traversing table. Upon transfer, the old table bin contains
+ * only a special forwarding node (with hash field "MOVED") that
+ * contains the next table as its key. On encountering a
+ * forwarding node, access and update operations restart, using
+ * the new table.
*
* Each bin transfer requires its bin lock, which can stall
* waiting for locks while resizing. However, because other
@@ -487,7 +487,7 @@ public class ConcurrentHashMapV8<K,V> ex
*
* Maintaining API and serialization compatibility with previous
* versions of this class introduces several oddities. Mainly: We
- * leave untouched but unused constructor arguments refering to
+ * leave untouched but unused constructor arguments referring to
* concurrencyLevel. We accept a loadFactor constructor argument,
* but apply it only to initial table capacity (which is the only
* time that we can guarantee to honor it.) We also declare an
@@ -578,6 +578,23 @@ public class ConcurrentHashMapV8<K,V> ex
*/
private static final int MIN_TRANSFER_STRIDE = 16;
+ /**
+ * The number of bits used for generation stamp in sizeCtl.
+ * Must be at least 6 for 32bit arrays.
+ */
+ private static int RESIZE_STAMP_BITS = 16;
+
+ /**
+ * The maximum number of threads that can help resize.
+ * Must fit in 32 - RESIZE_STAMP_BITS bits.
+ */
+ private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;
+
+ /**
+ * The bit shift for recording size stamp in sizeCtl.
+ */
+ private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;
+
/*
* Encodings for Node hash fields. See above for explanation.
*/
@@ -619,10 +636,10 @@ public class ConcurrentHashMapV8<K,V> ex
this.next = next;
}
- public final K getKey() { return key; }
- public final V getValue() { return val; }
- public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
- public final String toString(){ return key + "=" + val; }
+ public final K getKey() { return key; }
+ public final V getValue() { return val; }
+ public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
+ public final String toString() { return key + "=" + val; }
public final V setValue(V value) {
throw new UnsupportedOperationException();
}
@@ -735,7 +752,7 @@ public class ConcurrentHashMapV8<K,V> ex
* errors by users, these checks must operate on local variables,
* which accounts for some odd-looking inline assignments below.
* Note that calls to setTabAt always occur within locked regions,
- * and so in principle require only release ordering, not need
+ * and so in principle require only release ordering, not
* full volatile semantics, but are currently coded as volatile
* writes to be conservative.
*/
@@ -2195,6 +2212,14 @@ public class ConcurrentHashMapV8<K,V> ex
/* ---------------- Table Initialization and Resizing -------------- */
/**
+ * Returns the stamp bits for resizing a table of size n.
+ * Must be negative when shifted left by RESIZE_STAMP_SHIFT.
+ */
+ static final int resizeStamp(int n) {
+ return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
+ }
+
+ /**
* Initializes table, using the size recorded in sizeCtl.
*/
private final Node<K,V>[] initTable() {
@@ -2207,7 +2232,7 @@ public class ConcurrentHashMapV8<K,V> ex
if ((tab = table) == null || tab.length == 0) {
int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
@SuppressWarnings("unchecked")
- Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
+ Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
table = tab = nt;
sc = n - (n >>> 2);
}
@@ -2249,17 +2274,20 @@ public class ConcurrentHashMapV8<K,V> ex
s = sumCount();
}
if (check >= 0) {
- Node<K,V>[] tab, nt; int sc;
+ Node<K,V>[] tab, nt; int n, sc;
while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
- tab.length < MAXIMUM_CAPACITY) {
+ (n = tab.length) < MAXIMUM_CAPACITY) {
+ int rs = resizeStamp(n);
if (sc < 0) {
- if (sc == -1 || transferIndex <= 0 ||
- (nt = nextTable) == null)
+ if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
+ sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
+ transferIndex <= 0)
break;
- if (U.compareAndSwapInt(this, SIZECTL, sc, sc - 1))
+ if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
transfer(tab, nt);
}
- else if (U.compareAndSwapInt(this, SIZECTL, sc, -2))
+ else if (U.compareAndSwapInt(this, SIZECTL, sc,
+ (rs << RESIZE_STAMP_SHIFT) + 2))
transfer(tab, null);
s = sumCount();
}
@@ -2271,11 +2299,15 @@ public class ConcurrentHashMapV8<K,V> ex
*/
final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
Node<K,V>[] nextTab; int sc;
- if ((f instanceof ForwardingNode) &&
+ if (tab != null && (f instanceof ForwardingNode) &&
(nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
- while (transferIndex > 0 && nextTab == nextTable &&
- (sc = sizeCtl) < -1) {
- if (U.compareAndSwapInt(this, SIZECTL, sc, sc - 1)) {
+ int rs = resizeStamp(tab.length);
+ while (nextTab == nextTable && table == tab &&
+ (sc = sizeCtl) < 0) {
+ if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
+ sc == rs + MAX_RESIZERS || transferIndex <= 0)
+ break;
+ if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) {
transfer(tab, nextTab);
break;
}
@@ -2302,7 +2334,7 @@ public class ConcurrentHashMapV8<K,V> ex
try {
if (table == tab) {
@SuppressWarnings("unchecked")
- Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
+ Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
table = nt;
sc = n - (n >>> 2);
}
@@ -2313,9 +2345,21 @@ public class ConcurrentHashMapV8<K,V> ex
}
else if (c <= sc || n >= MAXIMUM_CAPACITY)
break;
- else if (tab == table &&
- U.compareAndSwapInt(this, SIZECTL, sc, -2))
- transfer(tab, null);
+ else if (tab == table) {
+ int rs = resizeStamp(n);
+ if (sc < 0) {
+ Node<K,V>[] nt;
+ if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
+ sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
+ transferIndex <= 0)
+ break;
+ if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
+ transfer(tab, nt);
+ }
+ else if (U.compareAndSwapInt(this, SIZECTL, sc,
+ (rs << RESIZE_STAMP_SHIFT) + 2))
+ transfer(tab, null);
+ }
}
}
@@ -2370,8 +2414,8 @@ public class ConcurrentHashMapV8<K,V> ex
sizeCtl = (n << 1) - (n >>> 1);
return;
}
- if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, ++sc)) {
- if (sc != -1)
+ if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
+ if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
return;
finishing = advance = true;
i = n; // recheck before commit
@@ -2465,11 +2509,8 @@ public class ConcurrentHashMapV8<K,V> ex
private final void treeifyBin(Node<K,V>[] tab, int index) {
Node<K,V> b; int n, sc;
if (tab != null) {
- if ((n = tab.length) < MIN_TREEIFY_CAPACITY) {
- if (tab == table && (sc = sizeCtl) >= 0 &&
- U.compareAndSwapInt(this, SIZECTL, sc, -2))
- transfer(tab, null);
- }
+ if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
+ tryPresize(n << 1);
else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
synchronized (b) {
if (tabAt(tab, index) == b) {
@@ -2536,7 +2577,7 @@ public class ConcurrentHashMapV8<K,V> ex
final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) {
if (k != null) {
TreeNode<K,V> p = this;
- do {
+ do {
int ph, dir; K pk; TreeNode<K,V> q;
TreeNode<K,V> pl = p.left, pr = p.right;
if ((ph = p.hash) > h)
@@ -2667,7 +2708,7 @@ public class ConcurrentHashMapV8<K,V> ex
private final void contendedLock() {
boolean waiting = false;
for (int s;;) {
- if (((s = lockState) & WRITER) == 0) {
+ if (((s = lockState) & ~WAITER) == 0) {
if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) {
if (waiting)
waiter = null;
@@ -2690,14 +2731,15 @@ public class ConcurrentHashMapV8<K,V> ex
* using tree comparisons from root, but continues linear
* search when lock not available.
*/
-final Node<K,V> find(int h, Object k) {
+ final Node<K,V> find(int h, Object k) {
if (k != null) {
- for (Node<K,V> e = first; e != null; e = e.next) {
+ for (Node<K,V> e = first; e != null; ) {
int s; K ek;
if (((s = lockState) & (WAITER|WRITER)) != 0) {
if (e.hash == h &&
((ek = e.key) == k || (ek != null && k.equals(ek))))
return e;
+ e = e.next;
}
else if (U.compareAndSwapInt(this, LOCKSTATE, s,
s + READER)) {
@@ -2984,7 +3026,7 @@ final Node<K,V> find(int h, Object k) {
static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
TreeNode<K,V> x) {
- for (TreeNode<K,V> xp, xpl, xpr;;) {
+ for (TreeNode<K,V> xp, xpl, xpr;;) {
if (x == null || x == root)
return root;
else if ((xp = x.parent) == null) {