--- jsr166/src/jsr166e/ConcurrentHashMapV8.java 2013/07/04 18:34:49 1.110
+++ jsr166/src/jsr166e/ConcurrentHashMapV8.java 2016/03/07 23:55:31 1.126
@@ -12,9 +12,9 @@ import java.io.ObjectStreamField;
 import java.io.Serializable;
 import java.lang.reflect.ParameterizedType;
 import java.lang.reflect.Type;
+import java.util.AbstractMap;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.Comparator;
 import java.util.ConcurrentModificationException;
 import java.util.Enumeration;
 import java.util.HashMap;
@@ -114,32 +114,31 @@ import java.util.concurrent.locks.Reentr
 * objects do not support method {@code setValue}.
 *
 *
 *
 *

These bulk operations accept a {@code parallelismThreshold} @@ -218,7 +217,7 @@ import java.util.concurrent.locks.Reentr * @param the type of keys maintained by this map * @param the type of mapped values */ -public class ConcurrentHashMapV8 +public class ConcurrentHashMapV8 extends AbstractMap implements ConcurrentMap, Serializable { private static final long serialVersionUID = 7249069246763182397L; @@ -275,6 +274,7 @@ public class ConcurrentHashMapV8 /** Interface describing a function mapping two ints to an int */ public interface IntByIntToInt { int apply(int a, int b); } + /* * Overview: * @@ -380,14 +380,15 @@ public class ConcurrentHashMapV8 * The table is resized when occupancy exceeds a percentage * threshold (nominally, 0.75, but see below). Any thread * noticing an overfull bin may assist in resizing after the - * initiating thread allocates and sets up the replacement - * array. However, rather than stalling, these other threads may - * proceed with insertions etc. The use of TreeBins shields us - * from the worst case effects of overfilling while resizes are in + * initiating thread allocates and sets up the replacement array. + * However, rather than stalling, these other threads may proceed + * with insertions etc. The use of TreeBins shields us from the + * worst case effects of overfilling while resizes are in * progress. Resizing proceeds by transferring bins, one by one, - * from the table to the next table. To enable concurrency, the - * next table must be (incrementally) prefilled with place-holders - * serving as reverse forwarders to the old table. Because we are + * from the table to the next table. However, threads claim small + * blocks of indices to transfer (via field transferIndex) before + * doing so, reducing contention. A generation stamp in field + * sizeCtl ensures that resizings do not overlap. Because we are * using power-of-two expansion, the elements from each bin must * either stay at same index, or move with a power of two * offset. We eliminate unnecessary node creation by catching @@ -408,13 +409,19 @@ public class ConcurrentHashMapV8 * locks, average aggregate waits become shorter as resizing * progresses. The transfer operation must also ensure that all * accessible bins in both the old and new table are usable by any - * traversal. This is arranged by proceeding from the last bin - * (table.length - 1) up towards the first. Upon seeing a - * forwarding node, traversals (see class Traverser) arrange to - * move to the new table without revisiting nodes. However, to - * ensure that no intervening nodes are skipped, bin splitting can - * only begin after the associated reverse-forwarders are in - * place. + * traversal. This is arranged in part by proceeding from the + * last bin (table.length - 1) up towards the first. Upon seeing + * a forwarding node, traversals (see class Traverser) arrange to + * move to the new table without revisiting nodes. To ensure that + * no intervening nodes are skipped even when moved out of order, + * a stack (see class TableStack) is created on first encounter of + * a forwarding node during a traversal, to maintain its place if + * later processing the current table. The need for these + * save/restore mechanics is relatively rare, but when one + * forwarding node is encountered, typically many more will be. + * So Traversers use a simple caching scheme to avoid creating so + * many new TableStack nodes. (Thanks to Peter Levart for + * suggesting use of a stack here.) 
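 *
 * (Editorial illustration, not part of this patch.) The "stay at the same
 * index, or move with a power of two offset" rule noted above can be checked
 * with a tiny standalone sketch; the table length and hashes below are
 * arbitrary example values:
 *
 *   int n = 16;                          // old table length, a power of two
 *   for (int h : new int[] {0x21, 0x31, 0x7f, 0x80}) {
 *       int oldIndex = h & (n - 1);      // bin in the old table
 *       int newIndex = h & (2 * n - 1);  // bin in the doubled table
 *       // the extra bit (h & n) decides: 0 => same index, n => index + n
 *       assert newIndex == ((h & n) == 0 ? oldIndex : oldIndex + n);
 *   }
 *
 * This is why a transfer can split each old bin into a "low" list that stays
 * at index i and a "high" list that moves to index i + n in nextTable.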
* * The traversal scheme also applies to partial traversals of * ranges of bins (via an alternate Traverser constructor) @@ -446,16 +453,18 @@ public class ConcurrentHashMapV8 * related operations (which is the main reason we cannot use * existing collections such as TreeMaps). TreeBins contain * Comparable elements, but may contain others, as well as - * elements that are Comparable but not necessarily Comparable - * for the same T, so we cannot invoke compareTo among them. To - * handle this, the tree is ordered primarily by hash value, then - * by Comparable.compareTo order if applicable. On lookup at a - * node, if elements are not comparable or compare as 0 then both - * left and right children may need to be searched in the case of - * tied hash values. (This corresponds to the full list search - * that would be necessary if all elements were non-Comparable and - * had tied hashes.) The red-black balancing code is updated from - * pre-jdk-collections + * elements that are Comparable but not necessarily Comparable for + * the same T, so we cannot invoke compareTo among them. To handle + * this, the tree is ordered primarily by hash value, then by + * Comparable.compareTo order if applicable. On lookup at a node, + * if elements are not comparable or compare as 0 then both left + * and right children may need to be searched in the case of tied + * hash values. (This corresponds to the full list search that + * would be necessary if all elements were non-Comparable and had + * tied hashes.) On insertion, to keep a total ordering (or as + * close as is required here) across rebalancings, we compare + * classes and identityHashCodes as tie-breakers. The red-black + * balancing code is updated from pre-jdk-collections * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) * based in turn on Cormen, Leiserson, and Rivest "Introduction to * Algorithms" (CLR). @@ -478,13 +487,17 @@ public class ConcurrentHashMapV8 * * Maintaining API and serialization compatibility with previous * versions of this class introduces several oddities. Mainly: We - * leave untouched but unused constructor arguments refering to + * leave untouched but unused constructor arguments referring to * concurrencyLevel. We accept a loadFactor constructor argument, * but apply it only to initial table capacity (which is the only * time that we can guarantee to honor it.) We also declare an * unused "Segment" class that is instantiated in minimal form * only when serializing. * + * Also, solely for compatibility with previous versions of this + * class, it extends AbstractMap, even though all of its methods + * are overridden, so it is just useless baggage. + * * This file is organized to make things a little easier to follow * while reading than they might otherwise: First the main static * declarations and utilities, then fields, then main public @@ -565,6 +578,23 @@ public class ConcurrentHashMapV8 */ private static final int MIN_TRANSFER_STRIDE = 16; + /** + * The number of bits used for generation stamp in sizeCtl. + * Must be at least 6 for 32bit arrays. + */ + private static int RESIZE_STAMP_BITS = 16; + + /** + * The maximum number of threads that can help resize. + * Must fit in 32 - RESIZE_STAMP_BITS bits. + */ + private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1; + + /** + * The bit shift for recording size stamp in sizeCtl. + */ + private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS; + /* * Encodings for Node hash fields. See above for explanation. 
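 *
 * (Editorial aside, not part of this patch.) The resize stamp constants
 * declared above are intended to combine in sizeCtl roughly as sketched
 * below; the table length 16 is an arbitrary example value:
 *
 *   int n = 16;
 *   int rs = Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
 *   int sc = (rs << RESIZE_STAMP_SHIFT) + 2;   // first resizer: stamp + (1 thread + 1)
 *   assert sc < 0;                             // negative while a resize is running
 *   assert (sc >>> RESIZE_STAMP_SHIFT) == rs;  // stamp recoverable from sizeCtl
 *   sc = sc + 1;                               // each helper CASes sizeCtl up by one
 *   assert (sc & MAX_RESIZERS) - 1 == 2;       // low bits hold active resizers + 1
 *
 * so at most MAX_RESIZERS threads can join one resize, and a stale stamp lets
 * a would-be helper detect that the resize it meant to join has already
 * completed.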
*/ @@ -606,10 +636,10 @@ public class ConcurrentHashMapV8 this.next = next; } - public final K getKey() { return key; } - public final V getValue() { return val; } - public final int hashCode() { return key.hashCode() ^ val.hashCode(); } - public final String toString(){ return key + "=" + val; } + public final K getKey() { return key; } + public final V getValue() { return val; } + public final int hashCode() { return key.hashCode() ^ val.hashCode(); } + public final String toString() { return key + "=" + val; } public final V setValue(V value) { throw new UnsupportedOperationException(); } @@ -722,7 +752,7 @@ public class ConcurrentHashMapV8 * errors by users, these checks must operate on local variables, * which accounts for some odd-looking inline assignments below. * Note that calls to setTabAt always occur within locked regions, - * and so in principle require only release ordering, not need + * and so in principle require only release ordering, not * full volatile semantics, but are currently coded as volatile * writes to be conservative. */ @@ -777,11 +807,6 @@ public class ConcurrentHashMapV8 private transient volatile int transferIndex; /** - * The least available table index to split while resizing. - */ - private transient volatile int transferOrigin; - - /** * Spinlock (locked via CAS) used when resizing and/or creating CounterCells. */ private transient volatile int cellsBusy; @@ -1359,6 +1384,7 @@ public class ConcurrentHashMapV8 * Saves the state of the {@code ConcurrentHashMapV8} instance to a * stream (i.e., serializes it). * @param s the stream + * @throws java.io.IOException if an I/O error occurs * @serialData * the key (Object) and value (Object) * for each key-value mapping, followed by a null pair. @@ -1401,6 +1427,9 @@ public class ConcurrentHashMapV8 /** * Reconstitutes the instance from a stream (that is, deserializes it). * @param s the stream + * @throws ClassNotFoundException if the class of a serialized object + * could not be found + * @throws java.io.IOException if an I/O error occurs */ private void readObject(java.io.ObjectInputStream s) throws java.io.IOException, ClassNotFoundException { @@ -1435,8 +1464,8 @@ public class ConcurrentHashMapV8 int sz = (int)size; n = tableSizeFor(sz + (sz >>> 1) + 1); } - @SuppressWarnings({"rawtypes","unchecked"}) - Node[] tab = (Node[])new Node[n]; + @SuppressWarnings("unchecked") + Node[] tab = (Node[])new Node[n]; int mask = n - 1; long added = 0L; while (p != null) { @@ -2101,9 +2130,9 @@ public class ConcurrentHashMapV8 * * @param initialCapacity The implementation performs internal * sizing to accommodate this many elements. + * @return the new set * @throws IllegalArgumentException if the initial capacity of * elements is negative - * @return the new set * @since 1.8 */ public static KeySetView newKeySet(int initialCapacity) { @@ -2183,6 +2212,14 @@ public class ConcurrentHashMapV8 /* ---------------- Table Initialization and Resizing -------------- */ /** + * Returns the stamp bits for resizing a table of size n. + * Must be negative when shifted left by RESIZE_STAMP_SHIFT. + */ + static final int resizeStamp(int n) { + return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1)); + } + + /** * Initializes table, using the size recorded in sizeCtl. */ private final Node[] initTable() { @@ -2194,8 +2231,8 @@ public class ConcurrentHashMapV8 try { if ((tab = table) == null || tab.length == 0) { int n = (sc > 0) ? 
sc : DEFAULT_CAPACITY; - @SuppressWarnings({"rawtypes","unchecked"}) - Node[] nt = (Node[])new Node[n]; + @SuppressWarnings("unchecked") + Node[] nt = (Node[])new Node[n]; table = tab = nt; sc = n - (n >>> 2); } @@ -2237,17 +2274,20 @@ public class ConcurrentHashMapV8 s = sumCount(); } if (check >= 0) { - Node[] tab, nt; int sc; + Node[] tab, nt; int n, sc; while (s >= (long)(sc = sizeCtl) && (tab = table) != null && - tab.length < MAXIMUM_CAPACITY) { + (n = tab.length) < MAXIMUM_CAPACITY) { + int rs = resizeStamp(n); if (sc < 0) { - if (sc == -1 || transferIndex <= transferOrigin || - (nt = nextTable) == null) + if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 || + sc == rs + MAX_RESIZERS || (nt = nextTable) == null || + transferIndex <= 0) break; - if (U.compareAndSwapInt(this, SIZECTL, sc, sc - 1)) + if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) transfer(tab, nt); } - else if (U.compareAndSwapInt(this, SIZECTL, sc, -2)) + else if (U.compareAndSwapInt(this, SIZECTL, sc, + (rs << RESIZE_STAMP_SHIFT) + 2)) transfer(tab, null); s = sumCount(); } @@ -2259,12 +2299,19 @@ public class ConcurrentHashMapV8 */ final Node[] helpTransfer(Node[] tab, Node f) { Node[] nextTab; int sc; - if ((f instanceof ForwardingNode) && + if (tab != null && (f instanceof ForwardingNode) && (nextTab = ((ForwardingNode)f).nextTable) != null) { - if (nextTab == nextTable && tab == table && - transferIndex > transferOrigin && (sc = sizeCtl) < -1 && - U.compareAndSwapInt(this, SIZECTL, sc, sc - 1)) - transfer(tab, nextTab); + int rs = resizeStamp(tab.length); + while (nextTab == nextTable && table == tab && + (sc = sizeCtl) < 0) { + if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 || + sc == rs + MAX_RESIZERS || transferIndex <= 0) + break; + if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) { + transfer(tab, nextTab); + break; + } + } return nextTab; } return table; @@ -2286,8 +2333,8 @@ public class ConcurrentHashMapV8 if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) { try { if (table == tab) { - @SuppressWarnings({"rawtypes","unchecked"}) - Node[] nt = (Node[])new Node[n]; + @SuppressWarnings("unchecked") + Node[] nt = (Node[])new Node[n]; table = nt; sc = n - (n >>> 2); } @@ -2298,9 +2345,21 @@ public class ConcurrentHashMapV8 } else if (c <= sc || n >= MAXIMUM_CAPACITY) break; - else if (tab == table && - U.compareAndSwapInt(this, SIZECTL, sc, -2)) - transfer(tab, null); + else if (tab == table) { + int rs = resizeStamp(n); + if (sc < 0) { + Node[] nt; + if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 || + sc == rs + MAX_RESIZERS || (nt = nextTable) == null || + transferIndex <= 0) + break; + if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) + transfer(tab, nt); + } + else if (U.compareAndSwapInt(this, SIZECTL, sc, + (rs << RESIZE_STAMP_SHIFT) + 2)) + transfer(tab, null); + } } } @@ -2314,36 +2373,27 @@ public class ConcurrentHashMapV8 stride = MIN_TRANSFER_STRIDE; // subdivide range if (nextTab == null) { // initiating try { - @SuppressWarnings({"rawtypes","unchecked"}) - Node[] nt = (Node[])new Node[n << 1]; + @SuppressWarnings("unchecked") + Node[] nt = (Node[])new Node[n << 1]; nextTab = nt; } catch (Throwable ex) { // try to cope with OOME sizeCtl = Integer.MAX_VALUE; return; } nextTable = nextTab; - transferOrigin = n; transferIndex = n; - ForwardingNode rev = new ForwardingNode(tab); - for (int k = n; k > 0;) { // progressively reveal ready slots - int nextk = (k > stride) ? 
k - stride : 0; - for (int m = nextk; m < k; ++m) - nextTab[m] = rev; - for (int m = n + nextk; m < n + k; ++m) - nextTab[m] = rev; - U.putOrderedInt(this, TRANSFERORIGIN, k = nextk); - } } int nextn = nextTab.length; ForwardingNode fwd = new ForwardingNode(nextTab); boolean advance = true; boolean finishing = false; // to ensure sweep before committing nextTab for (int i = 0, bound = 0;;) { - int nextIndex, nextBound, fh; Node f; + Node f; int fh; while (advance) { + int nextIndex, nextBound; if (--i >= bound || finishing) advance = false; - else if ((nextIndex = transferIndex) <= transferOrigin) { + else if ((nextIndex = transferIndex) <= 0) { i = -1; advance = false; } @@ -2357,29 +2407,22 @@ public class ConcurrentHashMapV8 } } if (i < 0 || i >= n || i + n >= nextn) { + int sc; if (finishing) { nextTable = null; table = nextTab; sizeCtl = (n << 1) - (n >>> 1); return; } - for (int sc;;) { - if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, ++sc)) { - if (sc != -1) - return; - finishing = advance = true; - i = n; // recheck before commit - break; - } - } - } - else if ((f = tabAt(tab, i)) == null) { - if (casTabAt(tab, i, null, fwd)) { - setTabAt(nextTab, i, null); - setTabAt(nextTab, i + n, null); - advance = true; + if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) { + if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT) + return; + finishing = advance = true; + i = n; // recheck before commit } } + else if ((f = tabAt(tab, i)) == null) + advance = casTabAt(tab, i, null, fwd); else if ((fh = f.hash) == MOVED) advance = true; // already processed else { @@ -2466,11 +2509,8 @@ public class ConcurrentHashMapV8 private final void treeifyBin(Node[] tab, int index) { Node b; int n, sc; if (tab != null) { - if ((n = tab.length) < MIN_TREEIFY_CAPACITY) { - if (tab == table && (sc = sizeCtl) >= 0 && - U.compareAndSwapInt(this, SIZECTL, sc, -2)) - transfer(tab, null); - } + if ((n = tab.length) < MIN_TREEIFY_CAPACITY) + tryPresize(n << 1); else if ((b = tabAt(tab, index)) != null && b.hash >= 0) { synchronized (b) { if (tabAt(tab, index) == b) { @@ -2493,7 +2533,7 @@ public class ConcurrentHashMapV8 } /** - * Returns a list on non-TreeNodes replacing those in given list. + * Returns a list of non-TreeNodes replacing those in given list. */ static Node untreeify(Node b) { Node hd = null, tl = null; @@ -2537,7 +2577,7 @@ public class ConcurrentHashMapV8 final TreeNode findTreeNode(int h, Object k, Class kc) { if (k != null) { TreeNode p = this; - do { + do { int ph, dir; K pk; TreeNode q; TreeNode pl = p.left, pr = p.right; if ((ph = p.hash) > h) @@ -2546,19 +2586,18 @@ public class ConcurrentHashMapV8 p = pr; else if ((pk = p.key) == k || (pk != null && k.equals(pk))) return p; - else if (pl == null && pr == null) - break; + else if (pl == null) + p = pr; + else if (pr == null) + p = pl; else if ((kc != null || (kc = comparableClassFor(k)) != null) && (dir = compareComparables(kc, k, pk)) != 0) p = (dir < 0) ? pl : pr; - else if (pl == null) - p = pr; - else if (pr == null || - (q = pr.findTreeNode(h, k, kc)) == null) - p = pl; - else + else if ((q = pr.findTreeNode(h, k, kc)) != null) return q; + else + p = pl; } while (p != null); } return null; @@ -2585,6 +2624,23 @@ public class ConcurrentHashMapV8 static final int READER = 4; // increment value for setting read lock /** + * Tie-breaking utility for ordering insertions when equal + * hashCodes and non-comparable. 
We don't require a total + * order, just a consistent insertion rule to maintain + * equivalence across rebalancings. Tie-breaking further than + * necessary simplifies testing a bit. + */ + static int tieBreakOrder(Object a, Object b) { + int d; + if (a == null || b == null || + (d = a.getClass().getName(). + compareTo(b.getClass().getName())) == 0) + d = (System.identityHashCode(a) <= System.identityHashCode(b) ? + -1 : 1); + return d; + } + + /** * Creates bin with initial set of nodes headed by b. */ TreeBin(TreeNode b) { @@ -2600,21 +2656,21 @@ public class ConcurrentHashMapV8 r = x; } else { - Object key = x.key; - int hash = x.hash; + K k = x.key; + int h = x.hash; Class kc = null; for (TreeNode p = r;;) { int dir, ph; - if ((ph = p.hash) > hash) + K pk = p.key; + if ((ph = p.hash) > h) dir = -1; - else if (ph < hash) + else if (ph < h) dir = 1; - else if ((kc != null || - (kc = comparableClassFor(key)) != null)) - dir = compareComparables(kc, key, p.key); - else - dir = 0; - TreeNode xp = p; + else if ((kc == null && + (kc = comparableClassFor(k)) == null) || + (dir = compareComparables(kc, k, pk)) == 0) + dir = tieBreakOrder(k, pk); + TreeNode xp = p; if ((p = (dir <= 0) ? p.left : p.right) == null) { x.parent = xp; if (dir <= 0) @@ -2628,6 +2684,7 @@ public class ConcurrentHashMapV8 } } this.root = r; + assert checkInvariants(root); } /** @@ -2651,14 +2708,14 @@ public class ConcurrentHashMapV8 private final void contendedLock() { boolean waiting = false; for (int s;;) { - if (((s = lockState) & WRITER) == 0) { + if (((s = lockState) & ~WAITER) == 0) { if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) { if (waiting) waiter = null; return; } } - else if ((s | WAITER) == 0) { + else if ((s & WAITER) == 0) { if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) { waiting = true; waiter = Thread.currentThread(); @@ -2676,12 +2733,13 @@ public class ConcurrentHashMapV8 */ final Node find(int h, Object k) { if (k != null) { - for (Node e = first; e != null; e = e.next) { + for (Node e = first; e != null; ) { int s; K ek; if (((s = lockState) & (WAITER|WRITER)) != 0) { if (e.hash == h && ((ek = e.key) == k || (ek != null && k.equals(ek)))) return e; + e = e.next; } else if (U.compareAndSwapInt(this, LOCKSTATE, s, s + READER)) { @@ -2711,8 +2769,9 @@ public class ConcurrentHashMapV8 */ final TreeNode putTreeVal(int h, K k, V v) { Class kc = null; + boolean searched = false; for (TreeNode p = root;;) { - int dir, ph; K pk; TreeNode q, pr; + int dir, ph; K pk; if (p == null) { first = root = new TreeNode(h, k, v, null, null); break; @@ -2726,21 +2785,25 @@ public class ConcurrentHashMapV8 else if ((kc == null && (kc = comparableClassFor(k)) == null) || (dir = compareComparables(kc, k, pk)) == 0) { - if (p.left == null) - dir = 1; - else if ((pr = p.right) == null || - (q = pr.findTreeNode(h, k, kc)) == null) - dir = -1; - else - return q; + if (!searched) { + TreeNode q, ch; + searched = true; + if (((ch = p.left) != null && + (q = ch.findTreeNode(h, k, kc)) != null) || + ((ch = p.right) != null && + (q = ch.findTreeNode(h, k, kc)) != null)) + return q; + } + dir = tieBreakOrder(k, pk); } + TreeNode xp = p; - if ((p = (dir < 0) ? p.left : p.right) == null) { + if ((p = (dir <= 0) ? 
p.left : p.right) == null) { TreeNode x, f = first; first = x = new TreeNode(h, k, v, f, xp); if (f != null) f.prev = x; - if (dir < 0) + if (dir <= 0) xp.left = x; else xp.right = x; @@ -2963,7 +3026,7 @@ public class ConcurrentHashMapV8 static TreeNode balanceDeletion(TreeNode root, TreeNode x) { - for (TreeNode xp, xpl, xpr;;) { + for (TreeNode xp, xpl, xpr;;) { if (x == null || x == root) return root; else if ((xp = x.parent) == null) { @@ -3095,6 +3158,18 @@ public class ConcurrentHashMapV8 /* ----------------Table Traversal -------------- */ /** + * Records the table, its length, and current traversal index for a + * traverser that must process a region of a forwarded table before + * proceeding with current table. + */ + static final class TableStack { + int length; + int index; + Node[] tab; + TableStack next; + } + + /** * Encapsulates traversal for methods such as containsValue; also * serves as a base class for other iterators and spliterators. * @@ -3118,6 +3193,7 @@ public class ConcurrentHashMapV8 static class Traverser { Node[] tab; // current table; updated if resized Node next; // the next entry to use + TableStack stack, spare; // to save/restore on ForwardingNodes int index; // index of bin to use next int baseIndex; // current index of initial table int baseLimit; // index bound for initial table @@ -3139,16 +3215,17 @@ public class ConcurrentHashMapV8 if ((e = next) != null) e = e.next; for (;;) { - Node[] t; int i, n; K ek; // must use locals in checks + Node[] t; int i, n; // must use locals in checks if (e != null) return next = e; if (baseIndex >= baseLimit || (t = tab) == null || (n = t.length) <= (i = index) || i < 0) return next = null; - if ((e = tabAt(t, index)) != null && e.hash < 0) { + if ((e = tabAt(t, i)) != null && e.hash < 0) { if (e instanceof ForwardingNode) { tab = ((ForwardingNode)e).nextTable; e = null; + pushState(t, i, n); continue; } else if (e instanceof TreeBin) @@ -3156,9 +3233,48 @@ public class ConcurrentHashMapV8 else e = null; } - if ((index += baseSize) >= n) - index = ++baseIndex; // visit upper slots if present + if (stack != null) + recoverState(n); + else if ((index = i + baseSize) >= n) + index = ++baseIndex; // visit upper slots if present + } + } + + /** + * Saves traversal state upon encountering a forwarding node. + */ + private void pushState(Node[] t, int i, int n) { + TableStack s = spare; // reuse if possible + if (s != null) + spare = s.next; + else + s = new TableStack(); + s.tab = t; + s.length = n; + s.index = i; + s.next = stack; + stack = s; + } + + /** + * Possibly pops traversal state. 
+ * + * @param n length of current table + */ + private void recoverState(int n) { + TableStack s; int len; + while ((s = stack) != null && (index += (len = s.length)) >= n) { + n = len; + index = s.index; + tab = s.tab; + s.tab = null; + TableStack next = s.next; + s.next = spare; // save for reuse + stack = next; + spare = s; } + if (s == null && (index += baseSize) >= n) + index = ++baseIndex; } } @@ -6126,7 +6242,6 @@ public class ConcurrentHashMapV8 private static final sun.misc.Unsafe U; private static final long SIZECTL; private static final long TRANSFERINDEX; - private static final long TRANSFERORIGIN; private static final long BASECOUNT; private static final long CELLSBUSY; private static final long CELLVALUE; @@ -6141,8 +6256,6 @@ public class ConcurrentHashMapV8 (k.getDeclaredField("sizeCtl")); TRANSFERINDEX = U.objectFieldOffset (k.getDeclaredField("transferIndex")); - TRANSFERORIGIN = U.objectFieldOffset - (k.getDeclaredField("transferOrigin")); BASECOUNT = U.objectFieldOffset (k.getDeclaredField("baseCount")); CELLSBUSY = U.objectFieldOffset