276 |
|
/** Interface describing a function mapping two ints to an int */ |
277 |
|
public interface IntByIntToInt { int apply(int a, int b); } |
278 |
|
|
279 |
+ |
|
280 |
|
/* |
281 |
|
* Overview: |
282 |
|
* |
382 |
|
* The table is resized when occupancy exceeds a percentage |
383 |
|
* threshold (nominally, 0.75, but see below). Any thread |
384 |
|
* noticing an overfull bin may assist in resizing after the |
385 |
< |
* initiating thread allocates and sets up the replacement |
386 |
< |
* array. However, rather than stalling, these other threads may |
387 |
< |
* proceed with insertions etc. The use of TreeBins shields us |
388 |
< |
* from the worst case effects of overfilling while resizes are in |
385 |
> |
* initiating thread allocates and sets up the replacement array. |
386 |
> |
* However, rather than stalling, these other threads may proceed |
387 |
> |
* with insertions etc. The use of TreeBins shields us from the |
388 |
> |
* worst case effects of overfilling while resizes are in |
389 |
|
* progress. Resizing proceeds by transferring bins, one by one, |
390 |
< |
* from the table to the next table. To enable concurrency, the |
391 |
< |
* next table must be (incrementally) prefilled with place-holders |
392 |
< |
* serving as reverse forwarders to the old table. Because we are |
393 |
< |
* using power-of-two expansion, the elements from each bin must |
394 |
< |
* either stay at same index, or move with a power of two |
395 |
< |
* offset. We eliminate unnecessary node creation by catching |
396 |
< |
* cases where old nodes can be reused because their next fields |
397 |
< |
* won't change. On average, only about one-sixth of them need |
398 |
< |
* cloning when a table doubles. The nodes they replace will be |
399 |
< |
* garbage collectable as soon as they are no longer referenced by |
400 |
< |
* any reader thread that may be in the midst of concurrently |
401 |
< |
* traversing table. Upon transfer, the old table bin contains |
402 |
< |
* only a special forwarding node (with hash field "MOVED") that |
403 |
< |
* contains the next table as its key. On encountering a |
404 |
< |
* forwarding node, access and update operations restart, using |
404 |
< |
* the new table. |
390 |
> |
* from the table to the next table. However, threads claim small |
391 |
> |
* blocks of indices to transfer (via field transferIndex) before |
392 |
> |
* doing so, reducing contention. Because we are using |
393 |
> |
* power-of-two expansion, the elements from each bin must either |
394 |
> |
* stay at same index, or move with a power of two offset. We |
395 |
> |
* eliminate unnecessary node creation by catching cases where old |
396 |
> |
* nodes can be reused because their next fields won't change. On |
397 |
> |
* average, only about one-sixth of them need cloning when a table |
398 |
> |
* doubles. The nodes they replace will be garbage collectable as |
399 |
> |
* soon as they are no longer referenced by any reader thread that |
400 |
> |
* may be in the midst of concurrently traversing table. Upon |
401 |
> |
* transfer, the old table bin contains only a special forwarding |
402 |
> |
* node (with hash field "MOVED") that contains the next table as |
403 |
> |
* its key. On encountering a forwarding node, access and update |
404 |
> |
* operations restart, using the new table. |
405 |
|
* |
406 |
|
* Each bin transfer requires its bin lock, which can stall |
407 |
|
* waiting for locks while resizing. However, because other
* threads can join in and help resize rather than contend for
409 |
|
* locks, average aggregate waits become shorter as resizing |
410 |
|
* progresses. The transfer operation must also ensure that all |
411 |
|
* accessible bins in both the old and new table are usable by any |
412 |
< |
* traversal. This is arranged by proceeding from the last bin |
413 |
< |
* (table.length - 1) up towards the first. Upon seeing a |
414 |
< |
* forwarding node, traversals (see class Traverser) arrange to |
415 |
< |
* move to the new table without revisiting nodes. However, to |
416 |
< |
* ensure that no intervening nodes are skipped, bin splitting can |
417 |
< |
* only begin after the associated reverse-forwarders are in |
418 |
< |
* place. |
412 |
> |
* traversal. This is arranged in part by proceeding from the |
413 |
> |
* last bin (table.length - 1) up towards the first. Upon seeing |
414 |
> |
* a forwarding node, traversals (see class Traverser) arrange to |
415 |
> |
* move to the new table without revisiting nodes. To ensure that |
416 |
> |
* no intervening nodes are skipped even when moved out of order, |
417 |
> |
* a stack (see class TableStack) is created on first encounter of |
418 |
> |
* a forwarding node during a traversal, to maintain its place if |
419 |
> |
* later processing the current table. The need for these |
420 |
> |
* save/restore mechanics is relatively rare, but when one |
421 |
> |
* forwarding node is encountered, typically many more will be. |
422 |
> |
* So Traversers use a simple caching scheme to avoid creating so |
423 |
> |
* many new TableStack nodes. (Thanks to Peter Levart for |
424 |
> |
* suggesting use of a stack here.) |
425 |
|
* |
426 |
|
* The traversal scheme also applies to partial traversals of |
427 |
|
* ranges of bins (via an alternate Traverser constructor) |
790 |
|
private transient volatile int transferIndex; |
791 |
|
|
792 |
|
/** |
787 |
– |
* The least available table index to split while resizing. |
788 |
– |
*/ |
789 |
– |
private transient volatile int transferOrigin; |
790 |
– |
|
791 |
– |
/** |
793 |
|
* Spinlock (locked via CAS) used when resizing and/or creating CounterCells. |
794 |
|
*/ |
795 |
|
private transient volatile int cellsBusy; |
2253 |
|
while (s >= (long)(sc = sizeCtl) && (tab = table) != null && |
2254 |
|
tab.length < MAXIMUM_CAPACITY) { |
2255 |
|
if (sc < 0) { |
2256 |
< |
if (sc == -1 || transferIndex <= transferOrigin || |
2256 |
> |
if (sc == -1 || transferIndex <= 0 || |
2257 |
|
(nt = nextTable) == null) |
2258 |
|
break; |
2259 |
|
if (U.compareAndSwapInt(this, SIZECTL, sc, sc - 1)) |
2273 |
|
Node<K,V>[] nextTab; int sc; |
2274 |
|
if ((f instanceof ForwardingNode) && |
2275 |
|
(nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) { |
2276 |
< |
if (nextTab == nextTable && tab == table && |
2277 |
< |
transferIndex > transferOrigin && (sc = sizeCtl) < -1 && |
2278 |
< |
U.compareAndSwapInt(this, SIZECTL, sc, sc - 1)) |
2279 |
< |
transfer(tab, nextTab); |
2276 |
> |
while (transferIndex > 0 && nextTab == nextTable && |
2277 |
> |
(sc = sizeCtl) < -1) { |
2278 |
> |
if (U.compareAndSwapInt(this, SIZECTL, sc, sc - 1)) { |
2279 |
> |
transfer(tab, nextTab); |
2280 |
> |
break; |
2281 |
> |
} |
2282 |
> |
} |
2283 |
|
return nextTab; |
2284 |
|
} |
2285 |
|
return table; |
2330 |
|
if (nextTab == null) { // initiating |
2331 |
|
try { |
2332 |
|
@SuppressWarnings("unchecked") |
2333 |
< |
Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1]; |
2333 |
> |
Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1]; |
2334 |
|
nextTab = nt; |
2335 |
|
} catch (Throwable ex) { // try to cope with OOME |
2336 |
|
sizeCtl = Integer.MAX_VALUE; |
2337 |
|
return; |
2338 |
|
} |
2339 |
|
nextTable = nextTab; |
2336 |
– |
transferOrigin = n; |
2340 |
|
transferIndex = n; |
2338 |
– |
ForwardingNode<K,V> rev = new ForwardingNode<K,V>(tab); |
2339 |
– |
for (int k = n; k > 0;) { // progressively reveal ready slots |
2340 |
– |
int nextk = (k > stride) ? k - stride : 0; |
2341 |
– |
for (int m = nextk; m < k; ++m) |
2342 |
– |
nextTab[m] = rev; |
2343 |
– |
for (int m = n + nextk; m < n + k; ++m) |
2344 |
– |
nextTab[m] = rev; |
2345 |
– |
U.putOrderedInt(this, TRANSFERORIGIN, k = nextk); |
2346 |
– |
} |
2341 |
|
} |
2342 |
|
int nextn = nextTab.length; |
2343 |
|
ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab); |
2344 |
|
boolean advance = true; |
2345 |
|
boolean finishing = false; // to ensure sweep before committing nextTab |
2346 |
|
for (int i = 0, bound = 0;;) { |
2347 |
< |
int nextIndex, nextBound, fh; Node<K,V> f; |
2347 |
> |
Node<K,V> f; int fh; |
2348 |
|
while (advance) { |
2349 |
+ |
int nextIndex, nextBound; |
2350 |
|
if (--i >= bound || finishing) |
2351 |
|
advance = false; |
2352 |
< |
else if ((nextIndex = transferIndex) <= transferOrigin) { |
2352 |
> |
else if ((nextIndex = transferIndex) <= 0) { |
2353 |
|
i = -1; |
2354 |
|
advance = false; |
2355 |
|
} |
2363 |
|
} |
2364 |
|
} |
2365 |
|
if (i < 0 || i >= n || i + n >= nextn) { |
2366 |
+ |
int sc; |
2367 |
|
if (finishing) { |
2368 |
|
nextTable = null; |
2369 |
|
table = nextTab; |
2370 |
|
sizeCtl = (n << 1) - (n >>> 1); |
2371 |
|
return; |
2372 |
|
} |
2373 |
< |
for (int sc;;) { |
2374 |
< |
if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, ++sc)) { |
2375 |
< |
if (sc != -1) |
2376 |
< |
return; |
2377 |
< |
finishing = advance = true; |
2382 |
< |
i = n; // recheck before commit |
2383 |
< |
break; |
2384 |
< |
} |
2385 |
< |
} |
2386 |
< |
} |
2387 |
< |
else if ((f = tabAt(tab, i)) == null) { |
2388 |
< |
if (casTabAt(tab, i, null, fwd)) { |
2389 |
< |
setTabAt(nextTab, i, null); |
2390 |
< |
setTabAt(nextTab, i + n, null); |
2391 |
< |
advance = true; |
2373 |
> |
if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, ++sc)) { |
2374 |
> |
if (sc != -1) |
2375 |
> |
return; |
2376 |
> |
finishing = advance = true; |
2377 |
> |
i = n; // recheck before commit |
2378 |
|
} |
2379 |
|
} |
2380 |
+ |
else if ((f = tabAt(tab, i)) == null) |
2381 |
+ |
advance = casTabAt(tab, i, null, fwd); |
2382 |
|
else if ((fh = f.hash) == MOVED) |
2383 |
|
advance = true; // already processed |
2384 |
|
else { |
3116 |
|
/* ----------------Table Traversal -------------- */ |
3117 |
|
|
3118 |
|
/**
 * Records the table, its length, and current traversal index for a
 * traverser that must process a region of a forwarded table before
 * proceeding with current table.
 */
static final class TableStack<K,V> {
    int length;           // length of the saved table
    int index;            // traversal index to resume at in the saved table
    Node<K,V>[] tab;      // the saved table; nulled when popped to avoid pinning it
    TableStack<K,V> next; // link to the frame below this one (LIFO)
}
3129 |
+ |
|
3130 |
+ |
/** |
3131 |
|
* Encapsulates traversal for methods such as containsValue; also |
3132 |
|
* serves as a base class for other iterators and spliterators. |
3133 |
|
* |
3151 |
|
static class Traverser<K,V> { |
3152 |
|
Node<K,V>[] tab; // current table; updated if resized |
3153 |
|
Node<K,V> next; // the next entry to use |
3154 |
+ |
TableStack<K,V> stack, spare; // to save/restore on ForwardingNodes |
3155 |
|
int index; // index of bin to use next |
3156 |
|
int baseIndex; // current index of initial table |
3157 |
|
int baseLimit; // index bound for initial table |
3173 |
|
if ((e = next) != null) |
3174 |
|
e = e.next; |
3175 |
|
for (;;) { |
3176 |
< |
Node<K,V>[] t; int i, n; K ek; // must use locals in checks |
3176 |
> |
Node<K,V>[] t; int i, n; // must use locals in checks |
3177 |
|
if (e != null) |
3178 |
|
return next = e; |
3179 |
|
if (baseIndex >= baseLimit || (t = tab) == null || |
3180 |
|
(n = t.length) <= (i = index) || i < 0) |
3181 |
|
return next = null; |
3182 |
< |
if ((e = tabAt(t, index)) != null && e.hash < 0) { |
3182 |
> |
if ((e = tabAt(t, i)) != null && e.hash < 0) { |
3183 |
|
if (e instanceof ForwardingNode) { |
3184 |
|
tab = ((ForwardingNode<K,V>)e).nextTable; |
3185 |
|
e = null; |
3186 |
+ |
pushState(t, i, n); |
3187 |
|
continue; |
3188 |
|
} |
3189 |
|
else if (e instanceof TreeBin) |
3191 |
|
else |
3192 |
|
e = null; |
3193 |
|
} |
3194 |
< |
if ((index += baseSize) >= n) |
3195 |
< |
index = ++baseIndex; // visit upper slots if present |
3194 |
> |
if (stack != null) |
3195 |
> |
recoverState(n); |
3196 |
> |
else if ((index = i + baseSize) >= n) |
3197 |
> |
index = ++baseIndex; // visit upper slots if present |
3198 |
> |
} |
3199 |
> |
} |
3200 |
> |
|
3201 |
> |
/** |
3202 |
> |
* Saves traversal state upon encountering a forwarding node. |
3203 |
> |
*/ |
3204 |
> |
private void pushState(Node<K,V>[] t, int i, int n) { |
3205 |
> |
TableStack<K,V> s = spare; // reuse if possible |
3206 |
> |
if (s != null) |
3207 |
> |
spare = s.next; |
3208 |
> |
else |
3209 |
> |
s = new TableStack<K,V>(); |
3210 |
> |
s.tab = t; |
3211 |
> |
s.length = n; |
3212 |
> |
s.index = i; |
3213 |
> |
s.next = stack; |
3214 |
> |
stack = s; |
3215 |
> |
} |
3216 |
> |
|
3217 |
> |
/**
 * Possibly pops traversal state.
 *
 * @param n length of current table
 */
private void recoverState(int n) {
    TableStack<K,V> s; int len;
    // Unwind saved frames whose remaining region is exhausted: advancing
    // index by the saved length steps past the region; when that lands at
    // or beyond the current length, restore the frame's table and index
    // and continue unwinding with that frame's length as the new bound.
    // NOTE the assignments inside the loop condition — index and len are
    // updated on every test, so statement order here is load-bearing.
    while ((s = stack) != null && (index += (len = s.length)) >= n) {
        n = len;
        index = s.index;
        tab = s.tab;
        s.tab = null;               // drop table ref so it can be GC'd
        TableStack<K,V> next = s.next;
        s.next = spare; // save for reuse
        stack = next;
        spare = s;
    }
    // No saved state left: advance within the current table, moving to
    // the next base bin once the stride passes the table length.
    if (s == null && (index += baseSize) >= n)
        index = ++baseIndex;
}
3237 |
|
} |
3238 |
|
|
6200 |
|
private static final sun.misc.Unsafe U; |
6201 |
|
private static final long SIZECTL; |
6202 |
|
private static final long TRANSFERINDEX; |
6162 |
– |
private static final long TRANSFERORIGIN; |
6203 |
|
private static final long BASECOUNT; |
6204 |
|
private static final long CELLSBUSY; |
6205 |
|
private static final long CELLVALUE; |
6214 |
|
(k.getDeclaredField("sizeCtl")); |
6215 |
|
TRANSFERINDEX = U.objectFieldOffset |
6216 |
|
(k.getDeclaredField("transferIndex")); |
6177 |
– |
TRANSFERORIGIN = U.objectFieldOffset |
6178 |
– |
(k.getDeclaredField("transferOrigin")); |
6217 |
|
BASECOUNT = U.objectFieldOffset |
6218 |
|
(k.getDeclaredField("baseCount")); |
6219 |
|
CELLSBUSY = U.objectFieldOffset |