299 |
|
* because they have negative hash fields and null key and value |
300 |
|
* fields. (These special nodes are either uncommon or transient, |
301 |
|
* so the impact of carrying around some unused fields is |
302 |
< |
* insignficant.) |
302 |
> |
* insignificant.) |
303 |
|
* |
304 |
|
* The table is lazily initialized to a power-of-two size upon the |
305 |
|
* first insertion. Each bin in the table normally contains a |
462 |
|
* |
463 |
|
* TreeBins also require an additional locking mechanism. While |
464 |
|
* list traversal is always possible by readers even during |
465 |
< |
* updates, tree traversal is not, mainly beause of tree-rotations |
465 |
> |
* updates, tree traversal is not, mainly because of tree-rotations |
466 |
|
* that may change the root node and/or its linkages. TreeBins |
467 |
|
* include a simple read-write lock mechanism parasitic on the |
468 |
|
* main bin-synchronization strategy: Structural adjustments |
568 |
|
/* |
569 |
|
* Encodings for Node hash fields. See above for explanation. |
570 |
|
*/ |
571 |
< |
static final int MOVED = 0x8fffffff; // (-1) hash for forwarding nodes |
572 |
< |
static final int TREEBIN = 0x80000000; // hash for heads of treea |
573 |
< |
static final int RESERVED = 0x80000001; // hash for transient reservations |
571 |
> |
static final int MOVED = -1; // hash for forwarding nodes |
572 |
> |
static final int TREEBIN = -2; // hash for roots of trees |
573 |
> |
static final int RESERVED = -3; // hash for transient reservations |
574 |
|
static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash |
575 |
|
|
576 |
|
/** Number of CPUS, to place bounds on some sizings */ |
589 |
|
* Key-value entry. This class is never exported out as a |
590 |
|
* user-mutable Map.Entry (i.e., one supporting setValue; see |
591 |
|
* MapEntry below), but can be used for read-only traversals used |
592 |
< |
* in bulk tasks. Subclasses of Node with a negativehash field |
592 |
> |
* in bulk tasks. Subclasses of Node with a negative hash field |
593 |
|
* are special, and contain null keys and values (but are never |
594 |
|
* exported). Otherwise, keys and vals are never null. |
595 |
|
*/ |
597 |
|
final int hash; |
598 |
|
final K key; |
599 |
|
volatile V val; |
600 |
< |
Node<K,V> next; |
600 |
> |
volatile Node<K,V> next; |
601 |
|
|
602 |
|
Node(int hash, K key, V val, Node<K,V> next) { |
603 |
|
this.hash = hash; |
722 |
|
* errors by users, these checks must operate on local variables, |
723 |
|
* which accounts for some odd-looking inline assignments below. |
724 |
|
* Note that calls to setTabAt always occur within locked regions, |
725 |
< |
* and so do not need full volatile semantics, but still require |
726 |
< |
* ordering to maintain concurrent readability. |
725 |
> |
* and so in principle require only release ordering, not |
726 |
> |
* full volatile semantics, but are currently coded as volatile |
727 |
> |
* writes to be conservative. |
728 |
|
*/ |
729 |
|
|
730 |
|
@SuppressWarnings("unchecked") |
738 |
|
} |
739 |
|
|
740 |
|
static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) { |
741 |
< |
U.putOrderedObject(tab, ((long)i << ASHIFT) + ABASE, v); |
741 |
> |
U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v); |
742 |
|
} |
743 |
|
|
744 |
|
/* ---------------- Fields -------------- */ |
2101 |
|
* |
2102 |
|
* @param initialCapacity The implementation performs internal |
2103 |
|
* sizing to accommodate this many elements. |
2104 |
+ |
* @return the new set |
2105 |
|
* @throws IllegalArgumentException if the initial capacity of |
2106 |
|
* elements is negative |
2105 |
– |
* @return the new set |
2107 |
|
* @since 1.8 |
2108 |
|
*/ |
2109 |
|
public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) { |
2141 |
|
} |
2142 |
|
|
2143 |
|
Node<K,V> find(int h, Object k) { |
2144 |
< |
Node<K,V> e; int n; |
2145 |
< |
Node<K,V>[] tab = nextTable; |
2146 |
< |
if (k != null && tab != null && (n = tab.length) > 0 && |
2147 |
< |
(e = tabAt(tab, (n - 1) & h)) != null) { |
2148 |
< |
do { |
2144 |
> |
// loop to avoid arbitrarily deep recursion on forwarding nodes |
2145 |
> |
outer: for (Node<K,V>[] tab = nextTable;;) { |
2146 |
> |
Node<K,V> e; int n; |
2147 |
> |
if (k == null || tab == null || (n = tab.length) == 0 || |
2148 |
> |
(e = tabAt(tab, (n - 1) & h)) == null) |
2149 |
> |
return null; |
2150 |
> |
for (;;) { |
2151 |
|
int eh; K ek; |
2152 |
|
if ((eh = e.hash) == h && |
2153 |
|
((ek = e.key) == k || (ek != null && k.equals(ek)))) |
2154 |
|
return e; |
2155 |
< |
if (eh < 0) |
2156 |
< |
return e.find(h, k); |
2157 |
< |
} while ((e = e.next) != null); |
2155 |
> |
if (eh < 0) { |
2156 |
> |
if (e instanceof ForwardingNode) { |
2157 |
> |
tab = ((ForwardingNode<K,V>)e).nextTable; |
2158 |
> |
continue outer; |
2159 |
> |
} |
2160 |
> |
else |
2161 |
> |
return e.find(h, k); |
2162 |
> |
} |
2163 |
> |
if ((e = e.next) == null) |
2164 |
> |
return null; |
2165 |
> |
} |
2166 |
|
} |
2156 |
– |
return null; |
2167 |
|
} |
2168 |
|
} |
2169 |
|
|
2337 |
|
int nextn = nextTab.length; |
2338 |
|
ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab); |
2339 |
|
boolean advance = true; |
2340 |
+ |
boolean finishing = false; // to ensure sweep before committing nextTab |
2341 |
|
for (int i = 0, bound = 0;;) { |
2342 |
|
int nextIndex, nextBound, fh; Node<K,V> f; |
2343 |
|
while (advance) { |
2344 |
< |
if (--i >= bound) |
2344 |
> |
if (--i >= bound || finishing) |
2345 |
|
advance = false; |
2346 |
|
else if ((nextIndex = transferIndex) <= transferOrigin) { |
2347 |
|
i = -1; |
2357 |
|
} |
2358 |
|
} |
2359 |
|
if (i < 0 || i >= n || i + n >= nextn) { |
2360 |
+ |
if (finishing) { |
2361 |
+ |
nextTable = null; |
2362 |
+ |
table = nextTab; |
2363 |
+ |
sizeCtl = (n << 1) - (n >>> 1); |
2364 |
+ |
return; |
2365 |
+ |
} |
2366 |
|
for (int sc;;) { |
2367 |
|
if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, ++sc)) { |
2368 |
< |
if (sc == -1) { |
2369 |
< |
nextTable = null; |
2370 |
< |
table = nextTab; |
2371 |
< |
sizeCtl = (n << 1) - (n >>> 1); |
2372 |
< |
} |
2356 |
< |
return; |
2368 |
> |
if (sc != -1) |
2369 |
> |
return; |
2370 |
> |
finishing = advance = true; |
2371 |
> |
i = n; // recheck before commit |
2372 |
> |
break; |
2373 |
|
} |
2374 |
|
} |
2375 |
|
} |
2411 |
|
else |
2412 |
|
hn = new Node<K,V>(ph, pk, pv, hn); |
2413 |
|
} |
2414 |
+ |
setTabAt(nextTab, i, ln); |
2415 |
+ |
setTabAt(nextTab, i + n, hn); |
2416 |
+ |
setTabAt(tab, i, fwd); |
2417 |
+ |
advance = true; |
2418 |
|
} |
2419 |
|
else if (f instanceof TreeBin) { |
2420 |
|
TreeBin<K,V> t = (TreeBin<K,V>)f; |
2442 |
|
++hc; |
2443 |
|
} |
2444 |
|
} |
2445 |
< |
ln = (lc <= UNTREEIFY_THRESHOLD ? untreeify(lo) : |
2446 |
< |
(hc != 0) ? new TreeBin<K,V>(lo) : t); |
2447 |
< |
hn = (hc <= UNTREEIFY_THRESHOLD ? untreeify(hi) : |
2448 |
< |
(lc != 0) ? new TreeBin<K,V>(hi) : t); |
2445 |
> |
ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) : |
2446 |
> |
(hc != 0) ? new TreeBin<K,V>(lo) : t; |
2447 |
> |
hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) : |
2448 |
> |
(lc != 0) ? new TreeBin<K,V>(hi) : t; |
2449 |
> |
setTabAt(nextTab, i, ln); |
2450 |
> |
setTabAt(nextTab, i + n, hn); |
2451 |
> |
setTabAt(tab, i, fwd); |
2452 |
> |
advance = true; |
2453 |
|
} |
2430 |
– |
else |
2431 |
– |
ln = hn = null; |
2432 |
– |
setTabAt(nextTab, i, ln); |
2433 |
– |
setTabAt(nextTab, i + n, hn); |
2434 |
– |
setTabAt(tab, i, fwd); |
2435 |
– |
advance = true; |
2454 |
|
} |
2455 |
|
} |
2456 |
|
} |
2471 |
|
U.compareAndSwapInt(this, SIZECTL, sc, -2)) |
2472 |
|
transfer(tab, null); |
2473 |
|
} |
2474 |
< |
else if ((b = tabAt(tab, index)) != null) { |
2474 |
> |
else if ((b = tabAt(tab, index)) != null && b.hash >= 0) { |
2475 |
|
synchronized (b) { |
2476 |
|
if (tabAt(tab, index) == b) { |
2477 |
|
TreeNode<K,V> hd = null, tl = null; |
2493 |
|
} |
2494 |
|
|
2495 |
|
/** |
2496 |
< |
* Returns a list on non-TreeNodes replacing those in given list |
2496 |
> |
* Returns a list on non-TreeNodes replacing those in given list. |
2497 |
|
*/ |
2498 |
|
static <K,V> Node<K,V> untreeify(Node<K,V> b) { |
2499 |
|
Node<K,V> hd = null, tl = null; |
2631 |
|
} |
2632 |
|
|
2633 |
|
/** |
2634 |
< |
* Acquires write lock for tree restructuring |
2634 |
> |
* Acquires write lock for tree restructuring. |
2635 |
|
*/ |
2636 |
|
private final void lockRoot() { |
2637 |
|
if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER)) |
2639 |
|
} |
2640 |
|
|
2641 |
|
/** |
2642 |
< |
* Releases write lock for tree restructuring |
2642 |
> |
* Releases write lock for tree restructuring. |
2643 |
|
*/ |
2644 |
|
private final void unlockRoot() { |
2645 |
|
lockState = 0; |
2646 |
|
} |
2647 |
|
|
2648 |
|
/** |
2649 |
< |
* Possibly blocks awaiting root lock |
2649 |
> |
* Possibly blocks awaiting root lock. |
2650 |
|
*/ |
2651 |
|
private final void contendedLock() { |
2652 |
|
boolean waiting = false; |
2671 |
|
|
2672 |
|
/** |
2673 |
|
* Returns matching node or null if none. Tries to search |
2674 |
< |
* using tree compareisons from root, but continues linear |
2674 |
> |
* using tree comparisons from root, but continues linear |
2675 |
|
* search when lock not available. |
2676 |
|
*/ |
2677 |
|
final Node<K,V> find(int h, Object k) { |
2769 |
|
* that are accessible independently of lock. So instead we |
2770 |
|
* swap the tree linkages. |
2771 |
|
* |
2772 |
< |
* @return true if now too small so should be untreeified. |
2772 |
> |
* @return true if now too small, so should be untreeified |
2773 |
|
*/ |
2774 |
|
final boolean removeTreeNode(TreeNode<K,V> p) { |
2775 |
|
TreeNode<K,V> next = (TreeNode<K,V>)p.next; |
3164 |
|
|
3165 |
|
/** |
3166 |
|
* Base of key, value, and entry Iterators. Adds fields to |
3167 |
< |
* Traverser to support iterator.remove |
3167 |
> |
* Traverser to support iterator.remove. |
3168 |
|
*/ |
3169 |
|
static class BaseIterator<K,V> extends Traverser<K,V> { |
3170 |
|
final ConcurrentHashMapV8<K,V> map; |
3516 |
|
* of all (key, value) pairs |
3517 |
|
* @since 1.8 |
3518 |
|
*/ |
3519 |
< |
public double reduceToDoubleIn(long parallelismThreshold, |
3520 |
< |
ObjectByObjectToDouble<? super K, ? super V> transformer, |
3521 |
< |
double basis, |
3522 |
< |
DoubleByDoubleToDouble reducer) { |
3519 |
> |
public double reduceToDouble(long parallelismThreshold, |
3520 |
> |
ObjectByObjectToDouble<? super K, ? super V> transformer, |
3521 |
> |
double basis, |
3522 |
> |
DoubleByDoubleToDouble reducer) { |
3523 |
|
if (transformer == null || reducer == null) |
3524 |
|
throw new NullPointerException(); |
3525 |
|
return new MapReduceMappingsToDoubleTask<K,V> |
6004 |
|
} |
6005 |
|
|
6006 |
|
/** |
6007 |
< |
* Generates initial value for per-thread CounterHashCodes |
6007 |
> |
* Generates initial value for per-thread CounterHashCodes. |
6008 |
|
*/ |
6009 |
|
static final AtomicInteger counterHashCodeGenerator = new AtomicInteger(); |
6010 |
|
|