12 |
|
import java.io.Serializable; |
13 |
|
import java.lang.reflect.ParameterizedType; |
14 |
|
import java.lang.reflect.Type; |
15 |
+ |
import java.util.AbstractMap; |
16 |
|
import java.util.Arrays; |
17 |
|
import java.util.Collection; |
18 |
|
import java.util.Comparator; |
219 |
|
* @param <K> the type of keys maintained by this map |
220 |
|
* @param <V> the type of mapped values |
221 |
|
*/ |
222 |
< |
public class ConcurrentHashMapV8<K,V> |
222 |
> |
public class ConcurrentHashMapV8<K,V> extends AbstractMap<K,V> |
223 |
|
implements ConcurrentMap<K,V>, Serializable { |
224 |
|
private static final long serialVersionUID = 7249069246763182397L; |
225 |
|
|
300 |
|
* because they have negative hash fields and null key and value |
301 |
|
* fields. (These special nodes are either uncommon or transient, |
302 |
|
* so the impact of carrying around some unused fields is |
303 |
< |
* insignficant.) |
303 |
> |
* insignificant.) |
304 |
|
* |
305 |
|
* The table is lazily initialized to a power-of-two size upon the |
306 |
|
* first insertion. Each bin in the table normally contains a |
447 |
|
* related operations (which is the main reason we cannot use |
448 |
|
* existing collections such as TreeMaps). TreeBins contain |
449 |
|
* Comparable elements, but may contain others, as well as |
450 |
< |
* elements that are Comparable but not necessarily Comparable |
451 |
< |
* for the same T, so we cannot invoke compareTo among them. To |
452 |
< |
* handle this, the tree is ordered primarily by hash value, then |
453 |
< |
* by Comparable.compareTo order if applicable. On lookup at a |
454 |
< |
* node, if elements are not comparable or compare as 0 then both |
455 |
< |
* left and right children may need to be searched in the case of |
456 |
< |
* tied hash values. (This corresponds to the full list search |
457 |
< |
* that would be necessary if all elements were non-Comparable and |
458 |
< |
* had tied hashes.) The red-black balancing code is updated from |
459 |
< |
* pre-jdk-collections |
450 |
> |
* elements that are Comparable but not necessarily Comparable for |
451 |
> |
* the same T, so we cannot invoke compareTo among them. To handle |
452 |
> |
* this, the tree is ordered primarily by hash value, then by |
453 |
> |
* Comparable.compareTo order if applicable. On lookup at a node, |
454 |
> |
* if elements are not comparable or compare as 0 then both left |
455 |
> |
* and right children may need to be searched in the case of tied |
456 |
> |
* hash values. (This corresponds to the full list search that |
457 |
> |
* would be necessary if all elements were non-Comparable and had |
458 |
> |
* tied hashes.) On insertion, to keep a total ordering (or as |
459 |
> |
* close as is required here) across rebalancings, we compare |
460 |
> |
* classes and identityHashCodes as tie-breakers. The red-black |
461 |
> |
* balancing code is updated from pre-jdk-collections |
462 |
|
* (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) |
463 |
|
* based in turn on Cormen, Leiserson, and Rivest "Introduction to |
464 |
|
* Algorithms" (CLR). |
465 |
|
* |
466 |
|
* TreeBins also require an additional locking mechanism. While |
467 |
|
* list traversal is always possible by readers even during |
468 |
< |
* updates, tree traversal is not, mainly beause of tree-rotations |
468 |
> |
* updates, tree traversal is not, mainly because of tree-rotations |
469 |
|
* that may change the root node and/or its linkages. TreeBins |
470 |
|
* include a simple read-write lock mechanism parasitic on the |
471 |
|
* main bin-synchronization strategy: Structural adjustments |
488 |
|
* unused "Segment" class that is instantiated in minimal form |
489 |
|
* only when serializing. |
490 |
|
* |
491 |
+ |
* Also, solely for compatibility with previous versions of this |
492 |
+ |
* class, it extends AbstractMap, even though all of its methods |
493 |
+ |
* are overridden, so it is just useless baggage. |
494 |
+ |
* |
495 |
|
* This file is organized to make things a little easier to follow |
496 |
|
* while reading than they might otherwise: First the main static |
497 |
|
* declarations and utilities, then fields, then main public |
575 |
|
/* |
576 |
|
* Encodings for Node hash fields. See above for explanation. |
577 |
|
*/ |
578 |
< |
static final int MOVED = 0x8fffffff; // (-1) hash for forwarding nodes |
579 |
< |
static final int TREEBIN = 0x80000000; // hash for heads of treea |
580 |
< |
static final int RESERVED = 0x80000001; // hash for transient reservations |
578 |
> |
static final int MOVED = -1; // hash for forwarding nodes |
579 |
> |
static final int TREEBIN = -2; // hash for roots of trees |
580 |
> |
static final int RESERVED = -3; // hash for transient reservations |
581 |
|
static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash |
582 |
|
|
583 |
|
/** Number of CPUS, to place bounds on some sizings */ |
596 |
|
* Key-value entry. This class is never exported out as a |
597 |
|
* user-mutable Map.Entry (i.e., one supporting setValue; see |
598 |
|
* MapEntry below), but can be used for read-only traversals used |
599 |
< |
* in bulk tasks. Subclasses of Node with a negativehash field |
599 |
> |
* in bulk tasks. Subclasses of Node with a negative hash field |
600 |
|
* are special, and contain null keys and values (but are never |
601 |
|
* exported). Otherwise, keys and vals are never null. |
602 |
|
*/ |
604 |
|
final int hash; |
605 |
|
final K key; |
606 |
|
volatile V val; |
607 |
< |
Node<K,V> next; |
607 |
> |
volatile Node<K,V> next; |
608 |
|
|
609 |
|
Node(int hash, K key, V val, Node<K,V> next) { |
610 |
|
this.hash = hash; |
729 |
|
* errors by users, these checks must operate on local variables, |
730 |
|
* which accounts for some odd-looking inline assignments below. |
731 |
|
* Note that calls to setTabAt always occur within locked regions, |
732 |
< |
* and so do not need full volatile semantics, but still require |
733 |
< |
* ordering to maintain concurrent readability. |
732 |
> |
* and so in principle require only release ordering, not |
733 |
> |
* full volatile semantics, but are currently coded as volatile |
734 |
> |
* writes to be conservative. |
735 |
|
*/ |
736 |
|
|
737 |
|
@SuppressWarnings("unchecked") |
745 |
|
} |
746 |
|
|
747 |
|
static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) { |
748 |
< |
U.putOrderedObject(tab, ((long)i << ASHIFT) + ABASE, v); |
748 |
> |
U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v); |
749 |
|
} |
750 |
|
|
751 |
|
/* ---------------- Fields -------------- */ |
2108 |
|
* |
2109 |
|
* @param initialCapacity The implementation performs internal |
2110 |
|
* sizing to accommodate this many elements. |
2111 |
+ |
* @return the new set |
2112 |
|
* @throws IllegalArgumentException if the initial capacity of |
2113 |
|
* elements is negative |
2105 |
– |
* @return the new set |
2114 |
|
* @since 1.8 |
2115 |
|
*/ |
2116 |
|
public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) { |
2148 |
|
} |
2149 |
|
|
2150 |
|
Node<K,V> find(int h, Object k) { |
2151 |
< |
Node<K,V> e; int n; |
2152 |
< |
Node<K,V>[] tab = nextTable; |
2153 |
< |
if (k != null && tab != null && (n = tab.length) > 0 && |
2154 |
< |
(e = tabAt(tab, (n - 1) & h)) != null) { |
2155 |
< |
do { |
2151 |
> |
// loop to avoid arbitrarily deep recursion on forwarding nodes |
2152 |
> |
outer: for (Node<K,V>[] tab = nextTable;;) { |
2153 |
> |
Node<K,V> e; int n; |
2154 |
> |
if (k == null || tab == null || (n = tab.length) == 0 || |
2155 |
> |
(e = tabAt(tab, (n - 1) & h)) == null) |
2156 |
> |
return null; |
2157 |
> |
for (;;) { |
2158 |
|
int eh; K ek; |
2159 |
|
if ((eh = e.hash) == h && |
2160 |
|
((ek = e.key) == k || (ek != null && k.equals(ek)))) |
2161 |
|
return e; |
2162 |
< |
if (eh < 0) |
2163 |
< |
return e.find(h, k); |
2164 |
< |
} while ((e = e.next) != null); |
2162 |
> |
if (eh < 0) { |
2163 |
> |
if (e instanceof ForwardingNode) { |
2164 |
> |
tab = ((ForwardingNode<K,V>)e).nextTable; |
2165 |
> |
continue outer; |
2166 |
> |
} |
2167 |
> |
else |
2168 |
> |
return e.find(h, k); |
2169 |
> |
} |
2170 |
> |
if ((e = e.next) == null) |
2171 |
> |
return null; |
2172 |
> |
} |
2173 |
|
} |
2156 |
– |
return null; |
2174 |
|
} |
2175 |
|
} |
2176 |
|
|
2344 |
|
int nextn = nextTab.length; |
2345 |
|
ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab); |
2346 |
|
boolean advance = true; |
2347 |
+ |
boolean finishing = false; // to ensure sweep before committing nextTab |
2348 |
|
for (int i = 0, bound = 0;;) { |
2349 |
|
int nextIndex, nextBound, fh; Node<K,V> f; |
2350 |
|
while (advance) { |
2351 |
< |
if (--i >= bound) |
2351 |
> |
if (--i >= bound || finishing) |
2352 |
|
advance = false; |
2353 |
|
else if ((nextIndex = transferIndex) <= transferOrigin) { |
2354 |
|
i = -1; |
2364 |
|
} |
2365 |
|
} |
2366 |
|
if (i < 0 || i >= n || i + n >= nextn) { |
2367 |
+ |
if (finishing) { |
2368 |
+ |
nextTable = null; |
2369 |
+ |
table = nextTab; |
2370 |
+ |
sizeCtl = (n << 1) - (n >>> 1); |
2371 |
+ |
return; |
2372 |
+ |
} |
2373 |
|
for (int sc;;) { |
2374 |
|
if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, ++sc)) { |
2375 |
< |
if (sc == -1) { |
2376 |
< |
nextTable = null; |
2377 |
< |
table = nextTab; |
2378 |
< |
sizeCtl = (n << 1) - (n >>> 1); |
2379 |
< |
} |
2356 |
< |
return; |
2375 |
> |
if (sc != -1) |
2376 |
> |
return; |
2377 |
> |
finishing = advance = true; |
2378 |
> |
i = n; // recheck before commit |
2379 |
> |
break; |
2380 |
|
} |
2381 |
|
} |
2382 |
|
} |
2418 |
|
else |
2419 |
|
hn = new Node<K,V>(ph, pk, pv, hn); |
2420 |
|
} |
2421 |
+ |
setTabAt(nextTab, i, ln); |
2422 |
+ |
setTabAt(nextTab, i + n, hn); |
2423 |
+ |
setTabAt(tab, i, fwd); |
2424 |
+ |
advance = true; |
2425 |
|
} |
2426 |
|
else if (f instanceof TreeBin) { |
2427 |
|
TreeBin<K,V> t = (TreeBin<K,V>)f; |
2449 |
|
++hc; |
2450 |
|
} |
2451 |
|
} |
2452 |
< |
ln = (lc <= UNTREEIFY_THRESHOLD ? untreeify(lo) : |
2453 |
< |
(hc != 0) ? new TreeBin<K,V>(lo) : t); |
2454 |
< |
hn = (hc <= UNTREEIFY_THRESHOLD ? untreeify(hi) : |
2455 |
< |
(lc != 0) ? new TreeBin<K,V>(hi) : t); |
2452 |
> |
ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) : |
2453 |
> |
(hc != 0) ? new TreeBin<K,V>(lo) : t; |
2454 |
> |
hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) : |
2455 |
> |
(lc != 0) ? new TreeBin<K,V>(hi) : t; |
2456 |
> |
setTabAt(nextTab, i, ln); |
2457 |
> |
setTabAt(nextTab, i + n, hn); |
2458 |
> |
setTabAt(tab, i, fwd); |
2459 |
> |
advance = true; |
2460 |
|
} |
2430 |
– |
else |
2431 |
– |
ln = hn = null; |
2432 |
– |
setTabAt(nextTab, i, ln); |
2433 |
– |
setTabAt(nextTab, i + n, hn); |
2434 |
– |
setTabAt(tab, i, fwd); |
2435 |
– |
advance = true; |
2461 |
|
} |
2462 |
|
} |
2463 |
|
} |
2478 |
|
U.compareAndSwapInt(this, SIZECTL, sc, -2)) |
2479 |
|
transfer(tab, null); |
2480 |
|
} |
2481 |
< |
else if ((b = tabAt(tab, index)) != null) { |
2481 |
> |
else if ((b = tabAt(tab, index)) != null && b.hash >= 0) { |
2482 |
|
synchronized (b) { |
2483 |
|
if (tabAt(tab, index) == b) { |
2484 |
|
TreeNode<K,V> hd = null, tl = null; |
2500 |
|
} |
2501 |
|
|
2502 |
|
/** |
2503 |
< |
* Returns a list on non-TreeNodes replacing those in given list |
2503 |
> |
* Returns a list of non-TreeNodes replacing those in given list. |
2504 |
|
*/ |
2505 |
|
static <K,V> Node<K,V> untreeify(Node<K,V> b) { |
2506 |
|
Node<K,V> hd = null, tl = null; |
2553 |
|
p = pr; |
2554 |
|
else if ((pk = p.key) == k || (pk != null && k.equals(pk))) |
2555 |
|
return p; |
2556 |
< |
else if (pl == null && pr == null) |
2557 |
< |
break; |
2556 |
> |
else if (pl == null) |
2557 |
> |
p = pr; |
2558 |
> |
else if (pr == null) |
2559 |
> |
p = pl; |
2560 |
|
else if ((kc != null || |
2561 |
|
(kc = comparableClassFor(k)) != null) && |
2562 |
|
(dir = compareComparables(kc, k, pk)) != 0) |
2563 |
|
p = (dir < 0) ? pl : pr; |
2564 |
< |
else if (pl == null) |
2538 |
< |
p = pr; |
2539 |
< |
else if (pr == null || |
2540 |
< |
(q = pr.findTreeNode(h, k, kc)) == null) |
2541 |
< |
p = pl; |
2542 |
< |
else |
2564 |
> |
else if ((q = pr.findTreeNode(h, k, kc)) != null) |
2565 |
|
return q; |
2566 |
+ |
else |
2567 |
+ |
p = pl; |
2568 |
|
} while (p != null); |
2569 |
|
} |
2570 |
|
return null; |
2591 |
|
static final int READER = 4; // increment value for setting read lock |
2592 |
|
|
2593 |
|
/** |
2594 |
+ |
* Tie-breaking utility for ordering insertions when equal |
2595 |
+ |
* hashCodes and non-comparable. We don't require a total |
2596 |
+ |
* order, just a consistent insertion rule to maintain |
2597 |
+ |
* equivalence across rebalancings. Tie-breaking further than |
2598 |
+ |
* necessary simplifies testing a bit. |
2599 |
+ |
*/ |
2600 |
+ |
static int tieBreakOrder(Object a, Object b) { |
2601 |
+ |
int d; |
2602 |
+ |
if (a == null || b == null || |
2603 |
+ |
(d = a.getClass().getName(). |
2604 |
+ |
compareTo(b.getClass().getName())) == 0) |
2605 |
+ |
d = (System.identityHashCode(a) <= System.identityHashCode(b) ? |
2606 |
+ |
-1 : 1); |
2607 |
+ |
return d; |
2608 |
+ |
} |
2609 |
+ |
|
2610 |
+ |
/** |
2611 |
|
* Creates bin with initial set of nodes headed by b. |
2612 |
|
*/ |
2613 |
|
TreeBin(TreeNode<K,V> b) { |
2623 |
|
r = x; |
2624 |
|
} |
2625 |
|
else { |
2626 |
< |
Object key = x.key; |
2627 |
< |
int hash = x.hash; |
2626 |
> |
K k = x.key; |
2627 |
> |
int h = x.hash; |
2628 |
|
Class<?> kc = null; |
2629 |
|
for (TreeNode<K,V> p = r;;) { |
2630 |
|
int dir, ph; |
2631 |
< |
if ((ph = p.hash) > hash) |
2631 |
> |
K pk = p.key; |
2632 |
> |
if ((ph = p.hash) > h) |
2633 |
|
dir = -1; |
2634 |
< |
else if (ph < hash) |
2634 |
> |
else if (ph < h) |
2635 |
|
dir = 1; |
2636 |
< |
else if ((kc != null || |
2637 |
< |
(kc = comparableClassFor(key)) != null)) |
2638 |
< |
dir = compareComparables(kc, key, p.key); |
2639 |
< |
else |
2640 |
< |
dir = 0; |
2599 |
< |
TreeNode<K,V> xp = p; |
2636 |
> |
else if ((kc == null && |
2637 |
> |
(kc = comparableClassFor(k)) == null) || |
2638 |
> |
(dir = compareComparables(kc, k, pk)) == 0) |
2639 |
> |
dir = tieBreakOrder(k, pk); |
2640 |
> |
TreeNode<K,V> xp = p; |
2641 |
|
if ((p = (dir <= 0) ? p.left : p.right) == null) { |
2642 |
|
x.parent = xp; |
2643 |
|
if (dir <= 0) |
2651 |
|
} |
2652 |
|
} |
2653 |
|
this.root = r; |
2654 |
+ |
assert checkInvariants(root); |
2655 |
|
} |
2656 |
|
|
2657 |
|
/** |
2658 |
< |
* Acquires write lock for tree restructuring |
2658 |
> |
* Acquires write lock for tree restructuring. |
2659 |
|
*/ |
2660 |
|
private final void lockRoot() { |
2661 |
|
if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER)) |
2663 |
|
} |
2664 |
|
|
2665 |
|
/** |
2666 |
< |
* Releases write lock for tree restructuring |
2666 |
> |
* Releases write lock for tree restructuring. |
2667 |
|
*/ |
2668 |
|
private final void unlockRoot() { |
2669 |
|
lockState = 0; |
2670 |
|
} |
2671 |
|
|
2672 |
|
/** |
2673 |
< |
* Possibly blocks awaiting root lock |
2673 |
> |
* Possibly blocks awaiting root lock. |
2674 |
|
*/ |
2675 |
|
private final void contendedLock() { |
2676 |
|
boolean waiting = false; |
2695 |
|
|
2696 |
|
/** |
2697 |
|
* Returns matching node or null if none. Tries to search |
2698 |
< |
* using tree compareisons from root, but continues linear |
2698 |
> |
* using tree comparisons from root, but continues linear |
2699 |
|
* search when lock not available. |
2700 |
|
*/ |
2701 |
< |
final Node<K,V> find(int h, Object k) { |
2701 |
> |
final Node<K,V> find(int h, Object k) { |
2702 |
|
if (k != null) { |
2703 |
|
for (Node<K,V> e = first; e != null; e = e.next) { |
2704 |
|
int s; K ek; |
2735 |
|
*/ |
2736 |
|
final TreeNode<K,V> putTreeVal(int h, K k, V v) { |
2737 |
|
Class<?> kc = null; |
2738 |
+ |
boolean searched = false; |
2739 |
|
for (TreeNode<K,V> p = root;;) { |
2740 |
< |
int dir, ph; K pk; TreeNode<K,V> q, pr; |
2740 |
> |
int dir, ph; K pk; |
2741 |
|
if (p == null) { |
2742 |
|
first = root = new TreeNode<K,V>(h, k, v, null, null); |
2743 |
|
break; |
2751 |
|
else if ((kc == null && |
2752 |
|
(kc = comparableClassFor(k)) == null) || |
2753 |
|
(dir = compareComparables(kc, k, pk)) == 0) { |
2754 |
< |
if (p.left == null) |
2755 |
< |
dir = 1; |
2756 |
< |
else if ((pr = p.right) == null || |
2757 |
< |
(q = pr.findTreeNode(h, k, kc)) == null) |
2758 |
< |
dir = -1; |
2759 |
< |
else |
2760 |
< |
return q; |
2754 |
> |
if (!searched) { |
2755 |
> |
TreeNode<K,V> q, ch; |
2756 |
> |
searched = true; |
2757 |
> |
if (((ch = p.left) != null && |
2758 |
> |
(q = ch.findTreeNode(h, k, kc)) != null) || |
2759 |
> |
((ch = p.right) != null && |
2760 |
> |
(q = ch.findTreeNode(h, k, kc)) != null)) |
2761 |
> |
return q; |
2762 |
> |
} |
2763 |
> |
dir = tieBreakOrder(k, pk); |
2764 |
|
} |
2765 |
+ |
|
2766 |
|
TreeNode<K,V> xp = p; |
2767 |
< |
if ((p = (dir < 0) ? p.left : p.right) == null) { |
2767 |
> |
if ((p = (dir <= 0) ? p.left : p.right) == null) { |
2768 |
|
TreeNode<K,V> x, f = first; |
2769 |
|
first = x = new TreeNode<K,V>(h, k, v, f, xp); |
2770 |
|
if (f != null) |
2771 |
|
f.prev = x; |
2772 |
< |
if (dir < 0) |
2772 |
> |
if (dir <= 0) |
2773 |
|
xp.left = x; |
2774 |
|
else |
2775 |
|
xp.right = x; |
2798 |
|
* that are accessible independently of lock. So instead we |
2799 |
|
* swap the tree linkages. |
2800 |
|
* |
2801 |
< |
* @return true if now too small so should be untreeified. |
2801 |
> |
* @return true if now too small, so should be untreeified |
2802 |
|
*/ |
2803 |
|
final boolean removeTreeNode(TreeNode<K,V> p) { |
2804 |
|
TreeNode<K,V> next = (TreeNode<K,V>)p.next; |
3193 |
|
|
3194 |
|
/** |
3195 |
|
* Base of key, value, and entry Iterators. Adds fields to |
3196 |
< |
* Traverser to support iterator.remove |
3196 |
> |
* Traverser to support iterator.remove. |
3197 |
|
*/ |
3198 |
|
static class BaseIterator<K,V> extends Traverser<K,V> { |
3199 |
|
final ConcurrentHashMapV8<K,V> map; |
3545 |
|
* of all (key, value) pairs |
3546 |
|
* @since 1.8 |
3547 |
|
*/ |
3548 |
< |
public double reduceToDoubleIn(long parallelismThreshold, |
3549 |
< |
ObjectByObjectToDouble<? super K, ? super V> transformer, |
3550 |
< |
double basis, |
3551 |
< |
DoubleByDoubleToDouble reducer) { |
3548 |
> |
public double reduceToDouble(long parallelismThreshold, |
3549 |
> |
ObjectByObjectToDouble<? super K, ? super V> transformer, |
3550 |
> |
double basis, |
3551 |
> |
DoubleByDoubleToDouble reducer) { |
3552 |
|
if (transformer == null || reducer == null) |
3553 |
|
throw new NullPointerException(); |
3554 |
|
return new MapReduceMappingsToDoubleTask<K,V> |
6033 |
|
} |
6034 |
|
|
6035 |
|
/** |
6036 |
< |
* Generates initial value for per-thread CounterHashCodes |
6036 |
> |
* Generates initial value for per-thread CounterHashCodes. |
6037 |
|
*/ |
6038 |
|
static final AtomicInteger counterHashCodeGenerator = new AtomicInteger(); |
6039 |
|
|