12 |
|
import java.io.Serializable; |
13 |
|
import java.lang.reflect.ParameterizedType; |
14 |
|
import java.lang.reflect.Type; |
15 |
+ |
import java.util.AbstractMap; |
16 |
|
import java.util.Arrays; |
17 |
|
import java.util.Collection; |
18 |
|
import java.util.Comparator; |
219 |
|
* @param <K> the type of keys maintained by this map |
220 |
|
* @param <V> the type of mapped values |
221 |
|
*/ |
222 |
< |
public class ConcurrentHashMapV8<K,V> |
222 |
> |
public class ConcurrentHashMapV8<K,V> extends AbstractMap<K,V> |
223 |
|
implements ConcurrentMap<K,V>, Serializable { |
224 |
|
private static final long serialVersionUID = 7249069246763182397L; |
225 |
|
|
300 |
|
* because they have negative hash fields and null key and value |
301 |
|
* fields. (These special nodes are either uncommon or transient, |
302 |
|
* so the impact of carrying around some unused fields is |
303 |
< |
* insignficant.) |
303 |
> |
* insignificant.) |
304 |
|
* |
305 |
|
* The table is lazily initialized to a power-of-two size upon the |
306 |
|
* first insertion. Each bin in the table normally contains a |
447 |
|
* related operations (which is the main reason we cannot use |
448 |
|
* existing collections such as TreeMaps). TreeBins contain |
449 |
|
* Comparable elements, but may contain others, as well as |
450 |
< |
* elements that are Comparable but not necessarily Comparable |
451 |
< |
* for the same T, so we cannot invoke compareTo among them. To |
452 |
< |
* handle this, the tree is ordered primarily by hash value, then |
453 |
< |
* by Comparable.compareTo order if applicable. On lookup at a |
454 |
< |
* node, if elements are not comparable or compare as 0 then both |
455 |
< |
* left and right children may need to be searched in the case of |
456 |
< |
* tied hash values. (This corresponds to the full list search |
457 |
< |
* that would be necessary if all elements were non-Comparable and |
458 |
< |
* had tied hashes.) The red-black balancing code is updated from |
459 |
< |
* pre-jdk-collections |
450 |
> |
* elements that are Comparable but not necessarily Comparable for |
451 |
> |
* the same T, so we cannot invoke compareTo among them. To handle |
452 |
> |
* this, the tree is ordered primarily by hash value, then by |
453 |
> |
* Comparable.compareTo order if applicable. On lookup at a node, |
454 |
> |
* if elements are not comparable or compare as 0 then both left |
455 |
> |
* and right children may need to be searched in the case of tied |
456 |
> |
* hash values. (This corresponds to the full list search that |
457 |
> |
* would be necessary if all elements were non-Comparable and had |
458 |
> |
* tied hashes.) On insertion, to keep a total ordering (or as |
459 |
> |
* close as is required here) across rebalancings, we compare |
460 |
> |
* classes and identityHashCodes as tie-breakers. The red-black |
461 |
> |
* balancing code is updated from pre-jdk-collections |
462 |
|
* (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) |
463 |
|
* based in turn on Cormen, Leiserson, and Rivest "Introduction to |
464 |
|
* Algorithms" (CLR). |
465 |
|
* |
466 |
|
* TreeBins also require an additional locking mechanism. While |
467 |
|
* list traversal is always possible by readers even during |
468 |
< |
* updates, tree traversal is not, mainly beause of tree-rotations |
468 |
> |
* updates, tree traversal is not, mainly because of tree-rotations |
469 |
|
* that may change the root node and/or its linkages. TreeBins |
470 |
|
* include a simple read-write lock mechanism parasitic on the |
471 |
|
* main bin-synchronization strategy: Structural adjustments |
488 |
|
* unused "Segment" class that is instantiated in minimal form |
489 |
|
* only when serializing. |
490 |
|
* |
491 |
+ |
* Also, solely for compatibility with previous versions of this |
492 |
+ |
* class, it extends AbstractMap, even though all of its methods |
493 |
+ |
* are overridden, so it is just useless baggage. |
494 |
+ |
* |
495 |
|
* This file is organized to make things a little easier to follow |
496 |
|
* while reading than they might otherwise: First the main static |
497 |
|
* declarations and utilities, then fields, then main public |
575 |
|
/* |
576 |
|
* Encodings for Node hash fields. See above for explanation. |
577 |
|
*/ |
578 |
< |
static final int MOVED = 0x8fffffff; // (-1) hash for forwarding nodes |
579 |
< |
static final int TREEBIN = 0x80000000; // hash for heads of treea |
580 |
< |
static final int RESERVED = 0x80000001; // hash for transient reservations |
578 |
> |
static final int MOVED = -1; // hash for forwarding nodes |
579 |
> |
static final int TREEBIN = -2; // hash for roots of trees |
580 |
> |
static final int RESERVED = -3; // hash for transient reservations |
581 |
|
static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash |
582 |
|
|
583 |
|
/** Number of CPUS, to place bounds on some sizings */ |
596 |
|
* Key-value entry. This class is never exported out as a |
597 |
|
* user-mutable Map.Entry (i.e., one supporting setValue; see |
598 |
|
* MapEntry below), but can be used for read-only traversals used |
599 |
< |
* in bulk tasks. Subclasses of Node with a negativehash field |
599 |
> |
* in bulk tasks. Subclasses of Node with a negative hash field |
600 |
|
* are special, and contain null keys and values (but are never |
601 |
|
* exported). Otherwise, keys and vals are never null. |
602 |
|
*/ |
604 |
|
final int hash; |
605 |
|
final K key; |
606 |
|
volatile V val; |
607 |
< |
Node<K,V> next; |
607 |
> |
volatile Node<K,V> next; |
608 |
|
|
609 |
|
Node(int hash, K key, V val, Node<K,V> next) { |
610 |
|
this.hash = hash; |
729 |
|
* errors by users, these checks must operate on local variables, |
730 |
|
* which accounts for some odd-looking inline assignments below. |
731 |
|
* Note that calls to setTabAt always occur within locked regions, |
732 |
< |
* and so do not need full volatile semantics, but still require |
733 |
< |
* ordering to maintain concurrent readability. |
732 |
> |
* and so in principle require only release ordering, not |
733 |
> |
* full volatile semantics, but are currently coded as volatile |
734 |
> |
* writes to be conservative. |
735 |
|
*/ |
736 |
|
|
737 |
|
@SuppressWarnings("unchecked") |
745 |
|
} |
746 |
|
|
747 |
|
static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) { |
748 |
< |
U.putOrderedObject(tab, ((long)i << ASHIFT) + ABASE, v); |
748 |
> |
U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v); |
749 |
|
} |
750 |
|
|
751 |
|
/* ---------------- Fields -------------- */ |
1366 |
|
* Saves the state of the {@code ConcurrentHashMapV8} instance to a |
1367 |
|
* stream (i.e., serializes it). |
1368 |
|
* @param s the stream |
1369 |
+ |
* @throws java.io.IOException if an I/O error occurs |
1370 |
|
* @serialData |
1371 |
|
* the key (Object) and value (Object) |
1372 |
|
* for each key-value mapping, followed by a null pair. |
1409 |
|
/** |
1410 |
|
* Reconstitutes the instance from a stream (that is, deserializes it). |
1411 |
|
* @param s the stream |
1412 |
+ |
* @throws ClassNotFoundException if the class of a serialized object |
1413 |
+ |
* could not be found |
1414 |
+ |
* @throws java.io.IOException if an I/O error occurs |
1415 |
|
*/ |
1416 |
|
private void readObject(java.io.ObjectInputStream s) |
1417 |
|
throws java.io.IOException, ClassNotFoundException { |
1446 |
|
int sz = (int)size; |
1447 |
|
n = tableSizeFor(sz + (sz >>> 1) + 1); |
1448 |
|
} |
1449 |
< |
@SuppressWarnings({"rawtypes","unchecked"}) |
1450 |
< |
Node<K,V>[] tab = (Node<K,V>[])new Node[n]; |
1449 |
> |
@SuppressWarnings("unchecked") |
1450 |
> |
Node<K,V>[] tab = (Node<K,V>[])new Node<?,?>[n]; |
1451 |
|
int mask = n - 1; |
1452 |
|
long added = 0L; |
1453 |
|
while (p != null) { |
2112 |
|
* |
2113 |
|
* @param initialCapacity The implementation performs internal |
2114 |
|
* sizing to accommodate this many elements. |
2115 |
+ |
* @return the new set |
2116 |
|
* @throws IllegalArgumentException if the initial capacity of |
2117 |
|
* elements is negative |
2105 |
– |
* @return the new set |
2118 |
|
* @since 1.8 |
2119 |
|
*/ |
2120 |
|
public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) { |
2152 |
|
} |
2153 |
|
|
2154 |
|
Node<K,V> find(int h, Object k) { |
2155 |
< |
Node<K,V> e; int n; |
2156 |
< |
Node<K,V>[] tab = nextTable; |
2157 |
< |
if (k != null && tab != null && (n = tab.length) > 0 && |
2158 |
< |
(e = tabAt(tab, (n - 1) & h)) != null) { |
2159 |
< |
do { |
2155 |
> |
// loop to avoid arbitrarily deep recursion on forwarding nodes |
2156 |
> |
outer: for (Node<K,V>[] tab = nextTable;;) { |
2157 |
> |
Node<K,V> e; int n; |
2158 |
> |
if (k == null || tab == null || (n = tab.length) == 0 || |
2159 |
> |
(e = tabAt(tab, (n - 1) & h)) == null) |
2160 |
> |
return null; |
2161 |
> |
for (;;) { |
2162 |
|
int eh; K ek; |
2163 |
|
if ((eh = e.hash) == h && |
2164 |
|
((ek = e.key) == k || (ek != null && k.equals(ek)))) |
2165 |
|
return e; |
2166 |
< |
if (eh < 0) |
2167 |
< |
return e.find(h, k); |
2168 |
< |
} while ((e = e.next) != null); |
2166 |
> |
if (eh < 0) { |
2167 |
> |
if (e instanceof ForwardingNode) { |
2168 |
> |
tab = ((ForwardingNode<K,V>)e).nextTable; |
2169 |
> |
continue outer; |
2170 |
> |
} |
2171 |
> |
else |
2172 |
> |
return e.find(h, k); |
2173 |
> |
} |
2174 |
> |
if ((e = e.next) == null) |
2175 |
> |
return null; |
2176 |
> |
} |
2177 |
|
} |
2156 |
– |
return null; |
2178 |
|
} |
2179 |
|
} |
2180 |
|
|
2205 |
|
try { |
2206 |
|
if ((tab = table) == null || tab.length == 0) { |
2207 |
|
int n = (sc > 0) ? sc : DEFAULT_CAPACITY; |
2208 |
< |
@SuppressWarnings({"rawtypes","unchecked"}) |
2209 |
< |
Node<K,V>[] nt = (Node<K,V>[])new Node[n]; |
2208 |
> |
@SuppressWarnings("unchecked") |
2209 |
> |
Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n]; |
2210 |
|
table = tab = nt; |
2211 |
|
sc = n - (n >>> 2); |
2212 |
|
} |
2297 |
|
if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) { |
2298 |
|
try { |
2299 |
|
if (table == tab) { |
2300 |
< |
@SuppressWarnings({"rawtypes","unchecked"}) |
2301 |
< |
Node<K,V>[] nt = (Node<K,V>[])new Node[n]; |
2300 |
> |
@SuppressWarnings("unchecked") |
2301 |
> |
Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n]; |
2302 |
|
table = nt; |
2303 |
|
sc = n - (n >>> 2); |
2304 |
|
} |
2325 |
|
stride = MIN_TRANSFER_STRIDE; // subdivide range |
2326 |
|
if (nextTab == null) { // initiating |
2327 |
|
try { |
2328 |
< |
@SuppressWarnings({"rawtypes","unchecked"}) |
2329 |
< |
Node<K,V>[] nt = (Node<K,V>[])new Node[n << 1]; |
2328 |
> |
@SuppressWarnings("unchecked") |
2329 |
> |
Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1]; |
2330 |
|
nextTab = nt; |
2331 |
|
} catch (Throwable ex) { // try to cope with OOME |
2332 |
|
sizeCtl = Integer.MAX_VALUE; |
2348 |
|
int nextn = nextTab.length; |
2349 |
|
ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab); |
2350 |
|
boolean advance = true; |
2351 |
+ |
boolean finishing = false; // to ensure sweep before committing nextTab |
2352 |
|
for (int i = 0, bound = 0;;) { |
2353 |
|
int nextIndex, nextBound, fh; Node<K,V> f; |
2354 |
|
while (advance) { |
2355 |
< |
if (--i >= bound) |
2355 |
> |
if (--i >= bound || finishing) |
2356 |
|
advance = false; |
2357 |
|
else if ((nextIndex = transferIndex) <= transferOrigin) { |
2358 |
|
i = -1; |
2368 |
|
} |
2369 |
|
} |
2370 |
|
if (i < 0 || i >= n || i + n >= nextn) { |
2371 |
+ |
if (finishing) { |
2372 |
+ |
nextTable = null; |
2373 |
+ |
table = nextTab; |
2374 |
+ |
sizeCtl = (n << 1) - (n >>> 1); |
2375 |
+ |
return; |
2376 |
+ |
} |
2377 |
|
for (int sc;;) { |
2378 |
|
if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, ++sc)) { |
2379 |
< |
if (sc == -1) { |
2380 |
< |
nextTable = null; |
2381 |
< |
table = nextTab; |
2382 |
< |
sizeCtl = (n << 1) - (n >>> 1); |
2383 |
< |
} |
2356 |
< |
return; |
2379 |
> |
if (sc != -1) |
2380 |
> |
return; |
2381 |
> |
finishing = advance = true; |
2382 |
> |
i = n; // recheck before commit |
2383 |
> |
break; |
2384 |
|
} |
2385 |
|
} |
2386 |
|
} |
2422 |
|
else |
2423 |
|
hn = new Node<K,V>(ph, pk, pv, hn); |
2424 |
|
} |
2425 |
+ |
setTabAt(nextTab, i, ln); |
2426 |
+ |
setTabAt(nextTab, i + n, hn); |
2427 |
+ |
setTabAt(tab, i, fwd); |
2428 |
+ |
advance = true; |
2429 |
|
} |
2430 |
|
else if (f instanceof TreeBin) { |
2431 |
|
TreeBin<K,V> t = (TreeBin<K,V>)f; |
2453 |
|
++hc; |
2454 |
|
} |
2455 |
|
} |
2456 |
< |
ln = (lc <= UNTREEIFY_THRESHOLD ? untreeify(lo) : |
2457 |
< |
(hc != 0) ? new TreeBin<K,V>(lo) : t); |
2458 |
< |
hn = (hc <= UNTREEIFY_THRESHOLD ? untreeify(hi) : |
2459 |
< |
(lc != 0) ? new TreeBin<K,V>(hi) : t); |
2456 |
> |
ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) : |
2457 |
> |
(hc != 0) ? new TreeBin<K,V>(lo) : t; |
2458 |
> |
hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) : |
2459 |
> |
(lc != 0) ? new TreeBin<K,V>(hi) : t; |
2460 |
> |
setTabAt(nextTab, i, ln); |
2461 |
> |
setTabAt(nextTab, i + n, hn); |
2462 |
> |
setTabAt(tab, i, fwd); |
2463 |
> |
advance = true; |
2464 |
|
} |
2430 |
– |
else |
2431 |
– |
ln = hn = null; |
2432 |
– |
setTabAt(nextTab, i, ln); |
2433 |
– |
setTabAt(nextTab, i + n, hn); |
2434 |
– |
setTabAt(tab, i, fwd); |
2435 |
– |
advance = true; |
2465 |
|
} |
2466 |
|
} |
2467 |
|
} |
2482 |
|
U.compareAndSwapInt(this, SIZECTL, sc, -2)) |
2483 |
|
transfer(tab, null); |
2484 |
|
} |
2485 |
< |
else if ((b = tabAt(tab, index)) != null) { |
2485 |
> |
else if ((b = tabAt(tab, index)) != null && b.hash >= 0) { |
2486 |
|
synchronized (b) { |
2487 |
|
if (tabAt(tab, index) == b) { |
2488 |
|
TreeNode<K,V> hd = null, tl = null; |
2504 |
|
} |
2505 |
|
|
2506 |
|
/** |
2507 |
< |
* Returns a list on non-TreeNodes replacing those in given list |
2507 |
> |
* Returns a list on non-TreeNodes replacing those in given list. |
2508 |
|
*/ |
2509 |
|
static <K,V> Node<K,V> untreeify(Node<K,V> b) { |
2510 |
|
Node<K,V> hd = null, tl = null; |
2557 |
|
p = pr; |
2558 |
|
else if ((pk = p.key) == k || (pk != null && k.equals(pk))) |
2559 |
|
return p; |
2560 |
< |
else if (pl == null && pr == null) |
2561 |
< |
break; |
2560 |
> |
else if (pl == null) |
2561 |
> |
p = pr; |
2562 |
> |
else if (pr == null) |
2563 |
> |
p = pl; |
2564 |
|
else if ((kc != null || |
2565 |
|
(kc = comparableClassFor(k)) != null) && |
2566 |
|
(dir = compareComparables(kc, k, pk)) != 0) |
2567 |
|
p = (dir < 0) ? pl : pr; |
2568 |
< |
else if (pl == null) |
2538 |
< |
p = pr; |
2539 |
< |
else if (pr == null || |
2540 |
< |
(q = pr.findTreeNode(h, k, kc)) == null) |
2541 |
< |
p = pl; |
2542 |
< |
else |
2568 |
> |
else if ((q = pr.findTreeNode(h, k, kc)) != null) |
2569 |
|
return q; |
2570 |
+ |
else |
2571 |
+ |
p = pl; |
2572 |
|
} while (p != null); |
2573 |
|
} |
2574 |
|
return null; |
2595 |
|
static final int READER = 4; // increment value for setting read lock |
2596 |
|
|
2597 |
|
/** |
2598 |
+ |
* Tie-breaking utility for ordering insertions when equal |
2599 |
+ |
* hashCodes and non-comparable. We don't require a total |
2600 |
+ |
* order, just a consistent insertion rule to maintain |
2601 |
+ |
* equivalence across rebalancings. Tie-breaking further than |
2602 |
+ |
* necessary simplifies testing a bit. |
2603 |
+ |
*/ |
2604 |
+ |
static int tieBreakOrder(Object a, Object b) { |
2605 |
+ |
int d; |
2606 |
+ |
if (a == null || b == null || |
2607 |
+ |
(d = a.getClass().getName(). |
2608 |
+ |
compareTo(b.getClass().getName())) == 0) |
2609 |
+ |
d = (System.identityHashCode(a) <= System.identityHashCode(b) ? |
2610 |
+ |
-1 : 1); |
2611 |
+ |
return d; |
2612 |
+ |
} |
2613 |
+ |
|
2614 |
+ |
/** |
2615 |
|
* Creates bin with initial set of nodes headed by b. |
2616 |
|
*/ |
2617 |
|
TreeBin(TreeNode<K,V> b) { |
2627 |
|
r = x; |
2628 |
|
} |
2629 |
|
else { |
2630 |
< |
Object key = x.key; |
2631 |
< |
int hash = x.hash; |
2630 |
> |
K k = x.key; |
2631 |
> |
int h = x.hash; |
2632 |
|
Class<?> kc = null; |
2633 |
|
for (TreeNode<K,V> p = r;;) { |
2634 |
|
int dir, ph; |
2635 |
< |
if ((ph = p.hash) > hash) |
2635 |
> |
K pk = p.key; |
2636 |
> |
if ((ph = p.hash) > h) |
2637 |
|
dir = -1; |
2638 |
< |
else if (ph < hash) |
2638 |
> |
else if (ph < h) |
2639 |
|
dir = 1; |
2640 |
< |
else if ((kc != null || |
2641 |
< |
(kc = comparableClassFor(key)) != null)) |
2642 |
< |
dir = compareComparables(kc, key, p.key); |
2643 |
< |
else |
2644 |
< |
dir = 0; |
2599 |
< |
TreeNode<K,V> xp = p; |
2640 |
> |
else if ((kc == null && |
2641 |
> |
(kc = comparableClassFor(k)) == null) || |
2642 |
> |
(dir = compareComparables(kc, k, pk)) == 0) |
2643 |
> |
dir = tieBreakOrder(k, pk); |
2644 |
> |
TreeNode<K,V> xp = p; |
2645 |
|
if ((p = (dir <= 0) ? p.left : p.right) == null) { |
2646 |
|
x.parent = xp; |
2647 |
|
if (dir <= 0) |
2655 |
|
} |
2656 |
|
} |
2657 |
|
this.root = r; |
2658 |
+ |
assert checkInvariants(root); |
2659 |
|
} |
2660 |
|
|
2661 |
|
/** |
2662 |
< |
* Acquires write lock for tree restructuring |
2662 |
> |
* Acquires write lock for tree restructuring. |
2663 |
|
*/ |
2664 |
|
private final void lockRoot() { |
2665 |
|
if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER)) |
2667 |
|
} |
2668 |
|
|
2669 |
|
/** |
2670 |
< |
* Releases write lock for tree restructuring |
2670 |
> |
* Releases write lock for tree restructuring. |
2671 |
|
*/ |
2672 |
|
private final void unlockRoot() { |
2673 |
|
lockState = 0; |
2674 |
|
} |
2675 |
|
|
2676 |
|
/** |
2677 |
< |
* Possibly blocks awaiting root lock |
2677 |
> |
* Possibly blocks awaiting root lock. |
2678 |
|
*/ |
2679 |
|
private final void contendedLock() { |
2680 |
|
boolean waiting = false; |
2686 |
|
return; |
2687 |
|
} |
2688 |
|
} |
2689 |
< |
else if ((s | WAITER) == 0) { |
2689 |
> |
else if ((s & WAITER) == 0) { |
2690 |
|
if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) { |
2691 |
|
waiting = true; |
2692 |
|
waiter = Thread.currentThread(); |
2699 |
|
|
2700 |
|
/** |
2701 |
|
* Returns matching node or null if none. Tries to search |
2702 |
< |
* using tree compareisons from root, but continues linear |
2702 |
> |
* using tree comparisons from root, but continues linear |
2703 |
|
* search when lock not available. |
2704 |
|
*/ |
2705 |
< |
final Node<K,V> find(int h, Object k) { |
2705 |
> |
final Node<K,V> find(int h, Object k) { |
2706 |
|
if (k != null) { |
2707 |
|
for (Node<K,V> e = first; e != null; e = e.next) { |
2708 |
|
int s; K ek; |
2739 |
|
*/ |
2740 |
|
final TreeNode<K,V> putTreeVal(int h, K k, V v) { |
2741 |
|
Class<?> kc = null; |
2742 |
+ |
boolean searched = false; |
2743 |
|
for (TreeNode<K,V> p = root;;) { |
2744 |
< |
int dir, ph; K pk; TreeNode<K,V> q, pr; |
2744 |
> |
int dir, ph; K pk; |
2745 |
|
if (p == null) { |
2746 |
|
first = root = new TreeNode<K,V>(h, k, v, null, null); |
2747 |
|
break; |
2755 |
|
else if ((kc == null && |
2756 |
|
(kc = comparableClassFor(k)) == null) || |
2757 |
|
(dir = compareComparables(kc, k, pk)) == 0) { |
2758 |
< |
if (p.left == null) |
2759 |
< |
dir = 1; |
2760 |
< |
else if ((pr = p.right) == null || |
2761 |
< |
(q = pr.findTreeNode(h, k, kc)) == null) |
2762 |
< |
dir = -1; |
2763 |
< |
else |
2764 |
< |
return q; |
2758 |
> |
if (!searched) { |
2759 |
> |
TreeNode<K,V> q, ch; |
2760 |
> |
searched = true; |
2761 |
> |
if (((ch = p.left) != null && |
2762 |
> |
(q = ch.findTreeNode(h, k, kc)) != null) || |
2763 |
> |
((ch = p.right) != null && |
2764 |
> |
(q = ch.findTreeNode(h, k, kc)) != null)) |
2765 |
> |
return q; |
2766 |
> |
} |
2767 |
> |
dir = tieBreakOrder(k, pk); |
2768 |
|
} |
2769 |
+ |
|
2770 |
|
TreeNode<K,V> xp = p; |
2771 |
< |
if ((p = (dir < 0) ? p.left : p.right) == null) { |
2771 |
> |
if ((p = (dir <= 0) ? p.left : p.right) == null) { |
2772 |
|
TreeNode<K,V> x, f = first; |
2773 |
|
first = x = new TreeNode<K,V>(h, k, v, f, xp); |
2774 |
|
if (f != null) |
2775 |
|
f.prev = x; |
2776 |
< |
if (dir < 0) |
2776 |
> |
if (dir <= 0) |
2777 |
|
xp.left = x; |
2778 |
|
else |
2779 |
|
xp.right = x; |
2802 |
|
* that are accessible independently of lock. So instead we |
2803 |
|
* swap the tree linkages. |
2804 |
|
* |
2805 |
< |
* @return true if now too small so should be untreeified. |
2805 |
> |
* @return true if now too small, so should be untreeified |
2806 |
|
*/ |
2807 |
|
final boolean removeTreeNode(TreeNode<K,V> p) { |
2808 |
|
TreeNode<K,V> next = (TreeNode<K,V>)p.next; |
3197 |
|
|
3198 |
|
/** |
3199 |
|
* Base of key, value, and entry Iterators. Adds fields to |
3200 |
< |
* Traverser to support iterator.remove |
3200 |
> |
* Traverser to support iterator.remove. |
3201 |
|
*/ |
3202 |
|
static class BaseIterator<K,V> extends Traverser<K,V> { |
3203 |
|
final ConcurrentHashMapV8<K,V> map; |
3549 |
|
* of all (key, value) pairs |
3550 |
|
* @since 1.8 |
3551 |
|
*/ |
3552 |
< |
public double reduceToDoubleIn(long parallelismThreshold, |
3553 |
< |
ObjectByObjectToDouble<? super K, ? super V> transformer, |
3554 |
< |
double basis, |
3555 |
< |
DoubleByDoubleToDouble reducer) { |
3552 |
> |
public double reduceToDouble(long parallelismThreshold, |
3553 |
> |
ObjectByObjectToDouble<? super K, ? super V> transformer, |
3554 |
> |
double basis, |
3555 |
> |
DoubleByDoubleToDouble reducer) { |
3556 |
|
if (transformer == null || reducer == null) |
3557 |
|
throw new NullPointerException(); |
3558 |
|
return new MapReduceMappingsToDoubleTask<K,V> |
6037 |
|
} |
6038 |
|
|
6039 |
|
/** |
6040 |
< |
* Generates initial value for per-thread CounterHashCodes |
6040 |
> |
* Generates initial value for per-thread CounterHashCodes. |
6041 |
|
*/ |
6042 |
|
static final AtomicInteger counterHashCodeGenerator = new AtomicInteger(); |
6043 |
|
|