14 |
|
import java.util.Arrays; |
15 |
|
import java.util.Collection; |
16 |
|
import java.util.Comparator; |
17 |
- |
import java.util.ConcurrentModificationException; |
17 |
|
import java.util.Enumeration; |
18 |
|
import java.util.HashMap; |
19 |
|
import java.util.Hashtable; |
64 |
|
* that key reporting the updated value.) For aggregate operations |
65 |
|
* such as {@code putAll} and {@code clear}, concurrent retrievals may |
66 |
|
* reflect insertion or removal of only some entries. Similarly, |
67 |
< |
* Iterators and Enumerations return elements reflecting the state of |
68 |
< |
* the hash table at some point at or since the creation of the |
67 |
> |
* Iterators, Spliterators and Enumerations return elements reflecting the |
68 |
> |
* state of the hash table at some point at or since the creation of the |
69 |
|
* iterator/enumeration. They do <em>not</em> throw {@link |
70 |
< |
* ConcurrentModificationException}. However, iterators are designed |
71 |
< |
* to be used by only one thread at a time. Bear in mind that the |
72 |
< |
* results of aggregate status methods including {@code size}, {@code |
73 |
< |
* isEmpty}, and {@code containsValue} are typically useful only when |
74 |
< |
* a map is not undergoing concurrent updates in other threads. |
70 |
> |
* java.util.ConcurrentModificationException ConcurrentModificationException}. |
71 |
> |
* However, iterators are designed to be used by only one thread at a time. |
72 |
> |
* Bear in mind that the results of aggregate status methods including |
73 |
> |
* {@code size}, {@code isEmpty}, and {@code containsValue} are typically |
74 |
> |
* useful only when a map is not undergoing concurrent updates in other threads. |
75 |
|
* Otherwise the results of these methods reflect transient states |
76 |
|
* that may be adequate for monitoring or estimation purposes, but not |
77 |
|
* for program control. |
235 |
|
* @param <K> the type of keys maintained by this map |
236 |
|
* @param <V> the type of mapped values |
237 |
|
*/ |
238 |
< |
public class ConcurrentHashMap<K,V> extends AbstractMap<K,V> implements ConcurrentMap<K,V>, Serializable { |
238 |
> |
public class ConcurrentHashMap<K,V> extends AbstractMap<K,V> |
239 |
> |
implements ConcurrentMap<K,V>, Serializable { |
240 |
|
private static final long serialVersionUID = 7249069246763182397L; |
241 |
|
|
242 |
|
/* |
410 |
|
* related operations (which is the main reason we cannot use |
411 |
|
* existing collections such as TreeMaps). TreeBins contain |
412 |
|
* Comparable elements, but may contain others, as well as |
413 |
< |
* elements that are Comparable but not necessarily Comparable |
414 |
< |
* for the same T, so we cannot invoke compareTo among them. To |
415 |
< |
* handle this, the tree is ordered primarily by hash value, then |
416 |
< |
* by Comparable.compareTo order if applicable. On lookup at a |
417 |
< |
* node, if elements are not comparable or compare as 0 then both |
418 |
< |
* left and right children may need to be searched in the case of |
419 |
< |
* tied hash values. (This corresponds to the full list search |
420 |
< |
* that would be necessary if all elements were non-Comparable and |
421 |
< |
* had tied hashes.) The red-black balancing code is updated from |
422 |
< |
* pre-jdk-collections |
413 |
> |
* elements that are Comparable but not necessarily Comparable for |
414 |
> |
* the same T, so we cannot invoke compareTo among them. To handle |
415 |
> |
* this, the tree is ordered primarily by hash value, then by |
416 |
> |
* Comparable.compareTo order if applicable. On lookup at a node, |
417 |
> |
* if elements are not comparable or compare as 0 then both left |
418 |
> |
* and right children may need to be searched in the case of tied |
419 |
> |
* hash values. (This corresponds to the full list search that |
420 |
> |
* would be necessary if all elements were non-Comparable and had |
421 |
> |
* tied hashes.) On insertion, to keep a total ordering (or as |
422 |
> |
* close as is required here) across rebalancings, we compare |
423 |
> |
* classes and identityHashCodes as tie-breakers. The red-black |
424 |
> |
* balancing code is updated from pre-jdk-collections |
425 |
|
* (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) |
426 |
|
* based in turn on Cormen, Leiserson, and Rivest "Introduction to |
427 |
|
* Algorithms" (CLR). |
451 |
|
* unused "Segment" class that is instantiated in minimal form |
452 |
|
* only when serializing. |
453 |
|
* |
454 |
+ |
* Also, solely for compatibility with previous versions of this |
455 |
+ |
* class, it extends AbstractMap, even though all of its methods |
456 |
+ |
* are overridden, so it is just useless baggage. |
457 |
+ |
* |
458 |
|
* This file is organized to make things a little easier to follow |
459 |
|
* while reading than they might otherwise: First the main static |
460 |
|
* declarations and utilities, then fields, then main public |
1170 |
|
* operations. It does not support the {@code add} or |
1171 |
|
* {@code addAll} operations. |
1172 |
|
* |
1173 |
< |
* <p>The view's {@code iterator} is a "weakly consistent" iterator |
1174 |
< |
* that will never throw {@link ConcurrentModificationException}, |
1175 |
< |
* and guarantees to traverse elements as they existed upon |
1176 |
< |
* construction of the iterator, and may (but is not guaranteed to) |
1177 |
< |
* reflect any modifications subsequent to construction. |
1173 |
> |
* <p>The view's iterators and spliterators are |
1174 |
> |
* <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>. |
1175 |
> |
* |
1176 |
> |
* <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT}, |
1177 |
> |
* {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}. |
1178 |
|
* |
1179 |
|
* @return the set view |
1180 |
|
*/ |
1193 |
|
* {@code retainAll}, and {@code clear} operations. It does not |
1194 |
|
* support the {@code add} or {@code addAll} operations. |
1195 |
|
* |
1196 |
< |
* <p>The view's {@code iterator} is a "weakly consistent" iterator |
1197 |
< |
* that will never throw {@link ConcurrentModificationException}, |
1198 |
< |
* and guarantees to traverse elements as they existed upon |
1199 |
< |
* construction of the iterator, and may (but is not guaranteed to) |
1200 |
< |
* reflect any modifications subsequent to construction. |
1196 |
> |
* <p>The view's iterators and spliterators are |
1197 |
> |
* <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>. |
1198 |
> |
* |
1199 |
> |
* <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT} |
1200 |
> |
* and {@link Spliterator#NONNULL}. |
1201 |
|
* |
1202 |
|
* @return the collection view |
1203 |
|
*/ |
1215 |
|
* {@code removeAll}, {@code retainAll}, and {@code clear} |
1216 |
|
* operations. |
1217 |
|
* |
1218 |
< |
* <p>The view's {@code iterator} is a "weakly consistent" iterator |
1219 |
< |
* that will never throw {@link ConcurrentModificationException}, |
1220 |
< |
* and guarantees to traverse elements as they existed upon |
1221 |
< |
* construction of the iterator, and may (but is not guaranteed to) |
1222 |
< |
* reflect any modifications subsequent to construction. |
1218 |
> |
* <p>The view's iterators and spliterators are |
1219 |
> |
* <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>. |
1220 |
> |
* |
1221 |
> |
* <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT}, |
1222 |
> |
* {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}. |
1223 |
|
* |
1224 |
|
* @return the set view |
1225 |
|
*/ |
2077 |
|
* @param initialCapacity The implementation performs internal |
2078 |
|
* sizing to accommodate this many elements. |
2079 |
|
* @param <K> the element type of the returned set |
2080 |
+ |
* @return the new set |
2081 |
|
* @throws IllegalArgumentException if the initial capacity of |
2082 |
|
* elements is negative |
2076 |
- |
* @return the new set |
2083 |
|
* @since 1.8 |
2084 |
|
*/ |
2085 |
|
public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) { |
2626 |
|
p = pr; |
2627 |
|
else if ((pk = p.key) == k || (pk != null && k.equals(pk))) |
2628 |
|
return p; |
2629 |
< |
else if (pl == null && pr == null) |
2630 |
< |
break; |
2629 |
> |
else if (pl == null) |
2630 |
> |
p = pr; |
2631 |
> |
else if (pr == null) |
2632 |
> |
p = pl; |
2633 |
|
else if ((kc != null || |
2634 |
|
(kc = comparableClassFor(k)) != null) && |
2635 |
|
(dir = compareComparables(kc, k, pk)) != 0) |
2636 |
|
p = (dir < 0) ? pl : pr; |
2637 |
< |
else if (pl == null) |
2630 |
< |
p = pr; |
2631 |
< |
else if (pr == null || |
2632 |
< |
(q = pr.findTreeNode(h, k, kc)) == null) |
2633 |
< |
p = pl; |
2634 |
< |
else |
2637 |
> |
else if ((q = pr.findTreeNode(h, k, kc)) != null) |
2638 |
|
return q; |
2639 |
+ |
else |
2640 |
+ |
p = pl; |
2641 |
|
} while (p != null); |
2642 |
|
} |
2643 |
|
return null; |
2664 |
|
static final int READER = 4; // increment value for setting read lock |
2665 |
|
|
2666 |
|
/** |
2667 |
+ |
* Tie-breaking utility for ordering insertions when equal |
2668 |
+ |
* hashCodes and non-comparable. We don't require a total |
2669 |
+ |
* order, just a consistent insertion rule to maintain |
2670 |
+ |
* equivalence across rebalancings. Tie-breaking further than |
2671 |
+ |
* necessary simplifies testing a bit. |
2672 |
+ |
*/ |
2673 |
+ |
static int tieBreakOrder(Object a, Object b) { |
2674 |
+ |
int d; |
2675 |
+ |
if (a == null || b == null || |
2676 |
+ |
(d = a.getClass().getName(). |
2677 |
+ |
compareTo(b.getClass().getName())) == 0) |
2678 |
+ |
d = (System.identityHashCode(a) <= System.identityHashCode(b) ? |
2679 |
+ |
-1 : 1); |
2680 |
+ |
return d; |
2681 |
+ |
} |
2682 |
+ |
|
2683 |
+ |
/** |
2684 |
|
* Creates bin with initial set of nodes headed by b. |
2685 |
|
*/ |
2686 |
|
TreeBin(TreeNode<K,V> b) { |
2696 |
|
r = x; |
2697 |
|
} |
2698 |
|
else { |
2699 |
< |
Object key = x.key; |
2700 |
< |
int hash = x.hash; |
2699 |
> |
K k = x.key; |
2700 |
> |
int h = x.hash; |
2701 |
|
Class<?> kc = null; |
2702 |
|
for (TreeNode<K,V> p = r;;) { |
2703 |
|
int dir, ph; |
2704 |
< |
if ((ph = p.hash) > hash) |
2704 |
> |
K pk = p.key; |
2705 |
> |
if ((ph = p.hash) > h) |
2706 |
|
dir = -1; |
2707 |
< |
else if (ph < hash) |
2707 |
> |
else if (ph < h) |
2708 |
|
dir = 1; |
2709 |
< |
else if ((kc != null || |
2710 |
< |
(kc = comparableClassFor(key)) != null)) |
2711 |
< |
dir = compareComparables(kc, key, p.key); |
2712 |
< |
else |
2713 |
< |
dir = 0; |
2691 |
< |
TreeNode<K,V> xp = p; |
2709 |
> |
else if ((kc == null && |
2710 |
> |
(kc = comparableClassFor(k)) == null) || |
2711 |
> |
(dir = compareComparables(kc, k, pk)) == 0) |
2712 |
> |
dir = tieBreakOrder(k, pk); |
2713 |
> |
TreeNode<K,V> xp = p; |
2714 |
|
if ((p = (dir <= 0) ? p.left : p.right) == null) { |
2715 |
|
x.parent = xp; |
2716 |
|
if (dir <= 0) |
2724 |
|
} |
2725 |
|
} |
2726 |
|
this.root = r; |
2727 |
+ |
assert checkInvariants(root); |
2728 |
|
} |
2729 |
|
|
2730 |
|
/** |
2755 |
|
return; |
2756 |
|
} |
2757 |
|
} |
2758 |
< |
else if ((s | WAITER) == 0) { |
2758 |
> |
else if ((s & WAITER) == 0) { |
2759 |
|
if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) { |
2760 |
|
waiting = true; |
2761 |
|
waiter = Thread.currentThread(); |
2805 |
|
*/ |
2806 |
|
final TreeNode<K,V> putTreeVal(int h, K k, V v) { |
2807 |
|
Class<?> kc = null; |
2808 |
+ |
boolean searched = false; |
2809 |
|
for (TreeNode<K,V> p = root;;) { |
2810 |
< |
int dir, ph; K pk; TreeNode<K,V> q, pr; |
2810 |
> |
int dir, ph; K pk; |
2811 |
|
if (p == null) { |
2812 |
|
first = root = new TreeNode<K,V>(h, k, v, null, null); |
2813 |
|
break; |
2821 |
|
else if ((kc == null && |
2822 |
|
(kc = comparableClassFor(k)) == null) || |
2823 |
|
(dir = compareComparables(kc, k, pk)) == 0) { |
2824 |
< |
if (p.left == null) |
2825 |
< |
dir = 1; |
2826 |
< |
else if ((pr = p.right) == null || |
2827 |
< |
(q = pr.findTreeNode(h, k, kc)) == null) |
2828 |
< |
dir = -1; |
2829 |
< |
else |
2830 |
< |
return q; |
2824 |
> |
if (!searched) { |
2825 |
> |
TreeNode<K,V> q, ch; |
2826 |
> |
searched = true; |
2827 |
> |
if (((ch = p.left) != null && |
2828 |
> |
(q = ch.findTreeNode(h, k, kc)) != null) || |
2829 |
> |
((ch = p.right) != null && |
2830 |
> |
(q = ch.findTreeNode(h, k, kc)) != null)) |
2831 |
> |
return q; |
2832 |
> |
} |
2833 |
> |
dir = tieBreakOrder(k, pk); |
2834 |
|
} |
2835 |
+ |
|
2836 |
|
TreeNode<K,V> xp = p; |
2837 |
< |
if ((p = (dir < 0) ? p.left : p.right) == null) { |
2837 |
> |
if ((p = (dir <= 0) ? p.left : p.right) == null) { |
2838 |
|
TreeNode<K,V> x, f = first; |
2839 |
|
first = x = new TreeNode<K,V>(h, k, v, f, xp); |
2840 |
|
if (f != null) |
2841 |
|
f.prev = x; |
2842 |
< |
if (dir < 0) |
2842 |
> |
if (dir <= 0) |
2843 |
|
xp.left = x; |
2844 |
|
else |
2845 |
|
xp.right = x; |
4278 |
|
// implementations below rely on concrete classes supplying these |
4279 |
|
// abstract methods |
4280 |
|
/** |
4281 |
< |
* Returns a "weakly consistent" iterator that will never |
4282 |
< |
* throw {@link ConcurrentModificationException}, and |
4283 |
< |
* guarantees to traverse elements as they existed upon |
4284 |
< |
* construction of the iterator, and may (but is not |
4285 |
< |
* guaranteed to) reflect any modifications subsequent to |
4286 |
< |
* construction. |
4281 |
> |
* Returns an iterator over the elements in this collection. |
4282 |
> |
* |
4283 |
> |
* <p>The returned iterator is |
4284 |
> |
* <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>. |
4285 |
> |
* |
4286 |
> |
* @return an iterator over the elements in this collection |
4287 |
|
*/ |
4288 |
|
public abstract Iterator<E> iterator(); |
4289 |
|
public abstract boolean contains(Object o); |
4686 |
|
* Base class for bulk tasks. Repeats some fields and code from |
4687 |
|
* class Traverser, because we need to subclass CountedCompleter. |
4688 |
|
*/ |
4689 |
+ |
@SuppressWarnings("serial") |
4690 |
|
abstract static class BulkTask<K,V,R> extends CountedCompleter<R> { |
4691 |
|
Node<K,V>[] tab; // same as Traverser |
4692 |
|
Node<K,V> next; |