/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.  Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
25 |
|
26 |
package java.util; |
27 |
|
28 |
import java.io.IOException; |
29 |
import java.io.InvalidObjectException; |
30 |
import java.io.Serializable; |
31 |
import java.lang.reflect.ParameterizedType; |
32 |
import java.lang.reflect.Type; |
33 |
import java.util.function.BiConsumer; |
34 |
import java.util.function.BiFunction; |
35 |
import java.util.function.Consumer; |
36 |
import java.util.function.Function; |
37 |
|
38 |
/** |
39 |
* Hash table based implementation of the <tt>Map</tt> interface. This |
40 |
* implementation provides all of the optional map operations, and permits |
41 |
* <tt>null</tt> values and the <tt>null</tt> key. (The <tt>HashMap</tt> |
42 |
* class is roughly equivalent to <tt>Hashtable</tt>, except that it is |
43 |
* unsynchronized and permits nulls.) This class makes no guarantees as to |
44 |
* the order of the map; in particular, it does not guarantee that the order |
45 |
* will remain constant over time. |
46 |
* |
47 |
* <p>This implementation provides constant-time performance for the basic |
48 |
* operations (<tt>get</tt> and <tt>put</tt>), assuming the hash function |
49 |
* disperses the elements properly among the buckets. Iteration over |
50 |
* collection views requires time proportional to the "capacity" of the |
51 |
* <tt>HashMap</tt> instance (the number of buckets) plus its size (the number |
52 |
* of key-value mappings). Thus, it's very important not to set the initial |
53 |
* capacity too high (or the load factor too low) if iteration performance is |
54 |
* important. |
55 |
* |
56 |
* <p>An instance of <tt>HashMap</tt> has two parameters that affect its |
57 |
* performance: <i>initial capacity</i> and <i>load factor</i>. The |
58 |
* <i>capacity</i> is the number of buckets in the hash table, and the initial |
59 |
* capacity is simply the capacity at the time the hash table is created. The |
60 |
* <i>load factor</i> is a measure of how full the hash table is allowed to |
61 |
* get before its capacity is automatically increased. When the number of |
62 |
* entries in the hash table exceeds the product of the load factor and the |
63 |
* current capacity, the hash table is <i>rehashed</i> (that is, internal data |
64 |
* structures are rebuilt) so that the hash table has approximately twice the |
65 |
* number of buckets. |
66 |
* |
67 |
* <p>As a general rule, the default load factor (.75) offers a good |
68 |
* tradeoff between time and space costs. Higher values decrease the |
69 |
* space overhead but increase the lookup cost (reflected in most of |
70 |
* the operations of the <tt>HashMap</tt> class, including |
71 |
* <tt>get</tt> and <tt>put</tt>). The expected number of entries in |
72 |
* the map and its load factor should be taken into account when |
73 |
* setting its initial capacity, so as to minimize the number of |
74 |
* rehash operations. If the initial capacity is greater than the |
75 |
* maximum number of entries divided by the load factor, no rehash |
76 |
* operations will ever occur. |
77 |
* |
78 |
* <p>If many mappings are to be stored in a <tt>HashMap</tt> |
79 |
* instance, creating it with a sufficiently large capacity will allow |
80 |
* the mappings to be stored more efficiently than letting it perform |
81 |
* automatic rehashing as needed to grow the table. Note that using |
82 |
* many keys with the same {@code hashCode()} is a sure way to slow |
83 |
* down performance of any hash table. To ameliorate impact, when keys |
84 |
* are {@link Comparable}, this class may use comparison order among |
85 |
* keys to help break ties. |
86 |
* |
87 |
* <p><strong>Note that this implementation is not synchronized.</strong> |
88 |
* If multiple threads access a hash map concurrently, and at least one of |
89 |
* the threads modifies the map structurally, it <i>must</i> be |
90 |
* synchronized externally. (A structural modification is any operation |
91 |
* that adds or deletes one or more mappings; merely changing the value |
92 |
* associated with a key that an instance already contains is not a |
93 |
* structural modification.) This is typically accomplished by |
94 |
* synchronizing on some object that naturally encapsulates the map. |
95 |
* |
96 |
* If no such object exists, the map should be "wrapped" using the |
97 |
* {@link Collections#synchronizedMap Collections.synchronizedMap} |
98 |
* method. This is best done at creation time, to prevent accidental |
99 |
* unsynchronized access to the map:<pre> |
100 |
* Map m = Collections.synchronizedMap(new HashMap(...));</pre> |
101 |
* |
102 |
* <p>The iterators returned by all of this class's "collection view methods" |
103 |
* are <i>fail-fast</i>: if the map is structurally modified at any time after |
104 |
* the iterator is created, in any way except through the iterator's own |
105 |
* <tt>remove</tt> method, the iterator will throw a |
106 |
* {@link ConcurrentModificationException}. Thus, in the face of concurrent |
107 |
* modification, the iterator fails quickly and cleanly, rather than risking |
108 |
* arbitrary, non-deterministic behavior at an undetermined time in the |
109 |
* future. |
110 |
* |
111 |
* <p>Note that the fail-fast behavior of an iterator cannot be guaranteed |
112 |
* as it is, generally speaking, impossible to make any hard guarantees in the |
113 |
* presence of unsynchronized concurrent modification. Fail-fast iterators |
114 |
* throw <tt>ConcurrentModificationException</tt> on a best-effort basis. |
115 |
* Therefore, it would be wrong to write a program that depended on this |
116 |
* exception for its correctness: <i>the fail-fast behavior of iterators |
117 |
* should be used only to detect bugs.</i> |
118 |
* |
119 |
* <p>This class is a member of the |
120 |
* <a href="{@docRoot}/../technotes/guides/collections/index.html"> |
121 |
* Java Collections Framework</a>. |
122 |
* |
123 |
* @param <K> the type of keys maintained by this map |
124 |
* @param <V> the type of mapped values |
125 |
* |
126 |
* @author Doug Lea |
127 |
* @author Josh Bloch |
128 |
* @author Arthur van Hoff |
129 |
* @author Neal Gafter |
130 |
* @see Object#hashCode() |
131 |
* @see Collection |
132 |
* @see Map |
133 |
* @see TreeMap |
134 |
* @see Hashtable |
135 |
* @since 1.2 |
136 |
*/ |
137 |
public class HashMap<K,V> extends AbstractMap<K,V> |
138 |
implements Map<K,V>, Cloneable, Serializable { |
139 |
|
140 |
private static final long serialVersionUID = 362498820763181265L; |
141 |
|
142 |
/* |
143 |
* Implementation notes. |
144 |
* |
145 |
* This map usually acts as a binned (bucketed) hash table, but |
146 |
* when bins get too large, they are transformed into bins of |
147 |
* TreeNodes, each structured similarly to those in |
148 |
* java.util.TreeMap. Most methods try to use normal bins, but |
149 |
* relay to TreeNode methods when applicable (simply by checking |
150 |
* instanceof a node). Bins of TreeNodes may be traversed and |
151 |
* used like any others, but additionally support faster lookup |
152 |
* when overpopulated. However, since the vast majority of bins in |
153 |
* normal use are not overpopulated, checking for existence of |
154 |
* tree bins may be delayed in the course of table methods. |
155 |
* |
156 |
* Tree bins (i.e., bins whose elements are all TreeNodes) are |
157 |
* ordered primarily by hashCode, but in the case of ties, if two |
158 |
* elements are of the same "class C implements Comparable<C>", |
159 |
* type then their compareTo method is used for ordering. (We |
160 |
* conservatively check generic types via reflection to validate |
161 |
* this -- see method comparableClassFor). The added complexity |
162 |
* of tree bins is worthwhile in providing worst-case O(log n) |
163 |
* operations when keys either have distinct hashes or are |
164 |
* orderable, Thus, performance degrades gracefully under |
165 |
* accidental or malicious usages in which hashCode() methods |
166 |
* return values that are poorly distributed, as well as those in |
167 |
* which many keys share a hashCode, so long as they are also |
168 |
* Comparable. (If neither of these apply, we may waste about a |
169 |
* factor of two in time and space compared to taking no |
170 |
* precautions. But the only known cases stem from poor user |
171 |
* programming practices that are already so slow that this makes |
172 |
* little difference.) |
173 |
* |
174 |
* Because TreeNodes are about twice the size of regular nodes, we |
175 |
* use them only when bins contain enough nodes to warrant use |
176 |
* (see TREEIFY_THRESHOLD). And when they become too small (due to |
177 |
* removal or resizing) they are converted back to plain bins. In |
178 |
* usages with well-distributed user hashCodes, tree bins are |
179 |
* rarely used. Ideally, under random hashCodes, the frequency of |
180 |
* nodes in bins follows a Poisson distribution |
181 |
* (http://en.wikipedia.org/wiki/Poisson_distribution) with a |
182 |
* parameter of about 0.5 on average for the default resizing |
183 |
* threshold of 0.75, although with a large variance because of |
184 |
* resizing granularity. Ignoring variance, the expected |
185 |
* occurrences of list size k are (exp(-0.5) * pow(0.5, k) / |
186 |
* factorial(k)). The first values are: |
187 |
* |
188 |
* 0: 0.60653066 |
189 |
* 1: 0.30326533 |
190 |
* 2: 0.07581633 |
191 |
* 3: 0.01263606 |
192 |
* 4: 0.00157952 |
193 |
* 5: 0.00015795 |
194 |
* 6: 0.00001316 |
195 |
* 7: 0.00000094 |
196 |
* 8: 0.00000006 |
197 |
* more: less than 1 in ten million |
198 |
* |
199 |
* The root of a tree bin is normally its first node. However, |
200 |
* sometimes (currently only upon Iterator.remove), the root might |
201 |
* be elsewhere, but can be recovered following parent links |
202 |
* (method TreeNode.root()). |
203 |
* |
204 |
* All applicable internal methods accept a hash code as an |
205 |
* argument (as normally supplied from a public method), allowing |
206 |
* them to call each other without recomputing user hashCodes. |
207 |
* Most internal methods also accept a "tab" argument, that is |
208 |
* normally the current table, but may be a new or old one when |
209 |
* resizing or converting. |
210 |
* |
211 |
* When bin lists are treeified, split, or untreeified, we keep |
212 |
* them in the same relative access/traversal order (i.e., field |
213 |
* Node.next) to better preserve locality, and to slightly |
214 |
* simplify handling of splits and traversals that invoke |
215 |
* iterator.remove. When using comparators on insertion, to keep a |
216 |
* total ordering (or as close as is required here) across |
217 |
* rebalancings, we compare classes and identityHashCodes as |
218 |
* tie-breakers. |
219 |
* |
220 |
* The use and transitions among plain vs tree modes is |
221 |
* complicated by the existence of subclass LinkedHashMap. See |
222 |
* below for hook methods defined to be invoked upon insertion, |
223 |
* removal and access that allow LinkedHashMap internals to |
224 |
* otherwise remain independent of these mechanics. (This also |
225 |
* requires that a map instance be passed to some utility methods |
226 |
* that may create new nodes.) |
227 |
* |
228 |
* The concurrent-programming-like SSA-based coding style helps |
229 |
* avoid aliasing errors amid all of the twisty pointer operations. |
230 |
*/ |
231 |
|
232 |
/** |
233 |
* The default initial capacity - MUST be a power of two. |
234 |
*/ |
235 |
static final int DEFAULT_INITIAL_CAPACITY = 1 << 4; // aka 16 |
236 |
|
237 |
/** |
238 |
* The maximum capacity, used if a higher value is implicitly specified |
239 |
* by either of the constructors with arguments. |
240 |
* MUST be a power of two <= 1<<30. |
241 |
*/ |
242 |
static final int MAXIMUM_CAPACITY = 1 << 30; |
243 |
|
244 |
/** |
245 |
* The load factor used when none specified in constructor. |
246 |
*/ |
247 |
static final float DEFAULT_LOAD_FACTOR = 0.75f; |
248 |
|
249 |
/** |
250 |
* The bin count threshold for using a tree rather than list for a |
251 |
* bin. Bins are converted to trees when adding an element to a |
252 |
* bin with at least this many nodes. The value must be greater |
253 |
* than 2 and should be at least 8 to mesh with assumptions in |
254 |
* tree removal about conversion back to plain bins upon |
255 |
* shrinkage. |
256 |
*/ |
257 |
static final int TREEIFY_THRESHOLD = 8; |
258 |
|
259 |
/** |
260 |
* The bin count threshold for untreeifying a (split) bin during a |
261 |
* resize operation. Should be less than TREEIFY_THRESHOLD, and at |
262 |
* most 6 to mesh with shrinkage detection under removal. |
263 |
*/ |
264 |
static final int UNTREEIFY_THRESHOLD = 6; |
265 |
|
266 |
/** |
267 |
* The smallest table capacity for which bins may be treeified. |
268 |
* (Otherwise the table is resized if too many nodes in a bin.) |
269 |
* Should be at least 4 * TREEIFY_THRESHOLD to avoid conflicts |
270 |
* between resizing and treeification thresholds. |
271 |
*/ |
272 |
static final int MIN_TREEIFY_CAPACITY = 64; |
273 |
|
274 |
/** |
275 |
* Basic hash bin node, used for most entries. (See below for |
276 |
* TreeNode subclass, and in LinkedHashMap for its Entry subclass.) |
277 |
*/ |
278 |
static class Node<K,V> implements Map.Entry<K,V> { |
279 |
final int hash; |
280 |
final K key; |
281 |
V value; |
282 |
Node<K,V> next; |
283 |
|
284 |
Node(int hash, K key, V value, Node<K,V> next) { |
285 |
this.hash = hash; |
286 |
this.key = key; |
287 |
this.value = value; |
288 |
this.next = next; |
289 |
} |
290 |
|
291 |
public final K getKey() { return key; } |
292 |
public final V getValue() { return value; } |
293 |
public final String toString() { return key + "=" + value; } |
294 |
|
295 |
public final int hashCode() { |
296 |
return Objects.hashCode(key) ^ Objects.hashCode(value); |
297 |
} |
298 |
|
299 |
public final V setValue(V newValue) { |
300 |
V oldValue = value; |
301 |
value = newValue; |
302 |
return oldValue; |
303 |
} |
304 |
|
305 |
public final boolean equals(Object o) { |
306 |
if (o == this) |
307 |
return true; |
308 |
if (o instanceof Map.Entry) { |
309 |
Map.Entry<?,?> e = (Map.Entry<?,?>)o; |
310 |
if (Objects.equals(key, e.getKey()) && |
311 |
Objects.equals(value, e.getValue())) |
312 |
return true; |
313 |
} |
314 |
return false; |
315 |
} |
316 |
} |
317 |
|
318 |
/* ---------------- Static utilities -------------- */ |
319 |
|
320 |
/** |
321 |
* Computes key.hashCode() and spreads (XORs) higher bits of hash |
322 |
* to lower. Because the table uses power-of-two masking, sets of |
323 |
* hashes that vary only in bits above the current mask will |
324 |
* always collide. (Among known examples are sets of Float keys |
325 |
* holding consecutive whole numbers in small tables.) So we |
326 |
* apply a transform that spreads the impact of higher bits |
327 |
* downward. There is a tradeoff between speed, utility, and |
328 |
* quality of bit-spreading. Because many common sets of hashes |
329 |
* are already reasonably distributed (so don't benefit from |
330 |
* spreading), and because we use trees to handle large sets of |
331 |
* collisions in bins, we just XOR some shifted bits in the |
332 |
* cheapest possible way to reduce systematic lossage, as well as |
333 |
* to incorporate impact of the highest bits that would otherwise |
334 |
* never be used in index calculations because of table bounds. |
335 |
*/ |
336 |
static final int hash(Object key) { |
337 |
int h; |
338 |
return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16); |
339 |
} |
340 |
|
341 |
/** |
342 |
* Returns x's Class if it is of the form "class C implements |
343 |
* Comparable<C>", else null. |
344 |
*/ |
345 |
static Class<?> comparableClassFor(Object x) { |
346 |
if (x instanceof Comparable) { |
347 |
Class<?> c; Type[] ts, as; Type t; ParameterizedType p; |
348 |
if ((c = x.getClass()) == String.class) // bypass checks |
349 |
return c; |
350 |
if ((ts = c.getGenericInterfaces()) != null) { |
351 |
for (int i = 0; i < ts.length; ++i) { |
352 |
if (((t = ts[i]) instanceof ParameterizedType) && |
353 |
((p = (ParameterizedType)t).getRawType() == |
354 |
Comparable.class) && |
355 |
(as = p.getActualTypeArguments()) != null && |
356 |
as.length == 1 && as[0] == c) // type arg is c |
357 |
return c; |
358 |
} |
359 |
} |
360 |
} |
361 |
return null; |
362 |
} |
363 |
|
364 |
/** |
365 |
* Returns k.compareTo(x) if x matches kc (k's screened comparable |
366 |
* class), else 0. |
367 |
*/ |
368 |
@SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable |
369 |
static int compareComparables(Class<?> kc, Object k, Object x) { |
370 |
return (x == null || x.getClass() != kc ? 0 : |
371 |
((Comparable)k).compareTo(x)); |
372 |
} |
373 |
|
374 |
/** |
375 |
* Returns a power of two size for the given target capacity. |
376 |
*/ |
377 |
static final int tableSizeFor(int cap) { |
378 |
int n = cap - 1; |
379 |
n |= n >>> 1; |
380 |
n |= n >>> 2; |
381 |
n |= n >>> 4; |
382 |
n |= n >>> 8; |
383 |
n |= n >>> 16; |
384 |
return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1; |
385 |
} |
386 |
|
387 |
/* ---------------- Fields -------------- */ |
388 |
|
389 |
/** |
390 |
* The table, initialized on first use, and resized as |
391 |
* necessary. When allocated, length is always a power of two. |
392 |
* (We also tolerate length zero in some operations to allow |
393 |
* bootstrapping mechanics that are currently not needed.) |
394 |
*/ |
395 |
transient Node<K,V>[] table; |
396 |
|
397 |
/** |
398 |
* Holds cached entrySet(). Note that AbstractMap fields are used |
399 |
* for keySet() and values(). |
400 |
*/ |
401 |
transient Set<Map.Entry<K,V>> entrySet; |
402 |
|
403 |
/** |
404 |
* The number of key-value mappings contained in this map. |
405 |
*/ |
406 |
transient int size; |
407 |
|
408 |
/** |
409 |
* The number of times this HashMap has been structurally modified |
410 |
* Structural modifications are those that change the number of mappings in |
411 |
* the HashMap or otherwise modify its internal structure (e.g., |
412 |
* rehash). This field is used to make iterators on Collection-views of |
413 |
* the HashMap fail-fast. (See ConcurrentModificationException). |
414 |
*/ |
415 |
transient int modCount; |
416 |
|
417 |
/** |
418 |
* The next size value at which to resize (capacity * load factor). |
419 |
* |
420 |
* @serial |
421 |
*/ |
422 |
// (The javadoc description is true upon serialization. |
423 |
// Additionally, if the table array has not been allocated, this |
424 |
// field holds the initial array capacity, or zero signifying |
425 |
// DEFAULT_INITIAL_CAPACITY.) |
426 |
int threshold; |
427 |
|
428 |
/** |
429 |
* The load factor for the hash table. |
430 |
* |
431 |
* @serial |
432 |
*/ |
433 |
final float loadFactor; |
434 |
|
435 |
/* ---------------- Public operations -------------- */ |
436 |
|
437 |
/** |
438 |
* Constructs an empty <tt>HashMap</tt> with the specified initial |
439 |
* capacity and load factor. |
440 |
* |
441 |
* @param initialCapacity the initial capacity |
442 |
* @param loadFactor the load factor |
443 |
* @throws IllegalArgumentException if the initial capacity is negative |
444 |
* or the load factor is nonpositive |
445 |
*/ |
446 |
public HashMap(int initialCapacity, float loadFactor) { |
447 |
if (initialCapacity < 0) |
448 |
throw new IllegalArgumentException("Illegal initial capacity: " + |
449 |
initialCapacity); |
450 |
if (initialCapacity > MAXIMUM_CAPACITY) |
451 |
initialCapacity = MAXIMUM_CAPACITY; |
452 |
if (loadFactor <= 0 || Float.isNaN(loadFactor)) |
453 |
throw new IllegalArgumentException("Illegal load factor: " + |
454 |
loadFactor); |
455 |
this.loadFactor = loadFactor; |
456 |
this.threshold = tableSizeFor(initialCapacity); |
457 |
} |
458 |
|
459 |
/** |
460 |
* Constructs an empty <tt>HashMap</tt> with the specified initial |
461 |
* capacity and the default load factor (0.75). |
462 |
* |
463 |
* @param initialCapacity the initial capacity. |
464 |
* @throws IllegalArgumentException if the initial capacity is negative. |
465 |
*/ |
466 |
public HashMap(int initialCapacity) { |
467 |
this(initialCapacity, DEFAULT_LOAD_FACTOR); |
468 |
} |
469 |
|
470 |
/** |
471 |
* Constructs an empty <tt>HashMap</tt> with the default initial capacity |
472 |
* (16) and the default load factor (0.75). |
473 |
*/ |
474 |
public HashMap() { |
475 |
this.loadFactor = DEFAULT_LOAD_FACTOR; // all other fields defaulted |
476 |
} |
477 |
|
478 |
/** |
479 |
* Constructs a new <tt>HashMap</tt> with the same mappings as the |
480 |
* specified <tt>Map</tt>. The <tt>HashMap</tt> is created with |
481 |
* default load factor (0.75) and an initial capacity sufficient to |
482 |
* hold the mappings in the specified <tt>Map</tt>. |
483 |
* |
484 |
* @param m the map whose mappings are to be placed in this map |
485 |
* @throws NullPointerException if the specified map is null |
486 |
*/ |
487 |
public HashMap(Map<? extends K, ? extends V> m) { |
488 |
this.loadFactor = DEFAULT_LOAD_FACTOR; |
489 |
putMapEntries(m, false); |
490 |
} |
491 |
|
492 |
/** |
493 |
* Implements Map.putAll and Map constructor |
494 |
* |
495 |
* @param m the map |
496 |
* @param evict false when initially constructing this map, else |
497 |
* true (relayed to method afterNodeInsertion). |
498 |
*/ |
499 |
final void putMapEntries(Map<? extends K, ? extends V> m, boolean evict) { |
500 |
int s = m.size(); |
501 |
if (s > 0) { |
502 |
if (table == null) { // pre-size |
503 |
float ft = ((float)s / loadFactor) + 1.0F; |
504 |
int t = ((ft < (float)MAXIMUM_CAPACITY) ? |
505 |
(int)ft : MAXIMUM_CAPACITY); |
506 |
if (t > threshold) |
507 |
threshold = tableSizeFor(t); |
508 |
} |
509 |
else if (s > threshold) |
510 |
resize(); |
511 |
for (Map.Entry<? extends K, ? extends V> e : m.entrySet()) { |
512 |
K key = e.getKey(); |
513 |
V value = e.getValue(); |
514 |
putVal(hash(key), key, value, false, evict); |
515 |
} |
516 |
} |
517 |
} |
518 |
|
519 |
/** |
520 |
* Returns the number of key-value mappings in this map. |
521 |
* |
522 |
* @return the number of key-value mappings in this map |
523 |
*/ |
524 |
public int size() { |
525 |
return size; |
526 |
} |
527 |
|
528 |
/** |
529 |
* Returns <tt>true</tt> if this map contains no key-value mappings. |
530 |
* |
531 |
* @return <tt>true</tt> if this map contains no key-value mappings |
532 |
*/ |
533 |
public boolean isEmpty() { |
534 |
return size == 0; |
535 |
} |
536 |
|
537 |
/** |
538 |
* Returns the value to which the specified key is mapped, |
539 |
* or {@code null} if this map contains no mapping for the key. |
540 |
* |
541 |
* <p>More formally, if this map contains a mapping from a key |
542 |
* {@code k} to a value {@code v} such that {@code (key==null ? k==null : |
543 |
* key.equals(k))}, then this method returns {@code v}; otherwise |
544 |
* it returns {@code null}. (There can be at most one such mapping.) |
545 |
* |
546 |
* <p>A return value of {@code null} does not <i>necessarily</i> |
547 |
* indicate that the map contains no mapping for the key; it's also |
548 |
* possible that the map explicitly maps the key to {@code null}. |
549 |
* The {@link #containsKey containsKey} operation may be used to |
550 |
* distinguish these two cases. |
551 |
* |
552 |
* @see #put(Object, Object) |
553 |
*/ |
554 |
public V get(Object key) { |
555 |
Node<K,V> e; |
556 |
return (e = getNode(hash(key), key)) == null ? null : e.value; |
557 |
} |
558 |
|
559 |
/** |
560 |
* Implements Map.get and related methods |
561 |
* |
562 |
* @param hash hash for key |
563 |
* @param key the key |
564 |
* @return the node, or null if none |
565 |
*/ |
566 |
final Node<K,V> getNode(int hash, Object key) { |
567 |
Node<K,V>[] tab; Node<K,V> first, e; int n; K k; |
568 |
if ((tab = table) != null && (n = tab.length) > 0 && |
569 |
(first = tab[(n - 1) & hash]) != null) { |
570 |
if (first.hash == hash && // always check first node |
571 |
((k = first.key) == key || (key != null && key.equals(k)))) |
572 |
return first; |
573 |
if ((e = first.next) != null) { |
574 |
if (first instanceof TreeNode) |
575 |
return ((TreeNode<K,V>)first).getTreeNode(hash, key); |
576 |
do { |
577 |
if (e.hash == hash && |
578 |
((k = e.key) == key || (key != null && key.equals(k)))) |
579 |
return e; |
580 |
} while ((e = e.next) != null); |
581 |
} |
582 |
} |
583 |
return null; |
584 |
} |
585 |
|
586 |
/** |
587 |
* Returns <tt>true</tt> if this map contains a mapping for the |
588 |
* specified key. |
589 |
* |
590 |
* @param key The key whose presence in this map is to be tested |
591 |
* @return <tt>true</tt> if this map contains a mapping for the specified |
592 |
* key. |
593 |
*/ |
594 |
public boolean containsKey(Object key) { |
595 |
return getNode(hash(key), key) != null; |
596 |
} |
597 |
|
598 |
/** |
599 |
* Associates the specified value with the specified key in this map. |
600 |
* If the map previously contained a mapping for the key, the old |
601 |
* value is replaced. |
602 |
* |
603 |
* @param key key with which the specified value is to be associated |
604 |
* @param value value to be associated with the specified key |
605 |
* @return the previous value associated with <tt>key</tt>, or |
606 |
* <tt>null</tt> if there was no mapping for <tt>key</tt>. |
607 |
* (A <tt>null</tt> return can also indicate that the map |
608 |
* previously associated <tt>null</tt> with <tt>key</tt>.) |
609 |
*/ |
610 |
public V put(K key, V value) { |
611 |
return putVal(hash(key), key, value, false, true); |
612 |
} |
613 |
|
614 |
/** |
615 |
* Implements Map.put and related methods |
616 |
* |
617 |
* @param hash hash for key |
618 |
* @param key the key |
619 |
* @param value the value to put |
620 |
* @param onlyIfAbsent if true, don't change existing value |
621 |
* @param evict if false, the table is in creation mode. |
622 |
* @return previous value, or null if none |
623 |
*/ |
624 |
final V putVal(int hash, K key, V value, boolean onlyIfAbsent, |
625 |
boolean evict) { |
626 |
Node<K,V>[] tab; Node<K,V> p; int n, i; |
627 |
if ((tab = table) == null || (n = tab.length) == 0) |
628 |
n = (tab = resize()).length; |
629 |
if ((p = tab[i = (n - 1) & hash]) == null) |
630 |
tab[i] = newNode(hash, key, value, null); |
631 |
else { |
632 |
Node<K,V> e; K k; |
633 |
if (p.hash == hash && |
634 |
((k = p.key) == key || (key != null && key.equals(k)))) |
635 |
e = p; |
636 |
else if (p instanceof TreeNode) |
637 |
e = ((TreeNode<K,V>)p).putTreeVal(this, tab, hash, key, value); |
638 |
else { |
639 |
for (int binCount = 0; ; ++binCount) { |
640 |
if ((e = p.next) == null) { |
641 |
p.next = newNode(hash, key, value, null); |
642 |
if (binCount >= TREEIFY_THRESHOLD - 1) // -1 for 1st |
643 |
treeifyBin(tab, hash); |
644 |
break; |
645 |
} |
646 |
if (e.hash == hash && |
647 |
((k = e.key) == key || (key != null && key.equals(k)))) |
648 |
break; |
649 |
p = e; |
650 |
} |
651 |
} |
652 |
if (e != null) { // existing mapping for key |
653 |
V oldValue = e.value; |
654 |
if (!onlyIfAbsent || oldValue == null) |
655 |
e.value = value; |
656 |
afterNodeAccess(e); |
657 |
return oldValue; |
658 |
} |
659 |
} |
660 |
++modCount; |
661 |
if (++size > threshold) |
662 |
resize(); |
663 |
afterNodeInsertion(evict); |
664 |
return null; |
665 |
} |
666 |
|
667 |
/** |
668 |
* Initializes or doubles table size. If null, allocates in |
669 |
* accord with initial capacity target held in field threshold. |
670 |
* Otherwise, because we are using power-of-two expansion, the |
671 |
* elements from each bin must either stay at same index, or move |
672 |
* with a power of two offset in the new table. |
673 |
* |
674 |
* @return the table |
675 |
*/ |
676 |
final Node<K,V>[] resize() { |
677 |
Node<K,V>[] oldTab = table; |
678 |
int oldCap = (oldTab == null) ? 0 : oldTab.length; |
679 |
int oldThr = threshold; |
680 |
int newCap, newThr = 0; |
681 |
if (oldCap > 0) { |
682 |
if (oldCap >= MAXIMUM_CAPACITY) { |
683 |
threshold = Integer.MAX_VALUE; |
684 |
return oldTab; |
685 |
} |
686 |
else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY && |
687 |
oldCap >= DEFAULT_INITIAL_CAPACITY) |
688 |
newThr = oldThr << 1; // double threshold |
689 |
} |
690 |
else if (oldThr > 0) // initial capacity was placed in threshold |
691 |
newCap = oldThr; |
692 |
else { // zero initial threshold signifies using defaults |
693 |
newCap = DEFAULT_INITIAL_CAPACITY; |
694 |
newThr = (int)(DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY); |
695 |
} |
696 |
if (newThr == 0) { |
697 |
float ft = (float)newCap * loadFactor; |
698 |
newThr = (newCap < MAXIMUM_CAPACITY && ft < (float)MAXIMUM_CAPACITY ? |
699 |
(int)ft : Integer.MAX_VALUE); |
700 |
} |
701 |
threshold = newThr; |
702 |
@SuppressWarnings({"rawtypes","unchecked"}) |
703 |
Node<K,V>[] newTab = (Node<K,V>[])new Node[newCap]; |
704 |
table = newTab; |
705 |
if (oldTab != null) { |
706 |
for (int j = 0; j < oldCap; ++j) { |
707 |
Node<K,V> e; |
708 |
if ((e = oldTab[j]) != null) { |
709 |
oldTab[j] = null; |
710 |
if (e.next == null) |
711 |
newTab[e.hash & (newCap - 1)] = e; |
712 |
else if (e instanceof TreeNode) |
713 |
((TreeNode<K,V>)e).split(this, newTab, j, oldCap); |
714 |
else { // preserve order |
715 |
Node<K,V> loHead = null, loTail = null; |
716 |
Node<K,V> hiHead = null, hiTail = null; |
717 |
Node<K,V> next; |
718 |
do { |
719 |
next = e.next; |
720 |
if ((e.hash & oldCap) == 0) { |
721 |
if (loTail == null) |
722 |
loHead = e; |
723 |
else |
724 |
loTail.next = e; |
725 |
loTail = e; |
726 |
} |
727 |
else { |
728 |
if (hiTail == null) |
729 |
hiHead = e; |
730 |
else |
731 |
hiTail.next = e; |
732 |
hiTail = e; |
733 |
} |
734 |
} while ((e = next) != null); |
735 |
if (loTail != null) { |
736 |
loTail.next = null; |
737 |
newTab[j] = loHead; |
738 |
} |
739 |
if (hiTail != null) { |
740 |
hiTail.next = null; |
741 |
newTab[j + oldCap] = hiHead; |
742 |
} |
743 |
} |
744 |
} |
745 |
} |
746 |
} |
747 |
return newTab; |
748 |
} |
749 |
|
750 |
/** |
751 |
* Replaces all linked nodes in bin at index for given hash unless |
752 |
* table is too small, in which case resizes instead. |
753 |
*/ |
754 |
final void treeifyBin(Node<K,V>[] tab, int hash) { |
755 |
int n, index; Node<K,V> e; |
756 |
if (tab == null || (n = tab.length) < MIN_TREEIFY_CAPACITY) |
757 |
resize(); |
758 |
else if ((e = tab[index = (n - 1) & hash]) != null) { |
759 |
TreeNode<K,V> hd = null, tl = null; |
760 |
do { |
761 |
TreeNode<K,V> p = replacementTreeNode(e, null); |
762 |
if (tl == null) |
763 |
hd = p; |
764 |
else { |
765 |
p.prev = tl; |
766 |
tl.next = p; |
767 |
} |
768 |
tl = p; |
769 |
} while ((e = e.next) != null); |
770 |
if ((tab[index] = hd) != null) |
771 |
hd.treeify(tab); |
772 |
} |
773 |
} |
774 |
|
775 |
/** |
776 |
* Copies all of the mappings from the specified map to this map. |
777 |
* These mappings will replace any mappings that this map had for |
778 |
* any of the keys currently in the specified map. |
779 |
* |
780 |
* @param m mappings to be stored in this map |
781 |
* @throws NullPointerException if the specified map is null |
782 |
*/ |
783 |
public void putAll(Map<? extends K, ? extends V> m) { |
784 |
putMapEntries(m, true); |
785 |
} |
786 |
|
787 |
/** |
788 |
* Removes the mapping for the specified key from this map if present. |
789 |
* |
790 |
* @param key key whose mapping is to be removed from the map |
791 |
* @return the previous value associated with <tt>key</tt>, or |
792 |
* <tt>null</tt> if there was no mapping for <tt>key</tt>. |
793 |
* (A <tt>null</tt> return can also indicate that the map |
794 |
* previously associated <tt>null</tt> with <tt>key</tt>.) |
795 |
*/ |
796 |
public V remove(Object key) { |
797 |
Node<K,V> e; |
798 |
return (e = removeNode(hash(key), key, null, false, true)) == null ? |
799 |
null : e.value; |
800 |
} |
801 |
|
802 |
/** |
803 |
* Implements Map.remove and related methods |
804 |
* |
805 |
* @param hash hash for key |
806 |
* @param key the key |
807 |
* @param value the value to match if matchValue, else ignored |
808 |
* @param matchValue if true only remove if value is equal |
809 |
* @param movable if false do not move other nodes while removing |
810 |
* @return the node, or null if none |
811 |
*/ |
812 |
    final Node<K,V> removeNode(int hash, Object key, Object value,
                               boolean matchValue, boolean movable) {
        Node<K,V>[] tab; Node<K,V> p; int n, index;
        if ((tab = table) != null && (n = tab.length) > 0 &&
            (p = tab[index = (n - 1) & hash]) != null) {
            Node<K,V> node = null, e; K k; V v;
            // Locate the node: bin head, tree lookup, or linear list scan.
            if (p.hash == hash &&
                ((k = p.key) == key || (key != null && key.equals(k))))
                node = p;
            else if ((e = p.next) != null) {
                if (p instanceof TreeNode)
                    node = ((TreeNode<K,V>)p).getTreeNode(hash, key);
                else {
                    do {
                        if (e.hash == hash &&
                            ((k = e.key) == key ||
                             (key != null && key.equals(k)))) {
                            node = e;
                            break;
                        }
                        p = e;  // p trails e: predecessor of any match
                    } while ((e = e.next) != null);
                }
            }
            // Unlink only if found and, when matchValue, the value matches too.
            if (node != null && (!matchValue || (v = node.value) == value ||
                                 (value != null && value.equals(v)))) {
                if (node instanceof TreeNode)
                    ((TreeNode<K,V>)node).removeTreeNode(this, tab, movable);
                else if (node == p)       // node is the bin head
                    tab[index] = node.next;
                else                      // splice out after predecessor p
                    p.next = node.next;
                ++modCount;
                --size;
                afterNodeRemoval(node);   // LinkedHashMap callback
                return node;
            }
        }
        return null;
    }
852 |
|
853 |
/** |
854 |
* Removes all of the mappings from this map. |
855 |
* The map will be empty after this call returns. |
856 |
*/ |
857 |
public void clear() { |
858 |
Node<K,V>[] tab; |
859 |
modCount++; |
860 |
if ((tab = table) != null && size > 0) { |
861 |
size = 0; |
862 |
for (int i = 0; i < tab.length; ++i) |
863 |
tab[i] = null; |
864 |
} |
865 |
} |
866 |
|
867 |
/** |
868 |
* Returns <tt>true</tt> if this map maps one or more keys to the |
869 |
* specified value. |
870 |
* |
871 |
* @param value value whose presence in this map is to be tested |
872 |
* @return <tt>true</tt> if this map maps one or more keys to the |
873 |
* specified value |
874 |
*/ |
875 |
public boolean containsValue(Object value) { |
876 |
Node<K,V>[] tab; V v; |
877 |
if ((tab = table) != null && size > 0) { |
878 |
for (int i = 0; i < tab.length; ++i) { |
879 |
for (Node<K,V> e = tab[i]; e != null; e = e.next) { |
880 |
if ((v = e.value) == value || |
881 |
(value != null && value.equals(v))) |
882 |
return true; |
883 |
} |
884 |
} |
885 |
} |
886 |
return false; |
887 |
} |
888 |
|
889 |
/** |
890 |
* Returns a {@link Set} view of the keys contained in this map. |
891 |
* The set is backed by the map, so changes to the map are |
892 |
* reflected in the set, and vice-versa. If the map is modified |
893 |
* while an iteration over the set is in progress (except through |
894 |
* the iterator's own <tt>remove</tt> operation), the results of |
895 |
* the iteration are undefined. The set supports element removal, |
896 |
* which removes the corresponding mapping from the map, via the |
897 |
* <tt>Iterator.remove</tt>, <tt>Set.remove</tt>, |
898 |
* <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> |
899 |
* operations. It does not support the <tt>add</tt> or <tt>addAll</tt> |
900 |
* operations. |
901 |
* |
902 |
* @return a set view of the keys contained in this map |
903 |
*/ |
904 |
public Set<K> keySet() { |
905 |
Set<K> ks = keySet; |
906 |
if (ks == null) { |
907 |
ks = new KeySet(); |
908 |
keySet = ks; |
909 |
} |
910 |
return ks; |
911 |
} |
912 |
|
913 |
final class KeySet extends AbstractSet<K> { |
914 |
public final int size() { return size; } |
915 |
public final void clear() { HashMap.this.clear(); } |
916 |
public final Iterator<K> iterator() { return new KeyIterator(); } |
917 |
public final boolean contains(Object o) { return containsKey(o); } |
918 |
public final boolean remove(Object key) { |
919 |
return removeNode(hash(key), key, null, false, true) != null; |
920 |
} |
921 |
public final Spliterator<K> spliterator() { |
922 |
return new KeySpliterator<>(HashMap.this, 0, -1, 0, 0); |
923 |
} |
924 |
public final void forEach(Consumer<? super K> action) { |
925 |
Node<K,V>[] tab; |
926 |
if (action == null) |
927 |
throw new NullPointerException(); |
928 |
if (size > 0 && (tab = table) != null) { |
929 |
int mc = modCount; |
930 |
for (int i = 0; i < tab.length; ++i) { |
931 |
for (Node<K,V> e = tab[i]; e != null; e = e.next) |
932 |
action.accept(e.key); |
933 |
} |
934 |
if (modCount != mc) |
935 |
throw new ConcurrentModificationException(); |
936 |
} |
937 |
} |
938 |
} |
939 |
|
940 |
/** |
941 |
* Returns a {@link Collection} view of the values contained in this map. |
942 |
* The collection is backed by the map, so changes to the map are |
943 |
* reflected in the collection, and vice-versa. If the map is |
944 |
* modified while an iteration over the collection is in progress |
945 |
* (except through the iterator's own <tt>remove</tt> operation), |
946 |
* the results of the iteration are undefined. The collection |
947 |
* supports element removal, which removes the corresponding |
948 |
* mapping from the map, via the <tt>Iterator.remove</tt>, |
949 |
* <tt>Collection.remove</tt>, <tt>removeAll</tt>, |
950 |
* <tt>retainAll</tt> and <tt>clear</tt> operations. It does not |
951 |
* support the <tt>add</tt> or <tt>addAll</tt> operations. |
952 |
* |
953 |
* @return a view of the values contained in this map |
954 |
*/ |
955 |
public Collection<V> values() { |
956 |
Collection<V> vs = values; |
957 |
if (vs == null) { |
958 |
vs = new Values(); |
959 |
values = vs; |
960 |
} |
961 |
return vs; |
962 |
} |
963 |
|
964 |
final class Values extends AbstractCollection<V> { |
965 |
public final int size() { return size; } |
966 |
public final void clear() { HashMap.this.clear(); } |
967 |
public final Iterator<V> iterator() { return new ValueIterator(); } |
968 |
public final boolean contains(Object o) { return containsValue(o); } |
969 |
public final Spliterator<V> spliterator() { |
970 |
return new ValueSpliterator<>(HashMap.this, 0, -1, 0, 0); |
971 |
} |
972 |
public final void forEach(Consumer<? super V> action) { |
973 |
Node<K,V>[] tab; |
974 |
if (action == null) |
975 |
throw new NullPointerException(); |
976 |
if (size > 0 && (tab = table) != null) { |
977 |
int mc = modCount; |
978 |
for (int i = 0; i < tab.length; ++i) { |
979 |
for (Node<K,V> e = tab[i]; e != null; e = e.next) |
980 |
action.accept(e.value); |
981 |
} |
982 |
if (modCount != mc) |
983 |
throw new ConcurrentModificationException(); |
984 |
} |
985 |
} |
986 |
} |
987 |
|
988 |
/** |
989 |
* Returns a {@link Set} view of the mappings contained in this map. |
990 |
* The set is backed by the map, so changes to the map are |
991 |
* reflected in the set, and vice-versa. If the map is modified |
992 |
* while an iteration over the set is in progress (except through |
993 |
* the iterator's own <tt>remove</tt> operation, or through the |
994 |
* <tt>setValue</tt> operation on a map entry returned by the |
995 |
* iterator) the results of the iteration are undefined. The set |
996 |
* supports element removal, which removes the corresponding |
997 |
* mapping from the map, via the <tt>Iterator.remove</tt>, |
998 |
* <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt> and |
999 |
* <tt>clear</tt> operations. It does not support the |
1000 |
* <tt>add</tt> or <tt>addAll</tt> operations. |
1001 |
* |
1002 |
* @return a set view of the mappings contained in this map |
1003 |
*/ |
1004 |
public Set<Map.Entry<K,V>> entrySet() { |
1005 |
Set<Map.Entry<K,V>> es; |
1006 |
return (es = entrySet) == null ? (entrySet = new EntrySet()) : es; |
1007 |
} |
1008 |
|
1009 |
final class EntrySet extends AbstractSet<Map.Entry<K,V>> { |
1010 |
public final int size() { return size; } |
1011 |
public final void clear() { HashMap.this.clear(); } |
1012 |
public final Iterator<Map.Entry<K,V>> iterator() { |
1013 |
return new EntryIterator(); |
1014 |
} |
1015 |
public final boolean contains(Object o) { |
1016 |
if (!(o instanceof Map.Entry)) |
1017 |
return false; |
1018 |
Map.Entry<?,?> e = (Map.Entry<?,?>) o; |
1019 |
Object key = e.getKey(); |
1020 |
Node<K,V> candidate = getNode(hash(key), key); |
1021 |
return candidate != null && candidate.equals(e); |
1022 |
} |
1023 |
public final boolean remove(Object o) { |
1024 |
if (o instanceof Map.Entry) { |
1025 |
Map.Entry<?,?> e = (Map.Entry<?,?>) o; |
1026 |
Object key = e.getKey(); |
1027 |
Object value = e.getValue(); |
1028 |
return removeNode(hash(key), key, value, true, true) != null; |
1029 |
} |
1030 |
return false; |
1031 |
} |
1032 |
public final Spliterator<Map.Entry<K,V>> spliterator() { |
1033 |
return new EntrySpliterator<>(HashMap.this, 0, -1, 0, 0); |
1034 |
} |
1035 |
public final void forEach(Consumer<? super Map.Entry<K,V>> action) { |
1036 |
Node<K,V>[] tab; |
1037 |
if (action == null) |
1038 |
throw new NullPointerException(); |
1039 |
if (size > 0 && (tab = table) != null) { |
1040 |
int mc = modCount; |
1041 |
for (int i = 0; i < tab.length; ++i) { |
1042 |
for (Node<K,V> e = tab[i]; e != null; e = e.next) |
1043 |
action.accept(e); |
1044 |
} |
1045 |
if (modCount != mc) |
1046 |
throw new ConcurrentModificationException(); |
1047 |
} |
1048 |
} |
1049 |
} |
1050 |
|
1051 |
// Overrides of JDK8 Map extension methods |
1052 |
|
1053 |
@Override |
1054 |
public V getOrDefault(Object key, V defaultValue) { |
1055 |
Node<K,V> e; |
1056 |
return (e = getNode(hash(key), key)) == null ? defaultValue : e.value; |
1057 |
} |
1058 |
|
1059 |
@Override |
1060 |
public V putIfAbsent(K key, V value) { |
1061 |
return putVal(hash(key), key, value, true, true); |
1062 |
} |
1063 |
|
1064 |
@Override |
1065 |
public boolean remove(Object key, Object value) { |
1066 |
return removeNode(hash(key), key, value, true, true) != null; |
1067 |
} |
1068 |
|
1069 |
@Override |
1070 |
public boolean replace(K key, V oldValue, V newValue) { |
1071 |
Node<K,V> e; V v; |
1072 |
if ((e = getNode(hash(key), key)) != null && |
1073 |
((v = e.value) == oldValue || (v != null && v.equals(oldValue)))) { |
1074 |
e.value = newValue; |
1075 |
afterNodeAccess(e); |
1076 |
return true; |
1077 |
} |
1078 |
return false; |
1079 |
} |
1080 |
|
1081 |
@Override |
1082 |
public V replace(K key, V value) { |
1083 |
Node<K,V> e; |
1084 |
if ((e = getNode(hash(key), key)) != null) { |
1085 |
V oldValue = e.value; |
1086 |
e.value = value; |
1087 |
afterNodeAccess(e); |
1088 |
return oldValue; |
1089 |
} |
1090 |
return null; |
1091 |
} |
1092 |
|
1093 |
    @Override
    public V computeIfAbsent(K key,
                             Function<? super K, ? extends V> mappingFunction) {
        if (mappingFunction == null)
            throw new NullPointerException();
        int hash = hash(key);
        Node<K,V>[] tab; Node<K,V> first; int n, i;
        int binCount = 0;
        TreeNode<K,V> t = null;
        Node<K,V> old = null;
        // Resize up front so a possible insertion below cannot overflow
        // the threshold.
        if (size > threshold || (tab = table) == null ||
            (n = tab.length) == 0)
            n = (tab = resize()).length;
        // Look for an existing node for key (tree lookup or list scan).
        if ((first = tab[i = (n - 1) & hash]) != null) {
            if (first instanceof TreeNode)
                old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
            else {
                Node<K,V> e = first; K k;
                do {
                    if (e.hash == hash &&
                        ((k = e.key) == key || (key != null && key.equals(k)))) {
                        old = e;
                        break;
                    }
                    ++binCount;
                } while ((e = e.next) != null);
            }
            V oldValue;
            // Existing non-null value: nothing to compute, just report it.
            if (old != null && (oldValue = old.value) != null) {
                afterNodeAccess(old);
                return oldValue;
            }
        }
        V v = mappingFunction.apply(key);
        if (v == null) {
            return null;              // function declined to supply a value
        } else if (old != null) {     // node existed with null value: overwrite
            old.value = v;
            afterNodeAccess(old);
            return v;
        }
        else if (t != null)
            t.putTreeVal(this, tab, hash, key, v);
        else {
            tab[i] = newNode(hash, key, v, first);
            if (binCount >= TREEIFY_THRESHOLD - 1)
                treeifyBin(tab, hash);
        }
        ++modCount;
        ++size;
        afterNodeInsertion(true);     // LinkedHashMap eviction hook
        return v;
    }
1146 |
|
1147 |
public V computeIfPresent(K key, |
1148 |
BiFunction<? super K, ? super V, ? extends V> remappingFunction) { |
1149 |
if (remappingFunction == null) |
1150 |
throw new NullPointerException(); |
1151 |
Node<K,V> e; V oldValue; |
1152 |
int hash = hash(key); |
1153 |
if ((e = getNode(hash, key)) != null && |
1154 |
(oldValue = e.value) != null) { |
1155 |
V v = remappingFunction.apply(key, oldValue); |
1156 |
if (v != null) { |
1157 |
e.value = v; |
1158 |
afterNodeAccess(e); |
1159 |
return v; |
1160 |
} |
1161 |
else |
1162 |
removeNode(hash, key, null, false, true); |
1163 |
} |
1164 |
return null; |
1165 |
} |
1166 |
|
1167 |
    @Override
    public V compute(K key,
                     BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
        if (remappingFunction == null)
            throw new NullPointerException();
        int hash = hash(key);
        Node<K,V>[] tab; Node<K,V> first; int n, i;
        int binCount = 0;
        TreeNode<K,V> t = null;
        Node<K,V> old = null;
        // Resize up front so a possible insertion below cannot overflow
        // the threshold.
        if (size > threshold || (tab = table) == null ||
            (n = tab.length) == 0)
            n = (tab = resize()).length;
        // Look for an existing node for key (tree lookup or list scan).
        if ((first = tab[i = (n - 1) & hash]) != null) {
            if (first instanceof TreeNode)
                old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
            else {
                Node<K,V> e = first; K k;
                do {
                    if (e.hash == hash &&
                        ((k = e.key) == key || (key != null && key.equals(k)))) {
                        old = e;
                        break;
                    }
                    ++binCount;
                } while ((e = e.next) != null);
            }
        }
        // Apply the function to the current value (null if absent).
        V oldValue = (old == null) ? null : old.value;
        V v = remappingFunction.apply(key, oldValue);
        if (old != null) {
            if (v != null) {          // replace in place
                old.value = v;
                afterNodeAccess(old);
            }
            else                      // null result removes the mapping
                removeNode(hash, key, null, false, true);
        }
        else if (v != null) {         // no prior mapping: insert new node
            if (t != null)
                t.putTreeVal(this, tab, hash, key, v);
            else {
                tab[i] = newNode(hash, key, v, first);
                if (binCount >= TREEIFY_THRESHOLD - 1)
                    treeifyBin(tab, hash);
            }
            ++modCount;
            ++size;
            afterNodeInsertion(true); // LinkedHashMap eviction hook
        }
        return v;
    }
1219 |
|
1220 |
    @Override
    public V merge(K key, V value,
                   BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
        if (value == null)
            throw new NullPointerException();
        if (remappingFunction == null)
            throw new NullPointerException();
        int hash = hash(key);
        Node<K,V>[] tab; Node<K,V> first; int n, i;
        int binCount = 0;
        TreeNode<K,V> t = null;
        Node<K,V> old = null;
        // Resize up front so a possible insertion below cannot overflow
        // the threshold.
        if (size > threshold || (tab = table) == null ||
            (n = tab.length) == 0)
            n = (tab = resize()).length;
        // Look for an existing node for key (tree lookup or list scan).
        if ((first = tab[i = (n - 1) & hash]) != null) {
            if (first instanceof TreeNode)
                old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
            else {
                Node<K,V> e = first; K k;
                do {
                    if (e.hash == hash &&
                        ((k = e.key) == key || (key != null && key.equals(k)))) {
                        old = e;
                        break;
                    }
                    ++binCount;
                } while ((e = e.next) != null);
            }
        }
        if (old != null) {
            V v;
            // Combine only when the existing value is non-null; a null
            // existing value is simply replaced with the new value.
            if (old.value != null)
                v = remappingFunction.apply(old.value, value);
            else
                v = value;
            if (v != null) {
                old.value = v;
                afterNodeAccess(old);
            }
            else
                // Null merge result removes the mapping.
                removeNode(hash, key, null, false, true);
            return v;
        }
        // NOTE(review): value was null-checked at entry, so this condition is
        // always true here; kept as-is to preserve the original structure.
        if (value != null) {
            if (t != null)
                t.putTreeVal(this, tab, hash, key, value);
            else {
                tab[i] = newNode(hash, key, value, first);
                if (binCount >= TREEIFY_THRESHOLD - 1)
                    treeifyBin(tab, hash);
            }
            ++modCount;
            ++size;
            afterNodeInsertion(true); // LinkedHashMap eviction hook
        }
        return value;
    }
1278 |
|
1279 |
@Override |
1280 |
public void forEach(BiConsumer<? super K, ? super V> action) { |
1281 |
Node<K,V>[] tab; |
1282 |
if (action == null) |
1283 |
throw new NullPointerException(); |
1284 |
if (size > 0 && (tab = table) != null) { |
1285 |
int mc = modCount; |
1286 |
for (int i = 0; i < tab.length; ++i) { |
1287 |
for (Node<K,V> e = tab[i]; e != null; e = e.next) |
1288 |
action.accept(e.key, e.value); |
1289 |
} |
1290 |
if (modCount != mc) |
1291 |
throw new ConcurrentModificationException(); |
1292 |
} |
1293 |
} |
1294 |
|
1295 |
@Override |
1296 |
public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) { |
1297 |
Node<K,V>[] tab; |
1298 |
if (function == null) |
1299 |
throw new NullPointerException(); |
1300 |
if (size > 0 && (tab = table) != null) { |
1301 |
int mc = modCount; |
1302 |
for (int i = 0; i < tab.length; ++i) { |
1303 |
for (Node<K,V> e = tab[i]; e != null; e = e.next) { |
1304 |
e.value = function.apply(e.key, e.value); |
1305 |
} |
1306 |
} |
1307 |
if (modCount != mc) |
1308 |
throw new ConcurrentModificationException(); |
1309 |
} |
1310 |
} |
1311 |
|
1312 |
/* ------------------------------------------------------------ */ |
1313 |
// Cloning and serialization |
1314 |
|
1315 |
/** |
1316 |
* Returns a shallow copy of this <tt>HashMap</tt> instance: the keys and |
1317 |
* values themselves are not cloned. |
1318 |
* |
1319 |
* @return a shallow copy of this map |
1320 |
*/ |
1321 |
@SuppressWarnings("unchecked") |
1322 |
@Override |
1323 |
public Object clone() { |
1324 |
HashMap<K,V> result; |
1325 |
try { |
1326 |
result = (HashMap<K,V>)super.clone(); |
1327 |
} catch (CloneNotSupportedException e) { |
1328 |
// this shouldn't happen, since we are Cloneable |
1329 |
throw new InternalError(e); |
1330 |
} |
1331 |
result.reinitialize(); |
1332 |
result.putMapEntries(this, false); |
1333 |
return result; |
1334 |
} |
1335 |
|
1336 |
// These methods are also used when serializing HashSets |
1337 |
    // Accessor for the configured load factor (used by HashSet serialization).
    final float loadFactor() { return loadFactor; }
1338 |
final int capacity() { |
1339 |
return (table != null) ? table.length : |
1340 |
(threshold > 0) ? threshold : |
1341 |
DEFAULT_INITIAL_CAPACITY; |
1342 |
} |
1343 |
|
1344 |
/** |
1345 |
* Save the state of the <tt>HashMap</tt> instance to a stream (i.e., |
1346 |
* serialize it). |
1347 |
* |
1348 |
* @serialData The <i>capacity</i> of the HashMap (the length of the |
1349 |
* bucket array) is emitted (int), followed by the |
1350 |
* <i>size</i> (an int, the number of key-value |
1351 |
* mappings), followed by the key (Object) and value (Object) |
1352 |
* for each key-value mapping. The key-value mappings are |
1353 |
* emitted in no particular order. |
1354 |
*/ |
1355 |
    private void writeObject(java.io.ObjectOutputStream s)
        throws IOException {
        int buckets = capacity();
        // Write out the threshold, loadfactor, and any hidden stuff
        s.defaultWriteObject();
        s.writeInt(buckets);      // bucket count (advisory; recomputed on read)
        s.writeInt(size);         // number of key-value mappings
        internalWriteEntries(s);  // alternating key/value objects, table order
    }
1364 |
|
1365 |
/** |
1366 |
* Reconstitute the {@code HashMap} instance from a stream (i.e., |
1367 |
* deserialize it). |
1368 |
*/ |
1369 |
    private void readObject(java.io.ObjectInputStream s)
        throws IOException, ClassNotFoundException {
        // Read in the threshold (ignored), loadfactor, and any hidden stuff
        s.defaultReadObject();
        reinitialize();
        // Reject corrupt/hostile streams with an invalid load factor.
        if (loadFactor <= 0 || Float.isNaN(loadFactor))
            throw new InvalidObjectException("Illegal load factor: " +
                                             loadFactor);
        s.readInt();                // Read and ignore number of buckets
        int mappings = s.readInt(); // Read number of mappings (size)
        if (mappings < 0)
            throw new InvalidObjectException("Illegal mappings count: " +
                                             mappings);
        else if (mappings > 0) { // (if zero, use defaults)
            // Size the table using given load factor only if within
            // range of 0.25...4.0
            float lf = Math.min(Math.max(0.25f, loadFactor), 4.0f);
            float fc = (float)mappings / lf + 1.0f;
            // Clamp the computed capacity into [DEFAULT_INITIAL_CAPACITY,
            // MAXIMUM_CAPACITY], rounding up to a power of two.
            int cap = ((fc < DEFAULT_INITIAL_CAPACITY) ?
                       DEFAULT_INITIAL_CAPACITY :
                       (fc >= MAXIMUM_CAPACITY) ?
                       MAXIMUM_CAPACITY :
                       tableSizeFor((int)fc));
            float ft = (float)cap * lf;
            threshold = ((cap < MAXIMUM_CAPACITY && ft < MAXIMUM_CAPACITY) ?
                         (int)ft : Integer.MAX_VALUE);
            @SuppressWarnings({"rawtypes","unchecked"})
            Node<K,V>[] tab = (Node<K,V>[])new Node[cap];
            table = tab;

            // Read the keys and values, and put the mappings in the HashMap
            for (int i = 0; i < mappings; i++) {
                @SuppressWarnings("unchecked")
                K key = (K) s.readObject();
                @SuppressWarnings("unchecked")
                V value = (V) s.readObject();
                // evict == false: skip LinkedHashMap eviction during rebuild
                putVal(hash(key), key, value, false, false);
            }
        }
    }
1409 |
|
1410 |
/* ------------------------------------------------------------ */ |
1411 |
// iterators |
1412 |
|
1413 |
    /**
     * Shared machinery for key/value/entry iterators: walks the table
     * bin-by-bin and each bin's node chain, failing fast on concurrent
     * structural modification.
     */
    abstract class HashIterator {
        Node<K,V> next;        // next entry to return
        Node<K,V> current;     // current entry
        int expectedModCount;  // for fast-fail
        int index;             // current slot

        HashIterator() {
            expectedModCount = modCount;
            Node<K,V>[] t = table;
            current = next = null;
            index = 0;
            if (t != null && size > 0) { // advance to first entry
                do {} while (index < t.length && (next = t[index++]) == null);
            }
        }

        public final boolean hasNext() {
            return next != null;
        }

        final Node<K,V> nextNode() {
            Node<K,V>[] t;
            Node<K,V> e = next;
            if (modCount != expectedModCount)
                throw new ConcurrentModificationException();
            if (e == null)
                throw new NoSuchElementException();
            // Advance within the current bin; when exhausted, scan forward
            // for the next non-empty bin.
            if ((next = (current = e).next) == null && (t = table) != null) {
                do {} while (index < t.length && (next = t[index++]) == null);
            }
            return e;
        }

        public final void remove() {
            Node<K,V> p = current;
            if (p == null)
                throw new IllegalStateException();
            if (modCount != expectedModCount)
                throw new ConcurrentModificationException();
            current = null;
            K key = p.key;
            // movable == false: keep node placement stable mid-iteration.
            removeNode(hash(key), key, null, false, false);
            // Re-sync so the iterator's own removal does not trip fast-fail.
            expectedModCount = modCount;
        }
    }
1458 |
|
1459 |
final class KeyIterator extends HashIterator |
1460 |
implements Iterator<K> { |
1461 |
public final K next() { return nextNode().key; } |
1462 |
} |
1463 |
|
1464 |
final class ValueIterator extends HashIterator |
1465 |
implements Iterator<V> { |
1466 |
public final V next() { return nextNode().value; } |
1467 |
} |
1468 |
|
1469 |
final class EntryIterator extends HashIterator |
1470 |
implements Iterator<Map.Entry<K,V>> { |
1471 |
public final Map.Entry<K,V> next() { return nextNode(); } |
1472 |
} |
1473 |
|
1474 |
/* ------------------------------------------------------------ */ |
1475 |
// spliterators |
1476 |
|
1477 |
    /**
     * Base class for the key/value/entry spliterators. Binds lazily: the
     * fence and size estimate are initialized on first use so a spliterator
     * created before traversal sees the map's state at traversal time.
     */
    static class HashMapSpliterator<K,V> {
        final HashMap<K,V> map;
        Node<K,V> current;     // current node
        int index;             // current index, modified on advance/split
        int fence;             // one past last index; < 0 until first use
        int est;               // size estimate
        int expectedModCount;  // for comodification checks

        HashMapSpliterator(HashMap<K,V> m, int origin,
                           int fence, int est,
                           int expectedModCount) {
            this.map = m;
            this.index = origin;
            this.fence = fence;
            this.est = est;
            this.expectedModCount = expectedModCount;
        }

        final int getFence() { // initialize fence and size on first use
            int hi;
            if ((hi = fence) < 0) {
                HashMap<K,V> m = map;
                est = m.size;
                expectedModCount = m.modCount;
                Node<K,V>[] tab = m.table;
                hi = fence = (tab == null) ? 0 : tab.length;
            }
            return hi;
        }

        public final long estimateSize() {
            getFence(); // force init
            return (long) est;
        }
    }
1512 |
|
1513 |
    static final class KeySpliterator<K,V>
        extends HashMapSpliterator<K,V>
        implements Spliterator<K> {
        KeySpliterator(HashMap<K,V> m, int origin, int fence, int est,
                       int expectedModCount) {
            super(m, origin, fence, est, expectedModCount);
        }

        // Split the remaining index range in half; refuse to split when
        // mid-bin (current != null) or the range is too small.
        public KeySpliterator<K,V> trySplit() {
            int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
            return (lo >= mid || current != null) ? null :
                new KeySpliterator<>(map, lo, index = mid, est >>>= 1,
                                     expectedModCount);
        }

        public void forEachRemaining(Consumer<? super K> action) {
            int i, hi, mc;
            if (action == null)
                throw new NullPointerException();
            HashMap<K,V> m = map;
            Node<K,V>[] tab = m.table;
            // Late-bind fence/modCount if this is the first use.
            if ((hi = fence) < 0) {
                mc = expectedModCount = m.modCount;
                hi = fence = (tab == null) ? 0 : tab.length;
            }
            else
                mc = expectedModCount;
            if (tab != null && tab.length >= hi &&
                (i = index) >= 0 && (i < (index = hi) || current != null)) {
                Node<K,V> p = current;
                current = null;
                // Drain the remaining bins, alternating between fetching a
                // bin head and walking its chain.
                do {
                    if (p == null)
                        p = tab[i++];
                    else {
                        action.accept(p.key);
                        p = p.next;
                    }
                } while (p != null || i < hi);
                if (m.modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }

        public boolean tryAdvance(Consumer<? super K> action) {
            int hi;
            if (action == null)
                throw new NullPointerException();
            Node<K,V>[] tab = map.table;
            if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
                while (current != null || index < hi) {
                    if (current == null)
                        current = tab[index++];
                    else {
                        K k = current.key;
                        current = current.next;
                        action.accept(k);
                        if (map.modCount != expectedModCount)
                            throw new ConcurrentModificationException();
                        return true;
                    }
                }
            }
            return false;
        }

        public int characteristics() {
            // SIZED only while unsplit (fence unbound or estimate exact).
            return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
                Spliterator.DISTINCT;
        }
    }
1584 |
|
1585 |
    static final class ValueSpliterator<K,V>
        extends HashMapSpliterator<K,V>
        implements Spliterator<V> {
        ValueSpliterator(HashMap<K,V> m, int origin, int fence, int est,
                         int expectedModCount) {
            super(m, origin, fence, est, expectedModCount);
        }

        // Split the remaining index range in half; refuse to split when
        // mid-bin (current != null) or the range is too small.
        public ValueSpliterator<K,V> trySplit() {
            int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
            return (lo >= mid || current != null) ? null :
                new ValueSpliterator<>(map, lo, index = mid, est >>>= 1,
                                       expectedModCount);
        }

        public void forEachRemaining(Consumer<? super V> action) {
            int i, hi, mc;
            if (action == null)
                throw new NullPointerException();
            HashMap<K,V> m = map;
            Node<K,V>[] tab = m.table;
            // Late-bind fence/modCount if this is the first use.
            if ((hi = fence) < 0) {
                mc = expectedModCount = m.modCount;
                hi = fence = (tab == null) ? 0 : tab.length;
            }
            else
                mc = expectedModCount;
            if (tab != null && tab.length >= hi &&
                (i = index) >= 0 && (i < (index = hi) || current != null)) {
                Node<K,V> p = current;
                current = null;
                // Drain the remaining bins, alternating between fetching a
                // bin head and walking its chain.
                do {
                    if (p == null)
                        p = tab[i++];
                    else {
                        action.accept(p.value);
                        p = p.next;
                    }
                } while (p != null || i < hi);
                if (m.modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }

        public boolean tryAdvance(Consumer<? super V> action) {
            int hi;
            if (action == null)
                throw new NullPointerException();
            Node<K,V>[] tab = map.table;
            if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
                while (current != null || index < hi) {
                    if (current == null)
                        current = tab[index++];
                    else {
                        V v = current.value;
                        current = current.next;
                        action.accept(v);
                        if (map.modCount != expectedModCount)
                            throw new ConcurrentModificationException();
                        return true;
                    }
                }
            }
            return false;
        }

        public int characteristics() {
            // Values may repeat, so no DISTINCT; SIZED only while unsplit.
            return (fence < 0 || est == map.size ? Spliterator.SIZED : 0);
        }
    }
1655 |
|
1656 |
    static final class EntrySpliterator<K,V>
        extends HashMapSpliterator<K,V>
        implements Spliterator<Map.Entry<K,V>> {
        EntrySpliterator(HashMap<K,V> m, int origin, int fence, int est,
                         int expectedModCount) {
            super(m, origin, fence, est, expectedModCount);
        }

        // Split the remaining index range in half; refuse to split when
        // mid-bin (current != null) or the range is too small.
        public EntrySpliterator<K,V> trySplit() {
            int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
            return (lo >= mid || current != null) ? null :
                new EntrySpliterator<>(map, lo, index = mid, est >>>= 1,
                                       expectedModCount);
        }

        public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
            int i, hi, mc;
            if (action == null)
                throw new NullPointerException();
            HashMap<K,V> m = map;
            Node<K,V>[] tab = m.table;
            // Late-bind fence/modCount if this is the first use.
            if ((hi = fence) < 0) {
                mc = expectedModCount = m.modCount;
                hi = fence = (tab == null) ? 0 : tab.length;
            }
            else
                mc = expectedModCount;
            if (tab != null && tab.length >= hi &&
                (i = index) >= 0 && (i < (index = hi) || current != null)) {
                Node<K,V> p = current;
                current = null;
                // Drain the remaining bins, alternating between fetching a
                // bin head and walking its chain.
                do {
                    if (p == null)
                        p = tab[i++];
                    else {
                        action.accept(p);
                        p = p.next;
                    }
                } while (p != null || i < hi);
                if (m.modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }

        public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
            int hi;
            if (action == null)
                throw new NullPointerException();
            Node<K,V>[] tab = map.table;
            if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
                while (current != null || index < hi) {
                    if (current == null)
                        current = tab[index++];
                    else {
                        Node<K,V> e = current;
                        current = current.next;
                        action.accept(e);
                        if (map.modCount != expectedModCount)
                            throw new ConcurrentModificationException();
                        return true;
                    }
                }
            }
            return false;
        }

        public int characteristics() {
            // SIZED only while unsplit (fence unbound or estimate exact).
            return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
                Spliterator.DISTINCT;
        }
    }
1727 |
|
1728 |
/* ------------------------------------------------------------ */ |
1729 |
// LinkedHashMap support |
1730 |
|
1731 |
|
1732 |
/* |
1733 |
* The following package-protected methods are designed to be |
1734 |
* overridden by LinkedHashMap, but not by any other subclass. |
1735 |
* Nearly all other internal methods are also package-protected |
1736 |
* but are declared final, so can be used by LinkedHashMap, view |
1737 |
* classes, and HashSet. |
1738 |
*/ |
1739 |
|
1740 |
    // Create a regular (non-tree) node. Package-protected factory,
    // designed to be overridden by LinkedHashMap only.
    Node<K,V> newNode(int hash, K key, V value, Node<K,V> next) {
        return new Node<>(hash, key, value, next);
    }
1744 |
|
1745 |
    // For conversion from TreeNodes to plain nodes (used by untreeify);
    // copies hash/key/value from p into a fresh plain node.
    Node<K,V> replacementNode(Node<K,V> p, Node<K,V> next) {
        return new Node<>(p.hash, p.key, p.value, next);
    }
1749 |
|
1750 |
    // Create a tree bin node. Package-protected factory, designed to be
    // overridden by LinkedHashMap only.
    TreeNode<K,V> newTreeNode(int hash, K key, V value, Node<K,V> next) {
        return new TreeNode<>(hash, key, value, next);
    }
1754 |
|
1755 |
    // For treeifyBin: converts a plain node p into a TreeNode carrying
    // the same hash/key/value.
    TreeNode<K,V> replacementTreeNode(Node<K,V> p, Node<K,V> next) {
        return new TreeNode<>(p.hash, p.key, p.value, next);
    }
1759 |
|
1760 |
    /**
     * Reset to initial default state. Called by clone and readObject.
     */
    void reinitialize() {
        table = null;       // backing bucket array dropped
        entrySet = null;    // cached collection views dropped
        keySet = null;
        values = null;
        modCount = 0;
        threshold = 0;
        size = 0;
    }
1772 |
|
1773 |
    // Callbacks to allow LinkedHashMap post-actions; deliberately no-ops
    // in HashMap itself.
    void afterNodeAccess(Node<K,V> p) { }
    void afterNodeInsertion(boolean evict) { }
    void afterNodeRemoval(Node<K,V> p) { }
1777 |
|
1778 |
// Called only from writeObject, to ensure compatible ordering. |
1779 |
void internalWriteEntries(java.io.ObjectOutputStream s) throws IOException { |
1780 |
Node<K,V>[] tab; |
1781 |
if (size > 0 && (tab = table) != null) { |
1782 |
for (int i = 0; i < tab.length; ++i) { |
1783 |
for (Node<K,V> e = tab[i]; e != null; e = e.next) { |
1784 |
s.writeObject(e.key); |
1785 |
s.writeObject(e.value); |
1786 |
} |
1787 |
} |
1788 |
} |
1789 |
} |
1790 |
|
1791 |
/* ------------------------------------------------------------ */ |
1792 |
// Tree bins |
1793 |
|
1794 |
    /**
     * Entry for Tree bins. Extends LinkedHashMap.Entry (which in turn
     * extends Node) so can be used as extension of either regular or
     * linked node. Tree bins keep both red-black tree links (parent,
     * left, right) and the plain next/prev chain, so a bin can be
     * traversed either way and converted back to a plain bin cheaply.
     */
    static final class TreeNode<K,V> extends LinkedHashMap.Entry<K,V> {
        TreeNode<K,V> parent;  // red-black tree links
        TreeNode<K,V> left;
        TreeNode<K,V> right;
        TreeNode<K,V> prev;    // needed to unlink next upon deletion
        boolean red;           // color bit for red-black rebalancing
        TreeNode(int hash, K key, V val, Node<K,V> next) {
            super(hash, key, val, next);
        }

        /**
         * Returns root of tree containing this node.
         */
        final TreeNode<K,V> root() {
            // Follow parent links until a node with no parent is found.
            for (TreeNode<K,V> r = this, p;;) {
                if ((p = r.parent) == null)
                    return r;
                r = p;
            }
        }

        /**
         * Ensures that the given root is the first node of its bin,
         * relinking the next/prev chain so the tree root is also the
         * bin's head element.
         */
        static <K,V> void moveRootToFront(Node<K,V>[] tab, TreeNode<K,V> root) {
            int n;
            if (root != null && tab != null && (n = tab.length) > 0) {
                int index = (n - 1) & root.hash;
                TreeNode<K,V> first = (TreeNode<K,V>)tab[index];
                if (root != first) {
                    Node<K,V> rn;
                    tab[index] = root;
                    TreeNode<K,V> rp = root.prev;
                    // Unlink root from its current chain position...
                    if ((rn = root.next) != null)
                        ((TreeNode<K,V>)rn).prev = rp;
                    if (rp != null)
                        rp.next = rn;
                    // ...and splice it in ahead of the old first node.
                    if (first != null)
                        first.prev = root;
                    root.next = first;
                    root.prev = null;
                }
                assert checkInvariants(root);
            }
        }

        /**
         * Finds the node starting at root p with the given hash and key.
         * The kc argument caches comparableClassFor(key) upon first use
         * comparing keys. With equal hashes and non-comparable keys,
         * searches the right subtree recursively, then falls back to
         * the left.
         */
        final TreeNode<K,V> find(int h, Object k, Class<?> kc) {
            TreeNode<K,V> p = this;
            do {
                int ph, dir; K pk;
                TreeNode<K,V> pl = p.left, pr = p.right, q;
                if ((ph = p.hash) > h)
                    p = pl;
                else if (ph < h)
                    p = pr;
                else if ((pk = p.key) == k || (k != null && k.equals(pk)))
                    return p;
                else if (pl == null)
                    p = pr;
                else if (pr == null)
                    p = pl;
                else if ((kc != null ||
                          (kc = comparableClassFor(k)) != null) &&
                         (dir = compareComparables(kc, k, pk)) != 0)
                    p = (dir < 0) ? pl : pr;
                else if ((q = pr.find(h, k, kc)) != null)
                    return q;
                else
                    p = pl;
            } while (p != null);
            return null;
        }

        /**
         * Calls find for root node.
         */
        final TreeNode<K,V> getTreeNode(int h, Object k) {
            return ((parent != null) ? root() : this).find(h, k, null);
        }

        /**
         * Tie-breaking utility for ordering insertions when equal
         * hashCodes and non-comparable. We don't require a total
         * order, just a consistent insertion rule to maintain
         * equivalence across rebalancings. Tie-breaking further than
         * necessary simplifies testing a bit. Never returns 0: falls
         * back to identityHashCode comparison when class names tie.
         */
        static int tieBreakOrder(Object a, Object b) {
            int d;
            if (a == null || b == null ||
                (d = a.getClass().getName().
                 compareTo(b.getClass().getName())) == 0)
                d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
                     -1 : 1);
            return d;
        }

        /**
         * Forms tree of the nodes linked from this node, inserting each
         * chained node into a red-black tree ordered by hash, then
         * Comparable order, then tieBreakOrder. Leaves the tree root as
         * the bin's first node via moveRootToFront.
         */
        final void treeify(Node<K,V>[] tab) {
            TreeNode<K,V> root = null;
            for (TreeNode<K,V> x = this, next; x != null; x = next) {
                next = (TreeNode<K,V>)x.next;
                x.left = x.right = null;
                if (root == null) {
                    x.parent = null;
                    x.red = false;   // root is always black
                    root = x;
                }
                else {
                    K k = x.key;
                    int h = x.hash;
                    Class<?> kc = null;
                    for (TreeNode<K,V> p = root;;) {
                        int dir, ph;
                        K pk = p.key;
                        if ((ph = p.hash) > h)
                            dir = -1;
                        else if (ph < h)
                            dir = 1;
                        else if ((kc == null &&
                                  (kc = comparableClassFor(k)) == null) ||
                                 (dir = compareComparables(kc, k, pk)) == 0)
                            dir = tieBreakOrder(k, pk);

                        TreeNode<K,V> xp = p;
                        if ((p = (dir <= 0) ? p.left : p.right) == null) {
                            x.parent = xp;
                            if (dir <= 0)
                                xp.left = x;
                            else
                                xp.right = x;
                            root = balanceInsertion(root, x);
                            break;
                        }
                    }
                }
            }
            moveRootToFront(tab, root);
        }

        /**
         * Returns a list of non-TreeNodes replacing those linked from
         * this node, preserving the chain order.
         */
        final Node<K,V> untreeify(HashMap<K,V> map) {
            Node<K,V> hd = null, tl = null;
            for (Node<K,V> q = this; q != null; q = q.next) {
                Node<K,V> p = map.replacementNode(q, null);
                if (tl == null)
                    hd = p;
                else
                    tl.next = p;
                tl = p;
            }
            return hd;
        }

        /**
         * Tree version of putVal. Returns the existing node for the key
         * if present (caller decides whether to overwrite its value),
         * or null after inserting a new node.
         */
        final TreeNode<K,V> putTreeVal(HashMap<K,V> map, Node<K,V>[] tab,
                                       int h, K k, V v) {
            Class<?> kc = null;
            boolean searched = false;  // full-subtree scan done at most once
            TreeNode<K,V> root = (parent != null) ? root() : this;
            for (TreeNode<K,V> p = root;;) {
                int dir, ph; K pk;
                if ((ph = p.hash) > h)
                    dir = -1;
                else if (ph < h)
                    dir = 1;
                else if ((pk = p.key) == k || (k != null && k.equals(pk)))
                    return p;
                else if ((kc == null &&
                          (kc = comparableClassFor(k)) == null) ||
                         (dir = compareComparables(kc, k, pk)) == 0) {
                    // Equal hash, not comparable: search both subtrees
                    // once for an existing equal key before inserting.
                    if (!searched) {
                        TreeNode<K,V> q, ch;
                        searched = true;
                        if (((ch = p.left) != null &&
                             (q = ch.find(h, k, kc)) != null) ||
                            ((ch = p.right) != null &&
                             (q = ch.find(h, k, kc)) != null))
                            return q;
                    }
                    dir = tieBreakOrder(k, pk);
                }

                TreeNode<K,V> xp = p;
                if ((p = (dir <= 0) ? p.left : p.right) == null) {
                    // Link the new node into both the tree and the
                    // next/prev chain (inserted right after xp).
                    Node<K,V> xpn = xp.next;
                    TreeNode<K,V> x = map.newTreeNode(h, k, v, xpn);
                    if (dir <= 0)
                        xp.left = x;
                    else
                        xp.right = x;
                    xp.next = x;
                    x.parent = x.prev = xp;
                    if (xpn != null)
                        ((TreeNode<K,V>)xpn).prev = x;
                    moveRootToFront(tab, balanceInsertion(root, x));
                    return null;
                }
            }
        }

        /**
         * Removes the given node, that must be present before this call.
         * This is messier than typical red-black deletion code because we
         * cannot swap the contents of an interior node with a leaf
         * successor that is pinned by "next" pointers that are accessible
         * independently during traversal. So instead we swap the tree
         * linkages. If the current tree appears to have too few nodes,
         * the bin is converted back to a plain bin. (The test triggers
         * somewhere between 2 and 6 nodes, depending on tree structure).
         */
        final void removeTreeNode(HashMap<K,V> map, Node<K,V>[] tab,
                                  boolean movable) {
            int n;
            if (tab == null || (n = tab.length) == 0)
                return;
            int index = (n - 1) & hash;
            TreeNode<K,V> first = (TreeNode<K,V>)tab[index], root = first, rl;
            TreeNode<K,V> succ = (TreeNode<K,V>)next, pred = prev;
            // Unlink this node from the next/prev chain first.
            if (pred == null)
                tab[index] = first = succ;
            else
                pred.next = succ;
            if (succ != null)
                succ.prev = pred;
            if (first == null)
                return;
            if (root.parent != null)
                root = root.root();
            // Shallow-tree heuristic: convert back to a plain bin.
            if (root == null
                || (movable
                    && (root.right == null
                        || (rl = root.left) == null
                        || rl.left == null))) {
                tab[index] = first.untreeify(map);  // too small
                return;
            }
            TreeNode<K,V> p = this, pl = left, pr = right, replacement;
            if (pl != null && pr != null) {
                // Two children: swap tree linkage (not contents) with
                // the in-order successor s, then delete at s's old spot.
                TreeNode<K,V> s = pr, sl;
                while ((sl = s.left) != null) // find successor
                    s = sl;
                boolean c = s.red; s.red = p.red; p.red = c; // swap colors
                TreeNode<K,V> sr = s.right;
                TreeNode<K,V> pp = p.parent;
                if (s == pr) { // p was s's direct parent
                    p.parent = s;
                    s.right = p;
                }
                else {
                    TreeNode<K,V> sp = s.parent;
                    if ((p.parent = sp) != null) {
                        if (s == sp.left)
                            sp.left = p;
                        else
                            sp.right = p;
                    }
                    if ((s.right = pr) != null)
                        pr.parent = s;
                }
                p.left = null;
                if ((p.right = sr) != null)
                    sr.parent = p;
                if ((s.left = pl) != null)
                    pl.parent = s;
                if ((s.parent = pp) == null)
                    root = s;
                else if (p == pp.left)
                    pp.left = s;
                else
                    pp.right = s;
                if (sr != null)
                    replacement = sr;
                else
                    replacement = p;
            }
            else if (pl != null)
                replacement = pl;
            else if (pr != null)
                replacement = pr;
            else
                replacement = p;   // leaf: detached after rebalancing
            if (replacement != p) {
                TreeNode<K,V> pp = replacement.parent = p.parent;
                if (pp == null)
                    root = replacement;
                else if (p == pp.left)
                    pp.left = replacement;
                else
                    pp.right = replacement;
                p.left = p.right = p.parent = null;
            }

            // Deleting a red node never violates invariants; otherwise
            // rebalance starting from the replacement.
            TreeNode<K,V> r = p.red ? root : balanceDeletion(root, replacement);

            if (replacement == p) {  // detach
                TreeNode<K,V> pp = p.parent;
                p.parent = null;
                if (pp != null) {
                    if (p == pp.left)
                        pp.left = null;
                    else if (p == pp.right)
                        pp.right = null;
                }
            }
            if (movable)
                moveRootToFront(tab, r);
        }

        /**
         * Splits nodes in a tree bin into lower and upper tree bins,
         * or untreeifies if now too small. Called only from resize;
         * see above discussion about split bits and indices.
         *
         * @param map the map
         * @param tab the table for recording bin heads
         * @param index the index of the table being split
         * @param bit the bit of hash to split on
         */
        final void split(HashMap<K,V> map, Node<K,V>[] tab, int index, int bit) {
            TreeNode<K,V> b = this;
            // Relink into lo and hi lists, preserving order
            TreeNode<K,V> loHead = null, loTail = null;
            TreeNode<K,V> hiHead = null, hiTail = null;
            int lc = 0, hc = 0;   // node counts of the two lists
            for (TreeNode<K,V> e = b, next; e != null; e = next) {
                next = (TreeNode<K,V>)e.next;
                e.next = null;
                if ((e.hash & bit) == 0) {
                    if ((e.prev = loTail) == null)
                        loHead = e;
                    else
                        loTail.next = e;
                    loTail = e;
                    ++lc;
                }
                else {
                    if ((e.prev = hiTail) == null)
                        hiHead = e;
                    else
                        hiTail.next = e;
                    hiTail = e;
                    ++hc;
                }
            }

            if (loHead != null) {
                if (lc <= UNTREEIFY_THRESHOLD)
                    tab[index] = loHead.untreeify(map);
                else {
                    tab[index] = loHead;
                    if (hiHead != null) // (else is already treeified)
                        loHead.treeify(tab);
                }
            }
            if (hiHead != null) {
                if (hc <= UNTREEIFY_THRESHOLD)
                    tab[index + bit] = hiHead.untreeify(map);
                else {
                    tab[index + bit] = hiHead;
                    if (loHead != null)
                        hiHead.treeify(tab);
                }
            }
        }

        /* ------------------------------------------------------------ */
        // Red-black tree methods, all adapted from CLR

        // Left-rotates around p; if p was the root, the new subtree
        // root is blackened and returned as the tree root.
        static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
                                              TreeNode<K,V> p) {
            TreeNode<K,V> r, pp, rl;
            if (p != null && (r = p.right) != null) {
                if ((rl = p.right = r.left) != null)
                    rl.parent = p;
                if ((pp = r.parent = p.parent) == null)
                    (root = r).red = false;
                else if (pp.left == p)
                    pp.left = r;
                else
                    pp.right = r;
                r.left = p;
                p.parent = r;
            }
            return root;
        }

        // Mirror image of rotateLeft.
        static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
                                               TreeNode<K,V> p) {
            TreeNode<K,V> l, pp, lr;
            if (p != null && (l = p.left) != null) {
                if ((lr = p.left = l.right) != null)
                    lr.parent = p;
                if ((pp = l.parent = p.parent) == null)
                    (root = l).red = false;
                else if (pp.right == p)
                    pp.right = l;
                else
                    pp.left = l;
                l.right = p;
                p.parent = l;
            }
            return root;
        }

        // Restores red-black invariants after inserting x; returns the
        // (possibly new) root.
        static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
                                                    TreeNode<K,V> x) {
            x.red = true;   // new nodes start red
            for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
                if ((xp = x.parent) == null) {
                    x.red = false;
                    return x;
                }
                else if (!xp.red || (xpp = xp.parent) == null)
                    return root;   // no red-red violation remains
                if (xp == (xppl = xpp.left)) {
                    if ((xppr = xpp.right) != null && xppr.red) {
                        // Case: uncle red -> recolor and move up.
                        xppr.red = false;
                        xp.red = false;
                        xpp.red = true;
                        x = xpp;
                    }
                    else {
                        if (x == xp.right) {
                            root = rotateLeft(root, x = xp);
                            xpp = (xp = x.parent) == null ? null : xp.parent;
                        }
                        if (xp != null) {
                            xp.red = false;
                            if (xpp != null) {
                                xpp.red = true;
                                root = rotateRight(root, xpp);
                            }
                        }
                    }
                }
                else {
                    if (xppl != null && xppl.red) {
                        xppl.red = false;
                        xp.red = false;
                        xpp.red = true;
                        x = xpp;
                    }
                    else {
                        if (x == xp.left) {
                            root = rotateRight(root, x = xp);
                            xpp = (xp = x.parent) == null ? null : xp.parent;
                        }
                        if (xp != null) {
                            xp.red = false;
                            if (xpp != null) {
                                xpp.red = true;
                                root = rotateLeft(root, xpp);
                            }
                        }
                    }
                }
            }
        }

        // Restores red-black invariants after deletion, starting from
        // replacement node x; returns the (possibly new) root.
        static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
                                                   TreeNode<K,V> x) {
            for (TreeNode<K,V> xp, xpl, xpr;;) {
                if (x == null || x == root)
                    return root;
                else if ((xp = x.parent) == null) {
                    x.red = false;
                    return x;
                }
                else if (x.red) {
                    x.red = false;  // red absorbs the missing black
                    return root;
                }
                else if ((xpl = xp.left) == x) {
                    if ((xpr = xp.right) != null && xpr.red) {
                        // Red sibling -> rotate to get a black sibling.
                        xpr.red = false;
                        xp.red = true;
                        root = rotateLeft(root, xp);
                        xpr = (xp = x.parent) == null ? null : xp.right;
                    }
                    if (xpr == null)
                        x = xp;
                    else {
                        TreeNode<K,V> sl = xpr.left, sr = xpr.right;
                        if ((sr == null || !sr.red) &&
                            (sl == null || !sl.red)) {
                            // Sibling has no red child: recolor, move up.
                            xpr.red = true;
                            x = xp;
                        }
                        else {
                            if (sr == null || !sr.red) {
                                if (sl != null)
                                    sl.red = false;
                                xpr.red = true;
                                root = rotateRight(root, xpr);
                                xpr = (xp = x.parent) == null ?
                                    null : xp.right;
                            }
                            if (xpr != null) {
                                xpr.red = (xp == null) ? false : xp.red;
                                if ((sr = xpr.right) != null)
                                    sr.red = false;
                            }
                            if (xp != null) {
                                xp.red = false;
                                root = rotateLeft(root, xp);
                            }
                            x = root;   // done
                        }
                    }
                }
                else { // symmetric
                    if (xpl != null && xpl.red) {
                        xpl.red = false;
                        xp.red = true;
                        root = rotateRight(root, xp);
                        xpl = (xp = x.parent) == null ? null : xp.left;
                    }
                    if (xpl == null)
                        x = xp;
                    else {
                        TreeNode<K,V> sl = xpl.left, sr = xpl.right;
                        if ((sl == null || !sl.red) &&
                            (sr == null || !sr.red)) {
                            xpl.red = true;
                            x = xp;
                        }
                        else {
                            if (sl == null || !sl.red) {
                                if (sr != null)
                                    sr.red = false;
                                xpl.red = true;
                                root = rotateLeft(root, xpl);
                                xpl = (xp = x.parent) == null ?
                                    null : xp.left;
                            }
                            if (xpl != null) {
                                xpl.red = (xp == null) ? false : xp.red;
                                if ((sl = xpl.left) != null)
                                    sl.red = false;
                            }
                            if (xp != null) {
                                xp.red = false;
                                root = rotateRight(root, xp);
                            }
                            x = root;
                        }
                    }
                }
            }
        }

        /**
         * Recursive invariant check: verifies next/prev chain
         * consistency, parent/child links, hash ordering, and the
         * no-two-consecutive-reds rule for the subtree rooted at t.
         */
        static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
            TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
                tb = t.prev, tn = (TreeNode<K,V>)t.next;
            if (tb != null && tb.next != t)
                return false;
            if (tn != null && tn.prev != t)
                return false;
            if (tp != null && t != tp.left && t != tp.right)
                return false;
            if (tl != null && (tl.parent != t || tl.hash > t.hash))
                return false;
            if (tr != null && (tr.parent != t || tr.hash < t.hash))
                return false;
            if (t.red && tl != null && tl.red && tr != null && tr.red)
                return false;
            if (tl != null && !checkInvariants(tl))
                return false;
            if (tr != null && !checkInvariants(tr))
                return false;
            return true;
        }
    }
2389 |
|
2390 |
} |