/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.  Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
25 |
|
26 |
package java.util; |
27 |
|
28 |
import java.io.IOException; |
29 |
import java.io.InvalidObjectException; |
30 |
import java.io.Serializable; |
31 |
import java.lang.reflect.ParameterizedType; |
32 |
import java.lang.reflect.Type; |
33 |
import java.util.function.BiConsumer; |
34 |
import java.util.function.BiFunction; |
35 |
import java.util.function.Consumer; |
36 |
import java.util.function.Function; |
37 |
// OPENJDK import jdk.internal.access.SharedSecrets; |
38 |
|
39 |
/** |
40 |
* Hash table based implementation of the {@code Map} interface. This |
41 |
* implementation provides all of the optional map operations, and permits |
42 |
* {@code null} values and the {@code null} key. (The {@code HashMap} |
43 |
* class is roughly equivalent to {@code Hashtable}, except that it is |
44 |
* unsynchronized and permits nulls.) This class makes no guarantees as to |
45 |
* the order of the map; in particular, it does not guarantee that the order |
46 |
* will remain constant over time. |
47 |
* |
48 |
* <p>This implementation provides constant-time performance for the basic |
49 |
* operations ({@code get} and {@code put}), assuming the hash function |
50 |
* disperses the elements properly among the buckets. Iteration over |
51 |
* collection views requires time proportional to the "capacity" of the |
52 |
* {@code HashMap} instance (the number of buckets) plus its size (the number |
53 |
* of key-value mappings). Thus, it's very important not to set the initial |
54 |
* capacity too high (or the load factor too low) if iteration performance is |
55 |
* important. |
56 |
* |
57 |
* <p>An instance of {@code HashMap} has two parameters that affect its |
58 |
* performance: <i>initial capacity</i> and <i>load factor</i>. The |
59 |
* <i>capacity</i> is the number of buckets in the hash table, and the initial |
60 |
* capacity is simply the capacity at the time the hash table is created. The |
61 |
* <i>load factor</i> is a measure of how full the hash table is allowed to |
62 |
* get before its capacity is automatically increased. When the number of |
63 |
* entries in the hash table exceeds the product of the load factor and the |
64 |
* current capacity, the hash table is <i>rehashed</i> (that is, internal data |
65 |
* structures are rebuilt) so that the hash table has approximately twice the |
66 |
* number of buckets. |
67 |
* |
68 |
* <p>As a general rule, the default load factor (.75) offers a good |
69 |
* tradeoff between time and space costs. Higher values decrease the |
70 |
* space overhead but increase the lookup cost (reflected in most of |
71 |
* the operations of the {@code HashMap} class, including |
72 |
* {@code get} and {@code put}). The expected number of entries in |
73 |
* the map and its load factor should be taken into account when |
74 |
* setting its initial capacity, so as to minimize the number of |
75 |
* rehash operations. If the initial capacity is greater than the |
76 |
* maximum number of entries divided by the load factor, no rehash |
77 |
* operations will ever occur. |
78 |
* |
79 |
* <p>If many mappings are to be stored in a {@code HashMap} |
80 |
* instance, creating it with a sufficiently large capacity will allow |
81 |
* the mappings to be stored more efficiently than letting it perform |
82 |
* automatic rehashing as needed to grow the table. Note that using |
83 |
* many keys with the same {@code hashCode()} is a sure way to slow |
84 |
* down performance of any hash table. To ameliorate impact, when keys |
85 |
* are {@link Comparable}, this class may use comparison order among |
86 |
* keys to help break ties. |
87 |
* |
88 |
* <p><strong>Note that this implementation is not synchronized.</strong> |
89 |
* If multiple threads access a hash map concurrently, and at least one of |
90 |
* the threads modifies the map structurally, it <i>must</i> be |
91 |
* synchronized externally. (A structural modification is any operation |
92 |
* that adds or deletes one or more mappings; merely changing the value |
93 |
* associated with a key that an instance already contains is not a |
94 |
* structural modification.) This is typically accomplished by |
95 |
* synchronizing on some object that naturally encapsulates the map. |
96 |
* |
97 |
* If no such object exists, the map should be "wrapped" using the |
98 |
* {@link Collections#synchronizedMap Collections.synchronizedMap} |
99 |
* method. This is best done at creation time, to prevent accidental |
100 |
* unsynchronized access to the map:<pre> |
101 |
* Map m = Collections.synchronizedMap(new HashMap(...));</pre> |
102 |
* |
103 |
* <p>The iterators returned by all of this class's "collection view methods" |
104 |
* are <i>fail-fast</i>: if the map is structurally modified at any time after |
105 |
* the iterator is created, in any way except through the iterator's own |
106 |
* {@code remove} method, the iterator will throw a |
107 |
* {@link ConcurrentModificationException}. Thus, in the face of concurrent |
108 |
* modification, the iterator fails quickly and cleanly, rather than risking |
109 |
* arbitrary, non-deterministic behavior at an undetermined time in the |
110 |
* future. |
111 |
* |
112 |
* <p>Note that the fail-fast behavior of an iterator cannot be guaranteed |
113 |
* as it is, generally speaking, impossible to make any hard guarantees in the |
114 |
* presence of unsynchronized concurrent modification. Fail-fast iterators |
115 |
* throw {@code ConcurrentModificationException} on a best-effort basis. |
116 |
* Therefore, it would be wrong to write a program that depended on this |
117 |
* exception for its correctness: <i>the fail-fast behavior of iterators |
118 |
* should be used only to detect bugs.</i> |
119 |
* |
120 |
* <p>This class is a member of the |
121 |
* <a href="{@docRoot}/java.base/java/util/package-summary.html#CollectionsFramework"> |
122 |
* Java Collections Framework</a>. |
123 |
* |
124 |
* @param <K> the type of keys maintained by this map |
125 |
* @param <V> the type of mapped values |
126 |
* |
127 |
* @author Doug Lea |
128 |
* @author Josh Bloch |
129 |
* @author Arthur van Hoff |
130 |
* @author Neal Gafter |
131 |
* @see Object#hashCode() |
132 |
* @see Collection |
133 |
* @see Map |
134 |
* @see TreeMap |
135 |
* @see Hashtable |
136 |
* @since 1.2 |
137 |
*/ |
138 |
public class HashMap<K,V> extends AbstractMap<K,V>
    implements Map<K,V>, Cloneable, Serializable {

    // Serialization version identifier; kept constant so serialized
    // maps remain compatible across implementation changes.
    private static final long serialVersionUID = 362498820763181265L;
142 |
|
143 |
/* |
144 |
* Implementation notes. |
145 |
* |
146 |
* This map usually acts as a binned (bucketed) hash table, but |
147 |
* when bins get too large, they are transformed into bins of |
148 |
* TreeNodes, each structured similarly to those in |
149 |
* java.util.TreeMap. Most methods try to use normal bins, but |
150 |
* relay to TreeNode methods when applicable (simply by checking |
151 |
* instanceof a node). Bins of TreeNodes may be traversed and |
152 |
* used like any others, but additionally support faster lookup |
153 |
* when overpopulated. However, since the vast majority of bins in |
154 |
* normal use are not overpopulated, checking for existence of |
155 |
* tree bins may be delayed in the course of table methods. |
156 |
* |
157 |
* Tree bins (i.e., bins whose elements are all TreeNodes) are |
158 |
* ordered primarily by hashCode, but in the case of ties, if two |
159 |
* elements are of the same "class C implements Comparable<C>", |
160 |
* type then their compareTo method is used for ordering. (We |
161 |
* conservatively check generic types via reflection to validate |
162 |
* this -- see method comparableClassFor). The added complexity |
163 |
* of tree bins is worthwhile in providing worst-case O(log n) |
164 |
* operations when keys either have distinct hashes or are |
165 |
* orderable, Thus, performance degrades gracefully under |
166 |
* accidental or malicious usages in which hashCode() methods |
167 |
* return values that are poorly distributed, as well as those in |
168 |
* which many keys share a hashCode, so long as they are also |
169 |
* Comparable. (If neither of these apply, we may waste about a |
170 |
* factor of two in time and space compared to taking no |
171 |
* precautions. But the only known cases stem from poor user |
172 |
* programming practices that are already so slow that this makes |
173 |
* little difference.) |
174 |
* |
175 |
* Because TreeNodes are about twice the size of regular nodes, we |
176 |
* use them only when bins contain enough nodes to warrant use |
177 |
* (see TREEIFY_THRESHOLD). And when they become too small (due to |
178 |
* removal or resizing) they are converted back to plain bins. In |
179 |
* usages with well-distributed user hashCodes, tree bins are |
180 |
* rarely used. Ideally, under random hashCodes, the frequency of |
181 |
* nodes in bins follows a Poisson distribution |
182 |
* (http://en.wikipedia.org/wiki/Poisson_distribution) with a |
183 |
* parameter of about 0.5 on average for the default resizing |
184 |
* threshold of 0.75, although with a large variance because of |
185 |
* resizing granularity. Ignoring variance, the expected |
186 |
* occurrences of list size k are (exp(-0.5) * pow(0.5, k) / |
187 |
* factorial(k)). The first values are: |
188 |
* |
189 |
* 0: 0.60653066 |
190 |
* 1: 0.30326533 |
191 |
* 2: 0.07581633 |
192 |
* 3: 0.01263606 |
193 |
* 4: 0.00157952 |
194 |
* 5: 0.00015795 |
195 |
* 6: 0.00001316 |
196 |
* 7: 0.00000094 |
197 |
* 8: 0.00000006 |
198 |
* more: less than 1 in ten million |
199 |
* |
200 |
* The root of a tree bin is normally its first node. However, |
201 |
* sometimes (currently only upon Iterator.remove), the root might |
202 |
* be elsewhere, but can be recovered following parent links |
203 |
* (method TreeNode.root()). |
204 |
* |
205 |
* All applicable internal methods accept a hash code as an |
206 |
* argument (as normally supplied from a public method), allowing |
207 |
* them to call each other without recomputing user hashCodes. |
208 |
* Most internal methods also accept a "tab" argument, that is |
209 |
* normally the current table, but may be a new or old one when |
210 |
* resizing or converting. |
211 |
* |
212 |
* When bin lists are treeified, split, or untreeified, we keep |
213 |
* them in the same relative access/traversal order (i.e., field |
214 |
* Node.next) to better preserve locality, and to slightly |
215 |
* simplify handling of splits and traversals that invoke |
216 |
* iterator.remove. When using comparators on insertion, to keep a |
217 |
* total ordering (or as close as is required here) across |
218 |
* rebalancings, we compare classes and identityHashCodes as |
219 |
* tie-breakers. |
220 |
* |
221 |
* The use and transitions among plain vs tree modes is |
222 |
* complicated by the existence of subclass LinkedHashMap. See |
223 |
* below for hook methods defined to be invoked upon insertion, |
224 |
* removal and access that allow LinkedHashMap internals to |
225 |
* otherwise remain independent of these mechanics. (This also |
226 |
* requires that a map instance be passed to some utility methods |
227 |
* that may create new nodes.) |
228 |
* |
229 |
* The concurrent-programming-like SSA-based coding style helps |
230 |
* avoid aliasing errors amid all of the twisty pointer operations. |
231 |
*/ |
232 |
|
233 |
    /**
     * The default initial capacity - MUST be a power of two.
     */
    static final int DEFAULT_INITIAL_CAPACITY = 1 << 4; // aka 16

    /**
     * The maximum capacity, used if a higher value is implicitly specified
     * by either of the constructors with arguments.
     * MUST be a power of two <= 1<<30.
     */
    static final int MAXIMUM_CAPACITY = 1 << 30;

    /**
     * The load factor used when none specified in constructor.
     */
    static final float DEFAULT_LOAD_FACTOR = 0.75f;

    /**
     * The bin count threshold for using a tree rather than list for a
     * bin.  Bins are converted to trees when adding an element to a
     * bin with at least this many nodes.  The value must be greater
     * than 2 and should be at least 8 to mesh with assumptions in
     * tree removal about conversion back to plain bins upon
     * shrinkage.
     */
    static final int TREEIFY_THRESHOLD = 8;

    /**
     * The bin count threshold for untreeifying a (split) bin during a
     * resize operation.  Should be less than TREEIFY_THRESHOLD, and at
     * most 6 to mesh with shrinkage detection under removal.
     */
    static final int UNTREEIFY_THRESHOLD = 6;

    /**
     * The smallest table capacity for which bins may be treeified.
     * (Otherwise the table is resized if too many nodes in a bin.)
     * Should be at least 4 * TREEIFY_THRESHOLD to avoid conflicts
     * between resizing and treeification thresholds.
     */
    static final int MIN_TREEIFY_CAPACITY = 64;
274 |
|
275 |
    /**
     * Basic hash bin node, used for most entries.  (See below for
     * TreeNode subclass, and in LinkedHashMap for its Entry subclass.)
     */
    static class Node<K,V> implements Map.Entry<K,V> {
        final int hash;     // cached spread hash of the key; never recomputed
        final K key;
        V value;
        Node<K,V> next;     // next node in the same bucket's chain

        Node(int hash, K key, V value, Node<K,V> next) {
            this.hash = hash;
            this.key = key;
            this.value = value;
            this.next = next;
        }

        public final K getKey()        { return key; }
        public final V getValue()      { return value; }
        public final String toString() { return key + "=" + value; }

        // Per the Map.Entry contract: XOR of key and value hash codes
        // (Objects.hashCode maps null to 0).
        public final int hashCode() {
            return Objects.hashCode(key) ^ Objects.hashCode(value);
        }

        public final V setValue(V newValue) {
            V oldValue = value;
            value = newValue;
            return oldValue;
        }

        // Per the Map.Entry contract: equal to any Map.Entry with an
        // equal key and an equal value.
        public final boolean equals(Object o) {
            if (o == this)
                return true;
            if (o instanceof Map.Entry) {
                Map.Entry<?,?> e = (Map.Entry<?,?>)o;
                if (Objects.equals(key, e.getKey()) &&
                    Objects.equals(value, e.getValue()))
                    return true;
            }
            return false;
        }
    }
318 |
|
319 |
/* ---------------- Static utilities -------------- */ |
320 |
|
321 |
/** |
322 |
* Computes key.hashCode() and spreads (XORs) higher bits of hash |
323 |
* to lower. Because the table uses power-of-two masking, sets of |
324 |
* hashes that vary only in bits above the current mask will |
325 |
* always collide. (Among known examples are sets of Float keys |
326 |
* holding consecutive whole numbers in small tables.) So we |
327 |
* apply a transform that spreads the impact of higher bits |
328 |
* downward. There is a tradeoff between speed, utility, and |
329 |
* quality of bit-spreading. Because many common sets of hashes |
330 |
* are already reasonably distributed (so don't benefit from |
331 |
* spreading), and because we use trees to handle large sets of |
332 |
* collisions in bins, we just XOR some shifted bits in the |
333 |
* cheapest possible way to reduce systematic lossage, as well as |
334 |
* to incorporate impact of the highest bits that would otherwise |
335 |
* never be used in index calculations because of table bounds. |
336 |
*/ |
337 |
static final int hash(Object key) { |
338 |
int h; |
339 |
return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16); |
340 |
} |
341 |
|
342 |
/** |
343 |
* Returns x's Class if it is of the form "class C implements |
344 |
* Comparable<C>", else null. |
345 |
*/ |
346 |
static Class<?> comparableClassFor(Object x) { |
347 |
if (x instanceof Comparable) { |
348 |
Class<?> c; Type[] ts, as; ParameterizedType p; |
349 |
if ((c = x.getClass()) == String.class) // bypass checks |
350 |
return c; |
351 |
if ((ts = c.getGenericInterfaces()) != null) { |
352 |
for (Type t : ts) { |
353 |
if ((t instanceof ParameterizedType) && |
354 |
((p = (ParameterizedType) t).getRawType() == |
355 |
Comparable.class) && |
356 |
(as = p.getActualTypeArguments()) != null && |
357 |
as.length == 1 && as[0] == c) // type arg is c |
358 |
return c; |
359 |
} |
360 |
} |
361 |
} |
362 |
return null; |
363 |
} |
364 |
|
365 |
/** |
366 |
* Returns k.compareTo(x) if x matches kc (k's screened comparable |
367 |
* class), else 0. |
368 |
*/ |
369 |
@SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable |
370 |
static int compareComparables(Class<?> kc, Object k, Object x) { |
371 |
return (x == null || x.getClass() != kc ? 0 : |
372 |
((Comparable)k).compareTo(x)); |
373 |
} |
374 |
|
375 |
/** |
376 |
* Returns a power of two size for the given target capacity. |
377 |
*/ |
378 |
static final int tableSizeFor(int cap) { |
379 |
int n = -1 >>> Integer.numberOfLeadingZeros(cap - 1); |
380 |
return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1; |
381 |
} |
382 |
|
383 |
    /* ---------------- Fields -------------- */

    /**
     * The table, initialized on first use, and resized as
     * necessary.  When allocated, length is always a power of two.
     * (We also tolerate length zero in some operations to allow
     * bootstrapping mechanics that are currently not needed.)
     */
    transient Node<K,V>[] table;

    /**
     * Holds cached entrySet().  Note that AbstractMap fields are used
     * for keySet() and values().
     */
    transient Set<Map.Entry<K,V>> entrySet;

    /**
     * The number of key-value mappings contained in this map.
     */
    transient int size;

    /**
     * The number of times this HashMap has been structurally modified.
     * Structural modifications are those that change the number of mappings in
     * the HashMap or otherwise modify its internal structure (e.g.,
     * rehash).  This field is used to make iterators on Collection-views of
     * the HashMap fail-fast.  (See ConcurrentModificationException).
     */
    transient int modCount;

    /**
     * The next size value at which to resize (capacity * load factor).
     *
     * @serial
     */
    // (The javadoc description is true upon serialization.
    // Additionally, if the table array has not been allocated, this
    // field holds the initial array capacity, or zero signifying
    // DEFAULT_INITIAL_CAPACITY.)
    int threshold;

    /**
     * The load factor for the hash table.
     *
     * @serial
     */
    final float loadFactor;
430 |
|
431 |
/* ---------------- Public operations -------------- */ |
432 |
|
433 |
/** |
434 |
* Constructs an empty {@code HashMap} with the specified initial |
435 |
* capacity and load factor. |
436 |
* |
437 |
* @param initialCapacity the initial capacity |
438 |
* @param loadFactor the load factor |
439 |
* @throws IllegalArgumentException if the initial capacity is negative |
440 |
* or the load factor is nonpositive |
441 |
*/ |
442 |
public HashMap(int initialCapacity, float loadFactor) { |
443 |
if (initialCapacity < 0) |
444 |
throw new IllegalArgumentException("Illegal initial capacity: " + |
445 |
initialCapacity); |
446 |
if (initialCapacity > MAXIMUM_CAPACITY) |
447 |
initialCapacity = MAXIMUM_CAPACITY; |
448 |
if (loadFactor <= 0 || Float.isNaN(loadFactor)) |
449 |
throw new IllegalArgumentException("Illegal load factor: " + |
450 |
loadFactor); |
451 |
this.loadFactor = loadFactor; |
452 |
this.threshold = tableSizeFor(initialCapacity); |
453 |
} |
454 |
|
455 |
    /**
     * Constructs an empty {@code HashMap} with the specified initial
     * capacity and the default load factor (0.75).
     *
     * @param initialCapacity the initial capacity.
     * @throws IllegalArgumentException if the initial capacity is negative.
     */
    public HashMap(int initialCapacity) {
        // Delegates to the two-argument constructor for validation/clamping.
        this(initialCapacity, DEFAULT_LOAD_FACTOR);
    }
465 |
|
466 |
    /**
     * Constructs an empty {@code HashMap} with the default initial capacity
     * (16) and the default load factor (0.75).
     */
    public HashMap() {
        // The table itself is allocated lazily, on first insertion.
        this.loadFactor = DEFAULT_LOAD_FACTOR; // all other fields defaulted
    }
473 |
|
474 |
    /**
     * Constructs a new {@code HashMap} with the same mappings as the
     * specified {@code Map}.  The {@code HashMap} is created with
     * default load factor (0.75) and an initial capacity sufficient to
     * hold the mappings in the specified {@code Map}.
     *
     * @param m the map whose mappings are to be placed in this map
     * @throws NullPointerException if the specified map is null
     */
    public HashMap(Map<? extends K, ? extends V> m) {
        this.loadFactor = DEFAULT_LOAD_FACTOR;
        // evict == false: table is in creation mode (LinkedHashMap hook).
        putMapEntries(m, false);
    }
487 |
|
488 |
    /**
     * Implements Map.putAll and Map constructor.
     *
     * @param m the map
     * @param evict false when initially constructing this map, else
     * true (relayed to method afterNodeInsertion).
     */
    final void putMapEntries(Map<? extends K, ? extends V> m, boolean evict) {
        int s = m.size();
        if (s > 0) {
            if (table == null) { // pre-size
                // Pick a threshold large enough that inserting s entries
                // will not trigger a resize; the +1.0F compensates for the
                // truncation in the float division below.
                float ft = ((float)s / loadFactor) + 1.0F;
                int t = ((ft < (float)MAXIMUM_CAPACITY) ?
                         (int)ft : MAXIMUM_CAPACITY);
                if (t > threshold)
                    threshold = tableSizeFor(t);
            } else {
                // Because of linked-list bucket constraints, we cannot
                // expand all at once, but can reduce total resize
                // effort by repeated doubling now vs later
                while (s > threshold && table.length < MAXIMUM_CAPACITY)
                    resize();
            }

            for (Map.Entry<? extends K, ? extends V> e : m.entrySet()) {
                K key = e.getKey();
                V value = e.getValue();
                putVal(hash(key), key, value, false, evict);
            }
        }
    }
519 |
|
520 |
    /**
     * Returns the number of key-value mappings in this map.
     *
     * @return the number of key-value mappings in this map
     */
    public int size() {
        // Maintained incrementally on every structural change; O(1).
        return size;
    }
528 |
|
529 |
    /**
     * Returns {@code true} if this map contains no key-value mappings.
     *
     * @return {@code true} if this map contains no key-value mappings
     */
    public boolean isEmpty() {
        // size is kept up to date incrementally, so this is O(1).
        return size == 0;
    }
537 |
|
538 |
/** |
539 |
* Returns the value to which the specified key is mapped, |
540 |
* or {@code null} if this map contains no mapping for the key. |
541 |
* |
542 |
* <p>More formally, if this map contains a mapping from a key |
543 |
* {@code k} to a value {@code v} such that {@code (key==null ? k==null : |
544 |
* key.equals(k))}, then this method returns {@code v}; otherwise |
545 |
* it returns {@code null}. (There can be at most one such mapping.) |
546 |
* |
547 |
* <p>A return value of {@code null} does not <i>necessarily</i> |
548 |
* indicate that the map contains no mapping for the key; it's also |
549 |
* possible that the map explicitly maps the key to {@code null}. |
550 |
* The {@link #containsKey containsKey} operation may be used to |
551 |
* distinguish these two cases. |
552 |
* |
553 |
* @see #put(Object, Object) |
554 |
*/ |
555 |
public V get(Object key) { |
556 |
Node<K,V> e; |
557 |
return (e = getNode(hash(key), key)) == null ? null : e.value; |
558 |
} |
559 |
|
560 |
    /**
     * Implements Map.get and related methods.
     *
     * @param hash hash for key
     * @param key the key
     * @return the node, or null if none
     */
    final Node<K,V> getNode(int hash, Object key) {
        Node<K,V>[] tab; Node<K,V> first, e; int n; K k;
        // (n - 1) & hash selects the bucket; n is always a power of two.
        if ((tab = table) != null && (n = tab.length) > 0 &&
            (first = tab[(n - 1) & hash]) != null) {
            if (first.hash == hash && // always check first node
                ((k = first.key) == key || (key != null && key.equals(k))))
                return first;
            if ((e = first.next) != null) {
                if (first instanceof TreeNode)
                    // Treeified bin: delegate to the tree lookup.
                    return ((TreeNode<K,V>)first).getTreeNode(hash, key);
                // Plain bin: linear scan along the chain, comparing the
                // cached hash first to avoid unnecessary equals() calls.
                do {
                    if (e.hash == hash &&
                        ((k = e.key) == key || (key != null && key.equals(k))))
                        return e;
                } while ((e = e.next) != null);
            }
        }
        return null;
    }
586 |
|
587 |
/** |
588 |
* Returns {@code true} if this map contains a mapping for the |
589 |
* specified key. |
590 |
* |
591 |
* @param key The key whose presence in this map is to be tested |
592 |
* @return {@code true} if this map contains a mapping for the specified |
593 |
* key. |
594 |
*/ |
595 |
public boolean containsKey(Object key) { |
596 |
return getNode(hash(key), key) != null; |
597 |
} |
598 |
|
599 |
    /**
     * Associates the specified value with the specified key in this map.
     * If the map previously contained a mapping for the key, the old
     * value is replaced.
     *
     * @param key key with which the specified value is to be associated
     * @param value value to be associated with the specified key
     * @return the previous value associated with {@code key}, or
     *         {@code null} if there was no mapping for {@code key}.
     *         (A {@code null} return can also indicate that the map
     *         previously associated {@code null} with {@code key}.)
     */
    public V put(K key, V value) {
        // onlyIfAbsent == false: always overwrite; evict == true: normal mode.
        return putVal(hash(key), key, value, false, true);
    }
614 |
|
615 |
    /**
     * Implements Map.put and related methods.
     *
     * @param hash hash for key
     * @param key the key
     * @param value the value to put
     * @param onlyIfAbsent if true, don't change existing value
     * @param evict if false, the table is in creation mode.
     * @return previous value, or null if none
     */
    final V putVal(int hash, K key, V value, boolean onlyIfAbsent,
                   boolean evict) {
        Node<K,V>[] tab; Node<K,V> p; int n, i;
        // Lazily allocate the table on first insertion.
        if ((tab = table) == null || (n = tab.length) == 0)
            n = (tab = resize()).length;
        if ((p = tab[i = (n - 1) & hash]) == null)
            // Empty bin: install a fresh node directly.
            tab[i] = newNode(hash, key, value, null);
        else {
            Node<K,V> e; K k;
            if (p.hash == hash &&
                ((k = p.key) == key || (key != null && key.equals(k))))
                e = p; // bin head already holds the key
            else if (p instanceof TreeNode)
                // Treeified bin: delegate insertion/lookup to the tree.
                e = ((TreeNode<K,V>)p).putTreeVal(this, tab, hash, key, value);
            else {
                // Chain bin: walk the list; append if the key is absent.
                for (int binCount = 0; ; ++binCount) {
                    if ((e = p.next) == null) {
                        p.next = newNode(hash, key, value, null);
                        if (binCount >= TREEIFY_THRESHOLD - 1) // -1 for 1st
                            treeifyBin(tab, hash);
                        break;
                    }
                    if (e.hash == hash &&
                        ((k = e.key) == key || (key != null && key.equals(k))))
                        break;
                    p = e;
                }
            }
            if (e != null) { // existing mapping for key
                V oldValue = e.value;
                if (!onlyIfAbsent || oldValue == null)
                    e.value = value;
                afterNodeAccess(e); // LinkedHashMap hook
                // Replacing a value is not a structural modification:
                // no modCount / size / threshold updates.
                return oldValue;
            }
        }
        ++modCount;
        if (++size > threshold)
            resize();
        afterNodeInsertion(evict); // LinkedHashMap hook
        return null;
    }
667 |
|
668 |
    /**
     * Initializes or doubles table size.  If null, allocates in
     * accord with initial capacity target held in field threshold.
     * Otherwise, because we are using power-of-two expansion, the
     * elements from each bin must either stay at same index, or move
     * with a power of two offset in the new table.
     *
     * @return the table
     */
    final Node<K,V>[] resize() {
        Node<K,V>[] oldTab = table;
        int oldCap = (oldTab == null) ? 0 : oldTab.length;
        int oldThr = threshold;
        int newCap, newThr = 0;
        if (oldCap > 0) {
            if (oldCap >= MAXIMUM_CAPACITY) {
                // Cannot grow further; disable future resize attempts.
                threshold = Integer.MAX_VALUE;
                return oldTab;
            }
            else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY &&
                     oldCap >= DEFAULT_INITIAL_CAPACITY)
                newThr = oldThr << 1; // double threshold
        }
        else if (oldThr > 0) // initial capacity was placed in threshold
            newCap = oldThr;
        else {               // zero initial threshold signifies using defaults
            newCap = DEFAULT_INITIAL_CAPACITY;
            newThr = (int)(DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY);
        }
        if (newThr == 0) {
            // threshold = capacity * loadFactor, with overflow clamping.
            float ft = (float)newCap * loadFactor;
            newThr = (newCap < MAXIMUM_CAPACITY && ft < (float)MAXIMUM_CAPACITY ?
                      (int)ft : Integer.MAX_VALUE);
        }
        threshold = newThr;
        @SuppressWarnings({"rawtypes","unchecked"})
        Node<K,V>[] newTab = (Node<K,V>[])new Node[newCap];
        table = newTab;
        if (oldTab != null) {
            for (int j = 0; j < oldCap; ++j) {
                Node<K,V> e;
                if ((e = oldTab[j]) != null) {
                    oldTab[j] = null; // aid GC
                    if (e.next == null)
                        // Singleton bin: move the lone node directly.
                        newTab[e.hash & (newCap - 1)] = e;
                    else if (e instanceof TreeNode)
                        // Tree bin: split into lo/hi halves (may untreeify).
                        ((TreeNode<K,V>)e).split(this, newTab, j, oldCap);
                    else { // preserve order
                        // Chain bin: partition into the "lo" list (stays at
                        // index j) and the "hi" list (moves to j + oldCap),
                        // decided by the single newly-significant hash bit.
                        Node<K,V> loHead = null, loTail = null;
                        Node<K,V> hiHead = null, hiTail = null;
                        Node<K,V> next;
                        do {
                            next = e.next;
                            if ((e.hash & oldCap) == 0) {
                                if (loTail == null)
                                    loHead = e;
                                else
                                    loTail.next = e;
                                loTail = e;
                            }
                            else {
                                if (hiTail == null)
                                    hiHead = e;
                                else
                                    hiTail.next = e;
                                hiTail = e;
                            }
                        } while ((e = next) != null);
                        if (loTail != null) {
                            loTail.next = null;
                            newTab[j] = loHead;
                        }
                        if (hiTail != null) {
                            hiTail.next = null;
                            newTab[j + oldCap] = hiHead;
                        }
                    }
                }
            }
        }
        return newTab;
    }
750 |
|
751 |
    /**
     * Replaces all linked nodes in bin at index for given hash unless
     * table is too small, in which case resizes instead.
     */
    final void treeifyBin(Node<K,V>[] tab, int hash) {
        int n, index; Node<K,V> e;
        if (tab == null || (n = tab.length) < MIN_TREEIFY_CAPACITY)
            // Below MIN_TREEIFY_CAPACITY, growing the table is preferred,
            // since resizing redistributes the overpopulated bin anyway.
            resize();
        else if ((e = tab[index = (n - 1) & hash]) != null) {
            // First pass: replace each Node with a TreeNode, keeping the
            // original chain order via next/prev links.
            TreeNode<K,V> hd = null, tl = null;
            do {
                TreeNode<K,V> p = replacementTreeNode(e, null);
                if (tl == null)
                    hd = p;
                else {
                    p.prev = tl;
                    tl.next = p;
                }
                tl = p;
            } while ((e = e.next) != null);
            // Second pass: build the tree structure from the linked list.
            if ((tab[index] = hd) != null)
                hd.treeify(tab);
        }
    }
775 |
|
776 |
/**
 * Copies all of the mappings from the specified map to this map.
 * These mappings will replace any mappings that this map had for
 * any of the keys currently in the specified map.
 *
 * @param m mappings to be stored in this map
 * @throws NullPointerException if the specified map is null
 */
public void putAll(Map<? extends K, ? extends V> m) {
    // Delegate to the shared bulk-insert helper; evict=true because
    // this is a post-construction modification (LinkedHashMap hook).
    putMapEntries(m, true);
}
787 |
|
788 |
/** |
789 |
* Removes the mapping for the specified key from this map if present. |
790 |
* |
791 |
* @param key key whose mapping is to be removed from the map |
792 |
* @return the previous value associated with {@code key}, or |
793 |
* {@code null} if there was no mapping for {@code key}. |
794 |
* (A {@code null} return can also indicate that the map |
795 |
* previously associated {@code null} with {@code key}.) |
796 |
*/ |
797 |
public V remove(Object key) { |
798 |
Node<K,V> e; |
799 |
return (e = removeNode(hash(key), key, null, false, true)) == null ? |
800 |
null : e.value; |
801 |
} |
802 |
|
803 |
/**
 * Implements Map.remove and related methods.
 *
 * @param hash hash for key
 * @param key the key
 * @param value the value to match if matchValue, else ignored
 * @param matchValue if true only remove if value is equal
 * @param movable if false do not move other nodes while removing
 * @return the node, or null if none
 */
final Node<K,V> removeNode(int hash, Object key, Object value,
                           boolean matchValue, boolean movable) {
    Node<K,V>[] tab; Node<K,V> p; int n, index;
    if ((tab = table) != null && (n = tab.length) > 0 &&
        (p = tab[index = (n - 1) & hash]) != null) {
        Node<K,V> node = null, e; K k; V v;
        // Locate the node: check the bin head, then either the tree
        // or a linear scan of the list.
        if (p.hash == hash &&
            ((k = p.key) == key || (key != null && key.equals(k))))
            node = p;   // head of bin matches
        else if ((e = p.next) != null) {
            if (p instanceof TreeNode)
                node = ((TreeNode<K,V>)p).getTreeNode(hash, key);
            else {
                do {
                    if (e.hash == hash &&
                        ((k = e.key) == key ||
                         (key != null && key.equals(k)))) {
                        node = e;
                        break;
                    }
                    p = e;   // p trails e: the predecessor, needed to unlink
                } while ((e = e.next) != null);
            }
        }
        // Remove only if found and, when matchValue, the stored value
        // equals the given one (null-safe comparison).
        if (node != null && (!matchValue || (v = node.value) == value ||
                             (value != null && value.equals(v)))) {
            if (node instanceof TreeNode)
                ((TreeNode<K,V>)node).removeTreeNode(this, tab, movable);
            else if (node == p)
                tab[index] = node.next;   // removing the bin head
            else
                p.next = node.next;       // unlink an interior node
            ++modCount;                   // structural modification
            --size;
            afterNodeRemoval(node);       // LinkedHashMap callback
            return node;
        }
    }
    return null;
}
853 |
|
854 |
/** |
855 |
* Removes all of the mappings from this map. |
856 |
* The map will be empty after this call returns. |
857 |
*/ |
858 |
public void clear() { |
859 |
Node<K,V>[] tab; |
860 |
modCount++; |
861 |
if ((tab = table) != null && size > 0) { |
862 |
size = 0; |
863 |
for (int i = 0; i < tab.length; ++i) |
864 |
tab[i] = null; |
865 |
} |
866 |
} |
867 |
|
868 |
/** |
869 |
* Returns {@code true} if this map maps one or more keys to the |
870 |
* specified value. |
871 |
* |
872 |
* @param value value whose presence in this map is to be tested |
873 |
* @return {@code true} if this map maps one or more keys to the |
874 |
* specified value |
875 |
*/ |
876 |
public boolean containsValue(Object value) { |
877 |
Node<K,V>[] tab; V v; |
878 |
if ((tab = table) != null && size > 0) { |
879 |
for (Node<K,V> e : tab) { |
880 |
for (; e != null; e = e.next) { |
881 |
if ((v = e.value) == value || |
882 |
(value != null && value.equals(v))) |
883 |
return true; |
884 |
} |
885 |
} |
886 |
} |
887 |
return false; |
888 |
} |
889 |
|
890 |
/** |
891 |
* Returns a {@link Set} view of the keys contained in this map. |
892 |
* The set is backed by the map, so changes to the map are |
893 |
* reflected in the set, and vice-versa. If the map is modified |
894 |
* while an iteration over the set is in progress (except through |
895 |
* the iterator's own {@code remove} operation), the results of |
896 |
* the iteration are undefined. The set supports element removal, |
897 |
* which removes the corresponding mapping from the map, via the |
898 |
* {@code Iterator.remove}, {@code Set.remove}, |
899 |
* {@code removeAll}, {@code retainAll}, and {@code clear} |
900 |
* operations. It does not support the {@code add} or {@code addAll} |
901 |
* operations. |
902 |
* |
903 |
* @return a set view of the keys contained in this map |
904 |
*/ |
905 |
public Set<K> keySet() { |
906 |
Set<K> ks = keySet; |
907 |
if (ks == null) { |
908 |
ks = new KeySet(); |
909 |
keySet = ks; |
910 |
} |
911 |
return ks; |
912 |
} |
913 |
|
914 |
// Live view of the map's keys; all operations delegate to the
// enclosing HashMap, so structural changes are visible both ways.
final class KeySet extends AbstractSet<K> {
    public final int size()                 { return size; }
    public final void clear()               { HashMap.this.clear(); }
    public final Iterator<K> iterator()     { return new KeyIterator(); }
    public final boolean contains(Object o) { return containsKey(o); }
    public final boolean remove(Object key) {
        // matchValue=false: remove by key regardless of stored value
        return removeNode(hash(key), key, null, false, true) != null;
    }
    public final Spliterator<K> spliterator() {
        // fence=-1: late-binding; fence/est are set on first use
        return new KeySpliterator<>(HashMap.this, 0, -1, 0, 0);
    }
    public final void forEach(Consumer<? super K> action) {
        Node<K,V>[] tab;
        if (action == null)
            throw new NullPointerException();
        if (size > 0 && (tab = table) != null) {
            int mc = modCount;
            for (Node<K,V> e : tab) {
                for (; e != null; e = e.next)
                    action.accept(e.key);
            }
            // Best-effort fail-fast: detect structural modification
            // by the action after the full traversal.
            if (modCount != mc)
                throw new ConcurrentModificationException();
        }
    }
}
940 |
|
941 |
/** |
942 |
* Returns a {@link Collection} view of the values contained in this map. |
943 |
* The collection is backed by the map, so changes to the map are |
944 |
* reflected in the collection, and vice-versa. If the map is |
945 |
* modified while an iteration over the collection is in progress |
946 |
* (except through the iterator's own {@code remove} operation), |
947 |
* the results of the iteration are undefined. The collection |
948 |
* supports element removal, which removes the corresponding |
949 |
* mapping from the map, via the {@code Iterator.remove}, |
950 |
* {@code Collection.remove}, {@code removeAll}, |
951 |
* {@code retainAll} and {@code clear} operations. It does not |
952 |
* support the {@code add} or {@code addAll} operations. |
953 |
* |
954 |
* @return a view of the values contained in this map |
955 |
*/ |
956 |
public Collection<V> values() { |
957 |
Collection<V> vs = values; |
958 |
if (vs == null) { |
959 |
vs = new Values(); |
960 |
values = vs; |
961 |
} |
962 |
return vs; |
963 |
} |
964 |
|
965 |
// Live view of the map's values; all operations delegate to the
// enclosing HashMap, so structural changes are visible both ways.
final class Values extends AbstractCollection<V> {
    public final int size()                 { return size; }
    public final void clear()               { HashMap.this.clear(); }
    public final Iterator<V> iterator()     { return new ValueIterator(); }
    public final boolean contains(Object o) { return containsValue(o); }
    public final Spliterator<V> spliterator() {
        // fence=-1: late-binding; fence/est are set on first use
        return new ValueSpliterator<>(HashMap.this, 0, -1, 0, 0);
    }
    public final void forEach(Consumer<? super V> action) {
        Node<K,V>[] tab;
        if (action == null)
            throw new NullPointerException();
        if (size > 0 && (tab = table) != null) {
            int mc = modCount;
            for (Node<K,V> e : tab) {
                for (; e != null; e = e.next)
                    action.accept(e.value);
            }
            // Best-effort fail-fast after the full traversal.
            if (modCount != mc)
                throw new ConcurrentModificationException();
        }
    }
}
988 |
|
989 |
/** |
990 |
* Returns a {@link Set} view of the mappings contained in this map. |
991 |
* The set is backed by the map, so changes to the map are |
992 |
* reflected in the set, and vice-versa. If the map is modified |
993 |
* while an iteration over the set is in progress (except through |
994 |
* the iterator's own {@code remove} operation, or through the |
995 |
* {@code setValue} operation on a map entry returned by the |
996 |
* iterator) the results of the iteration are undefined. The set |
997 |
* supports element removal, which removes the corresponding |
998 |
* mapping from the map, via the {@code Iterator.remove}, |
999 |
* {@code Set.remove}, {@code removeAll}, {@code retainAll} and |
1000 |
* {@code clear} operations. It does not support the |
1001 |
* {@code add} or {@code addAll} operations. |
1002 |
* |
1003 |
* @return a set view of the mappings contained in this map |
1004 |
*/ |
1005 |
public Set<Map.Entry<K,V>> entrySet() { |
1006 |
Set<Map.Entry<K,V>> es; |
1007 |
return (es = entrySet) == null ? (entrySet = new EntrySet()) : es; |
1008 |
} |
1009 |
|
1010 |
// Live view of the map's entries; all operations delegate to the
// enclosing HashMap, so structural changes are visible both ways.
final class EntrySet extends AbstractSet<Map.Entry<K,V>> {
    public final int size()                 { return size; }
    public final void clear()               { HashMap.this.clear(); }
    public final Iterator<Map.Entry<K,V>> iterator() {
        return new EntryIterator();
    }
    public final boolean contains(Object o) {
        if (!(o instanceof Map.Entry))
            return false;
        Map.Entry<?,?> e = (Map.Entry<?,?>) o;
        Object key = e.getKey();
        // Look up by key, then compare the whole entry (key and value).
        Node<K,V> candidate = getNode(hash(key), key);
        return candidate != null && candidate.equals(e);
    }
    public final boolean remove(Object o) {
        if (o instanceof Map.Entry) {
            Map.Entry<?,?> e = (Map.Entry<?,?>) o;
            Object key = e.getKey();
            Object value = e.getValue();
            // matchValue=true: only remove when the stored value matches
            return removeNode(hash(key), key, value, true, true) != null;
        }
        return false;
    }
    public final Spliterator<Map.Entry<K,V>> spliterator() {
        // fence=-1: late-binding; fence/est are set on first use
        return new EntrySpliterator<>(HashMap.this, 0, -1, 0, 0);
    }
    public final void forEach(Consumer<? super Map.Entry<K,V>> action) {
        Node<K,V>[] tab;
        if (action == null)
            throw new NullPointerException();
        if (size > 0 && (tab = table) != null) {
            int mc = modCount;
            for (Node<K,V> e : tab) {
                for (; e != null; e = e.next)
                    action.accept(e);
            }
            // Best-effort fail-fast after the full traversal.
            if (modCount != mc)
                throw new ConcurrentModificationException();
        }
    }
}
1051 |
|
1052 |
// Overrides of JDK8 Map extension methods |
1053 |
|
1054 |
@Override |
1055 |
public V getOrDefault(Object key, V defaultValue) { |
1056 |
Node<K,V> e; |
1057 |
return (e = getNode(hash(key), key)) == null ? defaultValue : e.value; |
1058 |
} |
1059 |
|
1060 |
@Override
public V putIfAbsent(K key, V value) {
    // onlyIfAbsent=true: putVal leaves an existing non-null mapping
    // untouched and returns its current value.
    return putVal(hash(key), key, value, true, true);
}
1064 |
|
1065 |
@Override
public boolean remove(Object key, Object value) {
    // matchValue=true: only remove when the stored value equals the
    // given value (null-safe comparison inside removeNode).
    return removeNode(hash(key), key, value, true, true) != null;
}
1069 |
|
1070 |
@Override |
1071 |
public boolean replace(K key, V oldValue, V newValue) { |
1072 |
Node<K,V> e; V v; |
1073 |
if ((e = getNode(hash(key), key)) != null && |
1074 |
((v = e.value) == oldValue || (v != null && v.equals(oldValue)))) { |
1075 |
e.value = newValue; |
1076 |
afterNodeAccess(e); |
1077 |
return true; |
1078 |
} |
1079 |
return false; |
1080 |
} |
1081 |
|
1082 |
@Override |
1083 |
public V replace(K key, V value) { |
1084 |
Node<K,V> e; |
1085 |
if ((e = getNode(hash(key), key)) != null) { |
1086 |
V oldValue = e.value; |
1087 |
e.value = value; |
1088 |
afterNodeAccess(e); |
1089 |
return oldValue; |
1090 |
} |
1091 |
return null; |
1092 |
} |
1093 |
|
1094 |
/**
 * {@inheritDoc}
 *
 * <p>This method will, on a best-effort basis, throw a
 * {@link ConcurrentModificationException} if it is detected that the
 * mapping function modifies this map during computation.
 *
 * @throws ConcurrentModificationException if it is detected that the
 * mapping function modified this map
 */
@Override
public V computeIfAbsent(K key,
                         Function<? super K, ? extends V> mappingFunction) {
    if (mappingFunction == null)
        throw new NullPointerException();
    int hash = hash(key);
    Node<K,V>[] tab; Node<K,V> first; int n, i;
    int binCount = 0;          // length of the scanned list bin
    TreeNode<K,V> t = null;    // tree root when the bin is treeified
    Node<K,V> old = null;      // existing node for key, if any
    // Resize up front so a subsequent insert cannot invalidate tab/i.
    if (size > threshold || (tab = table) == null ||
        (n = tab.length) == 0)
        n = (tab = resize()).length;
    if ((first = tab[i = (n - 1) & hash]) != null) {
        if (first instanceof TreeNode)
            old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
        else {
            Node<K,V> e = first; K k;
            do {
                if (e.hash == hash &&
                    ((k = e.key) == key || (key != null && key.equals(k)))) {
                    old = e;
                    break;
                }
                ++binCount;
            } while ((e = e.next) != null);
        }
        V oldValue;
        // An existing non-null value short-circuits: the mapping
        // function is never invoked.
        if (old != null && (oldValue = old.value) != null) {
            afterNodeAccess(old);
            return oldValue;
        }
    }
    // Snapshot modCount to detect structural modification by the
    // mapping function (best-effort fail-fast).
    int mc = modCount;
    V v = mappingFunction.apply(key);
    if (mc != modCount) { throw new ConcurrentModificationException(); }
    if (v == null) {
        return null;                     // null result: no mapping recorded
    } else if (old != null) {
        old.value = v;                   // key existed with null value
        afterNodeAccess(old);
        return v;
    }
    else if (t != null)
        t.putTreeVal(this, tab, hash, key, v);
    else {
        tab[i] = newNode(hash, key, v, first);
        if (binCount >= TREEIFY_THRESHOLD - 1)
            treeifyBin(tab, hash);
    }
    // mc + 1 (not ++modCount) keeps the count consistent with the
    // snapshot taken before the function ran.
    modCount = mc + 1;
    ++size;
    afterNodeInsertion(true);            // LinkedHashMap eviction hook
    return v;
}
1159 |
|
1160 |
/**
 * {@inheritDoc}
 *
 * <p>This method will, on a best-effort basis, throw a
 * {@link ConcurrentModificationException} if it is detected that the
 * remapping function modifies this map during computation.
 *
 * @throws ConcurrentModificationException if it is detected that the
 * remapping function modified this map
 */
@Override
public V computeIfPresent(K key,
                          BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
    if (remappingFunction == null)
        throw new NullPointerException();
    Node<K,V> e; V oldValue;
    int hash = hash(key);
    // Only act when a mapping with a non-null value exists.
    if ((e = getNode(hash, key)) != null &&
        (oldValue = e.value) != null) {
        int mc = modCount;
        V v = remappingFunction.apply(key, oldValue);
        // Best-effort fail-fast if the function modified the map.
        if (mc != modCount) { throw new ConcurrentModificationException(); }
        if (v != null) {
            e.value = v;
            afterNodeAccess(e);   // LinkedHashMap access callback
            return v;
        }
        else
            // A null result removes the existing mapping.
            removeNode(hash, key, null, false, true);
    }
    return null;
}
1192 |
|
1193 |
/**
 * {@inheritDoc}
 *
 * <p>This method will, on a best-effort basis, throw a
 * {@link ConcurrentModificationException} if it is detected that the
 * remapping function modifies this map during computation.
 *
 * @throws ConcurrentModificationException if it is detected that the
 * remapping function modified this map
 */
@Override
public V compute(K key,
                 BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
    if (remappingFunction == null)
        throw new NullPointerException();
    int hash = hash(key);
    Node<K,V>[] tab; Node<K,V> first; int n, i;
    int binCount = 0;          // length of the scanned list bin
    TreeNode<K,V> t = null;    // tree root when the bin is treeified
    Node<K,V> old = null;      // existing node for key, if any
    // Resize up front so a subsequent insert cannot invalidate tab/i.
    if (size > threshold || (tab = table) == null ||
        (n = tab.length) == 0)
        n = (tab = resize()).length;
    if ((first = tab[i = (n - 1) & hash]) != null) {
        if (first instanceof TreeNode)
            old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
        else {
            Node<K,V> e = first; K k;
            do {
                if (e.hash == hash &&
                    ((k = e.key) == key || (key != null && key.equals(k)))) {
                    old = e;
                    break;
                }
                ++binCount;
            } while ((e = e.next) != null);
        }
    }
    V oldValue = (old == null) ? null : old.value;
    // Snapshot modCount to detect structural modification by the
    // remapping function (best-effort fail-fast).
    int mc = modCount;
    V v = remappingFunction.apply(key, oldValue);
    if (mc != modCount) { throw new ConcurrentModificationException(); }
    if (old != null) {
        if (v != null) {
            old.value = v;               // replace in place
            afterNodeAccess(old);
        }
        else
            removeNode(hash, key, null, false, true);  // null removes
    }
    else if (v != null) {
        // No prior mapping: insert into tree or list bin.
        if (t != null)
            t.putTreeVal(this, tab, hash, key, v);
        else {
            tab[i] = newNode(hash, key, v, first);
            if (binCount >= TREEIFY_THRESHOLD - 1)
                treeifyBin(tab, hash);
        }
        // mc + 1 keeps the count consistent with the pre-function
        // snapshot.
        modCount = mc + 1;
        ++size;
        afterNodeInsertion(true);        // LinkedHashMap eviction hook
    }
    return v;
}
1257 |
|
1258 |
/**
 * {@inheritDoc}
 *
 * <p>This method will, on a best-effort basis, throw a
 * {@link ConcurrentModificationException} if it is detected that the
 * remapping function modifies this map during computation.
 *
 * @throws ConcurrentModificationException if it is detected that the
 * remapping function modified this map
 */
@Override
public V merge(K key, V value,
               BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
    if (value == null || remappingFunction == null)
        throw new NullPointerException();
    int hash = hash(key);
    Node<K,V>[] tab; Node<K,V> first; int n, i;
    int binCount = 0;          // length of the scanned list bin
    TreeNode<K,V> t = null;    // tree root when the bin is treeified
    Node<K,V> old = null;      // existing node for key, if any
    // Resize up front so a subsequent insert cannot invalidate tab/i.
    if (size > threshold || (tab = table) == null ||
        (n = tab.length) == 0)
        n = (tab = resize()).length;
    if ((first = tab[i = (n - 1) & hash]) != null) {
        if (first instanceof TreeNode)
            old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
        else {
            Node<K,V> e = first; K k;
            do {
                if (e.hash == hash &&
                    ((k = e.key) == key || (key != null && key.equals(k)))) {
                    old = e;
                    break;
                }
                ++binCount;
            } while ((e = e.next) != null);
        }
    }
    if (old != null) {
        V v;
        if (old.value != null) {
            // Existing non-null value: combine via the function, with
            // best-effort fail-fast around the callback.
            int mc = modCount;
            v = remappingFunction.apply(old.value, value);
            if (mc != modCount) {
                throw new ConcurrentModificationException();
            }
        } else {
            // Existing null value: per Map.merge, use the given value
            // without invoking the function.
            v = value;
        }
        if (v != null) {
            old.value = v;
            afterNodeAccess(old);
        }
        else
            removeNode(hash, key, null, false, true);  // null removes
        return v;
    } else {
        // No prior mapping: insert the given value directly.
        if (t != null)
            t.putTreeVal(this, tab, hash, key, value);
        else {
            tab[i] = newNode(hash, key, value, first);
            if (binCount >= TREEIFY_THRESHOLD - 1)
                treeifyBin(tab, hash);
        }
        ++modCount;
        ++size;
        afterNodeInsertion(true);        // LinkedHashMap eviction hook
        return value;
    }
}
1328 |
|
1329 |
@Override |
1330 |
public void forEach(BiConsumer<? super K, ? super V> action) { |
1331 |
Node<K,V>[] tab; |
1332 |
if (action == null) |
1333 |
throw new NullPointerException(); |
1334 |
if (size > 0 && (tab = table) != null) { |
1335 |
int mc = modCount; |
1336 |
for (Node<K,V> e : tab) { |
1337 |
for (; e != null; e = e.next) |
1338 |
action.accept(e.key, e.value); |
1339 |
} |
1340 |
if (modCount != mc) |
1341 |
throw new ConcurrentModificationException(); |
1342 |
} |
1343 |
} |
1344 |
|
1345 |
@Override |
1346 |
public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) { |
1347 |
Node<K,V>[] tab; |
1348 |
if (function == null) |
1349 |
throw new NullPointerException(); |
1350 |
if (size > 0 && (tab = table) != null) { |
1351 |
int mc = modCount; |
1352 |
for (Node<K,V> e : tab) { |
1353 |
for (; e != null; e = e.next) { |
1354 |
e.value = function.apply(e.key, e.value); |
1355 |
} |
1356 |
} |
1357 |
if (modCount != mc) |
1358 |
throw new ConcurrentModificationException(); |
1359 |
} |
1360 |
} |
1361 |
|
1362 |
/* ------------------------------------------------------------ */ |
1363 |
// Cloning and serialization |
1364 |
|
1365 |
/**
 * Returns a shallow copy of this {@code HashMap} instance: the keys and
 * values themselves are not cloned.
 *
 * @return a shallow copy of this map
 */
@SuppressWarnings("unchecked")
@Override
public Object clone() {
    HashMap<K,V> result;
    try {
        result = (HashMap<K,V>)super.clone();
    } catch (CloneNotSupportedException e) {
        // this shouldn't happen, since we are Cloneable
        throw new InternalError(e);
    }
    // super.clone() copied the field references (including table);
    // reset the copy to a pristine state, then repopulate so it owns
    // an independent table.
    result.reinitialize();
    result.putMapEntries(this, false);
    return result;
}
1385 |
|
1386 |
// These methods are also used when serializing HashSets
final float loadFactor() { return loadFactor; }

// Effective capacity: the real table length when allocated; otherwise
// the deferred initial capacity stored in threshold, or the default.
final int capacity() {
    return (table != null) ? table.length :
        (threshold > 0) ? threshold :
        DEFAULT_INITIAL_CAPACITY;
}
1393 |
|
1394 |
/**
 * Saves this map to a stream (that is, serializes it).
 *
 * @param s the stream
 * @throws IOException if an I/O error occurs
 * @serialData The <i>capacity</i> of the HashMap (the length of the
 *             bucket array) is emitted (int), followed by the
 *             <i>size</i> (an int, the number of key-value
 *             mappings), followed by the key (Object) and value (Object)
 *             for each key-value mapping.  The key-value mappings are
 *             emitted in no particular order.
 */
private void writeObject(java.io.ObjectOutputStream s)
    throws IOException {
    int buckets = capacity();
    // Write out the threshold, loadfactor, and any hidden stuff
    s.defaultWriteObject();
    s.writeInt(buckets);
    s.writeInt(size);
    internalWriteEntries(s);  // key/value pairs, table order
}
1415 |
|
1416 |
/**
 * Reconstitutes this map from a stream (that is, deserializes it).
 * @param s the stream
 * @throws ClassNotFoundException if the class of a serialized object
 *         could not be found
 * @throws IOException if an I/O error occurs
 */
private void readObject(java.io.ObjectInputStream s)
    throws IOException, ClassNotFoundException {
    // Read in the threshold (ignored), loadfactor, and any hidden stuff
    s.defaultReadObject();
    reinitialize();
    if (loadFactor <= 0 || Float.isNaN(loadFactor))
        throw new InvalidObjectException("Illegal load factor: " +
                                         loadFactor);
    s.readInt();                // Read and ignore number of buckets
    int mappings = s.readInt(); // Read number of mappings (size)
    if (mappings < 0)
        throw new InvalidObjectException("Illegal mappings count: " +
                                         mappings);
    else if (mappings > 0) { // (if zero, use defaults)
        // Size the table using given load factor only if within
        // range of 0.25...4.0
        float lf = Math.min(Math.max(0.25f, loadFactor), 4.0f);
        float fc = (float)mappings / lf + 1.0f;
        int cap = ((fc < DEFAULT_INITIAL_CAPACITY) ?
                   DEFAULT_INITIAL_CAPACITY :
                   (fc >= MAXIMUM_CAPACITY) ?
                   MAXIMUM_CAPACITY :
                   tableSizeFor((int)fc));
        float ft = (float)cap * lf;
        threshold = ((cap < MAXIMUM_CAPACITY && ft < MAXIMUM_CAPACITY) ?
                     (int)ft : Integer.MAX_VALUE);

        // Check Map.Entry[].class since it's the nearest public type to
        // what we're actually creating.
        // NOTE(review): jsr166 variant of the OpenJDK SharedSecrets
        // array-length check; presumably validates cap against stream
        // limits to resist memory-exhaustion attacks — confirm.
        jsr166.Platform.checkArray(s, Map.Entry[].class, cap);
        @SuppressWarnings({"rawtypes","unchecked"})
        Node<K,V>[] tab = (Node<K,V>[])new Node[cap];
        table = tab;

        // Read the keys and values, and put the mappings in the HashMap
        for (int i = 0; i < mappings; i++) {
            @SuppressWarnings("unchecked")
            K key = (K) s.readObject();
            @SuppressWarnings("unchecked")
            V value = (V) s.readObject();
            // evict=false: deserialization is construction, not use
            putVal(hash(key), key, value, false, false);
        }
    }
}
1467 |
|
1468 |
/* ------------------------------------------------------------ */ |
1469 |
// iterators |
1470 |
|
1471 |
// Base class for the key/value/entry iterators.  Walks the table
// slot by slot, chasing each bin's list; fail-fast via modCount.
abstract class HashIterator {
    Node<K,V> next;        // next entry to return
    Node<K,V> current;     // current entry
    int expectedModCount;  // for fast-fail
    int index;             // current slot

    HashIterator() {
        expectedModCount = modCount;
        Node<K,V>[] t = table;
        current = next = null;
        index = 0;
        if (t != null && size > 0) { // advance to first entry
            do {} while (index < t.length && (next = t[index++]) == null);
        }
    }

    public final boolean hasNext() {
        return next != null;
    }

    // Shared advance: returns the current node and pre-positions
    // 'next' on the following node, skipping empty slots.
    final Node<K,V> nextNode() {
        Node<K,V>[] t;
        Node<K,V> e = next;
        if (modCount != expectedModCount)
            throw new ConcurrentModificationException();
        if (e == null)
            throw new NoSuchElementException();
        if ((next = (current = e).next) == null && (t = table) != null) {
            do {} while (index < t.length && (next = t[index++]) == null);
        }
        return e;
    }

    public final void remove() {
        Node<K,V> p = current;
        if (p == null)
            throw new IllegalStateException();  // no preceding next()
        if (modCount != expectedModCount)
            throw new ConcurrentModificationException();
        current = null;
        // movable=false keeps tree nodes in place during iteration
        removeNode(p.hash, p.key, null, false, false);
        // Re-sync so our own removal does not trip the fail-fast check.
        expectedModCount = modCount;
    }
}
1515 |
|
1516 |
// The concrete iterators share HashIterator's traversal and fail-fast
// machinery; each subclass only chooses what next() returns.
final class KeyIterator extends HashIterator
    implements Iterator<K> {
    public final K next() { return nextNode().key; }
}

final class ValueIterator extends HashIterator
    implements Iterator<V> {
    public final V next() { return nextNode().value; }
}

final class EntryIterator extends HashIterator
    implements Iterator<Map.Entry<K,V>> {
    public final Map.Entry<K,V> next() { return nextNode(); }
}
1530 |
|
1531 |
/* ------------------------------------------------------------ */ |
1532 |
// spliterators |
1533 |
|
1534 |
// Base class for the key/value/entry spliterators.  Late-binding:
// a fence of -1 means the table snapshot (fence/est/expectedModCount)
// is taken on first use rather than at construction.
static class HashMapSpliterator<K,V> {
    final HashMap<K,V> map;
    Node<K,V> current;          // current node
    int index;                  // current index, modified on advance/split
    int fence;                  // one past last index
    int est;                    // size estimate
    int expectedModCount;       // for comodification checks

    HashMapSpliterator(HashMap<K,V> m, int origin,
                       int fence, int est,
                       int expectedModCount) {
        this.map = m;
        this.index = origin;
        this.fence = fence;
        this.est = est;
        this.expectedModCount = expectedModCount;
    }

    final int getFence() { // initialize fence and size on first use
        int hi;
        if ((hi = fence) < 0) {
            HashMap<K,V> m = map;
            est = m.size;
            expectedModCount = m.modCount;
            Node<K,V>[] tab = m.table;
            hi = fence = (tab == null) ? 0 : tab.length;
        }
        return hi;
    }

    public final long estimateSize() {
        getFence(); // force init
        return (long) est;
    }
}
1569 |
|
1570 |
// Spliterator over the map's keys; splits by halving the slot range.
static final class KeySpliterator<K,V>
    extends HashMapSpliterator<K,V>
    implements Spliterator<K> {
    KeySpliterator(HashMap<K,V> m, int origin, int fence, int est,
                   int expectedModCount) {
        super(m, origin, fence, est, expectedModCount);
    }

    public KeySpliterator<K,V> trySplit() {
        int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
        // Refuse to split mid-bin (current != null) or when the slot
        // range can no longer be halved; est is halved on each split.
        return (lo >= mid || current != null) ? null :
            new KeySpliterator<>(map, lo, index = mid, est >>>= 1,
                                 expectedModCount);
    }

    public void forEachRemaining(Consumer<? super K> action) {
        int i, hi, mc;
        if (action == null)
            throw new NullPointerException();
        HashMap<K,V> m = map;
        Node<K,V>[] tab = m.table;
        if ((hi = fence) < 0) {
            // Late-binding: snapshot modCount and fence now.
            mc = expectedModCount = m.modCount;
            hi = fence = (tab == null) ? 0 : tab.length;
        }
        else
            mc = expectedModCount;
        if (tab != null && tab.length >= hi &&
            (i = index) >= 0 && (i < (index = hi) || current != null)) {
            Node<K,V> p = current;
            current = null;         // consume everything; mark exhausted
            do {
                if (p == null)
                    p = tab[i++];   // advance to next slot
                else {
                    action.accept(p.key);
                    p = p.next;     // advance within the bin
                }
            } while (p != null || i < hi);
            // Best-effort comodification check after traversal.
            if (m.modCount != mc)
                throw new ConcurrentModificationException();
        }
    }

    public boolean tryAdvance(Consumer<? super K> action) {
        int hi;
        if (action == null)
            throw new NullPointerException();
        Node<K,V>[] tab = map.table;
        if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
            while (current != null || index < hi) {
                if (current == null)
                    current = tab[index++];  // move to next slot
                else {
                    K k = current.key;
                    current = current.next;
                    action.accept(k);
                    if (map.modCount != expectedModCount)
                        throw new ConcurrentModificationException();
                    return true;
                }
            }
        }
        return false;
    }

    public int characteristics() {
        // SIZED only while unsplit (fence < 0) or when est still
        // matches the map size; keys are always DISTINCT.
        return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
            Spliterator.DISTINCT;
    }
}
1641 |
|
1642 |
// Spliterator over the map's values; splits by halving the slot range.
static final class ValueSpliterator<K,V>
    extends HashMapSpliterator<K,V>
    implements Spliterator<V> {
    ValueSpliterator(HashMap<K,V> m, int origin, int fence, int est,
                     int expectedModCount) {
        super(m, origin, fence, est, expectedModCount);
    }

    public ValueSpliterator<K,V> trySplit() {
        int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
        // Refuse to split mid-bin (current != null) or when the slot
        // range can no longer be halved; est is halved on each split.
        return (lo >= mid || current != null) ? null :
            new ValueSpliterator<>(map, lo, index = mid, est >>>= 1,
                                   expectedModCount);
    }

    public void forEachRemaining(Consumer<? super V> action) {
        int i, hi, mc;
        if (action == null)
            throw new NullPointerException();
        HashMap<K,V> m = map;
        Node<K,V>[] tab = m.table;
        if ((hi = fence) < 0) {
            // Late-binding: snapshot modCount and fence now.
            mc = expectedModCount = m.modCount;
            hi = fence = (tab == null) ? 0 : tab.length;
        }
        else
            mc = expectedModCount;
        if (tab != null && tab.length >= hi &&
            (i = index) >= 0 && (i < (index = hi) || current != null)) {
            Node<K,V> p = current;
            current = null;         // consume everything; mark exhausted
            do {
                if (p == null)
                    p = tab[i++];   // advance to next slot
                else {
                    action.accept(p.value);
                    p = p.next;     // advance within the bin
                }
            } while (p != null || i < hi);
            // Best-effort comodification check after traversal.
            if (m.modCount != mc)
                throw new ConcurrentModificationException();
        }
    }

    public boolean tryAdvance(Consumer<? super V> action) {
        int hi;
        if (action == null)
            throw new NullPointerException();
        Node<K,V>[] tab = map.table;
        if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
            while (current != null || index < hi) {
                if (current == null)
                    current = tab[index++];  // move to next slot
                else {
                    V v = current.value;
                    current = current.next;
                    action.accept(v);
                    if (map.modCount != expectedModCount)
                        throw new ConcurrentModificationException();
                    return true;
                }
            }
        }
        return false;
    }

    public int characteristics() {
        // SIZED only while unsplit (fence < 0) or when est still
        // matches; values are not DISTINCT (duplicates allowed).
        return (fence < 0 || est == map.size ? Spliterator.SIZED : 0);
    }
}
1712 |
|
1713 |
    /**
     * Spliterator over the map's entries; hands the internal Node objects
     * themselves to consumers (Node implements Map.Entry).  Late-binding
     * and fail-fast like the key/value variants.
     */
    static final class EntrySpliterator<K,V>
        extends HashMapSpliterator<K,V>
        implements Spliterator<Map.Entry<K,V>> {
        EntrySpliterator(HashMap<K,V> m, int origin, int fence, int est,
                         int expectedModCount) {
            super(m, origin, fence, est, expectedModCount);
        }

        /**
         * Splits off the lower half of the remaining bin range; refuses
         * mid-chain (current != null) or when the range is too small.
         */
        public EntrySpliterator<K,V> trySplit() {
            int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
            return (lo >= mid || current != null) ? null :
                new EntrySpliterator<>(map, lo, index = mid, est >>>= 1,
                                       expectedModCount);
        }

        /**
         * Applies the action to every remaining entry; the modCount check
         * happens once after the whole traversal.
         */
        public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
            int i, hi, mc;
            if (action == null)
                throw new NullPointerException();
            HashMap<K,V> m = map;
            Node<K,V>[] tab = m.table;
            if ((hi = fence) < 0) {
                // first use: bind fence and expected modCount now
                mc = expectedModCount = m.modCount;
                hi = fence = (tab == null) ? 0 : tab.length;
            }
            else
                mc = expectedModCount;
            if (tab != null && tab.length >= hi &&
                (i = index) >= 0 && (i < (index = hi) || current != null)) {
                Node<K,V> p = current;
                current = null;    // consume everything; index was set to hi above
                do {
                    if (p == null)
                        p = tab[i++];     // advance to next bin
                    else {
                        action.accept(p); // the Node doubles as the Map.Entry
                        p = p.next;
                    }
                } while (p != null || i < hi);
                if (m.modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }

        /**
         * Advances past one remaining entry if any; fail-fast check runs
         * after the action.
         */
        public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
            int hi;
            if (action == null)
                throw new NullPointerException();
            Node<K,V>[] tab = map.table;
            if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
                while (current != null || index < hi) {
                    if (current == null)
                        current = tab[index++];
                    else {
                        Node<K,V> e = current;
                        current = current.next;
                        action.accept(e);
                        if (map.modCount != expectedModCount)
                            throw new ConcurrentModificationException();
                        return true;
                    }
                }
            }
            return false;
        }

        /**
         * Entries are distinct; SIZED is reported only while the estimate
         * still covers the whole map.
         */
        public int characteristics() {
            return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
                Spliterator.DISTINCT;
        }
    }
1784 |
|
1785 |
/* ------------------------------------------------------------ */ |
1786 |
// LinkedHashMap support |
1787 |
|
1788 |
|
1789 |
/* |
1790 |
* The following package-protected methods are designed to be |
1791 |
* overridden by LinkedHashMap, but not by any other subclass. |
1792 |
* Nearly all other internal methods are also package-protected |
1793 |
* but are declared final, so can be used by LinkedHashMap, view |
1794 |
* classes, and HashSet. |
1795 |
*/ |
1796 |
|
1797 |
    // Create a regular (non-tree) node.  Package-protected factory hook
    // designed to be overridden by LinkedHashMap (per the note above).
    Node<K,V> newNode(int hash, K key, V value, Node<K,V> next) {
        return new Node<>(hash, key, value, next);
    }
1801 |
|
1802 |
    // For conversion from TreeNodes to plain nodes: copies hash/key/value
    // of p into a fresh plain node with the given successor.
    Node<K,V> replacementNode(Node<K,V> p, Node<K,V> next) {
        return new Node<>(p.hash, p.key, p.value, next);
    }
1806 |
|
1807 |
    // Create a tree bin node.  Factory hook, overridable by LinkedHashMap
    // like newNode above.
    TreeNode<K,V> newTreeNode(int hash, K key, V value, Node<K,V> next) {
        return new TreeNode<>(hash, key, value, next);
    }
1811 |
|
1812 |
    // For treeifyBin: copies a plain node into a TreeNode with the given
    // successor, prior to building the red-black structure.
    TreeNode<K,V> replacementTreeNode(Node<K,V> p, Node<K,V> next) {
        return new TreeNode<>(p.hash, p.key, p.value, next);
    }
1816 |
|
1817 |
    /**
     * Reset to initial default state. Called by clone and readObject.
     */
    void reinitialize() {
        table = null;
        entrySet = null;   // drop cached collection views
        keySet = null;
        values = null;
        modCount = 0;
        threshold = 0;
        size = 0;
    }
1829 |
|
1830 |
    // Callbacks to allow LinkedHashMap post-actions.  Deliberate no-ops
    // here so the base implementation pays no cost for the hooks.
    void afterNodeAccess(Node<K,V> p) { }
    void afterNodeInsertion(boolean evict) { }
    void afterNodeRemoval(Node<K,V> p) { }
1834 |
|
1835 |
// Called only from writeObject, to ensure compatible ordering. |
1836 |
void internalWriteEntries(java.io.ObjectOutputStream s) throws IOException { |
1837 |
Node<K,V>[] tab; |
1838 |
if (size > 0 && (tab = table) != null) { |
1839 |
for (Node<K,V> e : tab) { |
1840 |
for (; e != null; e = e.next) { |
1841 |
s.writeObject(e.key); |
1842 |
s.writeObject(e.value); |
1843 |
} |
1844 |
} |
1845 |
} |
1846 |
} |
1847 |
|
1848 |
/* ------------------------------------------------------------ */ |
1849 |
// Tree bins |
1850 |
|
1851 |
/** |
1852 |
* Entry for Tree bins. Extends LinkedHashMap.Entry (which in turn |
1853 |
* extends Node) so can be used as extension of either regular or |
1854 |
* linked node. |
1855 |
*/ |
1856 |
static final class TreeNode<K,V> extends LinkedHashMap.Entry<K,V> { |
1857 |
TreeNode<K,V> parent; // red-black tree links |
1858 |
TreeNode<K,V> left; |
1859 |
TreeNode<K,V> right; |
1860 |
TreeNode<K,V> prev; // needed to unlink next upon deletion |
1861 |
boolean red; |
1862 |
TreeNode(int hash, K key, V val, Node<K,V> next) { |
1863 |
super(hash, key, val, next); |
1864 |
} |
1865 |
|
1866 |
/** |
1867 |
* Returns root of tree containing this node. |
1868 |
*/ |
1869 |
final TreeNode<K,V> root() { |
1870 |
for (TreeNode<K,V> r = this, p;;) { |
1871 |
if ((p = r.parent) == null) |
1872 |
return r; |
1873 |
r = p; |
1874 |
} |
1875 |
} |
1876 |
|
1877 |
/** |
1878 |
* Ensures that the given root is the first node of its bin. |
1879 |
*/ |
1880 |
static <K,V> void moveRootToFront(Node<K,V>[] tab, TreeNode<K,V> root) { |
1881 |
int n; |
1882 |
if (root != null && tab != null && (n = tab.length) > 0) { |
1883 |
int index = (n - 1) & root.hash; |
1884 |
TreeNode<K,V> first = (TreeNode<K,V>)tab[index]; |
1885 |
if (root != first) { |
1886 |
Node<K,V> rn; |
1887 |
tab[index] = root; |
1888 |
TreeNode<K,V> rp = root.prev; |
1889 |
if ((rn = root.next) != null) |
1890 |
((TreeNode<K,V>)rn).prev = rp; |
1891 |
if (rp != null) |
1892 |
rp.next = rn; |
1893 |
if (first != null) |
1894 |
first.prev = root; |
1895 |
root.next = first; |
1896 |
root.prev = null; |
1897 |
} |
1898 |
assert checkInvariants(root); |
1899 |
} |
1900 |
} |
1901 |
|
1902 |
/** |
1903 |
* Finds the node starting at root p with the given hash and key. |
1904 |
* The kc argument caches comparableClassFor(key) upon first use |
1905 |
* comparing keys. |
1906 |
*/ |
1907 |
final TreeNode<K,V> find(int h, Object k, Class<?> kc) { |
1908 |
TreeNode<K,V> p = this; |
1909 |
do { |
1910 |
int ph, dir; K pk; |
1911 |
TreeNode<K,V> pl = p.left, pr = p.right, q; |
1912 |
if ((ph = p.hash) > h) |
1913 |
p = pl; |
1914 |
else if (ph < h) |
1915 |
p = pr; |
1916 |
else if ((pk = p.key) == k || (k != null && k.equals(pk))) |
1917 |
return p; |
1918 |
else if (pl == null) |
1919 |
p = pr; |
1920 |
else if (pr == null) |
1921 |
p = pl; |
1922 |
else if ((kc != null || |
1923 |
(kc = comparableClassFor(k)) != null) && |
1924 |
(dir = compareComparables(kc, k, pk)) != 0) |
1925 |
p = (dir < 0) ? pl : pr; |
1926 |
else if ((q = pr.find(h, k, kc)) != null) |
1927 |
return q; |
1928 |
else |
1929 |
p = pl; |
1930 |
} while (p != null); |
1931 |
return null; |
1932 |
} |
1933 |
|
1934 |
/** |
1935 |
* Calls find for root node. |
1936 |
*/ |
1937 |
final TreeNode<K,V> getTreeNode(int h, Object k) { |
1938 |
return ((parent != null) ? root() : this).find(h, k, null); |
1939 |
} |
1940 |
|
1941 |
/** |
1942 |
* Tie-breaking utility for ordering insertions when equal |
1943 |
* hashCodes and non-comparable. We don't require a total |
1944 |
* order, just a consistent insertion rule to maintain |
1945 |
* equivalence across rebalancings. Tie-breaking further than |
1946 |
* necessary simplifies testing a bit. |
1947 |
*/ |
1948 |
static int tieBreakOrder(Object a, Object b) { |
1949 |
int d; |
1950 |
if (a == null || b == null || |
1951 |
(d = a.getClass().getName(). |
1952 |
compareTo(b.getClass().getName())) == 0) |
1953 |
d = (System.identityHashCode(a) <= System.identityHashCode(b) ? |
1954 |
-1 : 1); |
1955 |
return d; |
1956 |
} |
1957 |
|
1958 |
/** |
1959 |
* Forms tree of the nodes linked from this node. |
1960 |
*/ |
1961 |
final void treeify(Node<K,V>[] tab) { |
1962 |
TreeNode<K,V> root = null; |
1963 |
for (TreeNode<K,V> x = this, next; x != null; x = next) { |
1964 |
next = (TreeNode<K,V>)x.next; |
1965 |
x.left = x.right = null; |
1966 |
if (root == null) { |
1967 |
x.parent = null; |
1968 |
x.red = false; |
1969 |
root = x; |
1970 |
} |
1971 |
else { |
1972 |
K k = x.key; |
1973 |
int h = x.hash; |
1974 |
Class<?> kc = null; |
1975 |
for (TreeNode<K,V> p = root;;) { |
1976 |
int dir, ph; |
1977 |
K pk = p.key; |
1978 |
if ((ph = p.hash) > h) |
1979 |
dir = -1; |
1980 |
else if (ph < h) |
1981 |
dir = 1; |
1982 |
else if ((kc == null && |
1983 |
(kc = comparableClassFor(k)) == null) || |
1984 |
(dir = compareComparables(kc, k, pk)) == 0) |
1985 |
dir = tieBreakOrder(k, pk); |
1986 |
|
1987 |
TreeNode<K,V> xp = p; |
1988 |
if ((p = (dir <= 0) ? p.left : p.right) == null) { |
1989 |
x.parent = xp; |
1990 |
if (dir <= 0) |
1991 |
xp.left = x; |
1992 |
else |
1993 |
xp.right = x; |
1994 |
root = balanceInsertion(root, x); |
1995 |
break; |
1996 |
} |
1997 |
} |
1998 |
} |
1999 |
} |
2000 |
moveRootToFront(tab, root); |
2001 |
} |
2002 |
|
2003 |
/** |
2004 |
* Returns a list of non-TreeNodes replacing those linked from |
2005 |
* this node. |
2006 |
*/ |
2007 |
final Node<K,V> untreeify(HashMap<K,V> map) { |
2008 |
Node<K,V> hd = null, tl = null; |
2009 |
for (Node<K,V> q = this; q != null; q = q.next) { |
2010 |
Node<K,V> p = map.replacementNode(q, null); |
2011 |
if (tl == null) |
2012 |
hd = p; |
2013 |
else |
2014 |
tl.next = p; |
2015 |
tl = p; |
2016 |
} |
2017 |
return hd; |
2018 |
} |
2019 |
|
2020 |
/** |
2021 |
* Tree version of putVal. |
2022 |
*/ |
2023 |
final TreeNode<K,V> putTreeVal(HashMap<K,V> map, Node<K,V>[] tab, |
2024 |
int h, K k, V v) { |
2025 |
Class<?> kc = null; |
2026 |
boolean searched = false; |
2027 |
TreeNode<K,V> root = (parent != null) ? root() : this; |
2028 |
for (TreeNode<K,V> p = root;;) { |
2029 |
int dir, ph; K pk; |
2030 |
if ((ph = p.hash) > h) |
2031 |
dir = -1; |
2032 |
else if (ph < h) |
2033 |
dir = 1; |
2034 |
else if ((pk = p.key) == k || (k != null && k.equals(pk))) |
2035 |
return p; |
2036 |
else if ((kc == null && |
2037 |
(kc = comparableClassFor(k)) == null) || |
2038 |
(dir = compareComparables(kc, k, pk)) == 0) { |
2039 |
if (!searched) { |
2040 |
TreeNode<K,V> q, ch; |
2041 |
searched = true; |
2042 |
if (((ch = p.left) != null && |
2043 |
(q = ch.find(h, k, kc)) != null) || |
2044 |
((ch = p.right) != null && |
2045 |
(q = ch.find(h, k, kc)) != null)) |
2046 |
return q; |
2047 |
} |
2048 |
dir = tieBreakOrder(k, pk); |
2049 |
} |
2050 |
|
2051 |
TreeNode<K,V> xp = p; |
2052 |
if ((p = (dir <= 0) ? p.left : p.right) == null) { |
2053 |
Node<K,V> xpn = xp.next; |
2054 |
TreeNode<K,V> x = map.newTreeNode(h, k, v, xpn); |
2055 |
if (dir <= 0) |
2056 |
xp.left = x; |
2057 |
else |
2058 |
xp.right = x; |
2059 |
xp.next = x; |
2060 |
x.parent = x.prev = xp; |
2061 |
if (xpn != null) |
2062 |
((TreeNode<K,V>)xpn).prev = x; |
2063 |
moveRootToFront(tab, balanceInsertion(root, x)); |
2064 |
return null; |
2065 |
} |
2066 |
} |
2067 |
} |
2068 |
|
2069 |
/** |
2070 |
* Removes the given node, that must be present before this call. |
2071 |
* This is messier than typical red-black deletion code because we |
2072 |
* cannot swap the contents of an interior node with a leaf |
2073 |
* successor that is pinned by "next" pointers that are accessible |
2074 |
* independently during traversal. So instead we swap the tree |
2075 |
* linkages. If the current tree appears to have too few nodes, |
2076 |
* the bin is converted back to a plain bin. (The test triggers |
2077 |
* somewhere between 2 and 6 nodes, depending on tree structure). |
2078 |
*/ |
2079 |
final void removeTreeNode(HashMap<K,V> map, Node<K,V>[] tab, |
2080 |
boolean movable) { |
2081 |
int n; |
2082 |
if (tab == null || (n = tab.length) == 0) |
2083 |
return; |
2084 |
int index = (n - 1) & hash; |
2085 |
TreeNode<K,V> first = (TreeNode<K,V>)tab[index], root = first, rl; |
2086 |
TreeNode<K,V> succ = (TreeNode<K,V>)next, pred = prev; |
2087 |
if (pred == null) |
2088 |
tab[index] = first = succ; |
2089 |
else |
2090 |
pred.next = succ; |
2091 |
if (succ != null) |
2092 |
succ.prev = pred; |
2093 |
if (first == null) |
2094 |
return; |
2095 |
if (root.parent != null) |
2096 |
root = root.root(); |
2097 |
if (root == null |
2098 |
|| (movable |
2099 |
&& (root.right == null |
2100 |
|| (rl = root.left) == null |
2101 |
|| rl.left == null))) { |
2102 |
tab[index] = first.untreeify(map); // too small |
2103 |
return; |
2104 |
} |
2105 |
TreeNode<K,V> p = this, pl = left, pr = right, replacement; |
2106 |
if (pl != null && pr != null) { |
2107 |
TreeNode<K,V> s = pr, sl; |
2108 |
while ((sl = s.left) != null) // find successor |
2109 |
s = sl; |
2110 |
boolean c = s.red; s.red = p.red; p.red = c; // swap colors |
2111 |
TreeNode<K,V> sr = s.right; |
2112 |
TreeNode<K,V> pp = p.parent; |
2113 |
if (s == pr) { // p was s's direct parent |
2114 |
p.parent = s; |
2115 |
s.right = p; |
2116 |
} |
2117 |
else { |
2118 |
TreeNode<K,V> sp = s.parent; |
2119 |
if ((p.parent = sp) != null) { |
2120 |
if (s == sp.left) |
2121 |
sp.left = p; |
2122 |
else |
2123 |
sp.right = p; |
2124 |
} |
2125 |
if ((s.right = pr) != null) |
2126 |
pr.parent = s; |
2127 |
} |
2128 |
p.left = null; |
2129 |
if ((p.right = sr) != null) |
2130 |
sr.parent = p; |
2131 |
if ((s.left = pl) != null) |
2132 |
pl.parent = s; |
2133 |
if ((s.parent = pp) == null) |
2134 |
root = s; |
2135 |
else if (p == pp.left) |
2136 |
pp.left = s; |
2137 |
else |
2138 |
pp.right = s; |
2139 |
if (sr != null) |
2140 |
replacement = sr; |
2141 |
else |
2142 |
replacement = p; |
2143 |
} |
2144 |
else if (pl != null) |
2145 |
replacement = pl; |
2146 |
else if (pr != null) |
2147 |
replacement = pr; |
2148 |
else |
2149 |
replacement = p; |
2150 |
if (replacement != p) { |
2151 |
TreeNode<K,V> pp = replacement.parent = p.parent; |
2152 |
if (pp == null) |
2153 |
(root = replacement).red = false; |
2154 |
else if (p == pp.left) |
2155 |
pp.left = replacement; |
2156 |
else |
2157 |
pp.right = replacement; |
2158 |
p.left = p.right = p.parent = null; |
2159 |
} |
2160 |
|
2161 |
TreeNode<K,V> r = p.red ? root : balanceDeletion(root, replacement); |
2162 |
|
2163 |
if (replacement == p) { // detach |
2164 |
TreeNode<K,V> pp = p.parent; |
2165 |
p.parent = null; |
2166 |
if (pp != null) { |
2167 |
if (p == pp.left) |
2168 |
pp.left = null; |
2169 |
else if (p == pp.right) |
2170 |
pp.right = null; |
2171 |
} |
2172 |
} |
2173 |
if (movable) |
2174 |
moveRootToFront(tab, r); |
2175 |
} |
2176 |
|
2177 |
/** |
2178 |
* Splits nodes in a tree bin into lower and upper tree bins, |
2179 |
* or untreeifies if now too small. Called only from resize; |
2180 |
* see above discussion about split bits and indices. |
2181 |
* |
2182 |
* @param map the map |
2183 |
* @param tab the table for recording bin heads |
2184 |
* @param index the index of the table being split |
2185 |
* @param bit the bit of hash to split on |
2186 |
*/ |
2187 |
final void split(HashMap<K,V> map, Node<K,V>[] tab, int index, int bit) { |
2188 |
TreeNode<K,V> b = this; |
2189 |
// Relink into lo and hi lists, preserving order |
2190 |
TreeNode<K,V> loHead = null, loTail = null; |
2191 |
TreeNode<K,V> hiHead = null, hiTail = null; |
2192 |
int lc = 0, hc = 0; |
2193 |
for (TreeNode<K,V> e = b, next; e != null; e = next) { |
2194 |
next = (TreeNode<K,V>)e.next; |
2195 |
e.next = null; |
2196 |
if ((e.hash & bit) == 0) { |
2197 |
if ((e.prev = loTail) == null) |
2198 |
loHead = e; |
2199 |
else |
2200 |
loTail.next = e; |
2201 |
loTail = e; |
2202 |
++lc; |
2203 |
} |
2204 |
else { |
2205 |
if ((e.prev = hiTail) == null) |
2206 |
hiHead = e; |
2207 |
else |
2208 |
hiTail.next = e; |
2209 |
hiTail = e; |
2210 |
++hc; |
2211 |
} |
2212 |
} |
2213 |
|
2214 |
if (loHead != null) { |
2215 |
if (lc <= UNTREEIFY_THRESHOLD) |
2216 |
tab[index] = loHead.untreeify(map); |
2217 |
else { |
2218 |
tab[index] = loHead; |
2219 |
if (hiHead != null) // (else is already treeified) |
2220 |
loHead.treeify(tab); |
2221 |
} |
2222 |
} |
2223 |
if (hiHead != null) { |
2224 |
if (hc <= UNTREEIFY_THRESHOLD) |
2225 |
tab[index + bit] = hiHead.untreeify(map); |
2226 |
else { |
2227 |
tab[index + bit] = hiHead; |
2228 |
if (loHead != null) |
2229 |
hiHead.treeify(tab); |
2230 |
} |
2231 |
} |
2232 |
} |
2233 |
|
2234 |
/* ------------------------------------------------------------ */ |
2235 |
// Red-black tree methods, all adapted from CLR |
2236 |
|
2237 |
static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root, |
2238 |
TreeNode<K,V> p) { |
2239 |
TreeNode<K,V> r, pp, rl; |
2240 |
if (p != null && (r = p.right) != null) { |
2241 |
if ((rl = p.right = r.left) != null) |
2242 |
rl.parent = p; |
2243 |
if ((pp = r.parent = p.parent) == null) |
2244 |
(root = r).red = false; |
2245 |
else if (pp.left == p) |
2246 |
pp.left = r; |
2247 |
else |
2248 |
pp.right = r; |
2249 |
r.left = p; |
2250 |
p.parent = r; |
2251 |
} |
2252 |
return root; |
2253 |
} |
2254 |
|
2255 |
static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root, |
2256 |
TreeNode<K,V> p) { |
2257 |
TreeNode<K,V> l, pp, lr; |
2258 |
if (p != null && (l = p.left) != null) { |
2259 |
if ((lr = p.left = l.right) != null) |
2260 |
lr.parent = p; |
2261 |
if ((pp = l.parent = p.parent) == null) |
2262 |
(root = l).red = false; |
2263 |
else if (pp.right == p) |
2264 |
pp.right = l; |
2265 |
else |
2266 |
pp.left = l; |
2267 |
l.right = p; |
2268 |
p.parent = l; |
2269 |
} |
2270 |
return root; |
2271 |
} |
2272 |
|
2273 |
static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root, |
2274 |
TreeNode<K,V> x) { |
2275 |
x.red = true; |
2276 |
for (TreeNode<K,V> xp, xpp, xppl, xppr;;) { |
2277 |
if ((xp = x.parent) == null) { |
2278 |
x.red = false; |
2279 |
return x; |
2280 |
} |
2281 |
else if (!xp.red || (xpp = xp.parent) == null) |
2282 |
return root; |
2283 |
if (xp == (xppl = xpp.left)) { |
2284 |
if ((xppr = xpp.right) != null && xppr.red) { |
2285 |
xppr.red = false; |
2286 |
xp.red = false; |
2287 |
xpp.red = true; |
2288 |
x = xpp; |
2289 |
} |
2290 |
else { |
2291 |
if (x == xp.right) { |
2292 |
root = rotateLeft(root, x = xp); |
2293 |
xpp = (xp = x.parent) == null ? null : xp.parent; |
2294 |
} |
2295 |
if (xp != null) { |
2296 |
xp.red = false; |
2297 |
if (xpp != null) { |
2298 |
xpp.red = true; |
2299 |
root = rotateRight(root, xpp); |
2300 |
} |
2301 |
} |
2302 |
} |
2303 |
} |
2304 |
else { |
2305 |
if (xppl != null && xppl.red) { |
2306 |
xppl.red = false; |
2307 |
xp.red = false; |
2308 |
xpp.red = true; |
2309 |
x = xpp; |
2310 |
} |
2311 |
else { |
2312 |
if (x == xp.left) { |
2313 |
root = rotateRight(root, x = xp); |
2314 |
xpp = (xp = x.parent) == null ? null : xp.parent; |
2315 |
} |
2316 |
if (xp != null) { |
2317 |
xp.red = false; |
2318 |
if (xpp != null) { |
2319 |
xpp.red = true; |
2320 |
root = rotateLeft(root, xpp); |
2321 |
} |
2322 |
} |
2323 |
} |
2324 |
} |
2325 |
} |
2326 |
} |
2327 |
|
2328 |
static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root, |
2329 |
TreeNode<K,V> x) { |
2330 |
for (TreeNode<K,V> xp, xpl, xpr;;) { |
2331 |
if (x == null || x == root) |
2332 |
return root; |
2333 |
else if ((xp = x.parent) == null) { |
2334 |
x.red = false; |
2335 |
return x; |
2336 |
} |
2337 |
else if (x.red) { |
2338 |
x.red = false; |
2339 |
return root; |
2340 |
} |
2341 |
else if ((xpl = xp.left) == x) { |
2342 |
if ((xpr = xp.right) != null && xpr.red) { |
2343 |
xpr.red = false; |
2344 |
xp.red = true; |
2345 |
root = rotateLeft(root, xp); |
2346 |
xpr = (xp = x.parent) == null ? null : xp.right; |
2347 |
} |
2348 |
if (xpr == null) |
2349 |
x = xp; |
2350 |
else { |
2351 |
TreeNode<K,V> sl = xpr.left, sr = xpr.right; |
2352 |
if ((sr == null || !sr.red) && |
2353 |
(sl == null || !sl.red)) { |
2354 |
xpr.red = true; |
2355 |
x = xp; |
2356 |
} |
2357 |
else { |
2358 |
if (sr == null || !sr.red) { |
2359 |
if (sl != null) |
2360 |
sl.red = false; |
2361 |
xpr.red = true; |
2362 |
root = rotateRight(root, xpr); |
2363 |
xpr = (xp = x.parent) == null ? |
2364 |
null : xp.right; |
2365 |
} |
2366 |
if (xpr != null) { |
2367 |
xpr.red = (xp == null) ? false : xp.red; |
2368 |
if ((sr = xpr.right) != null) |
2369 |
sr.red = false; |
2370 |
} |
2371 |
if (xp != null) { |
2372 |
xp.red = false; |
2373 |
root = rotateLeft(root, xp); |
2374 |
} |
2375 |
x = root; |
2376 |
} |
2377 |
} |
2378 |
} |
2379 |
else { // symmetric |
2380 |
if (xpl != null && xpl.red) { |
2381 |
xpl.red = false; |
2382 |
xp.red = true; |
2383 |
root = rotateRight(root, xp); |
2384 |
xpl = (xp = x.parent) == null ? null : xp.left; |
2385 |
} |
2386 |
if (xpl == null) |
2387 |
x = xp; |
2388 |
else { |
2389 |
TreeNode<K,V> sl = xpl.left, sr = xpl.right; |
2390 |
if ((sl == null || !sl.red) && |
2391 |
(sr == null || !sr.red)) { |
2392 |
xpl.red = true; |
2393 |
x = xp; |
2394 |
} |
2395 |
else { |
2396 |
if (sl == null || !sl.red) { |
2397 |
if (sr != null) |
2398 |
sr.red = false; |
2399 |
xpl.red = true; |
2400 |
root = rotateLeft(root, xpl); |
2401 |
xpl = (xp = x.parent) == null ? |
2402 |
null : xp.left; |
2403 |
} |
2404 |
if (xpl != null) { |
2405 |
xpl.red = (xp == null) ? false : xp.red; |
2406 |
if ((sl = xpl.left) != null) |
2407 |
sl.red = false; |
2408 |
} |
2409 |
if (xp != null) { |
2410 |
xp.red = false; |
2411 |
root = rotateRight(root, xp); |
2412 |
} |
2413 |
x = root; |
2414 |
} |
2415 |
} |
2416 |
} |
2417 |
} |
2418 |
} |
2419 |
|
2420 |
/** |
2421 |
* Recursive invariant check |
2422 |
*/ |
2423 |
static <K,V> boolean checkInvariants(TreeNode<K,V> t) { |
2424 |
TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right, |
2425 |
tb = t.prev, tn = (TreeNode<K,V>)t.next; |
2426 |
if (tb != null && tb.next != t) |
2427 |
return false; |
2428 |
if (tn != null && tn.prev != t) |
2429 |
return false; |
2430 |
if (tp != null && t != tp.left && t != tp.right) |
2431 |
return false; |
2432 |
if (tl != null && (tl.parent != t || tl.hash > t.hash)) |
2433 |
return false; |
2434 |
if (tr != null && (tr.parent != t || tr.hash < t.hash)) |
2435 |
return false; |
2436 |
if (t.red && tl != null && tl.red && tr != null && tr.red) |
2437 |
return false; |
2438 |
if (tl != null && !checkInvariants(tl)) |
2439 |
return false; |
2440 |
if (tr != null && !checkInvariants(tr)) |
2441 |
return false; |
2442 |
return true; |
2443 |
} |
2444 |
} |
2445 |
|
2446 |
} |