1 |
/* |
2 |
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. |
3 |
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 |
* |
5 |
* This code is free software; you can redistribute it and/or modify it |
6 |
* under the terms of the GNU General Public License version 2 only, as |
7 |
* published by the Free Software Foundation. Oracle designates this |
8 |
* particular file as subject to the "Classpath" exception as provided |
9 |
* by Oracle in the LICENSE file that accompanied this code. |
10 |
* |
11 |
* This code is distributed in the hope that it will be useful, but WITHOUT |
12 |
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
13 |
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
14 |
* version 2 for more details (a copy is included in the LICENSE file that |
15 |
* accompanied this code). |
16 |
* |
17 |
* You should have received a copy of the GNU General Public License version |
18 |
* 2 along with this work; if not, write to the Free Software Foundation, |
19 |
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
20 |
* |
21 |
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
22 |
* or visit www.oracle.com if you need additional information or have any |
23 |
* questions. |
24 |
*/ |
25 |
|
26 |
package java.util; |
27 |
|
28 |
import java.io.IOException; |
29 |
import java.io.InvalidObjectException; |
30 |
import java.io.Serializable; |
31 |
import java.lang.reflect.ParameterizedType; |
32 |
import java.lang.reflect.Type; |
33 |
import java.util.function.BiConsumer; |
34 |
import java.util.function.BiFunction; |
35 |
import java.util.function.Consumer; |
36 |
import java.util.function.Function; |
37 |
|
38 |
/** |
39 |
* Hash table based implementation of the <tt>Map</tt> interface. This |
40 |
* implementation provides all of the optional map operations, and permits |
41 |
* <tt>null</tt> values and the <tt>null</tt> key. (The <tt>HashMap</tt> |
42 |
* class is roughly equivalent to <tt>Hashtable</tt>, except that it is |
43 |
* unsynchronized and permits nulls.) This class makes no guarantees as to |
44 |
* the order of the map; in particular, it does not guarantee that the order |
45 |
* will remain constant over time. |
46 |
* |
47 |
* <p>This implementation provides constant-time performance for the basic |
48 |
* operations (<tt>get</tt> and <tt>put</tt>), assuming the hash function |
49 |
* disperses the elements properly among the buckets. Iteration over |
50 |
* collection views requires time proportional to the "capacity" of the |
51 |
* <tt>HashMap</tt> instance (the number of buckets) plus its size (the number |
52 |
* of key-value mappings). Thus, it's very important not to set the initial |
53 |
* capacity too high (or the load factor too low) if iteration performance is |
54 |
* important. |
55 |
* |
56 |
* <p>An instance of <tt>HashMap</tt> has two parameters that affect its |
57 |
* performance: <i>initial capacity</i> and <i>load factor</i>. The |
58 |
* <i>capacity</i> is the number of buckets in the hash table, and the initial |
59 |
* capacity is simply the capacity at the time the hash table is created. The |
60 |
* <i>load factor</i> is a measure of how full the hash table is allowed to |
61 |
* get before its capacity is automatically increased. When the number of |
62 |
* entries in the hash table exceeds the product of the load factor and the |
63 |
* current capacity, the hash table is <i>rehashed</i> (that is, internal data |
64 |
* structures are rebuilt) so that the hash table has approximately twice the |
65 |
* number of buckets. |
66 |
* |
67 |
* <p>As a general rule, the default load factor (.75) offers a good |
68 |
* tradeoff between time and space costs. Higher values decrease the |
69 |
* space overhead but increase the lookup cost (reflected in most of |
70 |
* the operations of the <tt>HashMap</tt> class, including |
71 |
* <tt>get</tt> and <tt>put</tt>). The expected number of entries in |
72 |
* the map and its load factor should be taken into account when |
73 |
* setting its initial capacity, so as to minimize the number of |
74 |
* rehash operations. If the initial capacity is greater than the |
75 |
* maximum number of entries divided by the load factor, no rehash |
76 |
* operations will ever occur. |
77 |
* |
78 |
* <p>If many mappings are to be stored in a <tt>HashMap</tt> |
79 |
* instance, creating it with a sufficiently large capacity will allow |
80 |
* the mappings to be stored more efficiently than letting it perform |
81 |
* automatic rehashing as needed to grow the table. Note that using |
82 |
* many keys with the same {@code hashCode()} is a sure way to slow |
83 |
* down performance of any hash table. To ameliorate impact, when keys |
84 |
* are {@link Comparable}, this class may use comparison order among |
85 |
* keys to help break ties. |
86 |
* |
87 |
* <p><strong>Note that this implementation is not synchronized.</strong> |
88 |
* If multiple threads access a hash map concurrently, and at least one of |
89 |
* the threads modifies the map structurally, it <i>must</i> be |
90 |
* synchronized externally. (A structural modification is any operation |
91 |
* that adds or deletes one or more mappings; merely changing the value |
92 |
* associated with a key that an instance already contains is not a |
93 |
* structural modification.) This is typically accomplished by |
94 |
* synchronizing on some object that naturally encapsulates the map. |
95 |
* |
96 |
* If no such object exists, the map should be "wrapped" using the |
97 |
* {@link Collections#synchronizedMap Collections.synchronizedMap} |
98 |
* method. This is best done at creation time, to prevent accidental |
99 |
* unsynchronized access to the map:<pre> |
100 |
* Map m = Collections.synchronizedMap(new HashMap(...));</pre> |
101 |
* |
102 |
* <p>The iterators returned by all of this class's "collection view methods" |
103 |
* are <i>fail-fast</i>: if the map is structurally modified at any time after |
104 |
* the iterator is created, in any way except through the iterator's own |
105 |
* <tt>remove</tt> method, the iterator will throw a |
106 |
* {@link ConcurrentModificationException}. Thus, in the face of concurrent |
107 |
* modification, the iterator fails quickly and cleanly, rather than risking |
108 |
* arbitrary, non-deterministic behavior at an undetermined time in the |
109 |
* future. |
110 |
* |
111 |
* <p>Note that the fail-fast behavior of an iterator cannot be guaranteed |
112 |
* as it is, generally speaking, impossible to make any hard guarantees in the |
113 |
* presence of unsynchronized concurrent modification. Fail-fast iterators |
114 |
* throw <tt>ConcurrentModificationException</tt> on a best-effort basis. |
115 |
* Therefore, it would be wrong to write a program that depended on this |
116 |
* exception for its correctness: <i>the fail-fast behavior of iterators |
117 |
* should be used only to detect bugs.</i> |
118 |
* |
119 |
* <p>This class is a member of the |
120 |
* <a href="{@docRoot}/../technotes/guides/collections/index.html"> |
121 |
* Java Collections Framework</a>. |
122 |
* |
123 |
* @param <K> the type of keys maintained by this map |
124 |
* @param <V> the type of mapped values |
125 |
* |
126 |
* @author Doug Lea |
127 |
* @author Josh Bloch |
128 |
* @author Arthur van Hoff |
129 |
* @author Neal Gafter |
130 |
* @see Object#hashCode() |
131 |
* @see Collection |
132 |
* @see Map |
133 |
* @see TreeMap |
134 |
* @see Hashtable |
135 |
* @since 1.2 |
136 |
*/ |
137 |
public class HashMap<K,V> extends AbstractMap<K,V> |
138 |
implements Map<K,V>, Cloneable, Serializable { |
139 |
|
140 |
// Fixed serialization version so serialized maps interoperate across releases.
private static final long serialVersionUID = 362498820763181265L;
141 |
|
142 |
/* |
143 |
* Implementation notes. |
144 |
* |
145 |
* This map usually acts as a binned (bucketed) hash table, but |
146 |
* when bins get too large, they are transformed into bins of |
147 |
* TreeNodes, each structured similarly to those in |
148 |
* java.util.TreeMap. Most methods try to use normal bins, but |
149 |
* relay to TreeNode methods when applicable (simply by checking |
150 |
* instanceof a node). Bins of TreeNodes may be traversed and |
151 |
* used like any others, but additionally support faster lookup |
152 |
* when overpopulated. However, since the vast majority of bins in |
153 |
* normal use are not overpopulated, checking for existence of |
154 |
* tree bins may be delayed in the course of table methods. |
155 |
* Statistically, most operations in normal usages access only the |
156 |
* first node (if present) of a bin, so most methods check this |
157 |
* first, avoiding extra overhead and cache misses in the most |
158 |
* common paths. |
159 |
* |
160 |
* Tree bins (i.e., bins whose elements are all TreeNodes) are |
161 |
* ordered primarily by hashCode, but in the case of ties, if two |
162 |
* elements are of the same "class C implements Comparable<C>", |
163 |
* type then their compareTo method is used for ordering. (We |
164 |
* conservatively check generic types via reflection to validate |
165 |
* this -- see method comparableClassFor). The added complexity |
166 |
* of tree bins is worthwhile in providing worst-case O(log n)
* operations when keys either have distinct hashes or are
* orderable. Thus, performance degrades gracefully under
169 |
* accidental or malicious usages in which hashCode() methods |
170 |
* return values that are poorly distributed, as well as those in |
171 |
* which many keys share a hashCode, so long as they are also |
172 |
* Comparable. |
173 |
* |
174 |
* Because TreeNodes are about twice the size of regular nodes, we |
175 |
* use them only when bins contain enough nodes to warrant use |
176 |
* (see TREEIFY_THRESHOLD). And when they become too small (due to |
177 |
* remove() or resizing) they are converted back to plain bins. |
178 |
* In usages with well-distributed user hashCodes, tree bins are |
179 |
* rarely used. Ideally, under random hashCodes, the frequency of |
180 |
* nodes in bins follows a Poisson distribution |
181 |
* (http://en.wikipedia.org/wiki/Poisson_distribution) with a |
182 |
* parameter of about 0.5 on average for the default resizing |
183 |
* threshold of 0.75, although with a large variance because of |
184 |
* resizing granularity. Ignoring variance, the expected |
185 |
* occurrences of list size k are (exp(-0.5) * pow(0.5, k) / |
186 |
* factorial(k)). The first values are: |
187 |
* |
188 |
* 0: 0.60653066 |
189 |
* 1: 0.30326533 |
190 |
* 2: 0.07581633 |
191 |
* 3: 0.01263606 |
192 |
* 4: 0.00157952 |
193 |
* 5: 0.00015795 |
194 |
* 6: 0.00001316 |
195 |
* 7: 0.00000094 |
196 |
* 8: 0.00000006 |
197 |
* more: less than 1 in ten million |
198 |
* |
199 |
* The root of a tree bin is normally its first node. However, |
200 |
* sometimes (currently only upon Iterator.remove), the root might |
201 |
* be elsewhere, but can be recovered following parent links |
202 |
* (method TreeNode.root()). |
203 |
* |
204 |
* All applicable internal methods accept a hash code as an |
205 |
* argument (as normally supplied from a public method), allowing
206 |
* them to call each other without recomputing user hashCodes. |
207 |
* |
208 |
* When bin lists are treeified, split, or untreeified, we keep |
209 |
* them in the same relative access/traversal order (i.e., field |
210 |
* Node.next) to better preserve locality, and to slightly |
211 |
* simplify handling of splits and traversals that invoke |
212 |
* iterator.remove. |
213 |
* |
214 |
* The use and transitions among plain vs tree modes is |
215 |
* complicated by the existence of subclass LinkedHashMap. See |
216 |
* below for hook methods defined to be invoked upon insertion, |
217 |
* removal and access that allow LinkedHashMap internals to |
218 |
* otherwise remain independent of these mechanics. |
219 |
* |
220 |
* Sorry if you don't like the concurrent-programming-like |
221 |
* SSA-based coding style, that helps avoid aliasing errors |
222 |
* amid all of the twisty pointer operations. |
223 |
*/ |
224 |
|
225 |
/**
 * The default initial capacity - MUST be a power of two.
 */
static final int DEFAULT_INITIAL_CAPACITY = 1 << 4; // aka 16

/**
 * The maximum capacity, used if a higher value is implicitly specified
 * by either of the constructors with arguments.
 * MUST be a power of two <= 1<<30.
 */
static final int MAXIMUM_CAPACITY = 1 << 30;

/**
 * The load factor used when none specified in constructor.
 */
static final float DEFAULT_LOAD_FACTOR = 0.75f;

/**
 * The bin count threshold for using a tree rather than list for a
 * bin.  Bins are converted to trees when adding an element to a
 * bin with at least this many nodes. The value must be greater
 * than 2 and should be at least 8 to mesh with assumptions in
 * tree removal about conversion back to plain bins upon
 * shrinkage.
 */
static final int TREEIFY_THRESHOLD = 8;

/**
 * The bin count threshold for untreeifying a (split) bin during a
 * resize operation. Should be less than TREEIFY_THRESHOLD, and at
 * most 6 to mesh with shrinkage detection under removal.
 */
static final int UNTREEIFY_THRESHOLD = 6;

/**
 * The smallest table capacity for which bins may be treeified.
 * (Otherwise the table is resized if too many nodes in a bin.)
 * Should be at least 4 * TREEIFY_THRESHOLD to avoid conflicts
 * between resizing and treeification thresholds.
 */
static final int MIN_TREEIFY_CAPACITY = 64;
265 |
|
266 |
/** |
267 |
* Basic hash bin node, used for most entries. (See below for |
268 |
* LinkedNode and TreeNode subclasses.) |
269 |
*/ |
270 |
static class Node<K,V> implements Map.Entry<K,V> { |
271 |
final int hash; |
272 |
final K key; |
273 |
V value; |
274 |
Node<K,V> next; |
275 |
|
276 |
Node(int hash, K key, V value, Node<K,V> next) { |
277 |
this.hash = hash; |
278 |
this.key = key; |
279 |
this.value = value; |
280 |
this.next = next; |
281 |
} |
282 |
|
283 |
public final K getKey() { return key; } |
284 |
public final V getValue() { return value; } |
285 |
public final String toString() { return key + "=" + value; } |
286 |
|
287 |
public final int hashCode() { |
288 |
return Objects.hashCode(key) ^ Objects.hashCode(value); |
289 |
} |
290 |
|
291 |
public final V setValue(V newValue) { |
292 |
V oldValue = value; |
293 |
value = newValue; |
294 |
return oldValue; |
295 |
} |
296 |
|
297 |
public final boolean equals(Object o) { |
298 |
if (o == this) |
299 |
return true; |
300 |
if (o instanceof Map.Entry) { |
301 |
Map.Entry<?,?> e = (Map.Entry<?,?>)o; |
302 |
if (Objects.equals(key, e.getKey()) && |
303 |
Objects.equals(value, e.getValue())) |
304 |
return true; |
305 |
} |
306 |
return false; |
307 |
} |
308 |
} |
309 |
|
310 |
/* ---------------- Static utilities -------------- */ |
311 |
|
312 |
/** |
313 |
* Computes key.hashCode() and spreads (XORs) higher bits of hash |
314 |
* to lower. Because the table uses power-of-two masking, sets of |
315 |
* hashes that vary only in bits above the current mask will |
316 |
* always collide. (Among known examples are sets of Float keys |
317 |
* holding consecutive whole numbers in small tables.) So we |
318 |
* apply a transform that spreads the impact of higher bits |
319 |
* downward. There is a tradeoff between speed, utility, and |
320 |
* quality of bit-spreading. Because many common sets of hashes |
321 |
* are already reasonably distributed (so don't benefit from |
322 |
* spreading), and because we use trees to handle large sets of |
323 |
* collisions in bins, we just XOR some shifted bits in the |
324 |
* cheapest possible way to reduce systematic lossage, as well as |
325 |
* to incorporate impact of the highest bits that would otherwise |
326 |
* never be used in index calculations because of table bounds. |
327 |
*/ |
328 |
static final int hash(Object key) { |
329 |
int h; |
330 |
return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16); |
331 |
} |
332 |
|
333 |
/** |
334 |
* Returns x's Class if it is of the form "class C implements |
335 |
* Comparable<C>", else null. |
336 |
*/ |
337 |
static Class<?> comparableClassFor(Object x) { |
338 |
if (x instanceof Comparable) { |
339 |
Class<?> c; Type[] ts, as; Type t; ParameterizedType p; |
340 |
if ((c = x.getClass()) == String.class) // bypass checks |
341 |
return c; |
342 |
if ((ts = c.getGenericInterfaces()) != null) { |
343 |
for (int i = 0; i < ts.length; ++i) { |
344 |
if (((t = ts[i]) instanceof ParameterizedType) && |
345 |
((p = (ParameterizedType)t).getRawType() == |
346 |
Comparable.class) && |
347 |
(as = p.getActualTypeArguments()) != null && |
348 |
as.length == 1 && as[0] == c) // type arg is c |
349 |
return c; |
350 |
} |
351 |
} |
352 |
} |
353 |
return null; |
354 |
} |
355 |
|
356 |
/** |
357 |
* Returns k.compareTo(x) if x matches kc (k's screened comparable |
358 |
* class), else 0. |
359 |
*/ |
360 |
@SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable |
361 |
static int compareComparables(Class<?> kc, Object k, Object x) { |
362 |
return (x == null || x.getClass() != kc ? 0 : |
363 |
((Comparable)k).compareTo(x)); |
364 |
} |
365 |
|
366 |
/** |
367 |
* Returns a power of two size for the given target capacity. |
368 |
*/ |
369 |
static final int tableSizeFor(int cap) { |
370 |
int n = cap - 1; |
371 |
n |= n >>> 1; |
372 |
n |= n >>> 2; |
373 |
n |= n >>> 4; |
374 |
n |= n >>> 8; |
375 |
n |= n >>> 16; |
376 |
return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1; |
377 |
} |
378 |
|
379 |
/* ---------------- Fields -------------- */ |
380 |
|
381 |
/**
 * The table, initialized on first use, and resized as
 * necessary. When allocated, length is always a power of two.
 * (We also tolerate length zero in some operations to allow
 * bootstrapping mechanics that are currently not needed.)
 */
transient Node<K,V>[] table;

/**
 * Holds cached entrySet(). Note that AbstractMap fields are used
 * for keySet() and values().
 */
transient Set<Map.Entry<K,V>> entrySet;

/**
 * The number of key-value mappings contained in this map.
 */
transient int size;

/**
 * The number of times this HashMap has been structurally modified.
 * Structural modifications are those that change the number of mappings in
 * the HashMap or otherwise modify its internal structure (e.g.,
 * rehash).  This field is used to make iterators on Collection-views of
 * the HashMap fail-fast.  (See ConcurrentModificationException).
 */
transient int modCount;

/**
 * The next size value at which to resize (capacity * load factor).
 *
 * @serial
 */
// (The javadoc description is true upon serialization.
// Additionally, if the table array has not been allocated, this
// field holds the initial array capacity, or zero signifying
// DEFAULT_INITIAL_CAPACITY.)
int threshold;

/**
 * The load factor for the hash table.  Fixed at construction time.
 *
 * @serial
 */
final float loadFactor;
425 |
|
426 |
/* ---------------- Public operations -------------- */ |
427 |
|
428 |
/**
 * Constructs an empty <tt>HashMap</tt> with the specified initial
 * capacity and load factor.
 *
 * @param  initialCapacity the initial capacity
 * @param  loadFactor      the load factor
 * @throws IllegalArgumentException if the initial capacity is negative
 *         or the load factor is nonpositive
 */
public HashMap(int initialCapacity, float loadFactor) {
    if (initialCapacity < 0)
        throw new IllegalArgumentException("Illegal initial capacity: " +
                                           initialCapacity);
    if (initialCapacity > MAXIMUM_CAPACITY)
        initialCapacity = MAXIMUM_CAPACITY;   // silently clamp, don't throw
    // isNaN check needed because (NaN <= 0) is false.
    if (loadFactor <= 0 || Float.isNaN(loadFactor))
        throw new IllegalArgumentException("Illegal load factor: " +
                                           loadFactor);
    this.loadFactor = loadFactor;
    // Until the table is allocated, threshold holds the (rounded-up)
    // initial capacity rather than a resize trigger; resize() decodes it.
    this.threshold = tableSizeFor(initialCapacity);
}
449 |
|
450 |
/**
 * Constructs an empty <tt>HashMap</tt> with the specified initial
 * capacity and the default load factor (0.75).
 *
 * @param  initialCapacity the initial capacity.
 * @throws IllegalArgumentException if the initial capacity is negative.
 */
public HashMap(int initialCapacity) {
    // Delegates all validation and threshold setup to the two-arg form.
    this(initialCapacity, DEFAULT_LOAD_FACTOR);
}
460 |
|
461 |
/**
 * Constructs an empty <tt>HashMap</tt> with the default initial capacity
 * (16) and the default load factor (0.75).  The table itself is
 * allocated lazily on first insertion (threshold 0 means "use default").
 */
public HashMap() {
    this.loadFactor = DEFAULT_LOAD_FACTOR; // all other fields defaulted
}
468 |
|
469 |
/**
 * Constructs a new <tt>HashMap</tt> with the same mappings as the
 * specified <tt>Map</tt>.  The <tt>HashMap</tt> is created with
 * default load factor (0.75) and an initial capacity sufficient to
 * hold the mappings in the specified <tt>Map</tt>.
 *
 * @param   m the map whose mappings are to be placed in this map
 * @throws  NullPointerException if the specified map is null
 */
public HashMap(Map<? extends K, ? extends V> m) {
    this.loadFactor = DEFAULT_LOAD_FACTOR;
    // evict=false: table is in creation mode (matters for LinkedHashMap).
    putMapEntries(m, false);
}
482 |
|
483 |
/** |
484 |
* Implements Map.putAll and Map constructor |
485 |
* |
486 |
* @param m the map |
487 |
* @param evict false when initially constructing this map, else |
488 |
* true (relayed to method afterNodeInsertion). |
489 |
*/ |
490 |
final void putMapEntries(Map<? extends K, ? extends V> m, boolean evict) { |
491 |
int s = m.size(); |
492 |
if (s > 0) { |
493 |
if (table == null) { // pre-size |
494 |
float ft = ((float)s / loadFactor) + 1.0F; |
495 |
int t = ((ft < (float)MAXIMUM_CAPACITY) ? |
496 |
(int)ft : MAXIMUM_CAPACITY); |
497 |
if (t > threshold) |
498 |
threshold = tableSizeFor(t); |
499 |
} |
500 |
else if (s > threshold) |
501 |
resize(); |
502 |
for (Map.Entry<? extends K, ? extends V> e : m.entrySet()) { |
503 |
K key = e.getKey(); |
504 |
V value = e.getValue(); |
505 |
putVal(hash(key), key, value, false, evict); |
506 |
} |
507 |
} |
508 |
} |
509 |
|
510 |
/**
 * Returns the number of key-value mappings in this map.
 *
 * @return the number of key-value mappings in this map
 */
public int size() {
    return size;
}
518 |
|
519 |
/**
 * Returns <tt>true</tt> if this map contains no key-value mappings.
 *
 * @return <tt>true</tt> if this map contains no key-value mappings
 */
public boolean isEmpty() {
    return size == 0;
}
527 |
|
528 |
/** |
529 |
* Returns the value to which the specified key is mapped, |
530 |
* or {@code null} if this map contains no mapping for the key. |
531 |
* |
532 |
* <p>More formally, if this map contains a mapping from a key |
533 |
* {@code k} to a value {@code v} such that {@code (key==null ? k==null : |
534 |
* key.equals(k))}, then this method returns {@code v}; otherwise |
535 |
* it returns {@code null}. (There can be at most one such mapping.) |
536 |
* |
537 |
* <p>A return value of {@code null} does not <i>necessarily</i> |
538 |
* indicate that the map contains no mapping for the key; it's also |
539 |
* possible that the map explicitly maps the key to {@code null}. |
540 |
* The {@link #containsKey containsKey} operation may be used to |
541 |
* distinguish these two cases. |
542 |
* |
543 |
* @see #put(Object, Object) |
544 |
*/ |
545 |
public V get(Object key) { |
546 |
Node<K,V> e; |
547 |
return (e = getNode(hash(key), key)) == null ? null : e.value; |
548 |
} |
549 |
|
550 |
/** |
551 |
* Implements Map.get and related methods |
552 |
* |
553 |
* @param hash hash for key |
554 |
* @param key the key |
555 |
* @return the node, or null if none |
556 |
*/ |
557 |
final Node<K,V> getNode(int hash, Object key) { |
558 |
Node<K,V>[] tab; Node<K,V> first, e; int n; K k; |
559 |
if ((tab = table) != null && (n = tab.length) > 0 && |
560 |
(first = tab[(n - 1) & hash]) != null) { |
561 |
if (first.hash == hash && // always check first node |
562 |
((k = first.key) == key || (key != null && key.equals(k)))) |
563 |
return first; |
564 |
if ((e = first.next) != null) { |
565 |
if (first instanceof TreeNode) |
566 |
return ((TreeNode<K,V>)first).getTreeNode(hash, key); |
567 |
do { |
568 |
if (e.hash == hash && |
569 |
((k = e.key) == key || (key != null && key.equals(k)))) |
570 |
return e; |
571 |
} while ((e = e.next) != null); |
572 |
} |
573 |
} |
574 |
return null; |
575 |
} |
576 |
|
577 |
/**
 * Returns <tt>true</tt> if this map contains a mapping for the
 * specified key.
 *
 * @param   key   The key whose presence in this map is to be tested
 * @return <tt>true</tt> if this map contains a mapping for the specified
 * key.
 */
public boolean containsKey(Object key) {
    // Unlike get(), a non-null node suffices even if its value is null.
    return getNode(hash(key), key) != null;
}
588 |
|
589 |
/**
 * Associates the specified value with the specified key in this map.
 * If the map previously contained a mapping for the key, the old
 * value is replaced.
 *
 * @param key key with which the specified value is to be associated
 * @param value value to be associated with the specified key
 * @return the previous value associated with <tt>key</tt>, or
 *         <tt>null</tt> if there was no mapping for <tt>key</tt>.
 *         (A <tt>null</tt> return can also indicate that the map
 *         previously associated <tt>null</tt> with <tt>key</tt>.)
 */
public V put(K key, V value) {
    // onlyIfAbsent=false: always overwrite; evict=true: normal mode.
    return putVal(hash(key), key, value, false, true);
}
604 |
|
605 |
/** |
606 |
* Implements Map.put and related methods |
607 |
* |
608 |
* @param hash hash for key |
609 |
* @param key the key |
610 |
* @param value the value to put |
611 |
* @param onlyIfAbsent if true, don't change existing value |
612 |
* @param evict if false, the table is in creation mode. |
613 |
* @return previous value, or null if none |
614 |
*/ |
615 |
final V putVal(int hash, K key, V value, boolean onlyIfAbsent, |
616 |
boolean evict) { |
617 |
Node<K,V>[] tab; Node<K,V> p; int n, i; |
618 |
if (size > threshold || (tab = table) == null || |
619 |
(n = tab.length) == 0) |
620 |
n = (tab = resize()).length; |
621 |
if ((p = tab[i = (n - 1) & hash]) == null) |
622 |
tab[i] = newNode(hash, key, value, null); |
623 |
else { |
624 |
Node<K,V> e; K k; |
625 |
if (p.hash == hash && |
626 |
((k = p.key) == key || (key != null && key.equals(k)))) |
627 |
e = p; |
628 |
else if (p instanceof TreeNode) |
629 |
e = ((TreeNode<K,V>)p).putTreeVal(this, tab, hash, key, value); |
630 |
else { |
631 |
for (int binCount = 0; ; ++binCount) { |
632 |
if ((e = p.next) == null) { |
633 |
p.next = newNode(hash, key, value, null); |
634 |
if (binCount >= TREEIFY_THRESHOLD - 1) // -1 for 1st |
635 |
treeifyBin(tab, hash); |
636 |
break; |
637 |
} |
638 |
if (e.hash == hash && |
639 |
((k = e.key) == key || (key != null && key.equals(k)))) |
640 |
break; |
641 |
p = e; |
642 |
} |
643 |
} |
644 |
if (e != null) { |
645 |
V oldValue = e.value; |
646 |
if (!onlyIfAbsent || oldValue == null) |
647 |
e.value = value; |
648 |
afterNodeAccess(e); |
649 |
return oldValue; |
650 |
} |
651 |
} |
652 |
++modCount; |
653 |
++size; |
654 |
afterNodeInsertion(evict); |
655 |
return null; |
656 |
} |
657 |
|
658 |
/**
 * Initializes or doubles table size.  If null, allocates in
 * accord with initial capacity target held in field threshold.
 * Otherwise, because we are using power-of-two expansion, the
 * elements from each bin must either stay at same index, or move
 * with a power of two offset in the new table.
 *
 * @return the table
 */
final Node<K,V>[] resize() {
    Node<K,V>[] oldTab = table;
    int oldCap = (oldTab == null) ? 0 : oldTab.length;
    int oldThr = threshold;
    int newCap, newThr = 0;
    if (oldCap > 0) {
        // Already at the cap: stop resizing, just let collisions grow.
        if (oldCap >= MAXIMUM_CAPACITY) {
            threshold = Integer.MAX_VALUE;
            return oldTab;
        }
        else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY &&
                 oldCap >= DEFAULT_INITIAL_CAPACITY)
            newThr = oldThr << 1; // double threshold
    }
    // Table not yet allocated: threshold encodes the requested capacity.
    else if (oldThr > 0)
        newCap = oldThr;
    else {
        // Zero threshold: default-constructed map, use the defaults.
        newCap = DEFAULT_INITIAL_CAPACITY;
        newThr = (int)(DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY);
    }
    // Paths above that left newThr == 0 compute it from the load factor.
    if (newThr == 0) {
        float ft = (float)newCap * loadFactor;
        newThr = (newCap < MAXIMUM_CAPACITY && ft < (float)MAXIMUM_CAPACITY ?
                  (int)ft : Integer.MAX_VALUE);
    }
    threshold = newThr;
    @SuppressWarnings({"rawtypes","unchecked"})
    Node<K,V>[] newTab = (Node<K,V>[])new Node[newCap];
    table = newTab;
    if (oldTab != null) {
        // Rehash: each old bin j splits into new bins j ("lo") and
        // j + oldCap ("hi") depending on the bit oldCap in each hash.
        for (int j = 0; j < oldCap; ++j) {
            Node<K,V> e;
            if ((e = oldTab[j]) != null) {
                oldTab[j] = null; // help GC
                if (e.next == null)
                    // Singleton bin: reindex directly.
                    newTab[e.hash & (newCap - 1)] = e;
                else if (e instanceof TreeNode)
                    // Tree bin: TreeNode.split also untreeifies if small.
                    ((TreeNode<K,V>)e).split(this, newTab, j, oldCap);
                else { // preserve order
                    Node<K,V> loHead = null, loTail = null;
                    Node<K,V> hiHead = null, hiTail = null;
                    Node<K,V> next;
                    do {
                        next = e.next;
                        if ((e.hash & oldCap) == 0) {
                            if (loTail == null)
                                loHead = e;
                            else
                                loTail.next = e;
                            loTail = e;
                        }
                        else {
                            if (hiTail == null)
                                hiHead = e;
                            else
                                hiTail.next = e;
                            hiTail = e;
                        }
                    } while ((e = next) != null);
                    if (loTail != null) {
                        loTail.next = null; // terminate lo list
                        newTab[j] = loHead;
                    }
                    if (hiTail != null) {
                        hiTail.next = null; // terminate hi list
                        newTab[j + oldCap] = hiHead;
                    }
                }
            }
        }
    }
    return newTab;
}
740 |
|
741 |
/** |
742 |
* Replaces all linked nodes in bin at index for given hash unless |
743 |
* table is too small, in which case resizes instead. |
744 |
*/ |
745 |
final void treeifyBin(Node<K,V>[] tab, int hash) { |
746 |
int n, index; Node<K,V> e; |
747 |
if (tab == null || (n = tab.length) < MIN_TREEIFY_CAPACITY) |
748 |
resize(); |
749 |
else if ((e = tab[index = (n - 1) & hash]) != null) { |
750 |
TreeNode<K,V> hd = null, tl = null; |
751 |
do { |
752 |
TreeNode<K,V> p = replacementTreeNode(e, null); |
753 |
if (tl == null) |
754 |
hd = p; |
755 |
else { |
756 |
p.prev = tl; |
757 |
tl.next = p; |
758 |
} |
759 |
tl = p; |
760 |
} while ((e = e.next) != null); |
761 |
if ((tab[index] = hd) != null) |
762 |
hd.treeify(tab); |
763 |
} |
764 |
} |
765 |
|
766 |
/**
 * Copies all of the mappings from the specified map to this map.
 * These mappings will replace any mappings that this map had for
 * any of the keys currently in the specified map.
 *
 * @param m mappings to be stored in this map
 * @throws NullPointerException if the specified map is null
 */
public void putAll(Map<? extends K, ? extends V> m) {
    // evict=true: the map is already constructed (LinkedHashMap hook).
    putMapEntries(m, true);
}
777 |
|
778 |
/** |
779 |
* Removes the mapping for the specified key from this map if present. |
780 |
* |
781 |
* @param key key whose mapping is to be removed from the map |
782 |
* @return the previous value associated with <tt>key</tt>, or |
783 |
* <tt>null</tt> if there was no mapping for <tt>key</tt>. |
784 |
* (A <tt>null</tt> return can also indicate that the map |
785 |
* previously associated <tt>null</tt> with <tt>key</tt>.) |
786 |
*/ |
787 |
public V remove(Object key) { |
788 |
Node<K,V> e; |
789 |
return (e = removeNode(hash(key), key, null, false, true)) == null ? |
790 |
null : e.value; |
791 |
} |
792 |
|
793 |
    /**
     * Implements Map.remove and related methods
     *
     * @param hash hash for key
     * @param key the key
     * @param value the value to match if matchValue, else ignored
     * @param matchValue if true only remove if value is equal
     * @param movable if false do not move other nodes while removing
     * @return the node, or null if none
     */
    final Node<K,V> removeNode(int hash, Object key, Object value,
                               boolean matchValue, boolean movable) {
        Node<K,V>[] tab; Node<K,V> p; int n, index;
        if ((tab = table) != null && (n = tab.length) > 0 &&
            (p = tab[index = (n - 1) & hash]) != null) {
            Node<K,V> node = null, e; K k; V v;
            // Check the bin head first, then search the tree or the chain.
            if (p.hash == hash &&
                ((k = p.key) == key || (key != null && key.equals(k))))
                node = p;
            else if ((e = p.next) != null) {
                if (p instanceof TreeNode)
                    node = ((TreeNode<K,V>)p).getTreeNode(hash, key);
                else {
                    do {
                        if (e.hash == hash &&
                            ((k = e.key) == key ||
                             (key != null && key.equals(k)))) {
                            node = e;
                            break;
                        }
                        p = e;  // p trails e: the predecessor needed for unlinking
                    } while ((e = e.next) != null);
                }
            }
            // Remove only if found and (when matchValue) the value matches.
            if (node != null && (!matchValue || (v = node.value) == value ||
                                 (value != null && value.equals(v)))) {
                if (node instanceof TreeNode)
                    ((TreeNode<K,V>)node).removeTreeNode(this, tab, movable);
                else if (node == p)          // node is the bin head
                    tab[index] = node.next;
                else                         // unlink from predecessor
                    p.next = node.next;
                ++modCount;                  // structural change: fail iterators fast
                --size;
                afterNodeRemoval(node);      // LinkedHashMap hook
                return node;
            }
        }
        return null;
    }
843 |
|
844 |
/** |
845 |
* Removes all of the mappings from this map. |
846 |
* The map will be empty after this call returns. |
847 |
*/ |
848 |
public void clear() { |
849 |
Node<K,V>[] tab; |
850 |
modCount++; |
851 |
if ((tab = table) != null && size > 0) { |
852 |
size = 0; |
853 |
for (int i = 0; i < tab.length; ++i) |
854 |
tab[i] = null; |
855 |
} |
856 |
} |
857 |
|
858 |
/** |
859 |
* Returns <tt>true</tt> if this map maps one or more keys to the |
860 |
* specified value. |
861 |
* |
862 |
* @param value value whose presence in this map is to be tested |
863 |
* @return <tt>true</tt> if this map maps one or more keys to the |
864 |
* specified value |
865 |
*/ |
866 |
public boolean containsValue(Object value) { |
867 |
Node<K,V>[] tab; V v; |
868 |
if ((tab = table) != null && size > 0) { |
869 |
for (int i = 0; i < tab.length; ++i) { |
870 |
for (Node<K,V> e = tab[i]; e != null; e = e.next) { |
871 |
if ((v = e.value) == value || |
872 |
(value != null && value.equals(v))) |
873 |
return true; |
874 |
} |
875 |
} |
876 |
} |
877 |
return false; |
878 |
} |
879 |
|
880 |
/** |
881 |
* Returns a {@link Set} view of the keys contained in this map. |
882 |
* The set is backed by the map, so changes to the map are |
883 |
* reflected in the set, and vice-versa. If the map is modified |
884 |
* while an iteration over the set is in progress (except through |
885 |
* the iterator's own <tt>remove</tt> operation), the results of |
886 |
* the iteration are undefined. The set supports element removal, |
887 |
* which removes the corresponding mapping from the map, via the |
888 |
* <tt>Iterator.remove</tt>, <tt>Set.remove</tt>, |
889 |
* <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> |
890 |
* operations. It does not support the <tt>add</tt> or <tt>addAll</tt> |
891 |
* operations. |
892 |
* |
893 |
* @return a set view of the keys contained in this map |
894 |
*/ |
895 |
public Set<K> keySet() { |
896 |
Set<K> ks; |
897 |
return (ks = keySet) == null ? (keySet = new KeySet()) : ks; |
898 |
} |
899 |
|
900 |
    /** Key view: backed by the map; element removal removes the mapping. */
    final class KeySet extends AbstractSet<K> {
        public final int size() { return size; }
        public final void clear() { HashMap.this.clear(); }
        public final Iterator<K> iterator() { return new KeyIterator(); }
        public final boolean contains(Object o) { return containsKey(o); }
        public final boolean remove(Object key) {
            // matchValue=false: remove whatever value is currently mapped.
            return removeNode(hash(key), key, null, false, true) != null;
        }
        public final Spliterator<K> spliterator() {
            // fence=-1: lazily bound to the table on first use (see getFence).
            return new KeySpliterator<K,V>(HashMap.this, 0, -1, 0, 0);
        }
        public final void forEach(Consumer<? super K> action) {
            Node<K,V>[] tab;
            if (action == null)
                throw new NullPointerException();
            if (size > 0 && (tab = table) != null) {
                int mc = modCount;
                for (int i = 0; i < tab.length; ++i) {
                    for (Node<K,V> e = tab[i]; e != null; e = e.next)
                        action.accept(e.key);
                }
                // Fail fast if the action structurally modified the map.
                if (modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }
    }
926 |
|
927 |
/** |
928 |
* Returns a {@link Collection} view of the values contained in this map. |
929 |
* The collection is backed by the map, so changes to the map are |
930 |
* reflected in the collection, and vice-versa. If the map is |
931 |
* modified while an iteration over the collection is in progress |
932 |
* (except through the iterator's own <tt>remove</tt> operation), |
933 |
* the results of the iteration are undefined. The collection |
934 |
* supports element removal, which removes the corresponding |
935 |
* mapping from the map, via the <tt>Iterator.remove</tt>, |
936 |
* <tt>Collection.remove</tt>, <tt>removeAll</tt>, |
937 |
* <tt>retainAll</tt> and <tt>clear</tt> operations. It does not |
938 |
* support the <tt>add</tt> or <tt>addAll</tt> operations. |
939 |
* |
940 |
* @return a view of the values contained in this map |
941 |
*/ |
942 |
public Collection<V> values() { |
943 |
Collection<V> vs; |
944 |
return (vs = values) == null ? (values = new Values()) : vs; |
945 |
} |
946 |
|
947 |
    /** Value view: backed by the map; does not support add operations. */
    final class Values extends AbstractCollection<V> {
        public final int size() { return size; }
        public final void clear() { HashMap.this.clear(); }
        public final Iterator<V> iterator() { return new ValueIterator(); }
        public final boolean contains(Object o) { return containsValue(o); }
        public final Spliterator<V> spliterator() {
            // fence=-1: lazily bound to the table on first use (see getFence).
            return new ValueSpliterator<K,V>(HashMap.this, 0, -1, 0, 0);
        }
        public final void forEach(Consumer<? super V> action) {
            Node<K,V>[] tab;
            if (action == null)
                throw new NullPointerException();
            if (size > 0 && (tab = table) != null) {
                int mc = modCount;
                for (int i = 0; i < tab.length; ++i) {
                    for (Node<K,V> e = tab[i]; e != null; e = e.next)
                        action.accept(e.value);
                }
                // Fail fast if the action structurally modified the map.
                if (modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }
    }
970 |
|
971 |
/** |
972 |
* Returns a {@link Set} view of the mappings contained in this map. |
973 |
* The set is backed by the map, so changes to the map are |
974 |
* reflected in the set, and vice-versa. If the map is modified |
975 |
* while an iteration over the set is in progress (except through |
976 |
* the iterator's own <tt>remove</tt> operation, or through the |
977 |
* <tt>setValue</tt> operation on a map entry returned by the |
978 |
* iterator) the results of the iteration are undefined. The set |
979 |
* supports element removal, which removes the corresponding |
980 |
* mapping from the map, via the <tt>Iterator.remove</tt>, |
981 |
* <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt> and |
982 |
* <tt>clear</tt> operations. It does not support the |
983 |
* <tt>add</tt> or <tt>addAll</tt> operations. |
984 |
* |
985 |
* @return a set view of the mappings contained in this map |
986 |
*/ |
987 |
public Set<Map.Entry<K,V>> entrySet() { |
988 |
Set<Map.Entry<K,V>> es; |
989 |
return (es = entrySet) == null ? (entrySet = new EntrySet()) : es; |
990 |
} |
991 |
|
992 |
    /** Entry view: backed by the map; removal removes the mapping. */
    final class EntrySet extends AbstractSet<Map.Entry<K,V>> {
        public final int size() { return size; }
        public final void clear() { HashMap.this.clear(); }
        public final Iterator<Map.Entry<K,V>> iterator() {
            return new EntryIterator();
        }
        public final boolean contains(Object o) {
            if (!(o instanceof Map.Entry))
                return false;
            Map.Entry<?,?> e = (Map.Entry<?,?>) o;
            Object key = e.getKey();
            // Look up by key, then compare the whole entry (key and value).
            Node<K,V> candidate = getNode(hash(key), key);
            return candidate != null && candidate.equals(e);
        }
        public final boolean remove(Object o) {
            if (o instanceof Map.Entry) {
                Map.Entry<?,?> e = (Map.Entry<?,?>) o;
                Object key = e.getKey();
                Object value = e.getValue();
                // matchValue=true: remove only an exact key/value match.
                return removeNode(hash(key), key, value, true, true) != null;
            }
            return false;
        }
        public final Spliterator<Map.Entry<K,V>> spliterator() {
            // fence=-1: lazily bound to the table on first use (see getFence).
            return new EntrySpliterator<K,V>(HashMap.this, 0, -1, 0, 0);
        }
        public final void forEach(Consumer<? super Map.Entry<K,V>> action) {
            Node<K,V>[] tab;
            if (action == null)
                throw new NullPointerException();
            if (size > 0 && (tab = table) != null) {
                int mc = modCount;
                for (int i = 0; i < tab.length; ++i) {
                    for (Node<K,V> e = tab[i]; e != null; e = e.next)
                        action.accept(e);
                }
                // Fail fast if the action structurally modified the map.
                if (modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }
    }
1033 |
|
1034 |
// Overrides of JDK8 Map extension methods |
1035 |
|
1036 |
public V getOrDefault(Object key, V defaultValue) { |
1037 |
Node<K,V> e; |
1038 |
return (e = getNode(hash(key), key)) == null ? defaultValue : e.value; |
1039 |
} |
1040 |
|
1041 |
public V putIfAbsent(K key, V value) { |
1042 |
return putVal(hash(key), key, value, true, true); |
1043 |
} |
1044 |
|
1045 |
public boolean remove(Object key, Object value) { |
1046 |
return removeNode(hash(key), key, value, true, true) != null; |
1047 |
} |
1048 |
|
1049 |
public boolean replace(K key, V oldValue, V newValue) { |
1050 |
Node<K,V> e; V v; |
1051 |
if ((e = getNode(hash(key), key)) != null && |
1052 |
((v = e.value) == oldValue || (v != null && v.equals(oldValue)))) { |
1053 |
e.value = newValue; |
1054 |
afterNodeAccess(e); |
1055 |
return true; |
1056 |
} |
1057 |
return false; |
1058 |
} |
1059 |
|
1060 |
public V replace(K key, V value) { |
1061 |
Node<K,V> e; |
1062 |
if ((e = getNode(hash(key), key)) != null) { |
1063 |
V oldValue = e.value; |
1064 |
e.value = value; |
1065 |
afterNodeAccess(e); |
1066 |
return oldValue; |
1067 |
} |
1068 |
return null; |
1069 |
} |
1070 |
|
1071 |
    /**
     * If the key is absent (or mapped to null), computes a value with the
     * mapping function and inserts it unless the computed value is null.
     */
    public V computeIfAbsent(K key,
                             Function<? super K, ? extends V> mappingFunction) {
        if (mappingFunction == null)
            throw new NullPointerException();
        int hash = hash(key);
        Node<K,V>[] tab; Node<K,V> first; int n, i;
        int binCount = 0;            // chain length seen, for treeify decision
        TreeNode<K,V> t = null;      // non-null when the bin is a tree
        Node<K,V> old = null;        // existing node for key, if any
        // Pre-resize so an insertion below cannot exceed the threshold.
        if (size > threshold || (tab = table) == null ||
            (n = tab.length) == 0)
            n = (tab = resize()).length;
        if ((first = tab[i = (n - 1) & hash]) != null) {
            if (first instanceof TreeNode)
                old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
            else {
                Node<K,V> e = first; K k;
                do {
                    if (e.hash == hash &&
                        ((k = e.key) == key || (key != null && key.equals(k)))) {
                        old = e;
                        break;
                    }
                    ++binCount;
                } while ((e = e.next) != null);
            }
            V oldValue;
            // Existing non-null mapping: return it without calling the function.
            if (old != null && (oldValue = old.value) != null) {
                afterNodeAccess(old);
                return oldValue;
            }
        }
        V v = mappingFunction.apply(key);
        if (old != null) {
            // Key present but mapped to null: overwrite in place.
            // NOTE(review): when v is null this still stores null and fires
            // afterNodeAccess — confirm this ordering is intended.
            old.value = v;
            afterNodeAccess(old);
            return v;
        }
        else if (v == null)          // no value computed: map unchanged
            return null;
        else if (t != null)
            t.putTreeVal(this, tab, hash, key, v);
        else {
            tab[i] = newNode(hash, key, v, first);
            if (binCount >= TREEIFY_THRESHOLD - 1)
                treeifyBin(tab, hash);
        }
        ++modCount;
        ++size;
        afterNodeInsertion(true);    // LinkedHashMap eviction hook
        return v;
    }
1123 |
|
1124 |
public V computeIfPresent(K key, |
1125 |
BiFunction<? super K, ? super V, ? extends V> remappingFunction) { |
1126 |
Node<K,V> e; V oldValue; |
1127 |
int hash = hash(key); |
1128 |
if ((e = getNode(hash, key)) != null && |
1129 |
(oldValue = e.value) != null) { |
1130 |
V v = remappingFunction.apply(key, oldValue); |
1131 |
if (v != null) { |
1132 |
e.value = v; |
1133 |
afterNodeAccess(e); |
1134 |
return v; |
1135 |
} |
1136 |
else |
1137 |
removeNode(hash, key, null, false, true); |
1138 |
} |
1139 |
return null; |
1140 |
} |
1141 |
|
1142 |
    /**
     * Remaps the value for the key from its current value (or null when
     * absent): a non-null result is stored, a null result removes any
     * existing mapping.
     */
    public V compute(K key,
                     BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
        if (remappingFunction == null)
            throw new NullPointerException();
        int hash = hash(key);
        Node<K,V>[] tab; Node<K,V> first; int n, i;
        int binCount = 0;            // chain length seen, for treeify decision
        TreeNode<K,V> t = null;      // non-null when the bin is a tree
        Node<K,V> old = null;        // existing node for key, if any
        // Pre-resize so an insertion below cannot exceed the threshold.
        if (size > threshold || (tab = table) == null ||
            (n = tab.length) == 0)
            n = (tab = resize()).length;
        if ((first = tab[i = (n - 1) & hash]) != null) {
            if (first instanceof TreeNode)
                old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
            else {
                Node<K,V> e = first; K k;
                do {
                    if (e.hash == hash &&
                        ((k = e.key) == key || (key != null && key.equals(k)))) {
                        old = e;
                        break;
                    }
                    ++binCount;
                } while ((e = e.next) != null);
            }
        }
        V oldValue = (old == null) ? null : old.value;
        V v = remappingFunction.apply(key, oldValue);
        if (old != null) {
            if (v != null) {         // replace in place
                old.value = v;
                afterNodeAccess(old);
            }
            else                     // null result removes the mapping
                removeNode(hash, key, null, false, true);
        }
        else if (v != null) {        // new mapping for a previously absent key
            if (t != null)
                t.putTreeVal(this, tab, hash, key, v);
            else {
                tab[i] = newNode(hash, key, v, first);
                if (binCount >= TREEIFY_THRESHOLD - 1)
                    treeifyBin(tab, hash);
            }
            ++modCount;
            ++size;
            afterNodeInsertion(true);
        }
        return v;
    }
1193 |
|
1194 |
    /**
     * If the key is absent, associates it with {@code value}; otherwise
     * combines the existing value and {@code value} with the remapping
     * function, removing the mapping when the result is null.
     */
    public V merge(K key, V value,
                   BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
        if (remappingFunction == null)
            throw new NullPointerException();
        // NOTE(review): Map.merge specifies NPE for a null value; here a
        // null value is passed through to the function (key present) or
        // silently returned (key absent) — confirm this is intended.
        int hash = hash(key);
        Node<K,V>[] tab; Node<K,V> first; int n, i;
        int binCount = 0;            // chain length seen, for treeify decision
        TreeNode<K,V> t = null;      // non-null when the bin is a tree
        Node<K,V> old = null;        // existing node for key, if any
        // Pre-resize so an insertion below cannot exceed the threshold.
        if (size > threshold || (tab = table) == null ||
            (n = tab.length) == 0)
            n = (tab = resize()).length;
        if ((first = tab[i = (n - 1) & hash]) != null) {
            if (first instanceof TreeNode)
                old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
            else {
                Node<K,V> e = first; K k;
                do {
                    if (e.hash == hash &&
                        ((k = e.key) == key || (key != null && key.equals(k)))) {
                        old = e;
                        break;
                    }
                    ++binCount;
                } while ((e = e.next) != null);
            }
        }
        if (old != null) {
            // Key present: combine old and new values.
            V v = remappingFunction.apply(old.value, value);
            if (v != null) {
                old.value = v;
                afterNodeAccess(old);
            }
            else                     // null result removes the mapping
                removeNode(hash, key, null, false, true);
            return v;
        }
        if (value != null) {
            // Key absent: plain insertion of the given value.
            if (t != null)
                t.putTreeVal(this, tab, hash, key, value);
            else {
                tab[i] = newNode(hash, key, value, first);
                if (binCount >= TREEIFY_THRESHOLD - 1)
                    treeifyBin(tab, hash);
            }
            ++modCount;
            ++size;
            afterNodeInsertion(true);
        }
        return value;
    }
1245 |
|
1246 |
public void forEach(BiConsumer<? super K, ? super V> action) { |
1247 |
Node<K,V>[] tab; |
1248 |
if (action == null) |
1249 |
throw new NullPointerException(); |
1250 |
if (size > 0 && (tab = table) != null) { |
1251 |
int mc = modCount; |
1252 |
for (int i = 0; i < tab.length; ++i) { |
1253 |
for (Node<K,V> e = tab[i]; e != null; e = e.next) |
1254 |
action.accept(e.key, e.value); |
1255 |
} |
1256 |
if (modCount != mc) |
1257 |
throw new ConcurrentModificationException(); |
1258 |
} |
1259 |
} |
1260 |
|
1261 |
public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) { |
1262 |
Node<K,V>[] tab; |
1263 |
if (function == null) |
1264 |
throw new NullPointerException(); |
1265 |
if (size > 0 && (tab = table) != null) { |
1266 |
int mc = modCount; |
1267 |
for (int i = 0; i < tab.length; ++i) { |
1268 |
for (Node<K,V> e = tab[i]; e != null; e = e.next) { |
1269 |
e.value = function.apply(e.key, e.value); |
1270 |
} |
1271 |
} |
1272 |
if (modCount != mc) |
1273 |
throw new ConcurrentModificationException(); |
1274 |
} |
1275 |
} |
1276 |
|
1277 |
/* ------------------------------------------------------------ */ |
1278 |
// Cloning and serialization |
1279 |
|
1280 |
/** |
1281 |
* Returns a shallow copy of this <tt>HashMap</tt> instance: the keys and |
1282 |
* values themselves are not cloned. |
1283 |
* |
1284 |
* @return a shallow copy of this map |
1285 |
*/ |
1286 |
@SuppressWarnings("unchecked") |
1287 |
public Object clone() { |
1288 |
HashMap<K,V> result = null; |
1289 |
try { |
1290 |
result = (HashMap<K,V>)super.clone(); |
1291 |
} catch (CloneNotSupportedException e) { |
1292 |
// assert false; |
1293 |
} |
1294 |
result.reinitialize(); |
1295 |
result.putMapEntries(this, false); |
1296 |
return result; |
1297 |
} |
1298 |
|
1299 |
// These methods are also used when serializing HashSets |
1300 |
final float loadFactor() { return loadFactor; } |
1301 |
final int capacity() { |
1302 |
return (table != null ? table.length : |
1303 |
(threshold > 0 ? threshold : DEFAULT_INITIAL_CAPACITY)); |
1304 |
} |
1305 |
|
1306 |
    /**
     * Save the state of the <tt>HashMap</tt> instance to a stream (i.e.,
     * serialize it).
     *
     * @serialData The <i>capacity</i> of the HashMap (the length of the
     *             bucket array) is emitted (int), followed by the
     *             <i>size</i> (an int, the number of key-value
     *             mappings), followed by the key (Object) and value (Object)
     *             for each key-value mapping.  The key-value mappings are
     *             emitted in no particular order.
     */
    private void writeObject(java.io.ObjectOutputStream s)
        throws IOException {
        // Capture capacity before defaultWriteObject, which may run
        // subclass/hidden serialization logic.
        int buckets = capacity();
        // Write out the threshold, loadfactor, and any hidden stuff
        s.defaultWriteObject();
        s.writeInt(buckets);
        s.writeInt(size);
        internalWriteEntries(s);  // subclass hook: keys/values in order
    }
1326 |
|
1327 |
    /**
     * Reconstitute the {@code HashMap} instance from a stream (i.e.,
     * deserialize it).
     */
    private void readObject(java.io.ObjectInputStream s)
        throws IOException, ClassNotFoundException {
        // Read in the threshold (ignored), loadfactor, and any hidden stuff
        s.defaultReadObject();
        reinitialize();
        // Validate the deserialized load factor before using it for sizing.
        if (loadFactor <= 0 || Float.isNaN(loadFactor))
            throw new InvalidObjectException("Illegal load factor: " +
                                             loadFactor);
        s.readInt();                // Read and ignore number of buckets
        int mappings = s.readInt(); // Read number of mappings (size)
        if (mappings < 0)
            throw new InvalidObjectException("Illegal mappings count: " +
                                             mappings);
        else if (mappings > 0) { // (if zero, use defaults)
            // Size the table using given load factor only if within
            // range of 0.25...4.0
            float lf = Math.min(Math.max(0.25f, loadFactor), 4.0f);
            float fc = (float)mappings / lf + 1.0f;
            // Clamp capacity to [DEFAULT_INITIAL_CAPACITY, MAXIMUM_CAPACITY],
            // rounding up to a power of two in between.
            int cap = ((fc < DEFAULT_INITIAL_CAPACITY) ?
                       DEFAULT_INITIAL_CAPACITY :
                       (fc >= MAXIMUM_CAPACITY) ?
                       MAXIMUM_CAPACITY :
                       tableSizeFor((int)fc));
            float ft = (float)cap * lf;
            threshold = ((cap < MAXIMUM_CAPACITY && ft < MAXIMUM_CAPACITY) ?
                         (int)ft : Integer.MAX_VALUE);
            @SuppressWarnings({"rawtypes","unchecked"})
            Node<K,V>[] tab = (Node<K,V>[])new Node[cap];
            table = tab;

            // Read the keys and values, and put the mappings in the HashMap
            for (int i = 0; i < mappings; i++) {
                @SuppressWarnings("unchecked")
                K key = (K) s.readObject();
                @SuppressWarnings("unchecked")
                V value = (V) s.readObject();
                // evict=false: treat as initial construction for subclasses.
                putVal(hash(key), key, value, false, false);
            }
        }
    }
1371 |
|
1372 |
/* ------------------------------------------------------------ */ |
1373 |
// iterators |
1374 |
|
1375 |
    /**
     * Base iterator over the table's nodes: walks buckets in index order,
     * following each bucket's next chain, with fail-fast modCount checks.
     */
    abstract class HashIterator {
        Node<K,V> next;        // next entry to return
        Node<K,V> current;     // current entry
        int expectedModCount;  // for fast-fail
        int index;             // current slot

        HashIterator() {
            expectedModCount = modCount;
            Node<K,V>[] t = table;
            current = next = null;
            index = 0;
            if (t != null && size > 0) { // advance to first entry
                do {} while (index < t.length && (next = t[index++]) == null);
            }
        }

        public final boolean hasNext() {
            return next != null;
        }

        final Node<K,V> nextNode() {
            Node<K,V>[] t;
            Node<K,V> e = next;
            if (modCount != expectedModCount)
                throw new ConcurrentModificationException();
            if (e == null)
                throw new NoSuchElementException();
            current = e;
            // End of this chain: scan forward for the next non-empty bucket.
            if ((next = e.next) == null && (t = table) != null) {
                do {} while (index < t.length && (next = t[index++]) == null);
            }
            return e;
        }

        public final void remove() {
            Node<K,V> p = current;
            if (p == null)
                throw new IllegalStateException();
            if (modCount != expectedModCount)
                throw new ConcurrentModificationException();
            current = null;
            K key = p.key;
            // movable=false: don't restructure while iteration is in flight.
            removeNode(hash(key), key, null, false, false);
            // Re-sync so our own removal doesn't trip the fail-fast check.
            expectedModCount = modCount;
        }

    }
1422 |
|
1423 |
    /** Iterator over keys, in table traversal order. */
    final class KeyIterator extends HashIterator
        implements Iterator<K> {
        public final K next() { return nextNode().key; }
    }
1427 |
|
1428 |
    /** Iterator over values, in table traversal order. */
    final class ValueIterator extends HashIterator
        implements Iterator<V> {
        public final V next() { return nextNode().value; }
    }
1432 |
|
1433 |
    /** Iterator over entries (the Node itself is the Map.Entry). */
    final class EntryIterator extends HashIterator
        implements Iterator<Map.Entry<K,V>> {
        public final Map.Entry<K,V> next() { return nextNode(); }
    }
1437 |
|
1438 |
/* ------------------------------------------------------------ */ |
1439 |
// spliterators |
1440 |
|
1441 |
    /**
     * Base class for the key/value/entry spliterators: tracks a bucket
     * range [index, fence) plus the current chain node, binding fence,
     * size estimate, and modCount lazily on first use.
     */
    static class HashMapSpliterator<K,V> {
        final HashMap<K,V> map;
        Node<K,V> current;          // current node
        int index;                  // current index, modified on advance/split
        int fence;                  // one past last index
        int est;                    // size estimate
        int expectedModCount;       // for comodification checks

        HashMapSpliterator(HashMap<K,V> m, int origin,
                           int fence, int est,
                           int expectedModCount) {
            this.map = m;
            this.index = origin;
            this.fence = fence;
            this.est = est;
            this.expectedModCount = expectedModCount;
        }

        final int getFence() { // initialize fence and size on first use
            int hi;
            // fence < 0 marks an unbound spliterator (constructed with -1).
            if ((hi = fence) < 0) {
                HashMap<K,V> m = map;
                est = m.size;
                expectedModCount = m.modCount;
                Node<K,V>[] tab = m.table;
                hi = fence = (tab == null) ? 0 : tab.length;
            }
            return hi;
        }

        public final long estimateSize() {
            getFence(); // force init
            return (long) est;
        }
    }
1476 |
|
1477 |
    /** Spliterator over keys; splits by halving the bucket range. */
    static final class KeySpliterator<K,V>
        extends HashMapSpliterator<K,V>
        implements Spliterator<K> {
        KeySpliterator(HashMap<K,V> m, int origin, int fence, int est,
                       int expectedModCount) {
            super(m, origin, fence, est, expectedModCount);
        }

        public KeySpliterator<K,V> trySplit() {
            int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
            // No split when the range is exhausted or we are mid-chain.
            return (lo >= mid || current != null) ? null :
                new KeySpliterator<K,V>(map, lo, index = mid, est >>>= 1,
                                        expectedModCount);
        }

        public void forEachRemaining(Consumer<? super K> action) {
            int i, hi, mc;
            if (action == null)
                throw new NullPointerException();
            HashMap<K,V> m = map;
            Node<K,V>[] tab = m.table;
            // Inline of getFence(), also capturing mc for the final check.
            if ((hi = fence) < 0) {
                mc = expectedModCount = m.modCount;
                hi = fence = (tab == null) ? 0 : tab.length;
            }
            else
                mc = expectedModCount;
            if (tab != null && tab.length >= hi &&
                (i = index) >= 0 && (i < (index = hi) || current != null)) {
                Node<K,V> p = current;
                current = null;
                // Alternate between fetching the next bucket head and
                // walking the current chain until the range is drained.
                do {
                    if (p == null)
                        p = tab[i++];
                    else {
                        action.accept(p.key);
                        p = p.next;
                    }
                } while (p != null || i < hi);
                if (m.modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }

        public boolean tryAdvance(Consumer<? super K> action) {
            int hi;
            if (action == null)
                throw new NullPointerException();
            Node<K,V>[] tab = map.table;
            if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
                while (current != null || index < hi) {
                    if (current == null)
                        current = tab[index++];
                    else {
                        K k = current.key;
                        current = current.next;
                        action.accept(k);
                        // Check after the action, which may have modified the map.
                        if (map.modCount != expectedModCount)
                            throw new ConcurrentModificationException();
                        return true;
                    }
                }
            }
            return false;
        }

        public int characteristics() {
            // SIZED only before binding or when covering the full map.
            return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
                Spliterator.DISTINCT;
        }
    }
1548 |
|
1549 |
    /** Spliterator over values; splits by halving the bucket range. */
    static final class ValueSpliterator<K,V>
        extends HashMapSpliterator<K,V>
        implements Spliterator<V> {
        ValueSpliterator(HashMap<K,V> m, int origin, int fence, int est,
                         int expectedModCount) {
            super(m, origin, fence, est, expectedModCount);
        }

        public ValueSpliterator<K,V> trySplit() {
            int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
            // No split when the range is exhausted or we are mid-chain.
            return (lo >= mid || current != null) ? null :
                new ValueSpliterator<K,V>(map, lo, index = mid, est >>>= 1,
                                          expectedModCount);
        }

        public void forEachRemaining(Consumer<? super V> action) {
            int i, hi, mc;
            if (action == null)
                throw new NullPointerException();
            HashMap<K,V> m = map;
            Node<K,V>[] tab = m.table;
            // Inline of getFence(), also capturing mc for the final check.
            if ((hi = fence) < 0) {
                mc = expectedModCount = m.modCount;
                hi = fence = (tab == null) ? 0 : tab.length;
            }
            else
                mc = expectedModCount;
            if (tab != null && tab.length >= hi &&
                (i = index) >= 0 && (i < (index = hi) || current != null)) {
                Node<K,V> p = current;
                current = null;
                do {
                    if (p == null)
                        p = tab[i++];
                    else {
                        action.accept(p.value);
                        p = p.next;
                    }
                } while (p != null || i < hi);
                if (m.modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }

        public boolean tryAdvance(Consumer<? super V> action) {
            int hi;
            if (action == null)
                throw new NullPointerException();
            Node<K,V>[] tab = map.table;
            if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
                while (current != null || index < hi) {
                    if (current == null)
                        current = tab[index++];
                    else {
                        V v = current.value;
                        current = current.next;
                        action.accept(v);
                        // Check after the action, which may have modified the map.
                        if (map.modCount != expectedModCount)
                            throw new ConcurrentModificationException();
                        return true;
                    }
                }
            }
            return false;
        }

        public int characteristics() {
            // SIZED only before binding or when covering the full map;
            // values are not DISTINCT.
            return (fence < 0 || est == map.size ? Spliterator.SIZED : 0);
        }
    }
1619 |
|
1620 |
    /** Spliterator over entries; splits by halving the bucket range. */
    static final class EntrySpliterator<K,V>
        extends HashMapSpliterator<K,V>
        implements Spliterator<Map.Entry<K,V>> {
        EntrySpliterator(HashMap<K,V> m, int origin, int fence, int est,
                         int expectedModCount) {
            super(m, origin, fence, est, expectedModCount);
        }

        public EntrySpliterator<K,V> trySplit() {
            int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
            // No split when the range is exhausted or we are mid-chain.
            return (lo >= mid || current != null) ? null :
                new EntrySpliterator<K,V>(map, lo, index = mid, est >>>= 1,
                                          expectedModCount);
        }

        public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
            int i, hi, mc;
            if (action == null)
                throw new NullPointerException();
            HashMap<K,V> m = map;
            Node<K,V>[] tab = m.table;
            // Inline of getFence(), also capturing mc for the final check.
            if ((hi = fence) < 0) {
                mc = expectedModCount = m.modCount;
                hi = fence = (tab == null) ? 0 : tab.length;
            }
            else
                mc = expectedModCount;
            if (tab != null && tab.length >= hi &&
                (i = index) >= 0 && (i < (index = hi) || current != null)) {
                Node<K,V> p = current;
                current = null;
                do {
                    if (p == null)
                        p = tab[i++];
                    else {
                        action.accept(p);
                        p = p.next;
                    }
                } while (p != null || i < hi);
                if (m.modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }

        public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
            int hi;
            if (action == null)
                throw new NullPointerException();
            Node<K,V>[] tab = map.table;
            if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
                while (current != null || index < hi) {
                    if (current == null)
                        current = tab[index++];
                    else {
                        Node<K,V> e = current;
                        current = current.next;
                        action.accept(e);
                        // Check after the action, which may have modified the map.
                        if (map.modCount != expectedModCount)
                            throw new ConcurrentModificationException();
                        return true;
                    }
                }
            }
            return false;
        }

        public int characteristics() {
            // SIZED only before binding or when covering the full map.
            return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
                Spliterator.DISTINCT;
        }
    }
1691 |
|
1692 |
/* ------------------------------------------------------------ */ |
1693 |
// LinkedHashMap support |
1694 |
|
1695 |
    /**
     * Entry for LinkedHashMap entries.  Created only from
     * LinkedHashMap, but must be defined here.
     */
    static class LinkedNode<K,V> extends Node<K,V> {
        // Links for the insertion/access-order doubly-linked list
        // maintained by LinkedHashMap.
        LinkedNode<K,V> before, after;
        LinkedNode(int hash, K key, V value, Node<K,V> next) {
            super(hash, key, value, next);
        }
    }
1705 |
|
1706 |
    /*
     * The following package-protected methods are designed to be
     * overridden by LinkedHashMap, but not by any other subclass.
     * Nearly all other internal methods are also package-protected
     * but are declared final, so can be used by LinkedHashMap, view
     * classes, and HashSet.
     */
1713 |
|
1714 |
// Create a regular (non-tree) node |
1715 |
Node<K,V> newNode(int hash, K key, V value, Node<K,V> next) { |
1716 |
return new Node<K,V>(hash, key, value, next); |
1717 |
} |
1718 |
|
1719 |
// For conversion from TreeNodes to plain nodes |
1720 |
Node<K,V> replacementNode(Node<K,V> p, Node<K,V> next) { |
1721 |
return new Node<K,V>(p.hash, p.key, p.value, next); |
1722 |
} |
1723 |
|
1724 |
// Create a tree bin node |
1725 |
TreeNode<K,V> newTreeNode(int hash, K key, V value, Node<K,V> next) { |
1726 |
return new TreeNode<K,V>(hash, key, value, next); |
1727 |
} |
1728 |
|
1729 |
// For treeifyBin |
1730 |
TreeNode<K,V> replacementTreeNode(Node<K,V> p, Node<K,V> next) { |
1731 |
return new TreeNode<K,V>(p.hash, p.key, p.value, next); |
1732 |
} |
1733 |
|
1734 |
/** |
1735 |
* Reset to initial default state. Called by clone and readObject. |
1736 |
*/ |
1737 |
void reinitialize() { |
1738 |
table = null; |
1739 |
entrySet = null; |
1740 |
keySet = null; |
1741 |
values = null; |
1742 |
modCount = 0; |
1743 |
threshold = 0; |
1744 |
size = 0; |
1745 |
} |
1746 |
|
1747 |
    // Callbacks to allow LinkedHashMap post-actions. Deliberate no-ops in
    // HashMap itself; LinkedHashMap overrides them (per the package-protected
    // override contract noted earlier in this file).
    void afterNodeAccess(Node<K,V> p) { }       // node p was accessed
    void afterNodeInsertion(boolean evict) { }  // a node was inserted
    void afterNodeRemoval(Node<K,V> p) { }      // node p was unlinked
1751 |
|
1752 |
// Called only from writeObject, to ensure compatible ordering. |
1753 |
void internalWriteEntries(java.io.ObjectOutputStream s) throws IOException { |
1754 |
Node<K,V>[] tab; |
1755 |
if (size > 0 && (tab = table) != null) { |
1756 |
for (int i = 0; i < tab.length; ++i) { |
1757 |
for (Node<K,V> e = tab[i]; e != null; e = e.next) { |
1758 |
s.writeObject(e.key); |
1759 |
s.writeObject(e.value); |
1760 |
} |
1761 |
} |
1762 |
} |
1763 |
} |
1764 |
|
1765 |
    /* ------------------------------------------------------------ */
    // Tree bins
1767 |
|
1768 |
    /**
     * Entry for Tree bins. Subclasses LinkedNode so can be used as
     * either extension of regular or linked node.
     *
     * Tree bins are red-black trees ordered primarily by hash, then by
     * Comparable key order when keys are mutually comparable; nodes also
     * remain on the bin's doubly-linked (next/prev) list so traversal and
     * untreeification keep working.
     */
    static final class TreeNode<K,V> extends LinkedNode<K,V> {
        TreeNode<K,V> parent;  // red-black tree links
        TreeNode<K,V> left;
        TreeNode<K,V> right;
        TreeNode<K,V> prev;    // needed to unlink next upon deletion
        boolean red;
        TreeNode(int hash, K key, V val, Node<K,V> next) {
            super(hash, key, val, next);
        }

        /**
         * Returns root of tree containing this node.
         */
        final TreeNode<K,V> root() {
            for (TreeNode<K,V> r = this, p;;) {
                if ((p = r.parent) == null)
                    return r;
                r = p;
            }
        }

        /**
         * Ensures that the given root is the first node of its bin,
         * keeping the bucket slot and the linked-list head in agreement.
         */
        static <K,V> void moveRootToFront(Node<K,V>[] tab, TreeNode<K,V> root) {
            int n;
            if (root != null && tab != null && (n = tab.length) > 0) {
                int index = (n - 1) & root.hash;
                TreeNode<K,V> first = (TreeNode<K,V>)tab[index];
                if (root != first) {
                    // Splice root out of the list and reinsert at the head.
                    tab[index] = root;
                    TreeNode<K,V> rp = root.prev;
                    Node<K,V> rn = root.next;
                    if (rn != null)
                        ((TreeNode<K,V>)rn).prev = rp;
                    if (rp != null)
                        rp.next = rn;
                    if (first != null)
                        first.prev = root;
                    root.next = first;
                    root.prev = null;
                }
            }
        }

        /**
         * Finds the node starting at root p with the given hash and key.
         * The kc argument caches comparableClassFor(key) upon first use
         * comparing keys.
         */
        final TreeNode<K,V> find(int h, Object k, Class<?> kc) {
            TreeNode<K,V> p = this;
            do {
                int ph, dir; K pk;
                TreeNode<K,V> pl = p.left, pr = p.right, q;
                if ((ph = p.hash) > h)
                    p = pl;
                else if (ph < h)
                    p = pr;
                else if ((pk = p.key) == k || (k != null && k.equals(pk)))
                    return p;
                else if (pl == null && pr == null)
                    break;
                // Equal hashes, unequal keys: use Comparable order if
                // available, otherwise search the right subtree
                // recursively and fall back to descending left.
                else if ((kc != null || (kc = comparableClassFor(k)) != null) &&
                         (dir = compareComparables(kc, k, pk)) != 0)
                    p = (dir < 0) ? pl : pr;
                else if (pl == null)
                    p = pr;
                else if (pr == null || (q = pr.find(h, k, kc)) == null)
                    p = pl;
                else
                    return q;
            } while (p != null);
            return null;
        }

        /**
         * Calls find for root node.
         */
        final TreeNode<K,V> getTreeNode(int h, Object k) {
            return ((parent != null) ? root() : this).find(h, k, null);
        }

        /**
         * Forms tree of the nodes linked from this node.
         * @return root of tree
         */
        final void treeify(Node<K,V>[] tab) {
            TreeNode<K,V> root = null;
            // Insert each chained node into the tree one at a time,
            // rebalancing after every insertion.
            for (TreeNode<K,V> x = this, next; x != null; x = next) {
                next = (TreeNode<K,V>)x.next;
                x.left = x.right = null;
                if (root == null) {
                    x.parent = null;
                    x.red = false;      // root is always black
                    root = x;
                }
                else {
                    K k = x.key;
                    int h = x.hash;
                    Class<?> kc = null;
                    for (TreeNode<K,V> p = root;;) {
                        int dir, ph;
                        if ((ph = p.hash) > h)
                            dir = -1;
                        else if (ph < h)
                            dir = 1;
                        else if ((kc != null ||
                                  (kc = comparableClassFor(k)) != null))
                            dir = compareComparables(kc, k, p.key);
                        else
                            dir = 0;
                        // NOTE(review): dir == 0 (equal-hash, incomparable
                        // keys) breaks ties toward the left with no further
                        // ordering; find() compensates by searching both
                        // subtrees in that case.
                        TreeNode<K,V> xp = p;
                        if ((p = (dir <= 0) ? p.left : p.right) == null) {
                            x.parent = xp;
                            if (dir <= 0)
                                xp.left = x;
                            else
                                xp.right = x;
                            root = balanceInsertion(root, x);
                            break;
                        }
                    }
                }
            }
            moveRootToFront(tab, root);
            assert checkInvariants(root);
        }

        /**
         * Returns a list on non-TreeNodes replacing those linked from
         * this node.
         */
        final Node<K,V> untreeify(HashMap<K,V> map) {
            Node<K,V> hd = null, tl = null;
            // Rebuild the chain as plain nodes, preserving link order.
            for (Node<K,V> q = this; q != null; q = q.next) {
                Node<K,V> p = map.replacementNode(q, null);
                if (tl == null)
                    hd = p;
                else
                    tl.next = p;
                tl = p;
            }
            return hd;
        }

        /**
         * Tree version of putVal. Returns the existing node for the key
         * (caller decides whether to overwrite its value), or null after
         * inserting a new node.
         */
        final TreeNode<K,V> putTreeVal(HashMap<K,V> map, Node<K,V>[] tab,
                                       int h, K k, V v) {
            Class<?> kc = null;
            TreeNode<K,V> root = (parent != null) ? root() : this;
            for (TreeNode<K,V> p = root;;) {
                int ph, dir; K pk; TreeNode<K,V> pr, q;
                if ((ph = p.hash) > h)
                    dir = -1;
                else if (ph < h)
                    dir = 1;
                else if ((pk = p.key) == k || (k != null && k.equals(pk)))
                    return p;       // key already present
                else if ((kc == null && (kc = comparableClassFor(k)) == null) ||
                         (dir = compareComparables(kc, k, pk)) == 0) {
                    // Equal hash, incomparable (or compare-equal) keys:
                    // check the right subtree for a match before choosing
                    // an insertion direction.
                    if (p.left == null)
                        dir = 1;
                    else if ((pr = p.right) == null ||
                             (q = pr.find(h, k, kc)) == null)
                        dir = -1;
                    else
                        return q;
                }
                TreeNode<K,V> xp = p;
                if ((p = (dir < 0) ? p.left : p.right) == null) {
                    // Link the new node into both the tree and the
                    // doubly-linked list (spliced in right after xp).
                    Node<K,V> xpn = xp.next;
                    TreeNode<K,V> x = map.newTreeNode(h, k, v, xpn);
                    if (dir < 0)
                        xp.left = x;
                    else
                        xp.right = x;
                    xp.next = x;
                    x.parent = x.prev = xp;
                    if (xpn != null)
                        ((TreeNode<K,V>)xpn).prev = x;
                    TreeNode<K,V> r = balanceInsertion(root, x);
                    moveRootToFront(tab, r);
                    assert checkInvariants(r);
                    return null;
                }
            }
        }

        /**
         * Removes the given node, that must be present before this call.
         * This is messier than typical red-black deletion code because we
         * cannot swap the contents of an interior node with a leaf
         * successor that is pinned by "next" pointers that are accessible
         * independently during traversal. So instead we swap the tree
         * linkages. If the current tree appears to have too few nodes,
         * the bin is converted back to a plain bin. (The test triggers
         * somewhere between 2 and 6 nodes, depending on tree structure).
         */
        final void removeTreeNode(HashMap<K,V> map, Node<K,V>[] tab,
                                  boolean movable) {
            int n;
            if (tab == null || (n = tab.length) == 0)
                return;
            int index = (n - 1) & hash;
            TreeNode<K,V> first = (TreeNode<K,V>)tab[index], root = first, rl;
            TreeNode<K,V> succ = (TreeNode<K,V>)next, pred = prev;
            // First unlink this node from the bin's doubly-linked list.
            if (pred == null)
                tab[index] = first = succ;
            else
                pred.next = succ;
            if (succ != null)
                succ.prev = pred;
            if (first == null)
                return;
            if (root.parent != null)
                root = root.root();
            // Cheap shape test for "tree too small": revert to plain bin.
            if (root == null || root.right == null ||
                (rl = root.left) == null || rl.left == null) {
                tab[index] = first.untreeify(map);  // too small
                return;
            }
            TreeNode<K,V> p = this, pl = left, pr = right, replacement;
            if (pl != null && pr != null) {
                // Two children: swap tree linkage (not contents) with the
                // in-order successor s, then delete at the old position.
                TreeNode<K,V> s = pr, sl;
                while ((sl = s.left) != null) // find successor
                    s = sl;
                boolean c = s.red; s.red = p.red; p.red = c; // swap colors
                TreeNode<K,V> sr = s.right;
                TreeNode<K,V> pp = p.parent;
                if (s == pr) { // p was s's direct parent
                    p.parent = s;
                    s.right = p;
                }
                else {
                    TreeNode<K,V> sp = s.parent;
                    if ((p.parent = sp) != null) {
                        if (s == sp.left)
                            sp.left = p;
                        else
                            sp.right = p;
                    }
                    if ((s.right = pr) != null)
                        pr.parent = s;
                }
                p.left = null;
                if ((p.right = sr) != null)
                    sr.parent = p;
                if ((s.left = pl) != null)
                    pl.parent = s;
                if ((s.parent = pp) == null)
                    root = s;
                else if (p == pp.left)
                    pp.left = s;
                else
                    pp.right = s;
                if (sr != null)
                    replacement = sr;
                else
                    replacement = p;
            }
            else if (pl != null)
                replacement = pl;
            else if (pr != null)
                replacement = pr;
            else
                replacement = p;    // leaf: detach after rebalancing
            if (replacement != p) {
                TreeNode<K,V> pp = replacement.parent = p.parent;
                if (pp == null)
                    root = replacement;
                else if (p == pp.left)
                    pp.left = replacement;
                else
                    pp.right = replacement;
                p.left = p.right = p.parent = null;
            }

            // Removing a red node never violates black-height; otherwise
            // run the deletion fixup starting at the replacement.
            TreeNode<K,V> r = p.red ? root : balanceDeletion(root, replacement);

            if (replacement == p) {  // detach
                TreeNode<K,V> pp = p.parent;
                p.parent = null;
                if (pp != null) {
                    if (p == pp.left)
                        pp.left = null;
                    else if (p == pp.right)
                        pp.right = null;
                }
            }
            if (movable)
                moveRootToFront(tab, r);
            assert checkInvariants(r);
        }

        /**
         * Splits nodes in a tree bin into lower and upper tree bins,
         * or untreeifies if now too small. Called only from resize;
         * bit is the old table capacity distinguishing lo/hi placement.
         */
        final void split(HashMap<K,V> map, Node<K,V>[] tab, int index, int bit) {
            TreeNode<K,V> b = this;
            // Relink into lo and hi lists, preserving order
            TreeNode<K,V> loHead = null, loTail = null;
            TreeNode<K,V> hiHead = null, hiTail = null;
            int lc = 0, hc = 0;
            for (TreeNode<K,V> e = b, next; e != null; e = next) {
                next = (TreeNode<K,V>)e.next;
                e.next = null;
                if ((e.hash & bit) == 0) {
                    if ((e.prev = loTail) == null)
                        loHead = e;
                    else
                        loTail.next = e;
                    loTail = e;
                    ++lc;
                }
                else {
                    if ((e.prev = hiTail) == null)
                        hiHead = e;
                    else
                        hiTail.next = e;
                    hiTail = e;
                    ++hc;
                }
            }

            // Re-treeify (or untreeify) each non-empty half.
            if (loHead != null) {
                if (lc <= UNTREEIFY_THRESHOLD)
                    tab[index] = loHead.untreeify(map);
                else {
                    tab[index] = loHead;
                    if (hiHead != null) // (else is already treeified)
                        loHead.treeify(tab);
                }
            }
            if (hiHead != null) {
                if (hc <= UNTREEIFY_THRESHOLD)
                    tab[index + bit] = hiHead.untreeify(map);
                else {
                    tab[index + bit] = hiHead;
                    if (loHead != null)
                        hiHead.treeify(tab);
                }
            }
        }

        /* ------------------------------------------------------------ */
        // Red-black tree methods, all adapted from CLR

        static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
                                              TreeNode<K,V> p) {
            if (p != null) {
                TreeNode<K,V> r = p.right, pp, rl;
                if ((rl = p.right = r.left) != null)
                    rl.parent = p;
                if ((pp = r.parent = p.parent) == null)
                    (root = r).red = false;
                else if (pp.left == p)
                    pp.left = r;
                else
                    pp.right = r;
                r.left = p;
                p.parent = r;
            }
            return root;
        }

        static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
                                               TreeNode<K,V> p) {
            if (p != null) {
                TreeNode<K,V> l = p.left, pp, lr;
                if ((lr = p.left = l.right) != null)
                    lr.parent = p;
                if ((pp = l.parent = p.parent) == null)
                    (root = l).red = false;
                else if (pp.right == p)
                    pp.right = l;
                else
                    pp.left = l;
                l.right = p;
                p.parent = l;
            }
            return root;
        }

        /**
         * Restores red-black invariants after inserting x; returns the
         * (possibly new) root.
         */
        static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
                                                    TreeNode<K,V> x) {
            x.red = true;   // new nodes start red
            for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
                if ((xp = x.parent) == null) {
                    x.red = false;
                    return x;
                }
                else if (!xp.red || (xpp = xp.parent) == null)
                    return root;    // no red-red violation remains
                if (xp == (xppl = xpp.left)) {
                    if ((xppr = xpp.right) != null && xppr.red) {
                        // Red uncle: recolor and continue from grandparent.
                        xppr.red = false;
                        xp.red = false;
                        xpp.red = true;
                        x = xpp;
                    }
                    else {
                        if (x == xp.right) {
                            root = rotateLeft(root, x = xp);
                            xpp = (xp = x.parent) == null ? null : xp.parent;
                        }
                        if (xp != null) {
                            xp.red = false;
                            if (xpp != null) {
                                xpp.red = true;
                                root = rotateRight(root, xpp);
                            }
                        }
                    }
                }
                else {
                    // Mirror image of the branch above.
                    if (xppl != null && xppl.red) {
                        xppl.red = false;
                        xp.red = false;
                        xpp.red = true;
                        x = xpp;
                    }
                    else {
                        if (x == xp.left) {
                            root = rotateRight(root, x = xp);
                            xpp = (xp = x.parent) == null ? null : xp.parent;
                        }
                        if (xp != null) {
                            xp.red = false;
                            if (xpp != null) {
                                xpp.red = true;
                                root = rotateLeft(root, xpp);
                            }
                        }
                    }
                }
            }
        }

        /**
         * Restores red-black invariants after deletion, starting the
         * fixup at x (the replacement node); returns the root.
         */
        static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
                                                   TreeNode<K,V> x) {
            for (TreeNode<K,V> xp, xpl, xpr;;) {
                if (x == null || x == root)
                    return root;
                else if ((xp = x.parent) == null) {
                    x.red = false;
                    return x;
                }
                else if (x.red) {
                    x.red = false;      // absorb the extra black
                    return root;
                }
                else if ((xpl = xp.left) == x) {
                    if ((xpr = xp.right) != null && xpr.red) {
                        // Red sibling: rotate so sibling becomes black.
                        xpr.red = false;
                        xp.red = true;
                        root = rotateLeft(root, xp);
                        xpr = (xp = x.parent) == null ? null : xp.right;
                    }
                    if (xpr == null)
                        x = xp;
                    else {
                        TreeNode<K,V> sl = xpr.left, sr = xpr.right;
                        if ((sr == null || !sr.red) &&
                            (sl == null || !sl.red)) {
                            // Black sibling, black children: recolor and
                            // push the problem up.
                            xpr.red = true;
                            x = xp;
                        }
                        else {
                            if (sr == null || !sr.red) {
                                if (sl != null)
                                    sl.red = false;
                                xpr.red = true;
                                root = rotateRight(root, xpr);
                                xpr = (xp = x.parent) == null ?
                                    null : xp.right;
                            }
                            if (xpr != null) {
                                xpr.red = (xp == null) ? false : xp.red;
                                if ((sr = xpr.right) != null)
                                    sr.red = false;
                            }
                            if (xp != null) {
                                xp.red = false;
                                root = rotateLeft(root, xp);
                            }
                            x = root;   // done
                        }
                    }
                }
                else { // symmetric
                    if (xpl != null && xpl.red) {
                        xpl.red = false;
                        xp.red = true;
                        root = rotateRight(root, xp);
                        xpl = (xp = x.parent) == null ? null : xp.left;
                    }
                    if (xpl == null)
                        x = xp;
                    else {
                        TreeNode<K,V> sl = xpl.left, sr = xpl.right;
                        if ((sl == null || !sl.red) &&
                            (sr == null || !sr.red)) {
                            xpl.red = true;
                            x = xp;
                        }
                        else {
                            if (sl == null || !sl.red) {
                                if (sr != null)
                                    sr.red = false;
                                xpl.red = true;
                                root = rotateLeft(root, xpl);
                                xpl = (xp = x.parent) == null ?
                                    null : xp.left;
                            }
                            if (xpl != null) {
                                xpl.red = (xp == null) ? false : xp.red;
                                if ((sl = xpl.left) != null)
                                    sl.red = false;
                            }
                            if (xp != null) {
                                xp.red = false;
                                root = rotateRight(root, xp);
                            }
                            x = root;
                        }
                    }
                }
            }
        }

        /**
         * Recursive invariant check: verifies list links, parent/child
         * links, hash ordering, and the no-red-red-parent-child rule.
         */
        static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
            TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
                tb = t.prev, tn = (TreeNode<K,V>)t.next;
            if (tb != null && tb.next != t)
                return false;
            if (tn != null && tn.prev != t)
                return false;
            if (tp != null && t != tp.left && t != tp.right)
                return false;
            if (tl != null && (tl.parent != t || tl.hash > t.hash))
                return false;
            if (tr != null && (tr.parent != t || tr.hash < t.hash))
                return false;
            if (t.red && tl != null && tl.red && tr != null && tr.red)
                return false;
            if (tl != null && !checkInvariants(tl))
                return false;
            if (tr != null && !checkInvariants(tr))
                return false;
            return true;
        }
    }
2328 |
|
2329 |
} |