root/jsr166/jsr166/src/main/java/util/HashMap.java
Revision: 1.1
Committed: Wed Aug 23 05:33:00 2017 UTC by jsr166
Branch: MAIN
Log Message:
8186171: HashMap: Entry.setValue may not work after Iterator.remove() called for previous entries

1 jsr166 1.1 /*
2     * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
3     * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4     *
5     * This code is free software; you can redistribute it and/or modify it
6     * under the terms of the GNU General Public License version 2 only, as
7     * published by the Free Software Foundation. Oracle designates this
8     * particular file as subject to the "Classpath" exception as provided
9     * by Oracle in the LICENSE file that accompanied this code.
10     *
11     * This code is distributed in the hope that it will be useful, but WITHOUT
12     * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13     * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14     * version 2 for more details (a copy is included in the LICENSE file that
15     * accompanied this code).
16     *
17     * You should have received a copy of the GNU General Public License version
18     * 2 along with this work; if not, write to the Free Software Foundation,
19     * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20     *
21     * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22     * or visit www.oracle.com if you need additional information or have any
23     * questions.
24     */
25    
26     package java.util;
27    
28     import java.io.IOException;
29     import java.io.InvalidObjectException;
30     import java.io.Serializable;
31     import java.lang.reflect.ParameterizedType;
32     import java.lang.reflect.Type;
33     import java.util.function.BiConsumer;
34     import java.util.function.BiFunction;
35     import java.util.function.Consumer;
36     import java.util.function.Function;
37    
38     /**
39     * Hash table based implementation of the {@code Map} interface. This
40     * implementation provides all of the optional map operations, and permits
41     * {@code null} values and the {@code null} key. (The {@code HashMap}
42     * class is roughly equivalent to {@code Hashtable}, except that it is
43     * unsynchronized and permits nulls.) This class makes no guarantees as to
44     * the order of the map; in particular, it does not guarantee that the order
45     * will remain constant over time.
46     *
47     * <p>This implementation provides constant-time performance for the basic
48     * operations ({@code get} and {@code put}), assuming the hash function
49     * disperses the elements properly among the buckets. Iteration over
50     * collection views requires time proportional to the "capacity" of the
51     * {@code HashMap} instance (the number of buckets) plus its size (the number
52     * of key-value mappings). Thus, it's very important not to set the initial
53     * capacity too high (or the load factor too low) if iteration performance is
54     * important.
55     *
56     * <p>An instance of {@code HashMap} has two parameters that affect its
57     * performance: <i>initial capacity</i> and <i>load factor</i>. The
58     * <i>capacity</i> is the number of buckets in the hash table, and the initial
59     * capacity is simply the capacity at the time the hash table is created. The
60     * <i>load factor</i> is a measure of how full the hash table is allowed to
61     * get before its capacity is automatically increased. When the number of
62     * entries in the hash table exceeds the product of the load factor and the
63     * current capacity, the hash table is <i>rehashed</i> (that is, internal data
64     * structures are rebuilt) so that the hash table has approximately twice the
65     * number of buckets.
66     *
67     * <p>As a general rule, the default load factor (.75) offers a good
68     * tradeoff between time and space costs. Higher values decrease the
69     * space overhead but increase the lookup cost (reflected in most of
70     * the operations of the {@code HashMap} class, including
71     * {@code get} and {@code put}). The expected number of entries in
72     * the map and its load factor should be taken into account when
73     * setting its initial capacity, so as to minimize the number of
74     * rehash operations. If the initial capacity is greater than the
75     * maximum number of entries divided by the load factor, no rehash
76     * operations will ever occur.
77     *
78     * <p>If many mappings are to be stored in a {@code HashMap}
79     * instance, creating it with a sufficiently large capacity will allow
80     * the mappings to be stored more efficiently than letting it perform
81     * automatic rehashing as needed to grow the table. Note that using
82     * many keys with the same {@code hashCode()} is a sure way to slow
83     * down performance of any hash table. To ameliorate impact, when keys
84     * are {@link Comparable}, this class may use comparison order among
85     * keys to help break ties.
86     *
87     * <p><strong>Note that this implementation is not synchronized.</strong>
88     * If multiple threads access a hash map concurrently, and at least one of
89     * the threads modifies the map structurally, it <i>must</i> be
90     * synchronized externally. (A structural modification is any operation
91     * that adds or deletes one or more mappings; merely changing the value
92     * associated with a key that an instance already contains is not a
93     * structural modification.) This is typically accomplished by
94     * synchronizing on some object that naturally encapsulates the map.
95     *
96     * If no such object exists, the map should be "wrapped" using the
97     * {@link Collections#synchronizedMap Collections.synchronizedMap}
98     * method. This is best done at creation time, to prevent accidental
99     * unsynchronized access to the map:<pre>
100     * Map m = Collections.synchronizedMap(new HashMap(...));</pre>
101     *
102     * <p>The iterators returned by all of this class's "collection view methods"
103     * are <i>fail-fast</i>: if the map is structurally modified at any time after
104     * the iterator is created, in any way except through the iterator's own
105     * {@code remove} method, the iterator will throw a
106     * {@link ConcurrentModificationException}. Thus, in the face of concurrent
107     * modification, the iterator fails quickly and cleanly, rather than risking
108     * arbitrary, non-deterministic behavior at an undetermined time in the
109     * future.
110     *
111     * <p>Note that the fail-fast behavior of an iterator cannot be guaranteed
112     * as it is, generally speaking, impossible to make any hard guarantees in the
113     * presence of unsynchronized concurrent modification. Fail-fast iterators
114     * throw {@code ConcurrentModificationException} on a best-effort basis.
115     * Therefore, it would be wrong to write a program that depended on this
116     * exception for its correctness: <i>the fail-fast behavior of iterators
117     * should be used only to detect bugs.</i>
118     *
119     * <p>This class is a member of the
120     * <a href="{@docRoot}/java/util/package-summary.html#CollectionsFramework">
121     * Java Collections Framework</a>.
122     *
123     * @param <K> the type of keys maintained by this map
124     * @param <V> the type of mapped values
125     *
126     * @author Doug Lea
127     * @author Josh Bloch
128     * @author Arthur van Hoff
129     * @author Neal Gafter
130     * @see Object#hashCode()
131     * @see Collection
132     * @see Map
133     * @see TreeMap
134     * @see Hashtable
135     * @since 1.2
136     */
137     public class HashMap<K,V> extends AbstractMap<K,V>
138     implements Map<K,V>, Cloneable, Serializable {
139    
140     private static final long serialVersionUID = 362498820763181265L;
141    
142     /*
143     * Implementation notes.
144     *
145     * This map usually acts as a binned (bucketed) hash table, but
146     * when bins get too large, they are transformed into bins of
147     * TreeNodes, each structured similarly to those in
148     * java.util.TreeMap. Most methods try to use normal bins, but
149     * relay to TreeNode methods when applicable (simply by checking
150     * instanceof a node). Bins of TreeNodes may be traversed and
151     * used like any others, but additionally support faster lookup
152     * when overpopulated. However, since the vast majority of bins in
153     * normal use are not overpopulated, checking for existence of
154     * tree bins may be delayed in the course of table methods.
155     *
156     * Tree bins (i.e., bins whose elements are all TreeNodes) are
157     * ordered primarily by hashCode, but in the case of ties, if two
158     * elements are of the same "class C implements Comparable<C>"
159     * type, then their compareTo method is used for ordering. (We
160     * conservatively check generic types via reflection to validate
161     * this -- see method comparableClassFor). The added complexity
162     * of tree bins is worthwhile in providing worst-case O(log n)
163     * operations when keys either have distinct hashes or are
164     * orderable. Thus, performance degrades gracefully under
165     * accidental or malicious usages in which hashCode() methods
166     * return values that are poorly distributed, as well as those in
167     * which many keys share a hashCode, so long as they are also
168     * Comparable. (If neither of these apply, we may waste about a
169     * factor of two in time and space compared to taking no
170     * precautions. But the only known cases stem from poor user
171     * programming practices that are already so slow that this makes
172     * little difference.)
173     *
174     * Because TreeNodes are about twice the size of regular nodes, we
175     * use them only when bins contain enough nodes to warrant use
176     * (see TREEIFY_THRESHOLD). And when they become too small (due to
177     * removal or resizing) they are converted back to plain bins. In
178     * usages with well-distributed user hashCodes, tree bins are
179     * rarely used. Ideally, under random hashCodes, the frequency of
180     * nodes in bins follows a Poisson distribution
181     * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
182     * parameter of about 0.5 on average for the default resizing
183     * threshold of 0.75, although with a large variance because of
184     * resizing granularity. Ignoring variance, the expected
185     * occurrences of list size k are (exp(-0.5) * pow(0.5, k) /
186     * factorial(k)). The first values are:
187     *
188     * 0: 0.60653066
189     * 1: 0.30326533
190     * 2: 0.07581633
191     * 3: 0.01263606
192     * 4: 0.00157952
193     * 5: 0.00015795
194     * 6: 0.00001316
195     * 7: 0.00000094
196     * 8: 0.00000006
197     * more: less than 1 in ten million
198     *
199     * The root of a tree bin is normally its first node. However,
200     * sometimes (currently only upon Iterator.remove), the root might
201     * be elsewhere, but can be recovered following parent links
202     * (method TreeNode.root()).
203     *
204     * All applicable internal methods accept a hash code as an
205     * argument (as normally supplied from a public method), allowing
206     * them to call each other without recomputing user hashCodes.
207     * Most internal methods also accept a "tab" argument, that is
208     * normally the current table, but may be a new or old one when
209     * resizing or converting.
210     *
211     * When bin lists are treeified, split, or untreeified, we keep
212     * them in the same relative access/traversal order (i.e., field
213     * Node.next) to better preserve locality, and to slightly
214     * simplify handling of splits and traversals that invoke
215     * iterator.remove. When using comparators on insertion, to keep a
216     * total ordering (or as close as is required here) across
217     * rebalancings, we compare classes and identityHashCodes as
218     * tie-breakers.
219     *
220     * The use of and transitions among plain vs tree modes are
221     * complicated by the existence of subclass LinkedHashMap. See
222     * below for hook methods defined to be invoked upon insertion,
223     * removal and access that allow LinkedHashMap internals to
224     * otherwise remain independent of these mechanics. (This also
225     * requires that a map instance be passed to some utility methods
226     * that may create new nodes.)
227     *
228     * The concurrent-programming-like SSA-based coding style helps
229     * avoid aliasing errors amid all of the twisty pointer operations.
230     */
231    
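An illustrative aside, not part of the original source: the bin-size probabilities quoted above follow directly from the stated formula exp(-0.5) * pow(0.5, k) / factorial(k). A minimal sketch (class name invented for the demo):

// Reproduces the Poisson(0.5) figures listed in the implementation notes.
class PoissonBinSizes {
    public static void main(String[] args) {
        double lambda = 0.5;
        double factorial = 1.0;
        for (int k = 0; k <= 8; k++) {
            if (k > 0) factorial *= k;                       // k!
            double p = Math.exp(-lambda) * Math.pow(lambda, k) / factorial;
            System.out.printf("%d: %.8f%n", k, p);           // 0: 0.60653066, 1: 0.30326533, ...
        }
    }
}
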
232     /**
233     * The default initial capacity - MUST be a power of two.
234     */
235     static final int DEFAULT_INITIAL_CAPACITY = 1 << 4; // aka 16
236    
237     /**
238     * The maximum capacity, used if a higher value is implicitly specified
239     * by either of the constructors with arguments.
240     * MUST be a power of two <= 1<<30.
241     */
242     static final int MAXIMUM_CAPACITY = 1 << 30;
243    
244     /**
245     * The load factor used when none specified in constructor.
246     */
247     static final float DEFAULT_LOAD_FACTOR = 0.75f;
248    
249     /**
250     * The bin count threshold for using a tree rather than list for a
251     * bin. Bins are converted to trees when adding an element to a
252     * bin with at least this many nodes. The value must be greater
253     * than 2 and should be at least 8 to mesh with assumptions in
254     * tree removal about conversion back to plain bins upon
255     * shrinkage.
256     */
257     static final int TREEIFY_THRESHOLD = 8;
258    
259     /**
260     * The bin count threshold for untreeifying a (split) bin during a
261     * resize operation. Should be less than TREEIFY_THRESHOLD, and at
262     * most 6 to mesh with shrinkage detection under removal.
263     */
264     static final int UNTREEIFY_THRESHOLD = 6;
265    
266     /**
267     * The smallest table capacity for which bins may be treeified.
268     * (Otherwise the table is resized if too many nodes in a bin.)
269     * Should be at least 4 * TREEIFY_THRESHOLD to avoid conflicts
270     * between resizing and treeification thresholds.
271     */
272     static final int MIN_TREEIFY_CAPACITY = 64;
273    
274     /**
275     * Basic hash bin node, used for most entries. (See below for
276     * TreeNode subclass, and in LinkedHashMap for its Entry subclass.)
277     */
278     static class Node<K,V> implements Map.Entry<K,V> {
279     final int hash;
280     final K key;
281     V value;
282     Node<K,V> next;
283    
284     Node(int hash, K key, V value, Node<K,V> next) {
285     this.hash = hash;
286     this.key = key;
287     this.value = value;
288     this.next = next;
289     }
290    
291     public final K getKey() { return key; }
292     public final V getValue() { return value; }
293     public final String toString() { return key + "=" + value; }
294    
295     public final int hashCode() {
296     return Objects.hashCode(key) ^ Objects.hashCode(value);
297     }
298    
299     public final V setValue(V newValue) {
300     V oldValue = value;
301     value = newValue;
302     return oldValue;
303     }
304    
305     public final boolean equals(Object o) {
306     if (o == this)
307     return true;
308     if (o instanceof Map.Entry) {
309     Map.Entry<?,?> e = (Map.Entry<?,?>)o;
310     if (Objects.equals(key, e.getKey()) &&
311     Objects.equals(value, e.getValue()))
312     return true;
313     }
314     return false;
315     }
316     }
317    
318     /* ---------------- Static utilities -------------- */
319    
320     /**
321     * Computes key.hashCode() and spreads (XORs) higher bits of hash
322     * to lower. Because the table uses power-of-two masking, sets of
323     * hashes that vary only in bits above the current mask will
324     * always collide. (Among known examples are sets of Float keys
325     * holding consecutive whole numbers in small tables.) So we
326     * apply a transform that spreads the impact of higher bits
327     * downward. There is a tradeoff between speed, utility, and
328     * quality of bit-spreading. Because many common sets of hashes
329     * are already reasonably distributed (so don't benefit from
330     * spreading), and because we use trees to handle large sets of
331     * collisions in bins, we just XOR some shifted bits in the
332     * cheapest possible way to reduce systematic lossage, as well as
333     * to incorporate impact of the highest bits that would otherwise
334     * never be used in index calculations because of table bounds.
335     */
336     static final int hash(Object key) {
337     int h;
338     return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16);
339     }
340    
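An illustrative aside, not part of the original source: a quick demonstration of why the high bits are folded downward. The class name and sample hash codes are invented for the demo.

// With a 16-bucket table the index is (hash & 15); raw hash codes that differ
// only above the mask would all land in bucket 0, while the spread hashes do not.
class HashSpreadDemo {
    static int spread(int h) { return h ^ (h >>> 16); }      // same transform as hash() above (null case omitted)

    public static void main(String[] args) {
        int mask = 15;                                        // 16 buckets
        int[] raw = {0x10000, 0x20000, 0x30000};              // differ only in the high bits
        for (int h : raw)
            System.out.println((h & mask) + " vs " + (spread(h) & mask));
        // prints: 0 vs 1, 0 vs 2, 0 vs 3
    }
}
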
341     /**
342     * Returns x's Class if it is of the form "class C implements
343     * Comparable<C>", else null.
344     */
345     static Class<?> comparableClassFor(Object x) {
346     if (x instanceof Comparable) {
347     Class<?> c; Type[] ts, as; ParameterizedType p;
348     if ((c = x.getClass()) == String.class) // bypass checks
349     return c;
350     if ((ts = c.getGenericInterfaces()) != null) {
351     for (Type t : ts) {
352     if ((t instanceof ParameterizedType) &&
353     ((p = (ParameterizedType) t).getRawType() ==
354     Comparable.class) &&
355     (as = p.getActualTypeArguments()) != null &&
356     as.length == 1 && as[0] == c) // type arg is c
357     return c;
358     }
359     }
360     }
361     return null;
362     }
363    
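An illustrative aside, not part of the original source: the reflective screen above accepts only classes of the exact shape "class C implements Comparable<C>". The class names below are invented for the demo.

class ComparableClassForDemo {
    static class Exact implements Comparable<Exact> {         // accepted: the type argument is the class itself
        public int compareTo(Exact o) { return 0; }
    }
    static class Other implements Comparable<String> {        // rejected: the type argument is not Other
        public int compareTo(String o) { return 0; }
    }
    public static void main(String[] args) {
        // The generic-interface information inspected by the check above:
        System.out.println(Exact.class.getGenericInterfaces()[0]);  // roughly: java.lang.Comparable<...Exact>
        System.out.println(Other.class.getGenericInterfaces()[0]);  // roughly: java.lang.Comparable<java.lang.String>
    }
}
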
364     /**
365     * Returns k.compareTo(x) if x matches kc (k's screened comparable
366     * class), else 0.
367     */
368     @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
369     static int compareComparables(Class<?> kc, Object k, Object x) {
370     return (x == null || x.getClass() != kc ? 0 :
371     ((Comparable)k).compareTo(x));
372     }
373    
374     /**
375     * Returns a power of two size for the given target capacity.
376     */
377     static final int tableSizeFor(int cap) {
378     int n = cap - 1;
379     n |= n >>> 1;
380     n |= n >>> 2;
381     n |= n >>> 4;
382     n |= n >>> 8;
383     n |= n >>> 16;
384     return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
385     }
386    
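An illustrative aside, not part of the original source: the bit-smearing above rounds a requested capacity up to the next power of two, clamped to [1, MAXIMUM_CAPACITY]. A minimal sketch that re-derives a few values (class name invented for the demo):

class TableSizeForDemo {
    static int tableSizeFor(int cap) {                        // same logic as the method above
        int n = cap - 1;
        n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16;
        return (n < 0) ? 1 : (n >= (1 << 30)) ? (1 << 30) : n + 1;
    }
    public static void main(String[] args) {
        for (int cap : new int[] {0, 1, 16, 17, 100})
            System.out.println(cap + " -> " + tableSizeFor(cap));  // 1, 1, 16, 32, 128
    }
}
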
387     /* ---------------- Fields -------------- */
388    
389     /**
390     * The table, initialized on first use, and resized as
391     * necessary. When allocated, length is always a power of two.
392     * (We also tolerate length zero in some operations to allow
393     * bootstrapping mechanics that are currently not needed.)
394     */
395     transient Node<K,V>[] table;
396    
397     /**
398     * Holds cached entrySet(). Note that AbstractMap fields are used
399     * for keySet() and values().
400     */
401     transient Set<Map.Entry<K,V>> entrySet;
402    
403     /**
404     * The number of key-value mappings contained in this map.
405     */
406     transient int size;
407    
408     /**
409     * The number of times this HashMap has been structurally modified.
410     * Structural modifications are those that change the number of mappings in
411     * the HashMap or otherwise modify its internal structure (e.g.,
412     * rehash). This field is used to make iterators on Collection-views of
413     * the HashMap fail-fast. (See ConcurrentModificationException).
414     */
415     transient int modCount;
416    
417     /**
418     * The next size value at which to resize (capacity * load factor).
419     *
420     * @serial
421     */
422     // (The javadoc description is true upon serialization.
423     // Additionally, if the table array has not been allocated, this
424     // field holds the initial array capacity, or zero signifying
425     // DEFAULT_INITIAL_CAPACITY.)
426     int threshold;
427    
428     /**
429     * The load factor for the hash table.
430     *
431     * @serial
432     */
433     final float loadFactor;
434    
435     /* ---------------- Public operations -------------- */
436    
437     /**
438     * Constructs an empty {@code HashMap} with the specified initial
439     * capacity and load factor.
440     *
441     * @param initialCapacity the initial capacity
442     * @param loadFactor the load factor
443     * @throws IllegalArgumentException if the initial capacity is negative
444     * or the load factor is nonpositive
445     */
446     public HashMap(int initialCapacity, float loadFactor) {
447     if (initialCapacity < 0)
448     throw new IllegalArgumentException("Illegal initial capacity: " +
449     initialCapacity);
450     if (initialCapacity > MAXIMUM_CAPACITY)
451     initialCapacity = MAXIMUM_CAPACITY;
452     if (loadFactor <= 0 || Float.isNaN(loadFactor))
453     throw new IllegalArgumentException("Illegal load factor: " +
454     loadFactor);
455     this.loadFactor = loadFactor;
456     this.threshold = tableSizeFor(initialCapacity);
457     }
458    
459     /**
460     * Constructs an empty {@code HashMap} with the specified initial
461     * capacity and the default load factor (0.75).
462     *
463     * @param initialCapacity the initial capacity.
464     * @throws IllegalArgumentException if the initial capacity is negative.
465     */
466     public HashMap(int initialCapacity) {
467     this(initialCapacity, DEFAULT_LOAD_FACTOR);
468     }
469    
470     /**
471     * Constructs an empty {@code HashMap} with the default initial capacity
472     * (16) and the default load factor (0.75).
473     */
474     public HashMap() {
475     this.loadFactor = DEFAULT_LOAD_FACTOR; // all other fields defaulted
476     }
477    
478     /**
479     * Constructs a new {@code HashMap} with the same mappings as the
480     * specified {@code Map}. The {@code HashMap} is created with
481     * default load factor (0.75) and an initial capacity sufficient to
482     * hold the mappings in the specified {@code Map}.
483     *
484     * @param m the map whose mappings are to be placed in this map
485     * @throws NullPointerException if the specified map is null
486     */
487     public HashMap(Map<? extends K, ? extends V> m) {
488     this.loadFactor = DEFAULT_LOAD_FACTOR;
489     putMapEntries(m, false);
490     }
491    
492     /**
493     * Implements Map.putAll and Map constructor.
494     *
495     * @param m the map
496     * @param evict false when initially constructing this map, else
497     * true (relayed to method afterNodeInsertion).
498     */
499     final void putMapEntries(Map<? extends K, ? extends V> m, boolean evict) {
500     int s = m.size();
501     if (s > 0) {
502     if (table == null) { // pre-size
503     float ft = ((float)s / loadFactor) + 1.0F;
504     int t = ((ft < (float)MAXIMUM_CAPACITY) ?
505     (int)ft : MAXIMUM_CAPACITY);
506     if (t > threshold)
507     threshold = tableSizeFor(t);
508     }
509     else if (s > threshold)
510     resize();
511     for (Map.Entry<? extends K, ? extends V> e : m.entrySet()) {
512     K key = e.getKey();
513     V value = e.getValue();
514     putVal(hash(key), key, value, false, evict);
515     }
516     }
517     }
518    
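An illustrative aside, not part of the original source: the pre-sizing formula above (size / loadFactor + 1) is the same calculation the class javadoc recommends doing by hand when the number of mappings is known up front. A minimal sketch (class name invented for the demo):

import java.util.HashMap;
import java.util.Map;

class PreSizeDemo {
    public static void main(String[] args) {
        int expected = 1000;
        // A capacity of at least expected / 0.75 means the resize threshold is
        // never crossed, so no rehashing happens while filling the map.
        Map<Integer, Integer> m = new HashMap<>((int) (expected / 0.75f) + 1);
        for (int i = 0; i < expected; i++)
            m.put(i, i);
        System.out.println(m.size());                          // 1000
    }
}
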
519     /**
520     * Returns the number of key-value mappings in this map.
521     *
522     * @return the number of key-value mappings in this map
523     */
524     public int size() {
525     return size;
526     }
527    
528     /**
529     * Returns {@code true} if this map contains no key-value mappings.
530     *
531     * @return {@code true} if this map contains no key-value mappings
532     */
533     public boolean isEmpty() {
534     return size == 0;
535     }
536    
537     /**
538     * Returns the value to which the specified key is mapped,
539     * or {@code null} if this map contains no mapping for the key.
540     *
541     * <p>More formally, if this map contains a mapping from a key
542     * {@code k} to a value {@code v} such that {@code (key==null ? k==null :
543     * key.equals(k))}, then this method returns {@code v}; otherwise
544     * it returns {@code null}. (There can be at most one such mapping.)
545     *
546     * <p>A return value of {@code null} does not <i>necessarily</i>
547     * indicate that the map contains no mapping for the key; it's also
548     * possible that the map explicitly maps the key to {@code null}.
549     * The {@link #containsKey containsKey} operation may be used to
550     * distinguish these two cases.
551     *
552     * @see #put(Object, Object)
553     */
554     public V get(Object key) {
555     Node<K,V> e;
556     return (e = getNode(hash(key), key)) == null ? null : e.value;
557     }
558    
559     /**
560     * Implements Map.get and related methods.
561     *
562     * @param hash hash for key
563     * @param key the key
564     * @return the node, or null if none
565     */
566     final Node<K,V> getNode(int hash, Object key) {
567     Node<K,V>[] tab; Node<K,V> first, e; int n; K k;
568     if ((tab = table) != null && (n = tab.length) > 0 &&
569     (first = tab[(n - 1) & hash]) != null) {
570     if (first.hash == hash && // always check first node
571     ((k = first.key) == key || (key != null && key.equals(k))))
572     return first;
573     if ((e = first.next) != null) {
574     if (first instanceof TreeNode)
575     return ((TreeNode<K,V>)first).getTreeNode(hash, key);
576     do {
577     if (e.hash == hash &&
578     ((k = e.key) == key || (key != null && key.equals(k))))
579     return e;
580     } while ((e = e.next) != null);
581     }
582     }
583     return null;
584     }
585    
586     /**
587     * Returns {@code true} if this map contains a mapping for the
588     * specified key.
589     *
590     * @param key The key whose presence in this map is to be tested
591     * @return {@code true} if this map contains a mapping for the specified
592     * key.
593     */
594     public boolean containsKey(Object key) {
595     return getNode(hash(key), key) != null;
596     }
597    
598     /**
599     * Associates the specified value with the specified key in this map.
600     * If the map previously contained a mapping for the key, the old
601     * value is replaced.
602     *
603     * @param key key with which the specified value is to be associated
604     * @param value value to be associated with the specified key
605     * @return the previous value associated with {@code key}, or
606     * {@code null} if there was no mapping for {@code key}.
607     * (A {@code null} return can also indicate that the map
608     * previously associated {@code null} with {@code key}.)
609     */
610     public V put(K key, V value) {
611     return putVal(hash(key), key, value, false, true);
612     }
613    
614     /**
615     * Implements Map.put and related methods.
616     *
617     * @param hash hash for key
618     * @param key the key
619     * @param value the value to put
620     * @param onlyIfAbsent if true, don't change existing value
621     * @param evict if false, the table is in creation mode.
622     * @return previous value, or null if none
623     */
624     final V putVal(int hash, K key, V value, boolean onlyIfAbsent,
625     boolean evict) {
626     Node<K,V>[] tab; Node<K,V> p; int n, i;
627     if ((tab = table) == null || (n = tab.length) == 0)
628     n = (tab = resize()).length;
629     if ((p = tab[i = (n - 1) & hash]) == null)
630     tab[i] = newNode(hash, key, value, null);
631     else {
632     Node<K,V> e; K k;
633     if (p.hash == hash &&
634     ((k = p.key) == key || (key != null && key.equals(k))))
635     e = p;
636     else if (p instanceof TreeNode)
637     e = ((TreeNode<K,V>)p).putTreeVal(this, tab, hash, key, value);
638     else {
639     for (int binCount = 0; ; ++binCount) {
640     if ((e = p.next) == null) {
641     p.next = newNode(hash, key, value, null);
642     if (binCount >= TREEIFY_THRESHOLD - 1) // -1 for 1st
643     treeifyBin(tab, hash);
644     break;
645     }
646     if (e.hash == hash &&
647     ((k = e.key) == key || (key != null && key.equals(k))))
648     break;
649     p = e;
650     }
651     }
652     if (e != null) { // existing mapping for key
653     V oldValue = e.value;
654     if (!onlyIfAbsent || oldValue == null)
655     e.value = value;
656     afterNodeAccess(e);
657     return oldValue;
658     }
659     }
660     ++modCount;
661     if (++size > threshold)
662     resize();
663     afterNodeInsertion(evict);
664     return null;
665     }
666    
667     /**
668     * Initializes or doubles table size. If null, allocates in
669     * accord with initial capacity target held in field threshold.
670     * Otherwise, because we are using power-of-two expansion, the
671     * elements from each bin must either stay at same index, or move
672     * with a power of two offset in the new table.
673     *
674     * @return the table
675     */
676     final Node<K,V>[] resize() {
677     Node<K,V>[] oldTab = table;
678     int oldCap = (oldTab == null) ? 0 : oldTab.length;
679     int oldThr = threshold;
680     int newCap, newThr = 0;
681     if (oldCap > 0) {
682     if (oldCap >= MAXIMUM_CAPACITY) {
683     threshold = Integer.MAX_VALUE;
684     return oldTab;
685     }
686     else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY &&
687     oldCap >= DEFAULT_INITIAL_CAPACITY)
688     newThr = oldThr << 1; // double threshold
689     }
690     else if (oldThr > 0) // initial capacity was placed in threshold
691     newCap = oldThr;
692     else { // zero initial threshold signifies using defaults
693     newCap = DEFAULT_INITIAL_CAPACITY;
694     newThr = (int)(DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY);
695     }
696     if (newThr == 0) {
697     float ft = (float)newCap * loadFactor;
698     newThr = (newCap < MAXIMUM_CAPACITY && ft < (float)MAXIMUM_CAPACITY ?
699     (int)ft : Integer.MAX_VALUE);
700     }
701     threshold = newThr;
702     @SuppressWarnings({"rawtypes","unchecked"})
703     Node<K,V>[] newTab = (Node<K,V>[])new Node[newCap];
704     table = newTab;
705     if (oldTab != null) {
706     for (int j = 0; j < oldCap; ++j) {
707     Node<K,V> e;
708     if ((e = oldTab[j]) != null) {
709     oldTab[j] = null;
710     if (e.next == null)
711     newTab[e.hash & (newCap - 1)] = e;
712     else if (e instanceof TreeNode)
713     ((TreeNode<K,V>)e).split(this, newTab, j, oldCap);
714     else { // preserve order
715     Node<K,V> loHead = null, loTail = null;
716     Node<K,V> hiHead = null, hiTail = null;
717     Node<K,V> next;
718     do {
719     next = e.next;
720     if ((e.hash & oldCap) == 0) {
721     if (loTail == null)
722     loHead = e;
723     else
724     loTail.next = e;
725     loTail = e;
726     }
727     else {
728     if (hiTail == null)
729     hiHead = e;
730     else
731     hiTail.next = e;
732     hiTail = e;
733     }
734     } while ((e = next) != null);
735     if (loTail != null) {
736     loTail.next = null;
737     newTab[j] = loHead;
738     }
739     if (hiTail != null) {
740     hiTail.next = null;
741     newTab[j + oldCap] = hiHead;
742     }
743     }
744     }
745     }
746     }
747     return newTab;
748     }
749    
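An illustrative aside, not part of the original source: why an entry either keeps its index or moves by exactly oldCap. Doubling the table exposes one more hash bit (the bit worth oldCap); when that bit is 0 the index is unchanged, when it is 1 the index gains oldCap, which is exactly what the (e.hash & oldCap) test above decides. The class name and sample hashes are invented for the demo.

class ResizeSplitDemo {
    public static void main(String[] args) {
        int oldCap = 16, newCap = 32;
        int h1 = 0b00101;                                      // the bit worth oldCap (16) is 0: stays put
        int h2 = 0b10101;                                      // the bit worth oldCap (16) is 1: moves by oldCap
        System.out.println((h1 & (oldCap - 1)) + " -> " + (h1 & (newCap - 1)));  // 5 -> 5
        System.out.println((h2 & (oldCap - 1)) + " -> " + (h2 & (newCap - 1)));  // 5 -> 21 (5 + 16)
    }
}
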
750     /**
751     * Replaces all linked nodes in bin at index for given hash unless
752     * table is too small, in which case resizes instead.
753     */
754     final void treeifyBin(Node<K,V>[] tab, int hash) {
755     int n, index; Node<K,V> e;
756     if (tab == null || (n = tab.length) < MIN_TREEIFY_CAPACITY)
757     resize();
758     else if ((e = tab[index = (n - 1) & hash]) != null) {
759     TreeNode<K,V> hd = null, tl = null;
760     do {
761     TreeNode<K,V> p = replacementTreeNode(e, null);
762     if (tl == null)
763     hd = p;
764     else {
765     p.prev = tl;
766     tl.next = p;
767     }
768     tl = p;
769     } while ((e = e.next) != null);
770     if ((tab[index] = hd) != null)
771     hd.treeify(tab);
772     }
773     }
774    
775     /**
776     * Copies all of the mappings from the specified map to this map.
777     * These mappings will replace any mappings that this map had for
778     * any of the keys currently in the specified map.
779     *
780     * @param m mappings to be stored in this map
781     * @throws NullPointerException if the specified map is null
782     */
783     public void putAll(Map<? extends K, ? extends V> m) {
784     putMapEntries(m, true);
785     }
786    
787     /**
788     * Removes the mapping for the specified key from this map if present.
789     *
790     * @param key key whose mapping is to be removed from the map
791     * @return the previous value associated with {@code key}, or
792     * {@code null} if there was no mapping for {@code key}.
793     * (A {@code null} return can also indicate that the map
794     * previously associated {@code null} with {@code key}.)
795     */
796     public V remove(Object key) {
797     Node<K,V> e;
798     return (e = removeNode(hash(key), key, null, false, true)) == null ?
799     null : e.value;
800     }
801    
802     /**
803     * Implements Map.remove and related methods.
804     *
805     * @param hash hash for key
806     * @param key the key
807     * @param value the value to match if matchValue, else ignored
808     * @param matchValue if true only remove if value is equal
809     * @param movable if false do not move other nodes while removing
810     * @return the node, or null if none
811     */
812     final Node<K,V> removeNode(int hash, Object key, Object value,
813     boolean matchValue, boolean movable) {
814     Node<K,V>[] tab; Node<K,V> p; int n, index;
815     if ((tab = table) != null && (n = tab.length) > 0 &&
816     (p = tab[index = (n - 1) & hash]) != null) {
817     Node<K,V> node = null, e; K k; V v;
818     if (p.hash == hash &&
819     ((k = p.key) == key || (key != null && key.equals(k))))
820     node = p;
821     else if ((e = p.next) != null) {
822     if (p instanceof TreeNode)
823     node = ((TreeNode<K,V>)p).getTreeNode(hash, key);
824     else {
825     do {
826     if (e.hash == hash &&
827     ((k = e.key) == key ||
828     (key != null && key.equals(k)))) {
829     node = e;
830     break;
831     }
832     p = e;
833     } while ((e = e.next) != null);
834     }
835     }
836     if (node != null && (!matchValue || (v = node.value) == value ||
837     (value != null && value.equals(v)))) {
838     if (node instanceof TreeNode)
839     ((TreeNode<K,V>)node).removeTreeNode(this, tab, movable);
840     else if (node == p)
841     tab[index] = node.next;
842     else
843     p.next = node.next;
844     ++modCount;
845     --size;
846     afterNodeRemoval(node);
847     return node;
848     }
849     }
850     return null;
851     }
852    
853     /**
854     * Removes all of the mappings from this map.
855     * The map will be empty after this call returns.
856     */
857     public void clear() {
858     Node<K,V>[] tab;
859     modCount++;
860     if ((tab = table) != null && size > 0) {
861     size = 0;
862     for (int i = 0; i < tab.length; ++i)
863     tab[i] = null;
864     }
865     }
866    
867     /**
868     * Returns {@code true} if this map maps one or more keys to the
869     * specified value.
870     *
871     * @param value value whose presence in this map is to be tested
872     * @return {@code true} if this map maps one or more keys to the
873     * specified value
874     */
875     public boolean containsValue(Object value) {
876     Node<K,V>[] tab; V v;
877     if ((tab = table) != null && size > 0) {
878     for (Node<K,V> e : tab) {
879     for (; e != null; e = e.next) {
880     if ((v = e.value) == value ||
881     (value != null && value.equals(v)))
882     return true;
883     }
884     }
885     }
886     return false;
887     }
888    
889     /**
890     * Returns a {@link Set} view of the keys contained in this map.
891     * The set is backed by the map, so changes to the map are
892     * reflected in the set, and vice-versa. If the map is modified
893     * while an iteration over the set is in progress (except through
894     * the iterator's own {@code remove} operation), the results of
895     * the iteration are undefined. The set supports element removal,
896     * which removes the corresponding mapping from the map, via the
897     * {@code Iterator.remove}, {@code Set.remove},
898     * {@code removeAll}, {@code retainAll}, and {@code clear}
899     * operations. It does not support the {@code add} or {@code addAll}
900     * operations.
901     *
902     * @return a set view of the keys contained in this map
903     */
904     public Set<K> keySet() {
905     Set<K> ks = keySet;
906     if (ks == null) {
907     ks = new KeySet();
908     keySet = ks;
909     }
910     return ks;
911     }
912    
913     final class KeySet extends AbstractSet<K> {
914     public final int size() { return size; }
915     public final void clear() { HashMap.this.clear(); }
916     public final Iterator<K> iterator() { return new KeyIterator(); }
917     public final boolean contains(Object o) { return containsKey(o); }
918     public final boolean remove(Object key) {
919     return removeNode(hash(key), key, null, false, true) != null;
920     }
921     public final Spliterator<K> spliterator() {
922     return new KeySpliterator<>(HashMap.this, 0, -1, 0, 0);
923     }
924     public final void forEach(Consumer<? super K> action) {
925     Node<K,V>[] tab;
926     if (action == null)
927     throw new NullPointerException();
928     if (size > 0 && (tab = table) != null) {
929     int mc = modCount;
930     for (Node<K,V> e : tab) {
931     for (; e != null; e = e.next)
932     action.accept(e.key);
933     }
934     if (modCount != mc)
935     throw new ConcurrentModificationException();
936     }
937     }
938     }
939    
940     /**
941     * Returns a {@link Collection} view of the values contained in this map.
942     * The collection is backed by the map, so changes to the map are
943     * reflected in the collection, and vice-versa. If the map is
944     * modified while an iteration over the collection is in progress
945     * (except through the iterator's own {@code remove} operation),
946     * the results of the iteration are undefined. The collection
947     * supports element removal, which removes the corresponding
948     * mapping from the map, via the {@code Iterator.remove},
949     * {@code Collection.remove}, {@code removeAll},
950     * {@code retainAll} and {@code clear} operations. It does not
951     * support the {@code add} or {@code addAll} operations.
952     *
953     * @return a view of the values contained in this map
954     */
955     public Collection<V> values() {
956     Collection<V> vs = values;
957     if (vs == null) {
958     vs = new Values();
959     values = vs;
960     }
961     return vs;
962     }
963    
964     final class Values extends AbstractCollection<V> {
965     public final int size() { return size; }
966     public final void clear() { HashMap.this.clear(); }
967     public final Iterator<V> iterator() { return new ValueIterator(); }
968     public final boolean contains(Object o) { return containsValue(o); }
969     public final Spliterator<V> spliterator() {
970     return new ValueSpliterator<>(HashMap.this, 0, -1, 0, 0);
971     }
972     public final void forEach(Consumer<? super V> action) {
973     Node<K,V>[] tab;
974     if (action == null)
975     throw new NullPointerException();
976     if (size > 0 && (tab = table) != null) {
977     int mc = modCount;
978     for (Node<K,V> e : tab) {
979     for (; e != null; e = e.next)
980     action.accept(e.value);
981     }
982     if (modCount != mc)
983     throw new ConcurrentModificationException();
984     }
985     }
986     }
987    
988     /**
989     * Returns a {@link Set} view of the mappings contained in this map.
990     * The set is backed by the map, so changes to the map are
991     * reflected in the set, and vice-versa. If the map is modified
992     * while an iteration over the set is in progress (except through
993     * the iterator's own {@code remove} operation, or through the
994     * {@code setValue} operation on a map entry returned by the
995     * iterator) the results of the iteration are undefined. The set
996     * supports element removal, which removes the corresponding
997     * mapping from the map, via the {@code Iterator.remove},
998     * {@code Set.remove}, {@code removeAll}, {@code retainAll} and
999     * {@code clear} operations. It does not support the
1000     * {@code add} or {@code addAll} operations.
1001     *
1002     * @return a set view of the mappings contained in this map
1003     */
1004     public Set<Map.Entry<K,V>> entrySet() {
1005     Set<Map.Entry<K,V>> es;
1006     return (es = entrySet) == null ? (entrySet = new EntrySet()) : es;
1007     }
1008    
1009     final class EntrySet extends AbstractSet<Map.Entry<K,V>> {
1010     public final int size() { return size; }
1011     public final void clear() { HashMap.this.clear(); }
1012     public final Iterator<Map.Entry<K,V>> iterator() {
1013     return new EntryIterator();
1014     }
1015     public final boolean contains(Object o) {
1016     if (!(o instanceof Map.Entry))
1017     return false;
1018     Map.Entry<?,?> e = (Map.Entry<?,?>) o;
1019     Object key = e.getKey();
1020     Node<K,V> candidate = getNode(hash(key), key);
1021     return candidate != null && candidate.equals(e);
1022     }
1023     public final boolean remove(Object o) {
1024     if (o instanceof Map.Entry) {
1025     Map.Entry<?,?> e = (Map.Entry<?,?>) o;
1026     Object key = e.getKey();
1027     Object value = e.getValue();
1028     return removeNode(hash(key), key, value, true, true) != null;
1029     }
1030     return false;
1031     }
1032     public final Spliterator<Map.Entry<K,V>> spliterator() {
1033     return new EntrySpliterator<>(HashMap.this, 0, -1, 0, 0);
1034     }
1035     public final void forEach(Consumer<? super Map.Entry<K,V>> action) {
1036     Node<K,V>[] tab;
1037     if (action == null)
1038     throw new NullPointerException();
1039     if (size > 0 && (tab = table) != null) {
1040     int mc = modCount;
1041     for (Node<K,V> e : tab) {
1042     for (; e != null; e = e.next)
1043     action.accept(e);
1044     }
1045     if (modCount != mc)
1046     throw new ConcurrentModificationException();
1047     }
1048     }
1049     }
1050    
1051     // Overrides of JDK8 Map extension methods
1052    
1053     @Override
1054     public V getOrDefault(Object key, V defaultValue) {
1055     Node<K,V> e;
1056     return (e = getNode(hash(key), key)) == null ? defaultValue : e.value;
1057     }
1058    
1059     @Override
1060     public V putIfAbsent(K key, V value) {
1061     return putVal(hash(key), key, value, true, true);
1062     }
1063    
1064     @Override
1065     public boolean remove(Object key, Object value) {
1066     return removeNode(hash(key), key, value, true, true) != null;
1067     }
1068    
1069     @Override
1070     public boolean replace(K key, V oldValue, V newValue) {
1071     Node<K,V> e; V v;
1072     if ((e = getNode(hash(key), key)) != null &&
1073     ((v = e.value) == oldValue || (v != null && v.equals(oldValue)))) {
1074     e.value = newValue;
1075     afterNodeAccess(e);
1076     return true;
1077     }
1078     return false;
1079     }
1080    
1081     @Override
1082     public V replace(K key, V value) {
1083     Node<K,V> e;
1084     if ((e = getNode(hash(key), key)) != null) {
1085     V oldValue = e.value;
1086     e.value = value;
1087     afterNodeAccess(e);
1088     return oldValue;
1089     }
1090     return null;
1091     }
1092    
1093     /**
1094     * {@inheritDoc}
1095     *
1096     * <p>This method will, on a best-effort basis, throw a
1097     * {@link ConcurrentModificationException} if it is detected that the
1098     * mapping function modifies this map during computation.
1099     *
1100     * @throws ConcurrentModificationException if it is detected that the
1101     * mapping function modified this map
1102     */
1103     @Override
1104     public V computeIfAbsent(K key,
1105     Function<? super K, ? extends V> mappingFunction) {
1106     if (mappingFunction == null)
1107     throw new NullPointerException();
1108     int hash = hash(key);
1109     Node<K,V>[] tab; Node<K,V> first; int n, i;
1110     int binCount = 0;
1111     TreeNode<K,V> t = null;
1112     Node<K,V> old = null;
1113     if (size > threshold || (tab = table) == null ||
1114     (n = tab.length) == 0)
1115     n = (tab = resize()).length;
1116     if ((first = tab[i = (n - 1) & hash]) != null) {
1117     if (first instanceof TreeNode)
1118     old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
1119     else {
1120     Node<K,V> e = first; K k;
1121     do {
1122     if (e.hash == hash &&
1123     ((k = e.key) == key || (key != null && key.equals(k)))) {
1124     old = e;
1125     break;
1126     }
1127     ++binCount;
1128     } while ((e = e.next) != null);
1129     }
1130     V oldValue;
1131     if (old != null && (oldValue = old.value) != null) {
1132     afterNodeAccess(old);
1133     return oldValue;
1134     }
1135     }
1136     int mc = modCount;
1137     V v = mappingFunction.apply(key);
1138     if (mc != modCount) { throw new ConcurrentModificationException(); }
1139     if (v == null) {
1140     return null;
1141     } else if (old != null) {
1142     old.value = v;
1143     afterNodeAccess(old);
1144     return v;
1145     }
1146     else if (t != null)
1147     t.putTreeVal(this, tab, hash, key, v);
1148     else {
1149     tab[i] = newNode(hash, key, v, first);
1150     if (binCount >= TREEIFY_THRESHOLD - 1)
1151     treeifyBin(tab, hash);
1152     }
1153     modCount = mc + 1;
1154     ++size;
1155     afterNodeInsertion(true);
1156     return v;
1157     }
1158    
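An illustrative aside, not part of the original source: typical computeIfAbsent usage. Per the javadoc above, the mapping function must not modify the map itself; this implementation makes a best-effort attempt to detect that and throw ConcurrentModificationException. The class name is invented for the demo.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class ComputeIfAbsentDemo {
    public static void main(String[] args) {
        Map<String, List<Integer>> index = new HashMap<>();
        index.computeIfAbsent("evens", k -> new ArrayList<>()).add(2);
        index.computeIfAbsent("evens", k -> new ArrayList<>()).add(4);  // existing list is reused
        System.out.println(index);                              // {evens=[2, 4]}
    }
}
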
1159     /**
1160     * {@inheritDoc}
1161     *
1162     * <p>This method will, on a best-effort basis, throw a
1163     * {@link ConcurrentModificationException} if it is detected that the
1164     * remapping function modifies this map during computation.
1165     *
1166     * @throws ConcurrentModificationException if it is detected that the
1167     * remapping function modified this map
1168     */
1169     @Override
1170     public V computeIfPresent(K key,
1171     BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
1172     if (remappingFunction == null)
1173     throw new NullPointerException();
1174     Node<K,V> e; V oldValue;
1175     int hash = hash(key);
1176     if ((e = getNode(hash, key)) != null &&
1177     (oldValue = e.value) != null) {
1178     int mc = modCount;
1179     V v = remappingFunction.apply(key, oldValue);
1180     if (mc != modCount) { throw new ConcurrentModificationException(); }
1181     if (v != null) {
1182     e.value = v;
1183     afterNodeAccess(e);
1184     return v;
1185     }
1186     else
1187     removeNode(hash, key, null, false, true);
1188     }
1189     return null;
1190     }
1191    
1192     /**
1193     * {@inheritDoc}
1194     *
1195     * <p>This method will, on a best-effort basis, throw a
1196     * {@link ConcurrentModificationException} if it is detected that the
1197     * remapping function modifies this map during computation.
1198     *
1199     * @throws ConcurrentModificationException if it is detected that the
1200     * remapping function modified this map
1201     */
1202     @Override
1203     public V compute(K key,
1204     BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
1205     if (remappingFunction == null)
1206     throw new NullPointerException();
1207     int hash = hash(key);
1208     Node<K,V>[] tab; Node<K,V> first; int n, i;
1209     int binCount = 0;
1210     TreeNode<K,V> t = null;
1211     Node<K,V> old = null;
1212     if (size > threshold || (tab = table) == null ||
1213     (n = tab.length) == 0)
1214     n = (tab = resize()).length;
1215     if ((first = tab[i = (n - 1) & hash]) != null) {
1216     if (first instanceof TreeNode)
1217     old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
1218     else {
1219     Node<K,V> e = first; K k;
1220     do {
1221     if (e.hash == hash &&
1222     ((k = e.key) == key || (key != null && key.equals(k)))) {
1223     old = e;
1224     break;
1225     }
1226     ++binCount;
1227     } while ((e = e.next) != null);
1228     }
1229     }
1230     V oldValue = (old == null) ? null : old.value;
1231     int mc = modCount;
1232     V v = remappingFunction.apply(key, oldValue);
1233     if (mc != modCount) { throw new ConcurrentModificationException(); }
1234     if (old != null) {
1235     if (v != null) {
1236     old.value = v;
1237     afterNodeAccess(old);
1238     }
1239     else
1240     removeNode(hash, key, null, false, true);
1241     }
1242     else if (v != null) {
1243     if (t != null)
1244     t.putTreeVal(this, tab, hash, key, v);
1245     else {
1246     tab[i] = newNode(hash, key, v, first);
1247     if (binCount >= TREEIFY_THRESHOLD - 1)
1248     treeifyBin(tab, hash);
1249     }
1250     modCount = mc + 1;
1251     ++size;
1252     afterNodeInsertion(true);
1253     }
1254     return v;
1255     }
1256    
1257     /**
1258     * {@inheritDoc}
1259     *
1260     * <p>This method will, on a best-effort basis, throw a
1261     * {@link ConcurrentModificationException} if it is detected that the
1262     * remapping function modifies this map during computation.
1263     *
1264     * @throws ConcurrentModificationException if it is detected that the
1265     * remapping function modified this map
1266     */
1267     @Override
1268     public V merge(K key, V value,
1269     BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
1270     if (value == null)
1271     throw new NullPointerException();
1272     if (remappingFunction == null)
1273     throw new NullPointerException();
1274     int hash = hash(key);
1275     Node<K,V>[] tab; Node<K,V> first; int n, i;
1276     int binCount = 0;
1277     TreeNode<K,V> t = null;
1278     Node<K,V> old = null;
1279     if (size > threshold || (tab = table) == null ||
1280     (n = tab.length) == 0)
1281     n = (tab = resize()).length;
1282     if ((first = tab[i = (n - 1) & hash]) != null) {
1283     if (first instanceof TreeNode)
1284     old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
1285     else {
1286     Node<K,V> e = first; K k;
1287     do {
1288     if (e.hash == hash &&
1289     ((k = e.key) == key || (key != null && key.equals(k)))) {
1290     old = e;
1291     break;
1292     }
1293     ++binCount;
1294     } while ((e = e.next) != null);
1295     }
1296     }
1297     if (old != null) {
1298     V v;
1299     if (old.value != null) {
1300     int mc = modCount;
1301     v = remappingFunction.apply(old.value, value);
1302     if (mc != modCount) {
1303     throw new ConcurrentModificationException();
1304     }
1305     } else {
1306     v = value;
1307     }
1308     if (v != null) {
1309     old.value = v;
1310     afterNodeAccess(old);
1311     }
1312     else
1313     removeNode(hash, key, null, false, true);
1314     return v;
1315     }
1316     if (value != null) {
1317     if (t != null)
1318     t.putTreeVal(this, tab, hash, key, value);
1319     else {
1320     tab[i] = newNode(hash, key, value, first);
1321     if (binCount >= TREEIFY_THRESHOLD - 1)
1322     treeifyBin(tab, hash);
1323     }
1324     ++modCount;
1325     ++size;
1326     afterNodeInsertion(true);
1327     }
1328     return value;
1329     }
1330    
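An illustrative aside, not part of the original source: merge as a concise counter, exercising the contract above (insert the given value when the key is absent, otherwise apply the remapping function; a null result removes the mapping). The class name is invented for the demo.

import java.util.HashMap;
import java.util.Map;

class MergeDemo {
    public static void main(String[] args) {
        Map<String, Integer> counts = new HashMap<>();
        for (String w : new String[] {"a", "b", "a"})
            counts.merge(w, 1, Integer::sum);
        System.out.println(counts);                             // {a=2, b=1}
    }
}
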
1331     @Override
1332     public void forEach(BiConsumer<? super K, ? super V> action) {
1333     Node<K,V>[] tab;
1334     if (action == null)
1335     throw new NullPointerException();
1336     if (size > 0 && (tab = table) != null) {
1337     int mc = modCount;
1338     for (Node<K,V> e : tab) {
1339     for (; e != null; e = e.next)
1340     action.accept(e.key, e.value);
1341     }
1342     if (modCount != mc)
1343     throw new ConcurrentModificationException();
1344     }
1345     }
1346    
1347     @Override
1348     public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
1349     Node<K,V>[] tab;
1350     if (function == null)
1351     throw new NullPointerException();
1352     if (size > 0 && (tab = table) != null) {
1353     int mc = modCount;
1354     for (Node<K,V> e : tab) {
1355     for (; e != null; e = e.next) {
1356     e.value = function.apply(e.key, e.value);
1357     }
1358     }
1359     if (modCount != mc)
1360     throw new ConcurrentModificationException();
1361     }
1362     }
1363    
1364     /* ------------------------------------------------------------ */
1365     // Cloning and serialization
1366    
1367     /**
1368     * Returns a shallow copy of this {@code HashMap} instance: the keys and
1369     * values themselves are not cloned.
1370     *
1371     * @return a shallow copy of this map
1372     */
1373     @SuppressWarnings("unchecked")
1374     @Override
1375     public Object clone() {
1376     HashMap<K,V> result;
1377     try {
1378     result = (HashMap<K,V>)super.clone();
1379     } catch (CloneNotSupportedException e) {
1380     // this shouldn't happen, since we are Cloneable
1381     throw new InternalError(e);
1382     }
1383     result.reinitialize();
1384     result.putMapEntries(this, false);
1385     return result;
1386     }
1387    
1388     // These methods are also used when serializing HashSets
1389     final float loadFactor() { return loadFactor; }
1390     final int capacity() {
1391     return (table != null) ? table.length :
1392     (threshold > 0) ? threshold :
1393     DEFAULT_INITIAL_CAPACITY;
1394     }
1395    
1396     /**
1397     * Save the state of the {@code HashMap} instance to a stream (i.e.,
1398     * serialize it).
1399     *
1400     * @serialData The <i>capacity</i> of the HashMap (the length of the
1401     * bucket array) is emitted (int), followed by the
1402     * <i>size</i> (an int, the number of key-value
1403     * mappings), followed by the key (Object) and value (Object)
1404     * for each key-value mapping. The key-value mappings are
1405     * emitted in no particular order.
1406     */
1407     private void writeObject(java.io.ObjectOutputStream s)
1408     throws IOException {
1409     int buckets = capacity();
1410     // Write out the threshold, loadfactor, and any hidden stuff
1411     s.defaultWriteObject();
1412     s.writeInt(buckets);
1413     s.writeInt(size);
1414     internalWriteEntries(s);
1415     }
1416    
1417     /**
1418     * Reconstitute the {@code HashMap} instance from a stream (i.e.,
1419     * deserialize it).
1420     */
1421     private void readObject(java.io.ObjectInputStream s)
1422     throws IOException, ClassNotFoundException {
1423     // Read in the threshold (ignored), loadfactor, and any hidden stuff
1424     s.defaultReadObject();
1425     reinitialize();
1426     if (loadFactor <= 0 || Float.isNaN(loadFactor))
1427     throw new InvalidObjectException("Illegal load factor: " +
1428     loadFactor);
1429     s.readInt(); // Read and ignore number of buckets
1430     int mappings = s.readInt(); // Read number of mappings (size)
1431     if (mappings < 0)
1432     throw new InvalidObjectException("Illegal mappings count: " +
1433     mappings);
1434     else if (mappings > 0) { // (if zero, use defaults)
1435     // Size the table using given load factor only if within
1436     // range of 0.25...4.0
1437     float lf = Math.min(Math.max(0.25f, loadFactor), 4.0f);
1438     float fc = (float)mappings / lf + 1.0f;
1439     int cap = ((fc < DEFAULT_INITIAL_CAPACITY) ?
1440     DEFAULT_INITIAL_CAPACITY :
1441     (fc >= MAXIMUM_CAPACITY) ?
1442     MAXIMUM_CAPACITY :
1443     tableSizeFor((int)fc));
1444     float ft = (float)cap * lf;
1445     threshold = ((cap < MAXIMUM_CAPACITY && ft < MAXIMUM_CAPACITY) ?
1446     (int)ft : Integer.MAX_VALUE);
1447     @SuppressWarnings({"rawtypes","unchecked"})
1448     Node<K,V>[] tab = (Node<K,V>[])new Node[cap];
1449     table = tab;
1450    
1451     // Read the keys and values, and put the mappings in the HashMap
1452     for (int i = 0; i < mappings; i++) {
1453     @SuppressWarnings("unchecked")
1454     K key = (K) s.readObject();
1455     @SuppressWarnings("unchecked")
1456     V value = (V) s.readObject();
1457     putVal(hash(key), key, value, false, false);
1458     }
1459     }
1460     }
1461    
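[Editorial note] The sizing arithmetic in readObject is easier to follow with concrete numbers: for 100 mappings at the default load factor 0.75, fc is roughly 134.3, the table is sized to 256 buckets, and the threshold becomes 192. A standalone sketch of that computation (DEFAULT_INITIAL_CAPACITY and MAXIMUM_CAPACITY mirror the constants defined earlier in this file; the simple power-of-two loop is a stand-in for tableSizeFor, not a copy of it):

    public class ReadObjectSizingDemo {
        static final int DEFAULT_INITIAL_CAPACITY = 16;   // 1 << 4, as in HashMap
        static final int MAXIMUM_CAPACITY = 1 << 30;

        // Stand-in for tableSizeFor: smallest power of two >= cap.
        static int powerOfTwoFor(int cap) {
            int n = 1;
            while (n < cap)
                n <<= 1;
            return Math.min(n, MAXIMUM_CAPACITY);
        }

        public static void main(String[] args) {
            int mappings = 100;
            float lf = Math.min(Math.max(0.25f, 0.75f), 4.0f);   // clamped load factor
            float fc = (float) mappings / lf + 1.0f;              // ~134.3
            int cap = (fc < DEFAULT_INITIAL_CAPACITY) ? DEFAULT_INITIAL_CAPACITY :
                      (fc >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY :
                      powerOfTwoFor((int) fc);
            float ft = (float) cap * lf;
            int threshold = (cap < MAXIMUM_CAPACITY && ft < MAXIMUM_CAPACITY)
                            ? (int) ft : Integer.MAX_VALUE;
            System.out.println(cap + " buckets, threshold " + threshold);  // 256 buckets, threshold 192
        }
    }
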
1462     /* ------------------------------------------------------------ */
1463     // iterators
1464    
1465     abstract class HashIterator {
1466     Node<K,V> next; // next entry to return
1467     Node<K,V> current; // current entry
1468     int expectedModCount; // for fast-fail
1469     int index; // current slot
1470    
1471     HashIterator() {
1472     expectedModCount = modCount;
1473     Node<K,V>[] t = table;
1474     current = next = null;
1475     index = 0;
1476     if (t != null && size > 0) { // advance to first entry
1477     do {} while (index < t.length && (next = t[index++]) == null);
1478     }
1479     }
1480    
1481     public final boolean hasNext() {
1482     return next != null;
1483     }
1484    
1485     final Node<K,V> nextNode() {
1486     Node<K,V>[] t;
1487     Node<K,V> e = next;
1488     if (modCount != expectedModCount)
1489     throw new ConcurrentModificationException();
1490     if (e == null)
1491     throw new NoSuchElementException();
1492     if ((next = (current = e).next) == null && (t = table) != null) {
1493     do {} while (index < t.length && (next = t[index++]) == null);
1494     }
1495     return e;
1496     }
1497    
1498     public final void remove() {
1499     Node<K,V> p = current;
1500     if (p == null)
1501     throw new IllegalStateException();
1502     if (modCount != expectedModCount)
1503     throw new ConcurrentModificationException();
1504     current = null;
1505     removeNode(p.hash, p.key, null, false, false);
1506     expectedModCount = modCount;
1507     }
1508     }
1509    
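[Editorial note] The expectedModCount bookkeeping above is what makes these iterators fail fast: a structural change made through anything other than the iterator's own remove() is detected on the next call to nextNode(), while remove() itself resynchronizes expectedModCount with modCount. A small sketch of both behaviors (class name illustrative only):

    import java.util.ConcurrentModificationException;
    import java.util.HashMap;
    import java.util.Iterator;
    import java.util.Map;

    public class FailFastDemo {
        public static void main(String[] args) {
            Map<String, Integer> m = new HashMap<>();
            m.put("a", 1);
            m.put("b", 2);
            m.put("c", 3);

            // Removing through the iterator resynchronizes expectedModCount, so no exception.
            for (Iterator<Map.Entry<String, Integer>> it = m.entrySet().iterator(); it.hasNext(); ) {
                if (it.next().getValue() == 2)
                    it.remove();
            }
            System.out.println(m);   // {a=1, c=3} in some bucket-dependent order

            // Modifying the map directly while iterating changes modCount behind the
            // iterator's back and is detected on the next element.
            try {
                for (String k : m.keySet())
                    m.remove(k);
            } catch (ConcurrentModificationException expected) {
                System.out.println("fail-fast: " + expected);
            }
        }
    }
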
1510     final class KeyIterator extends HashIterator
1511     implements Iterator<K> {
1512     public final K next() { return nextNode().key; }
1513     }
1514    
1515     final class ValueIterator extends HashIterator
1516     implements Iterator<V> {
1517     public final V next() { return nextNode().value; }
1518     }
1519    
1520     final class EntryIterator extends HashIterator
1521     implements Iterator<Map.Entry<K,V>> {
1522     public final Map.Entry<K,V> next() { return nextNode(); }
1523     }
1524    
1525     /* ------------------------------------------------------------ */
1526     // spliterators
1527    
1528     static class HashMapSpliterator<K,V> {
1529     final HashMap<K,V> map;
1530     Node<K,V> current; // current node
1531     int index; // current index, modified on advance/split
1532     int fence; // one past last index
1533     int est; // size estimate
1534     int expectedModCount; // for comodification checks
1535    
1536     HashMapSpliterator(HashMap<K,V> m, int origin,
1537     int fence, int est,
1538     int expectedModCount) {
1539     this.map = m;
1540     this.index = origin;
1541     this.fence = fence;
1542     this.est = est;
1543     this.expectedModCount = expectedModCount;
1544     }
1545    
1546     final int getFence() { // initialize fence and size on first use
1547     int hi;
1548     if ((hi = fence) < 0) {
1549     HashMap<K,V> m = map;
1550     est = m.size;
1551     expectedModCount = m.modCount;
1552     Node<K,V>[] tab = m.table;
1553     hi = fence = (tab == null) ? 0 : tab.length;
1554     }
1555     return hi;
1556     }
1557    
1558     public final long estimateSize() {
1559     getFence(); // force init
1560     return (long) est;
1561     }
1562     }
1563    
1564     static final class KeySpliterator<K,V>
1565     extends HashMapSpliterator<K,V>
1566     implements Spliterator<K> {
1567     KeySpliterator(HashMap<K,V> m, int origin, int fence, int est,
1568     int expectedModCount) {
1569     super(m, origin, fence, est, expectedModCount);
1570     }
1571    
1572     public KeySpliterator<K,V> trySplit() {
1573     int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
1574     return (lo >= mid || current != null) ? null :
1575     new KeySpliterator<>(map, lo, index = mid, est >>>= 1,
1576     expectedModCount);
1577     }
1578    
1579     public void forEachRemaining(Consumer<? super K> action) {
1580     int i, hi, mc;
1581     if (action == null)
1582     throw new NullPointerException();
1583     HashMap<K,V> m = map;
1584     Node<K,V>[] tab = m.table;
1585     if ((hi = fence) < 0) {
1586     mc = expectedModCount = m.modCount;
1587     hi = fence = (tab == null) ? 0 : tab.length;
1588     }
1589     else
1590     mc = expectedModCount;
1591     if (tab != null && tab.length >= hi &&
1592     (i = index) >= 0 && (i < (index = hi) || current != null)) {
1593     Node<K,V> p = current;
1594     current = null;
1595     do {
1596     if (p == null)
1597     p = tab[i++];
1598     else {
1599     action.accept(p.key);
1600     p = p.next;
1601     }
1602     } while (p != null || i < hi);
1603     if (m.modCount != mc)
1604     throw new ConcurrentModificationException();
1605     }
1606     }
1607    
1608     public boolean tryAdvance(Consumer<? super K> action) {
1609     int hi;
1610     if (action == null)
1611     throw new NullPointerException();
1612     Node<K,V>[] tab = map.table;
1613     if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
1614     while (current != null || index < hi) {
1615     if (current == null)
1616     current = tab[index++];
1617     else {
1618     K k = current.key;
1619     current = current.next;
1620     action.accept(k);
1621     if (map.modCount != expectedModCount)
1622     throw new ConcurrentModificationException();
1623     return true;
1624     }
1625     }
1626     }
1627     return false;
1628     }
1629    
1630     public int characteristics() {
1631     return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
1632     Spliterator.DISTINCT;
1633     }
1634     }
1635    
1636     static final class ValueSpliterator<K,V>
1637     extends HashMapSpliterator<K,V>
1638     implements Spliterator<V> {
1639     ValueSpliterator(HashMap<K,V> m, int origin, int fence, int est,
1640     int expectedModCount) {
1641     super(m, origin, fence, est, expectedModCount);
1642     }
1643    
1644     public ValueSpliterator<K,V> trySplit() {
1645     int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
1646     return (lo >= mid || current != null) ? null :
1647     new ValueSpliterator<>(map, lo, index = mid, est >>>= 1,
1648     expectedModCount);
1649     }
1650    
1651     public void forEachRemaining(Consumer<? super V> action) {
1652     int i, hi, mc;
1653     if (action == null)
1654     throw new NullPointerException();
1655     HashMap<K,V> m = map;
1656     Node<K,V>[] tab = m.table;
1657     if ((hi = fence) < 0) {
1658     mc = expectedModCount = m.modCount;
1659     hi = fence = (tab == null) ? 0 : tab.length;
1660     }
1661     else
1662     mc = expectedModCount;
1663     if (tab != null && tab.length >= hi &&
1664     (i = index) >= 0 && (i < (index = hi) || current != null)) {
1665     Node<K,V> p = current;
1666     current = null;
1667     do {
1668     if (p == null)
1669     p = tab[i++];
1670     else {
1671     action.accept(p.value);
1672     p = p.next;
1673     }
1674     } while (p != null || i < hi);
1675     if (m.modCount != mc)
1676     throw new ConcurrentModificationException();
1677     }
1678     }
1679    
1680     public boolean tryAdvance(Consumer<? super V> action) {
1681     int hi;
1682     if (action == null)
1683     throw new NullPointerException();
1684     Node<K,V>[] tab = map.table;
1685     if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
1686     while (current != null || index < hi) {
1687     if (current == null)
1688     current = tab[index++];
1689     else {
1690     V v = current.value;
1691     current = current.next;
1692     action.accept(v);
1693     if (map.modCount != expectedModCount)
1694     throw new ConcurrentModificationException();
1695     return true;
1696     }
1697     }
1698     }
1699     return false;
1700     }
1701    
1702     public int characteristics() {
1703     return (fence < 0 || est == map.size ? Spliterator.SIZED : 0);
1704     }
1705     }
1706    
1707     static final class EntrySpliterator<K,V>
1708     extends HashMapSpliterator<K,V>
1709     implements Spliterator<Map.Entry<K,V>> {
1710     EntrySpliterator(HashMap<K,V> m, int origin, int fence, int est,
1711     int expectedModCount) {
1712     super(m, origin, fence, est, expectedModCount);
1713     }
1714    
1715     public EntrySpliterator<K,V> trySplit() {
1716     int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
1717     return (lo >= mid || current != null) ? null :
1718     new EntrySpliterator<>(map, lo, index = mid, est >>>= 1,
1719     expectedModCount);
1720     }
1721    
1722     public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
1723     int i, hi, mc;
1724     if (action == null)
1725     throw new NullPointerException();
1726     HashMap<K,V> m = map;
1727     Node<K,V>[] tab = m.table;
1728     if ((hi = fence) < 0) {
1729     mc = expectedModCount = m.modCount;
1730     hi = fence = (tab == null) ? 0 : tab.length;
1731     }
1732     else
1733     mc = expectedModCount;
1734     if (tab != null && tab.length >= hi &&
1735     (i = index) >= 0 && (i < (index = hi) || current != null)) {
1736     Node<K,V> p = current;
1737     current = null;
1738     do {
1739     if (p == null)
1740     p = tab[i++];
1741     else {
1742     action.accept(p);
1743     p = p.next;
1744     }
1745     } while (p != null || i < hi);
1746     if (m.modCount != mc)
1747     throw new ConcurrentModificationException();
1748     }
1749     }
1750    
1751     public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
1752     int hi;
1753     if (action == null)
1754     throw new NullPointerException();
1755     Node<K,V>[] tab = map.table;
1756     if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
1757     while (current != null || index < hi) {
1758     if (current == null)
1759     current = tab[index++];
1760     else {
1761     Node<K,V> e = current;
1762     current = current.next;
1763     action.accept(e);
1764     if (map.modCount != expectedModCount)
1765     throw new ConcurrentModificationException();
1766     return true;
1767     }
1768     }
1769     }
1770     return false;
1771     }
1772    
1773     public int characteristics() {
1774     return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
1775     Spliterator.DISTINCT;
1776     }
1777     }
1778    
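[Editorial note] Each of the spliterators above splits by halving its remaining range of bucket indices: trySplit hands the lower half to a new spliterator and keeps the upper half, so splitting is cheap but can be uneven when entries cluster in a few buckets, and SIZED is reported only before the first split, after which sizes are estimates. This is the machinery that parallel streams over the map's views build on; a brief sketch (class name illustrative only):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Spliterator;

    public class SpliteratorDemo {
        public static void main(String[] args) {
            Map<Integer, String> m = new HashMap<>();
            for (int i = 0; i < 1000; i++)
                m.put(i, "v" + i);

            Spliterator<Integer> s1 = m.keySet().spliterator();
            System.out.println(s1.hasCharacteristics(Spliterator.SIZED));  // true before splitting

            Spliterator<Integer> s2 = s1.trySplit();   // lower half of the bucket range (or null)
            System.out.println(s1.estimateSize() + " + "
                               + (s2 == null ? 0 : s2.estimateSize()));    // halved estimates

            // The same splitting machinery drives parallel streams over the views.
            long evens = m.keySet().parallelStream().filter(k -> k % 2 == 0).count();
            System.out.println(evens);   // 500
        }
    }
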
1779     /* ------------------------------------------------------------ */
1780     // LinkedHashMap support
1781    
1782    
1783     /*
1784     * The following package-protected methods are designed to be
1785     * overridden by LinkedHashMap, but not by any other subclass.
1786     * Nearly all other internal methods are also package-protected
1787     * but are declared final, so they can be used by LinkedHashMap,
1788     * view classes, and HashSet.
1789     */
1790    
1791     // Create a regular (non-tree) node
1792     Node<K,V> newNode(int hash, K key, V value, Node<K,V> next) {
1793     return new Node<>(hash, key, value, next);
1794     }
1795    
1796     // For conversion from TreeNodes to plain nodes
1797     Node<K,V> replacementNode(Node<K,V> p, Node<K,V> next) {
1798     return new Node<>(p.hash, p.key, p.value, next);
1799     }
1800    
1801     // Create a tree bin node
1802     TreeNode<K,V> newTreeNode(int hash, K key, V value, Node<K,V> next) {
1803     return new TreeNode<>(hash, key, value, next);
1804     }
1805    
1806     // For treeifyBin
1807     TreeNode<K,V> replacementTreeNode(Node<K,V> p, Node<K,V> next) {
1808     return new TreeNode<>(p.hash, p.key, p.value, next);
1809     }
1810    
1811     /**
1812     * Reset to initial default state. Called by clone and readObject.
1813     */
1814     void reinitialize() {
1815     table = null;
1816     entrySet = null;
1817     keySet = null;
1818     values = null;
1819     modCount = 0;
1820     threshold = 0;
1821     size = 0;
1822     }
1823    
1824     // Callbacks to allow LinkedHashMap post-actions
1825     void afterNodeAccess(Node<K,V> p) { }
1826     void afterNodeInsertion(boolean evict) { }
1827     void afterNodeRemoval(Node<K,V> p) { }
1828    
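[Editorial note] These factory methods and afterNode* callbacks are the seams through which LinkedHashMap threads its doubly-linked entry list (and, with access ordering, its LRU-style behavior) over this hash table without duplicating any of it. The observable difference is easy to see from the public API alone; a small sketch (class name illustrative only):

    import java.util.HashMap;
    import java.util.LinkedHashMap;
    import java.util.Map;

    public class OrderDemo {
        public static void main(String[] args) {
            Map<String, Integer> hash = new HashMap<>();
            Map<String, Integer> linked = new LinkedHashMap<>();
            for (String k : new String[] { "pear", "apple", "fig", "plum" }) {
                hash.put(k, k.length());
                linked.put(k, k.length());
            }
            System.out.println(hash.keySet());    // some bucket-dependent order
            System.out.println(linked.keySet());  // insertion order: [pear, apple, fig, plum]

            // Access-ordered LinkedHashMap: the afterNodeAccess hook moves read entries to the end.
            Map<String, Integer> lru = new LinkedHashMap<>(16, 0.75f, true);
            lru.putAll(linked);
            lru.get("pear");
            System.out.println(lru.keySet());     // [apple, fig, plum, pear]
        }
    }
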
1829     // Called only from writeObject, to ensure compatible ordering.
1830     void internalWriteEntries(java.io.ObjectOutputStream s) throws IOException {
1831     Node<K,V>[] tab;
1832     if (size > 0 && (tab = table) != null) {
1833     for (Node<K,V> e : tab) {
1834     for (; e != null; e = e.next) {
1835     s.writeObject(e.key);
1836     s.writeObject(e.value);
1837     }
1838     }
1839     }
1840     }
1841    
1842     /* ------------------------------------------------------------ */
1843     // Tree bins
1844    
1845     /**
1846     * Entry for Tree bins. Extends LinkedHashMap.Entry (which in turn
1847     * extends Node) so it can be used as an extension of either regular
1848     * or linked nodes.
1849     */
1850     static final class TreeNode<K,V> extends LinkedHashMap.Entry<K,V> {
1851     TreeNode<K,V> parent; // red-black tree links
1852     TreeNode<K,V> left;
1853     TreeNode<K,V> right;
1854     TreeNode<K,V> prev; // needed to unlink next upon deletion
1855     boolean red;
1856     TreeNode(int hash, K key, V val, Node<K,V> next) {
1857     super(hash, key, val, next);
1858     }
1859    
1860     /**
1861     * Returns root of tree containing this node.
1862     */
1863     final TreeNode<K,V> root() {
1864     for (TreeNode<K,V> r = this, p;;) {
1865     if ((p = r.parent) == null)
1866     return r;
1867     r = p;
1868     }
1869     }
1870    
1871     /**
1872     * Ensures that the given root is the first node of its bin.
1873     */
1874     static <K,V> void moveRootToFront(Node<K,V>[] tab, TreeNode<K,V> root) {
1875     int n;
1876     if (root != null && tab != null && (n = tab.length) > 0) {
1877     int index = (n - 1) & root.hash;
1878     TreeNode<K,V> first = (TreeNode<K,V>)tab[index];
1879     if (root != first) {
1880     Node<K,V> rn;
1881     tab[index] = root;
1882     TreeNode<K,V> rp = root.prev;
1883     if ((rn = root.next) != null)
1884     ((TreeNode<K,V>)rn).prev = rp;
1885     if (rp != null)
1886     rp.next = rn;
1887     if (first != null)
1888     first.prev = root;
1889     root.next = first;
1890     root.prev = null;
1891     }
1892     assert checkInvariants(root);
1893     }
1894     }
1895    
1896     /**
1897     * Finds the node with the given hash and key, starting the search
1898     * at this node. The kc argument caches comparableClassFor(key) the
1899     * first time it is used to compare keys.
1900     */
1901     final TreeNode<K,V> find(int h, Object k, Class<?> kc) {
1902     TreeNode<K,V> p = this;
1903     do {
1904     int ph, dir; K pk;
1905     TreeNode<K,V> pl = p.left, pr = p.right, q;
1906     if ((ph = p.hash) > h)
1907     p = pl;
1908     else if (ph < h)
1909     p = pr;
1910     else if ((pk = p.key) == k || (k != null && k.equals(pk)))
1911     return p;
1912     else if (pl == null)
1913     p = pr;
1914     else if (pr == null)
1915     p = pl;
1916     else if ((kc != null ||
1917     (kc = comparableClassFor(k)) != null) &&
1918     (dir = compareComparables(kc, k, pk)) != 0)
1919     p = (dir < 0) ? pl : pr;
1920     else if ((q = pr.find(h, k, kc)) != null)
1921     return q;
1922     else
1923     p = pl;
1924     } while (p != null);
1925     return null;
1926     }
1927    
1928     /**
1929     * Calls find for the root node.
1930     */
1931     final TreeNode<K,V> getTreeNode(int h, Object k) {
1932     return ((parent != null) ? root() : this).find(h, k, null);
1933     }
1934    
1935     /**
1936     * Tie-breaking utility for ordering insertions when hashCodes are
1937     * equal and the keys are not comparable. We don't require a total
1938     * order, just a consistent insertion rule to maintain
1939     * equivalence across rebalancings. Tie-breaking further than
1940     * necessary simplifies testing a bit.
1941     */
1942     static int tieBreakOrder(Object a, Object b) {
1943     int d;
1944     if (a == null || b == null ||
1945     (d = a.getClass().getName().
1946     compareTo(b.getClass().getName())) == 0)
1947     d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
1948     -1 : 1);
1949     return d;
1950     }
1951    
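[Editorial note] The rule above does not have to be a true total order across JVM runs; it only needs to give every insertion a definite, consistent direction for as long as the map instance lives. A standalone sketch of the fallback (the method body is repeated here purely for illustration):

    public class TieBreakDemo {
        // Copy of the fallback rule: class name first, then identity hash code.
        static int tieBreakOrder(Object a, Object b) {
            int d;
            if (a == null || b == null ||
                (d = a.getClass().getName().compareTo(b.getClass().getName())) == 0)
                d = (System.identityHashCode(a) <= System.identityHashCode(b) ? -1 : 1);
            return d;
        }

        public static void main(String[] args) {
            Object a = new Object();   // two instances of the same class
            Object b = new Object();   // that do not implement Comparable
            System.out.println(tieBreakOrder(a, b));   // -1 or 1; stable within this run
            System.out.println(tieBreakOrder(b, a));   // usually the opposite sign
        }
    }
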
1952     /**
1953     * Forms tree of the nodes linked from this node.
1954     */
1955     final void treeify(Node<K,V>[] tab) {
1956     TreeNode<K,V> root = null;
1957     for (TreeNode<K,V> x = this, next; x != null; x = next) {
1958     next = (TreeNode<K,V>)x.next;
1959     x.left = x.right = null;
1960     if (root == null) {
1961     x.parent = null;
1962     x.red = false;
1963     root = x;
1964     }
1965     else {
1966     K k = x.key;
1967     int h = x.hash;
1968     Class<?> kc = null;
1969     for (TreeNode<K,V> p = root;;) {
1970     int dir, ph;
1971     K pk = p.key;
1972     if ((ph = p.hash) > h)
1973     dir = -1;
1974     else if (ph < h)
1975     dir = 1;
1976     else if ((kc == null &&
1977     (kc = comparableClassFor(k)) == null) ||
1978     (dir = compareComparables(kc, k, pk)) == 0)
1979     dir = tieBreakOrder(k, pk);
1980    
1981     TreeNode<K,V> xp = p;
1982     if ((p = (dir <= 0) ? p.left : p.right) == null) {
1983     x.parent = xp;
1984     if (dir <= 0)
1985     xp.left = x;
1986     else
1987     xp.right = x;
1988     root = balanceInsertion(root, x);
1989     break;
1990     }
1991     }
1992     }
1993     }
1994     moveRootToFront(tab, root);
1995     }
1996    
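[Editorial note] Treeification pays off exactly when many keys collide into a single bin, and the comparison chain above shows the ordering it uses: hash first, then Comparable order if the keys support it, then tieBreakOrder. A sketch of the scenario with a deliberately colliding, Comparable key type (Colliding is a made-up example class; the treeify threshold and minimum table capacity are the constants defined earlier in this file):

    import java.util.HashMap;
    import java.util.Map;

    public class TreeBinDemo {
        // Hypothetical key type: every instance hashes to the same bucket.
        static final class Colliding implements Comparable<Colliding> {
            final int id;
            Colliding(int id) { this.id = id; }
            @Override public int hashCode() { return 42; }   // force collisions
            @Override public boolean equals(Object o) {
                return o instanceof Colliding && ((Colliding) o).id == id;
            }
            @Override public int compareTo(Colliding other) {
                return Integer.compare(id, other.id);
            }
        }

        public static void main(String[] args) {
            Map<Colliding, Integer> m = new HashMap<>();
            for (int i = 0; i < 1000; i++)
                m.put(new Colliding(i), i);   // one bin; treeified once it grows past the threshold
            System.out.println(m.get(new Colliding(777)));   // 777, found by tree search, not a linear scan
        }
    }
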
1997     /**
1998     * Returns a list of non-TreeNodes replacing those linked from
1999     * this node.
2000     */
2001     final Node<K,V> untreeify(HashMap<K,V> map) {
2002     Node<K,V> hd = null, tl = null;
2003     for (Node<K,V> q = this; q != null; q = q.next) {
2004     Node<K,V> p = map.replacementNode(q, null);
2005     if (tl == null)
2006     hd = p;
2007     else
2008     tl.next = p;
2009     tl = p;
2010     }
2011     return hd;
2012     }
2013    
2014     /**
2015     * Tree version of putVal.
2016     */
2017     final TreeNode<K,V> putTreeVal(HashMap<K,V> map, Node<K,V>[] tab,
2018     int h, K k, V v) {
2019     Class<?> kc = null;
2020     boolean searched = false;
2021     TreeNode<K,V> root = (parent != null) ? root() : this;
2022     for (TreeNode<K,V> p = root;;) {
2023     int dir, ph; K pk;
2024     if ((ph = p.hash) > h)
2025     dir = -1;
2026     else if (ph < h)
2027     dir = 1;
2028     else if ((pk = p.key) == k || (k != null && k.equals(pk)))
2029     return p;
2030     else if ((kc == null &&
2031     (kc = comparableClassFor(k)) == null) ||
2032     (dir = compareComparables(kc, k, pk)) == 0) {
2033     if (!searched) {
2034     TreeNode<K,V> q, ch;
2035     searched = true;
2036     if (((ch = p.left) != null &&
2037     (q = ch.find(h, k, kc)) != null) ||
2038     ((ch = p.right) != null &&
2039     (q = ch.find(h, k, kc)) != null))
2040     return q;
2041     }
2042     dir = tieBreakOrder(k, pk);
2043     }
2044    
2045     TreeNode<K,V> xp = p;
2046     if ((p = (dir <= 0) ? p.left : p.right) == null) {
2047     Node<K,V> xpn = xp.next;
2048     TreeNode<K,V> x = map.newTreeNode(h, k, v, xpn);
2049     if (dir <= 0)
2050     xp.left = x;
2051     else
2052     xp.right = x;
2053     xp.next = x;
2054     x.parent = x.prev = xp;
2055     if (xpn != null)
2056     ((TreeNode<K,V>)xpn).prev = x;
2057     moveRootToFront(tab, balanceInsertion(root, x));
2058     return null;
2059     }
2060     }
2061     }
2062    
2063     /**
2064     * Removes the given node, which must be present before this call.
2065     * This is messier than typical red-black deletion code because we
2066     * cannot swap the contents of an interior node with a leaf
2067     * successor that is pinned by "next" pointers that are accessible
2068     * independently during traversal. So instead we swap the tree
2069     * linkages. If the current tree appears to have too few nodes,
2070     * the bin is converted back to a plain bin. (The test triggers
2071     * somewhere between 2 and 6 nodes, depending on tree structure).
2072     */
2073     final void removeTreeNode(HashMap<K,V> map, Node<K,V>[] tab,
2074     boolean movable) {
2075     int n;
2076     if (tab == null || (n = tab.length) == 0)
2077     return;
2078     int index = (n - 1) & hash;
2079     TreeNode<K,V> first = (TreeNode<K,V>)tab[index], root = first, rl;
2080     TreeNode<K,V> succ = (TreeNode<K,V>)next, pred = prev;
2081     if (pred == null)
2082     tab[index] = first = succ;
2083     else
2084     pred.next = succ;
2085     if (succ != null)
2086     succ.prev = pred;
2087     if (first == null)
2088     return;
2089     if (root.parent != null)
2090     root = root.root();
2091     if (root == null
2092     || (movable
2093     && (root.right == null
2094     || (rl = root.left) == null
2095     || rl.left == null))) {
2096     tab[index] = first.untreeify(map); // too small
2097     return;
2098     }
2099     TreeNode<K,V> p = this, pl = left, pr = right, replacement;
2100     if (pl != null && pr != null) {
2101     TreeNode<K,V> s = pr, sl;
2102     while ((sl = s.left) != null) // find successor
2103     s = sl;
2104     boolean c = s.red; s.red = p.red; p.red = c; // swap colors
2105     TreeNode<K,V> sr = s.right;
2106     TreeNode<K,V> pp = p.parent;
2107     if (s == pr) { // p was s's direct parent
2108     p.parent = s;
2109     s.right = p;
2110     }
2111     else {
2112     TreeNode<K,V> sp = s.parent;
2113     if ((p.parent = sp) != null) {
2114     if (s == sp.left)
2115     sp.left = p;
2116     else
2117     sp.right = p;
2118     }
2119     if ((s.right = pr) != null)
2120     pr.parent = s;
2121     }
2122     p.left = null;
2123     if ((p.right = sr) != null)
2124     sr.parent = p;
2125     if ((s.left = pl) != null)
2126     pl.parent = s;
2127     if ((s.parent = pp) == null)
2128     root = s;
2129     else if (p == pp.left)
2130     pp.left = s;
2131     else
2132     pp.right = s;
2133     if (sr != null)
2134     replacement = sr;
2135     else
2136     replacement = p;
2137     }
2138     else if (pl != null)
2139     replacement = pl;
2140     else if (pr != null)
2141     replacement = pr;
2142     else
2143     replacement = p;
2144     if (replacement != p) {
2145     TreeNode<K,V> pp = replacement.parent = p.parent;
2146     if (pp == null)
2147     root = replacement;
2148     else if (p == pp.left)
2149     pp.left = replacement;
2150     else
2151     pp.right = replacement;
2152     p.left = p.right = p.parent = null;
2153     }
2154    
2155     TreeNode<K,V> r = p.red ? root : balanceDeletion(root, replacement);
2156    
2157     if (replacement == p) { // detach
2158     TreeNode<K,V> pp = p.parent;
2159     p.parent = null;
2160     if (pp != null) {
2161     if (p == pp.left)
2162     pp.left = null;
2163     else if (p == pp.right)
2164     pp.right = null;
2165     }
2166     }
2167     if (movable)
2168     moveRootToFront(tab, r);
2169     }
2170    
2171     /**
2172     * Splits nodes in a tree bin into lower and upper tree bins,
2173     * or untreeifies if now too small. Called only from resize;
2174     * see above discussion about split bits and indices.
2175     *
2176     * @param map the map
2177     * @param tab the table for recording bin heads
2178     * @param index the index of the table being split
2179     * @param bit the bit of hash to split on
2180     */
2181     final void split(HashMap<K,V> map, Node<K,V>[] tab, int index, int bit) {
2182     TreeNode<K,V> b = this;
2183     // Relink into lo and hi lists, preserving order
2184     TreeNode<K,V> loHead = null, loTail = null;
2185     TreeNode<K,V> hiHead = null, hiTail = null;
2186     int lc = 0, hc = 0;
2187     for (TreeNode<K,V> e = b, next; e != null; e = next) {
2188     next = (TreeNode<K,V>)e.next;
2189     e.next = null;
2190     if ((e.hash & bit) == 0) {
2191     if ((e.prev = loTail) == null)
2192     loHead = e;
2193     else
2194     loTail.next = e;
2195     loTail = e;
2196     ++lc;
2197     }
2198     else {
2199     if ((e.prev = hiTail) == null)
2200     hiHead = e;
2201     else
2202     hiTail.next = e;
2203     hiTail = e;
2204     ++hc;
2205     }
2206     }
2207    
2208     if (loHead != null) {
2209     if (lc <= UNTREEIFY_THRESHOLD)
2210     tab[index] = loHead.untreeify(map);
2211     else {
2212     tab[index] = loHead;
2213     if (hiHead != null) // (else is already treeified)
2214     loHead.treeify(tab);
2215     }
2216     }
2217     if (hiHead != null) {
2218     if (hc <= UNTREEIFY_THRESHOLD)
2219     tab[index + bit] = hiHead.untreeify(map);
2220     else {
2221     tab[index + bit] = hiHead;
2222     if (loHead != null)
2223     hiHead.treeify(tab);
2224     }
2225     }
2226     }
2227    
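[Editorial note] The lo/hi relinking above is the tree-bin form of the general resize rule: when the table doubles, an entry whose hash has the `bit` (old capacity) bit clear stays at its old index, and one with the bit set moves to index + bit. A quick numeric sketch of that rule (standalone and illustrative; the hash values are arbitrary):

    public class SplitIndexDemo {
        public static void main(String[] args) {
            int oldCap = 16, bit = oldCap;          // bit passed to split() is the old capacity
            int[] hashes = { 5, 21, 37, 53 };       // all congruent to 5 mod 16
            for (int h : hashes) {
                int oldIndex = h & (oldCap - 1);               // all land in bin 5 before resize
                int newIndex = ((h & bit) == 0) ? oldIndex     // "lo" list: stays at 5
                                                : oldIndex + bit; // "hi" list: moves to 21
                System.out.println(h + " -> " + newIndex);
            }
            // Prints: 5 -> 5, 21 -> 21, 37 -> 5, 53 -> 21
        }
    }
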
2228     /* ------------------------------------------------------------ */
2229     // Red-black tree methods, all adapted from CLR
2230    
2231     static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
2232     TreeNode<K,V> p) {
2233     TreeNode<K,V> r, pp, rl;
2234     if (p != null && (r = p.right) != null) {
2235     if ((rl = p.right = r.left) != null)
2236     rl.parent = p;
2237     if ((pp = r.parent = p.parent) == null)
2238     (root = r).red = false;
2239     else if (pp.left == p)
2240     pp.left = r;
2241     else
2242     pp.right = r;
2243     r.left = p;
2244     p.parent = r;
2245     }
2246     return root;
2247     }
2248    
2249     static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
2250     TreeNode<K,V> p) {
2251     TreeNode<K,V> l, pp, lr;
2252     if (p != null && (l = p.left) != null) {
2253     if ((lr = p.left = l.right) != null)
2254     lr.parent = p;
2255     if ((pp = l.parent = p.parent) == null)
2256     (root = l).red = false;
2257     else if (pp.right == p)
2258     pp.right = l;
2259     else
2260     pp.left = l;
2261     l.right = p;
2262     p.parent = l;
2263     }
2264     return root;
2265     }
2266    
2267     static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
2268     TreeNode<K,V> x) {
2269     x.red = true;
2270     for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
2271     if ((xp = x.parent) == null) {
2272     x.red = false;
2273     return x;
2274     }
2275     else if (!xp.red || (xpp = xp.parent) == null)
2276     return root;
2277     if (xp == (xppl = xpp.left)) {
2278     if ((xppr = xpp.right) != null && xppr.red) {
2279     xppr.red = false;
2280     xp.red = false;
2281     xpp.red = true;
2282     x = xpp;
2283     }
2284     else {
2285     if (x == xp.right) {
2286     root = rotateLeft(root, x = xp);
2287     xpp = (xp = x.parent) == null ? null : xp.parent;
2288     }
2289     if (xp != null) {
2290     xp.red = false;
2291     if (xpp != null) {
2292     xpp.red = true;
2293     root = rotateRight(root, xpp);
2294     }
2295     }
2296     }
2297     }
2298     else {
2299     if (xppl != null && xppl.red) {
2300     xppl.red = false;
2301     xp.red = false;
2302     xpp.red = true;
2303     x = xpp;
2304     }
2305     else {
2306     if (x == xp.left) {
2307     root = rotateRight(root, x = xp);
2308     xpp = (xp = x.parent) == null ? null : xp.parent;
2309     }
2310     if (xp != null) {
2311     xp.red = false;
2312     if (xpp != null) {
2313     xpp.red = true;
2314     root = rotateLeft(root, xpp);
2315     }
2316     }
2317     }
2318     }
2319     }
2320     }
2321    
2322     static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
2323     TreeNode<K,V> x) {
2324     for (TreeNode<K,V> xp, xpl, xpr;;) {
2325     if (x == null || x == root)
2326     return root;
2327     else if ((xp = x.parent) == null) {
2328     x.red = false;
2329     return x;
2330     }
2331     else if (x.red) {
2332     x.red = false;
2333     return root;
2334     }
2335     else if ((xpl = xp.left) == x) {
2336     if ((xpr = xp.right) != null && xpr.red) {
2337     xpr.red = false;
2338     xp.red = true;
2339     root = rotateLeft(root, xp);
2340     xpr = (xp = x.parent) == null ? null : xp.right;
2341     }
2342     if (xpr == null)
2343     x = xp;
2344     else {
2345     TreeNode<K,V> sl = xpr.left, sr = xpr.right;
2346     if ((sr == null || !sr.red) &&
2347     (sl == null || !sl.red)) {
2348     xpr.red = true;
2349     x = xp;
2350     }
2351     else {
2352     if (sr == null || !sr.red) {
2353     if (sl != null)
2354     sl.red = false;
2355     xpr.red = true;
2356     root = rotateRight(root, xpr);
2357     xpr = (xp = x.parent) == null ?
2358     null : xp.right;
2359     }
2360     if (xpr != null) {
2361     xpr.red = (xp == null) ? false : xp.red;
2362     if ((sr = xpr.right) != null)
2363     sr.red = false;
2364     }
2365     if (xp != null) {
2366     xp.red = false;
2367     root = rotateLeft(root, xp);
2368     }
2369     x = root;
2370     }
2371     }
2372     }
2373     else { // symmetric
2374     if (xpl != null && xpl.red) {
2375     xpl.red = false;
2376     xp.red = true;
2377     root = rotateRight(root, xp);
2378     xpl = (xp = x.parent) == null ? null : xp.left;
2379     }
2380     if (xpl == null)
2381     x = xp;
2382     else {
2383     TreeNode<K,V> sl = xpl.left, sr = xpl.right;
2384     if ((sl == null || !sl.red) &&
2385     (sr == null || !sr.red)) {
2386     xpl.red = true;
2387     x = xp;
2388     }
2389     else {
2390     if (sl == null || !sl.red) {
2391     if (sr != null)
2392     sr.red = false;
2393     xpl.red = true;
2394     root = rotateLeft(root, xpl);
2395     xpl = (xp = x.parent) == null ?
2396     null : xp.left;
2397     }
2398     if (xpl != null) {
2399     xpl.red = (xp == null) ? false : xp.red;
2400     if ((sl = xpl.left) != null)
2401     sl.red = false;
2402     }
2403     if (xp != null) {
2404     xp.red = false;
2405     root = rotateRight(root, xp);
2406     }
2407     x = root;
2408     }
2409     }
2410     }
2411     }
2412     }
2413    
2414     /**
2415     * Recursive invariant check
2416     */
2417     static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
2418     TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
2419     tb = t.prev, tn = (TreeNode<K,V>)t.next;
2420     if (tb != null && tb.next != t)
2421     return false;
2422     if (tn != null && tn.prev != t)
2423     return false;
2424     if (tp != null && t != tp.left && t != tp.right)
2425     return false;
2426     if (tl != null && (tl.parent != t || tl.hash > t.hash))
2427     return false;
2428     if (tr != null && (tr.parent != t || tr.hash < t.hash))
2429     return false;
2430     if (t.red && tl != null && tl.red && tr != null && tr.red)
2431     return false;
2432     if (tl != null && !checkInvariants(tl))
2433     return false;
2434     if (tr != null && !checkInvariants(tr))
2435     return false;
2436     return true;
2437     }
2438     }
2439    
2440     }