/* * Written by Doug Lea with assistance from members of JCP JSR-166 * Expert Group and released to the public domain, as explained at * http://creativecommons.org/licenses/publicdomain */ package java.util.concurrent; import java.util.concurrent.locks.*; import java.util.*; import java.io.Serializable; import java.io.IOException; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; /** * A hash table supporting full concurrency of retrievals and * adjustable expected concurrency for updates. This class obeys the * same functional specification as {@link java.util.Hashtable}, and * includes versions of methods corresponding to each method of * Hashtable. However, even though all operations are * thread-safe, retrieval operations do not entail locking, * and there is not any support for locking the entire table * in a way that prevents all access. This class is fully * interoperable with Hashtable in programs that rely on its * thread safety but not on its synchronization details. * *
 * <p>
Retrieval operations (including get) generally do not * block, so may overlap with update operations (including * put and remove). Retrievals reflect the results * of the most recently completed update operations holding * upon their onset. For aggregate operations such as putAll * and clear, concurrent retrievals may reflect insertion or * removal of only some entries. Similarly, Iterators and * Enumerations return elements reflecting the state of the hash table * at some point at or since the creation of the iterator/enumeration. * They do not throw * {@link ConcurrentModificationException}. However, iterators are * designed to be used by only one thread at a time. * *
 * <p>
The allowed concurrency among update operations is guided by * the optional concurrencyLevel constructor argument * (default 16), which is used as a hint for internal sizing. The * table is internally partitioned to try to permit the indicated * number of concurrent updates without contention. Because placement * in hash tables is essentially random, the actual concurrency will * vary. Ideally, you should choose a value to accommodate as many * threads as will ever concurrently modify the table. Using a * significantly higher value than you need can waste space and time, * and a significantly lower value can lead to thread contention. But * overestimates and underestimates within an order of magnitude do * not usually have much noticeable impact. A value of one is * appropriate when it is known that only one thread will modify * and all others will only read. * *
 * <p>
This class implements all of the optional methods * of the {@link Map} and {@link Iterator} interfaces. * *
 * <p>
Like {@link java.util.Hashtable} but unlike {@link * java.util.HashMap}, this class does NOT allow null to be * used as a key or value. * *
 * <p>
This class is a member of the * * Java Collections Framework. * * @since 1.5 * @author Doug Lea * @param the type of keys maintained by this map * @param the type of mapped values */ public class ConcurrentHashMap extends AbstractMap implements ConcurrentMap, Cloneable, Serializable { private static final long serialVersionUID = 7249069246763182397L; /* * The basic strategy is to subdivide the table among Segments, * each of which itself is a concurrently readable hash table. */ /* ---------------- Constants -------------- */ /** * The default initial number of table slots for this table. * Used when not otherwise specified in constructor. */ static int DEFAULT_INITIAL_CAPACITY = 16; /** * The maximum capacity, used if a higher value is implicitly * specified by either of the constructors with arguments. MUST * be a power of two <= 1<<30 to ensure that entries are indexible * using ints. */ static final int MAXIMUM_CAPACITY = 1 << 30; /** * The default load factor for this table. Used when not * otherwise specified in constructor. */ static final float DEFAULT_LOAD_FACTOR = 0.75f; /** * The default number of concurrency control segments. **/ static final int DEFAULT_SEGMENTS = 16; /** * The maximum number of segments to allow; used to bound * constructor arguments. */ static final int MAX_SEGMENTS = 1 << 16; // slightly conservative /* ---------------- Fields -------------- */ /** * Mask value for indexing into segments. The upper bits of a * key's hash code are used to choose the segment. **/ final int segmentMask; /** * Shift value for indexing within segments. **/ final int segmentShift; /** * The segments, each of which is a specialized hash table */ final Segment[] segments; transient Set keySet; transient Set> entrySet; transient Collection values; /* ---------------- Small Utilities -------------- */ /** * Return a hash code for non-null Object x. * Uses the same hash code spreader as most other java.util hash tables. 
* @param x the object serving as a key * @return the hash code */ static int hash(Object x) { int h = x.hashCode(); h += ~(h << 9); h ^= (h >>> 14); h += (h << 4); h ^= (h >>> 10); return h; } /** * Return the segment that should be used for key with given hash */ final Segment segmentFor(int hash) { return (Segment) segments[(hash >>> segmentShift) & segmentMask]; } /* ---------------- Inner Classes -------------- */ /** * Segments are specialized versions of hash tables. This * subclasses from ReentrantLock opportunistically, just to * simplify some locking and avoid separate construction. **/ static final class Segment extends ReentrantLock implements Serializable { /* * Segments maintain a table of entry lists that are ALWAYS * kept in a consistent state, so can be read without locking. * Next fields of nodes are immutable (final). All list * additions are performed at the front of each bin. This * makes it easy to check changes, and also fast to traverse. * When nodes would otherwise be changed, new nodes are * created to replace them. This works well for hash tables * since the bin lists tend to be short. (The average length * is less than two for the default load factor threshold.) * * Read operations can thus proceed without locking, but rely * on a memory barrier to ensure that completed write * operations performed by other threads are * noticed. Conveniently, the "count" field, tracking the * number of elements, can also serve as the volatile variable * providing proper read/write barriers. This is convenient * because this field needs to be read in many read operations * anyway. * * Implementors note. The basic rules for all this are: * * - All unsynchronized read operations must first read the * "count" field, and should not look at table entries if * it is 0. * * - All synchronized write operations should write to * the "count" field after updating. 
The operations must not * take any action that could even momentarily cause * a concurrent read operation to see inconsistent * data. This is made easier by the nature of the read * operations in Map. For example, no operation * can reveal that the table has grown but the threshold * has not yet been updated, so there are no atomicity * requirements for this with respect to reads. * * As a guide, all critical volatile reads and writes are marked * in code comments. */ private static final long serialVersionUID = 2249069246763182397L; /** * The number of elements in this segment's region. **/ transient volatile int count; /** * Number of updates; used for checking lack of modifications * in bulk-read methods. */ transient int modCount; /** * The table is rehashed when its size exceeds this threshold. * (The value of this field is always (int)(capacity * * loadFactor).) */ transient int threshold; /** * The per-segment table */ transient HashEntry[] table; /** * The load factor for the hash table. Even though this value * is same for all segments, it is replicated to avoid needing * links to outer object. * @serial */ final float loadFactor; Segment(int initialCapacity, float lf) { loadFactor = lf; setTable(new HashEntry[initialCapacity]); } /** * Set table to new HashEntry array. * Call only while holding lock or in constructor. 
**/ void setTable(HashEntry[] newTable) { table = newTable; threshold = (int)(newTable.length * loadFactor); count = count; // write-volatile } /* Specialized implementations of map methods */ V get(Object key, int hash) { if (count != 0) { // read-volatile HashEntry[] tab = table; int index = hash & (tab.length - 1); HashEntry e = (HashEntry) tab[index]; while (e != null) { if (e.hash == hash && key.equals(e.key)) return e.value; e = e.next; } } return null; } boolean containsKey(Object key, int hash) { if (count != 0) { // read-volatile HashEntry[] tab = table; int index = hash & (tab.length - 1); HashEntry e = (HashEntry) tab[index]; while (e != null) { if (e.hash == hash && key.equals(e.key)) return true; e = e.next; } } return false; } boolean containsValue(Object value) { if (count != 0) { // read-volatile HashEntry[] tab = table; int len = tab.length; for (int i = 0 ; i < len; i++) for (HashEntry e = (HashEntry)tab[i] ; e != null ; e = e.next) if (value.equals(e.value)) return true; } return false; } boolean replace(K key, int hash, V oldValue, V newValue) { lock(); try { int c = count; HashEntry[] tab = table; int index = hash & (tab.length - 1); HashEntry first = (HashEntry) tab[index]; HashEntry e = first; for (;;) { if (e == null) return false; if (e.hash == hash && key.equals(e.key)) break; e = e.next; } V v = e.value; if (v == null || !oldValue.equals(v)) return false; e.value = newValue; count = c; // write-volatile return true; } finally { unlock(); } } V replace(K key, int hash, V newValue) { lock(); try { int c = count; HashEntry[] tab = table; int index = hash & (tab.length - 1); HashEntry first = (HashEntry) tab[index]; HashEntry e = first; for (;;) { if (e == null) return null; if (e.hash == hash && key.equals(e.key)) break; e = e.next; } V v = e.value; e.value = newValue; count = c; // write-volatile return v; } finally { unlock(); } } V put(K key, int hash, V value, boolean onlyIfAbsent) { lock(); try { int c = count; HashEntry[] tab = table; 
int index = hash & (tab.length - 1); HashEntry first = (HashEntry) tab[index]; for (HashEntry e = first; e != null; e = (HashEntry) e.next) { if (e.hash == hash && key.equals(e.key)) { V oldValue = e.value; if (!onlyIfAbsent) e.value = value; ++modCount; count = c; // write-volatile return oldValue; } } tab[index] = new HashEntry(hash, key, value, first); ++modCount; ++c; count = c; // write-volatile if (c > threshold) setTable(rehash(tab)); return null; } finally { unlock(); } } HashEntry[] rehash(HashEntry[] oldTable) { int oldCapacity = oldTable.length; if (oldCapacity >= MAXIMUM_CAPACITY) return oldTable; /* * Reclassify nodes in each list to new Map. Because we are * using power-of-two expansion, the elements from each bin * must either stay at same index, or move with a power of two * offset. We eliminate unnecessary node creation by catching * cases where old nodes can be reused because their next * fields won't change. Statistically, at the default * threshold, only about one-sixth of them need cloning when * a table doubles. The nodes they replace will be garbage * collectable as soon as they are no longer referenced by any * reader thread that may be in the midst of traversing table * right now. */ HashEntry[] newTable = new HashEntry[oldCapacity << 1]; int sizeMask = newTable.length - 1; for (int i = 0; i < oldCapacity ; i++) { // We need to guarantee that any existing reads of old Map can // proceed. So we cannot yet null out each bin. 
HashEntry e = (HashEntry)oldTable[i]; if (e != null) { HashEntry next = e.next; int idx = e.hash & sizeMask; // Single node on list if (next == null) newTable[idx] = e; else { // Reuse trailing consecutive sequence at same slot HashEntry lastRun = e; int lastIdx = idx; for (HashEntry last = next; last != null; last = last.next) { int k = last.hash & sizeMask; if (k != lastIdx) { lastIdx = k; lastRun = last; } } newTable[lastIdx] = lastRun; // Clone all remaining nodes for (HashEntry p = e; p != lastRun; p = p.next) { int k = p.hash & sizeMask; newTable[k] = new HashEntry(p.hash, p.key, p.value, (HashEntry) newTable[k]); } } } } return newTable; } /** * Remove; match on key only if value null, else match both. */ V remove(Object key, int hash, Object value) { lock(); try { int c = count; HashEntry[] tab = table; int index = hash & (tab.length - 1); HashEntry first = (HashEntry)tab[index]; HashEntry e = first; for (;;) { if (e == null) return null; if (e.hash == hash && key.equals(e.key)) break; e = e.next; } V oldValue = e.value; if (value != null && !value.equals(oldValue)) return null; // All entries following removed node can stay in list, but // all preceding ones need to be cloned. HashEntry newFirst = e.next; for (HashEntry p = first; p != e; p = p.next) newFirst = new HashEntry(p.hash, p.key, p.value, newFirst); tab[index] = newFirst; ++modCount; count = c-1; // write-volatile return oldValue; } finally { unlock(); } } void clear() { lock(); try { HashEntry[] tab = table; for (int i = 0; i < tab.length ; i++) tab[i] = null; ++modCount; count = 0; // write-volatile } finally { unlock(); } } } /** * ConcurrentHashMap list entry. 
Note that this is never exported * out as a user-visible Map.Entry */ static final class HashEntry { final K key; V value; final int hash; final HashEntry next; HashEntry(int hash, K key, V value, HashEntry next) { this.value = value; this.hash = hash; this.key = key; this.next = next; } } /* ---------------- Public operations -------------- */ /** * Constructs a new, empty map with the specified initial * capacity and the specified load factor. * * @param initialCapacity the initial capacity. The implementation * performs internal sizing to accommodate this many elements. * @param loadFactor the load factor threshold, used to control resizing. * @param concurrencyLevel the estimated number of concurrently * updating threads. The implementation performs internal sizing * to try to accommodate this many threads. * @throws IllegalArgumentException if the initial capacity is * negative or the load factor or concurrencyLevel are * nonpositive. */ public ConcurrentHashMap(int initialCapacity, float loadFactor, int concurrencyLevel) { if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0) throw new IllegalArgumentException(); if (concurrencyLevel > MAX_SEGMENTS) concurrencyLevel = MAX_SEGMENTS; // Find power-of-two sizes best matching arguments int sshift = 0; int ssize = 1; while (ssize < concurrencyLevel) { ++sshift; ssize <<= 1; } segmentShift = 32 - sshift; segmentMask = ssize - 1; this.segments = new Segment[ssize]; if (initialCapacity > MAXIMUM_CAPACITY) initialCapacity = MAXIMUM_CAPACITY; int c = initialCapacity / ssize; if (c * ssize < initialCapacity) ++c; int cap = 1; while (cap < c) cap <<= 1; for (int i = 0; i < this.segments.length; ++i) this.segments[i] = new Segment(cap, loadFactor); } /** * Constructs a new, empty map with the specified initial * capacity, and with default load factor and concurrencyLevel. * * @param initialCapacity The implementation performs internal * sizing to accommodate this many elements. 
* @throws IllegalArgumentException if the initial capacity of * elements is negative. */ public ConcurrentHashMap(int initialCapacity) { this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_SEGMENTS); } /** * Constructs a new, empty map with a default initial capacity, * load factor, and concurrencyLevel. */ public ConcurrentHashMap() { this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_SEGMENTS); } /** * Constructs a new map with the same mappings as the given map. The * map is created with a capacity of twice the number of mappings in * the given map or 11 (whichever is greater), and a default load factor. * @param t the map */ public ConcurrentHashMap(Map t) { this(Math.max((int) (t.size() / DEFAULT_LOAD_FACTOR) + 1, 11), DEFAULT_LOAD_FACTOR, DEFAULT_SEGMENTS); putAll(t); } // inherit Map javadoc public boolean isEmpty() { final Segment[] segments = this.segments; /* * We need to keep track of per-segment modCounts to avoid ABA * problems in which an element in one segment was added and * in another removed during traversal, in which case the * table was never actually empty at any point. Note the * similar use of modCounts in the size() and containsValue() * methods, which are the only other methods also susceptible * to ABA problems. */ int[] mc = new int[segments.length]; int mcsum = 0; for (int i = 0; i < segments.length; ++i) { if (segments[i].count != 0) return false; else mcsum += mc[i] = segments[i].modCount; } // If mcsum happens to be zero, then we know we got a snapshot // before any modifications at all were made. This is // probably common enough to bother tracking. 
if (mcsum != 0) { for (int i = 0; i < segments.length; ++i) { if (segments[i].count != 0 || mc[i] != segments[i].modCount) return false; } } return true; } // inherit Map javadoc public int size() { final Segment[] segments = this.segments; int[] mc = new int[segments.length]; for (;;) { long sum = 0; int mcsum = 0; for (int i = 0; i < segments.length; ++i) { sum += segments[i].count; mcsum += mc[i] = segments[i].modCount; } int check = 0; if (mcsum != 0) { for (int i = 0; i < segments.length; ++i) { check += segments[i].count; if (mc[i] != segments[i].modCount) { check = -1; // force retry break; } } } if (check == sum) { if (sum > Integer.MAX_VALUE) return Integer.MAX_VALUE; else return (int)sum; } } } /** * Returns the value to which the specified key is mapped in this table. * * @param key a key in the table. * @return the value to which the key is mapped in this table; * null if the key is not mapped to any value in * this table. * @throws NullPointerException if the key is * null. */ public V get(Object key) { int hash = hash(key); // throws NullPointerException if key null return segmentFor(hash).get(key, hash); } /** * Tests if the specified object is a key in this table. * * @param key possible key. * @return true if and only if the specified object * is a key in this table, as determined by the * equals method; false otherwise. * @throws NullPointerException if the key is * null. */ public boolean containsKey(Object key) { int hash = hash(key); // throws NullPointerException if key null return segmentFor(hash).containsKey(key, hash); } /** * Returns true if this map maps one or more keys to the * specified value. Note: This method requires a full internal * traversal of the hash table, and so is much slower than * method containsKey. * * @param value value whose presence in this map is to be tested. * @return true if this map maps one or more keys to the * specified value. * @throws NullPointerException if the value is null. 
*/ public boolean containsValue(Object value) { if (value == null) throw new NullPointerException(); final Segment[] segments = this.segments; int[] mc = new int[segments.length]; for (;;) { int sum = 0; int mcsum = 0; for (int i = 0; i < segments.length; ++i) { int c = segments[i].count; mcsum += mc[i] = segments[i].modCount; if (segments[i].containsValue(value)) return true; } boolean cleanSweep = true; if (mcsum != 0) { for (int i = 0; i < segments.length; ++i) { int c = segments[i].count; if (mc[i] != segments[i].modCount) { cleanSweep = false; break; } } } if (cleanSweep) return false; } } /** * Legacy method testing if some key maps into the specified value * in this table. This method is identical in functionality to * {@link #containsValue}, and exists solely to ensure * full compatibility with class {@link java.util.Hashtable}, * which supported this method prior to introduction of the * Java Collections framework. * @param value a value to search for. * @return true if and only if some key maps to the * value argument in this table as * determined by the equals method; * false otherwise. * @throws NullPointerException if the value is null. */ public boolean contains(Object value) { return containsValue(value); } /** * Maps the specified key to the specified * value in this table. Neither the key nor the * value can be null.

* * The value can be retrieved by calling the get method * with a key that is equal to the original key. * * @param key the table key. * @param value the value. * @return the previous value of the specified key in this table, * or null if it did not have one. * @throws NullPointerException if the key or value is * null. */ public V put(K key, V value) { if (value == null) throw new NullPointerException(); int hash = hash(key); return segmentFor(hash).put(key, hash, value, false); } /** * If the specified key is not already associated * with a value, associate it with the given value. * This is equivalent to *

     *   if (!map.containsKey(key)) 
     *      return map.put(key, value);
     *   else
     *      return map.get(key);
     * 
* Except that the action is performed atomically. * @param key key with which the specified value is to be associated. * @param value value to be associated with the specified key. * @return previous value associated with specified key, or null * if there was no mapping for key. A null return can * also indicate that the map previously associated null * with the specified key, if the implementation supports * null values. * * @throws UnsupportedOperationException if the put operation is * not supported by this map. * @throws ClassCastException if the class of the specified key or value * prevents it from being stored in this map. * @throws NullPointerException if the specified key or value is * null. * **/ public V putIfAbsent(K key, V value) { if (value == null) throw new NullPointerException(); int hash = hash(key); return segmentFor(hash).put(key, hash, value, true); } /** * Copies all of the mappings from the specified map to this one. * * These mappings replace any mappings that this map had for any of the * keys currently in the specified Map. * * @param t Mappings to be stored in this map. */ public void putAll(Map t) { for (Iterator> it = (Iterator>) t.entrySet().iterator(); it.hasNext(); ) { Entry e = it.next(); put(e.getKey(), e.getValue()); } } /** * Removes the key (and its corresponding value) from this * table. This method does nothing if the key is not in the table. * * @param key the key that needs to be removed. * @return the value to which the key had been mapped in this table, * or null if the key did not have a mapping. * @throws NullPointerException if the key is * null. */ public V remove(Object key) { int hash = hash(key); return segmentFor(hash).remove(key, hash, null); } /** * Remove entry for key only if currently mapped to given value. * Acts as *
 
     *  if (map.get(key).equals(value)) {
     *     map.remove(key);
     *     return true;
     * } else return false;
     * 
* except that the action is performed atomically. * @param key key with which the specified value is associated. * @param value value associated with the specified key. * @return true if the value was removed * @throws NullPointerException if the specified key is * null. */ public boolean remove(Object key, Object value) { int hash = hash(key); return segmentFor(hash).remove(key, hash, value) != null; } /** * Replace entry for key only if currently mapped to given value. * Acts as *
 
     *  if (map.get(key).equals(oldValue)) {
     *     map.put(key, newValue);
     *     return true;
     * } else return false;
     * 
* except that the action is performed atomically. * @param key key with which the specified value is associated. * @param oldValue value expected to be associated with the specified key. * @param newValue value to be associated with the specified key. * @return true if the value was replaced * @throws NullPointerException if the specified key or values are * null. */ public boolean replace(K key, V oldValue, V newValue) { if (oldValue == null || newValue == null) throw new NullPointerException(); int hash = hash(key); return segmentFor(hash).replace(key, hash, oldValue, newValue); } /** * Replace entry for key only if currently mapped to some value. * Acts as *
 
     *  if ((map.containsKey(key)) {
     *     return map.put(key, value);
     * } else return null;
     * 
* except that the action is performed atomically. * @param key key with which the specified value is associated. * @param value value to be associated with the specified key. * @return previous value associated with specified key, or null * if there was no mapping for key. * @throws NullPointerException if the specified key or value is * null. */ public V replace(K key, V value) { if (value == null) throw new NullPointerException(); int hash = hash(key); return segmentFor(hash).replace(key, hash, value); } /** * Removes all mappings from this map. */ public void clear() { for (int i = 0; i < segments.length; ++i) segments[i].clear(); } /** * Returns a shallow copy of this * ConcurrentHashMap instance: the keys and * values themselves are not cloned. * * @return a shallow copy of this map. */ public Object clone() { // We cannot call super.clone, since it would share final // segments array, and there's no way to reassign finals. float lf = segments[0].loadFactor; int segs = segments.length; int cap = (int)(size() / lf); if (cap < segs) cap = segs; ConcurrentHashMap t = new ConcurrentHashMap(cap, lf, segs); t.putAll(this); return t; } /** * Returns a set view of the keys contained in this map. The set is * backed by the map, so changes to the map are reflected in the set, and * vice-versa. The set supports element removal, which removes the * corresponding mapping from this map, via the Iterator.remove, * Set.remove, removeAll, retainAll, and * clear operations. It does not support the add or * addAll operations. * The returned iterator is a "weakly consistent" iterator that * will never throw {@link java.util.ConcurrentModificationException}, * and guarantees to traverse elements as they existed upon * construction of the iterator, and may (but is not guaranteed to) * reflect any modifications subsequent to construction. * * @return a set view of the keys contained in this map. */ public Set keySet() { Set ks = keySet; return (ks != null) ? 
ks : (keySet = new KeySet()); } /** * Returns a collection view of the values contained in this map. The * collection is backed by the map, so changes to the map are reflected in * the collection, and vice-versa. The collection supports element * removal, which removes the corresponding mapping from this map, via the * Iterator.remove, Collection.remove, * removeAll, retainAll, and clear operations. * It does not support the add or addAll operations. * The returned iterator is a "weakly consistent" iterator that * will never throw {@link java.util.ConcurrentModificationException}, * and guarantees to traverse elements as they existed upon * construction of the iterator, and may (but is not guaranteed to) * reflect any modifications subsequent to construction. * * @return a collection view of the values contained in this map. */ public Collection values() { Collection vs = values; return (vs != null) ? vs : (values = new Values()); } /** * Returns a collection view of the mappings contained in this map. Each * element in the returned collection is a Map.Entry. The * collection is backed by the map, so changes to the map are reflected in * the collection, and vice-versa. The collection supports element * removal, which removes the corresponding mapping from the map, via the * Iterator.remove, Collection.remove, * removeAll, retainAll, and clear operations. * It does not support the add or addAll operations. * The returned iterator is a "weakly consistent" iterator that * will never throw {@link java.util.ConcurrentModificationException}, * and guarantees to traverse elements as they existed upon * construction of the iterator, and may (but is not guaranteed to) * reflect any modifications subsequent to construction. * * @return a collection view of the mappings contained in this map. */ public Set> entrySet() { Set> es = entrySet; return (es != null) ? es : (entrySet = (Set>) (Set) new EntrySet()); } /** * Returns an enumeration of the keys in this table. 
* * @return an enumeration of the keys in this table. * @see #keySet */ public Enumeration keys() { return new KeyIterator(); } /** * Returns an enumeration of the values in this table. * * @return an enumeration of the values in this table. * @see #values */ public Enumeration elements() { return new ValueIterator(); } /* ---------------- Iterator Support -------------- */ abstract class HashIterator { int nextSegmentIndex; int nextTableIndex; HashEntry[] currentTable; HashEntry nextEntry; HashEntry lastReturned; HashIterator() { nextSegmentIndex = segments.length - 1; nextTableIndex = -1; advance(); } public boolean hasMoreElements() { return hasNext(); } final void advance() { if (nextEntry != null && (nextEntry = nextEntry.next) != null) return; while (nextTableIndex >= 0) { if ( (nextEntry = (HashEntry)currentTable[nextTableIndex--]) != null) return; } while (nextSegmentIndex >= 0) { Segment seg = (Segment)segments[nextSegmentIndex--]; if (seg.count != 0) { currentTable = seg.table; for (int j = currentTable.length - 1; j >= 0; --j) { if ( (nextEntry = (HashEntry)currentTable[j]) != null) { nextTableIndex = j - 1; return; } } } } } public boolean hasNext() { return nextEntry != null; } HashEntry nextEntry() { if (nextEntry == null) throw new NoSuchElementException(); lastReturned = nextEntry; advance(); return lastReturned; } public void remove() { if (lastReturned == null) throw new IllegalStateException(); ConcurrentHashMap.this.remove(lastReturned.key); lastReturned = null; } } final class KeyIterator extends HashIterator implements Iterator, Enumeration { public K next() { return super.nextEntry().key; } public K nextElement() { return super.nextEntry().key; } } final class ValueIterator extends HashIterator implements Iterator, Enumeration { public V next() { return super.nextEntry().value; } public V nextElement() { return super.nextEntry().value; } } /** * Entry iterator. 
Exported Entry objects must write-through * changes in setValue, even if the nodes have been cloned. So we * cannot return internal HashEntry objects. Instead, the iterator * itself acts as a forwarding pseudo-entry. */ final class EntryIterator extends HashIterator implements Map.Entry, Iterator> { public Map.Entry next() { nextEntry(); return this; } public K getKey() { if (lastReturned == null) throw new IllegalStateException("Entry was removed"); return lastReturned.key; } public V getValue() { if (lastReturned == null) throw new IllegalStateException("Entry was removed"); return ConcurrentHashMap.this.get(lastReturned.key); } public V setValue(V value) { if (lastReturned == null) throw new IllegalStateException("Entry was removed"); return ConcurrentHashMap.this.put(lastReturned.key, value); } public boolean equals(Object o) { // If not acting as entry, just use default. if (lastReturned == null) return super.equals(o); if (!(o instanceof Map.Entry)) return false; Map.Entry e = (Map.Entry)o; return eq(getKey(), e.getKey()) && eq(getValue(), e.getValue()); } public int hashCode() { // If not acting as entry, just use default. if (lastReturned == null) return super.hashCode(); Object k = getKey(); Object v = getValue(); return ((k == null) ? 0 : k.hashCode()) ^ ((v == null) ? 0 : v.hashCode()); } public String toString() { // If not acting as entry, just use default. if (lastReturned == null) return super.toString(); else return getKey() + "=" + getValue(); } boolean eq(Object o1, Object o2) { return (o1 == null ? 
o2 == null : o1.equals(o2)); } } final class KeySet extends AbstractSet { public Iterator iterator() { return new KeyIterator(); } public int size() { return ConcurrentHashMap.this.size(); } public boolean contains(Object o) { return ConcurrentHashMap.this.containsKey(o); } public boolean remove(Object o) { return ConcurrentHashMap.this.remove(o) != null; } public void clear() { ConcurrentHashMap.this.clear(); } } final class Values extends AbstractCollection { public Iterator iterator() { return new ValueIterator(); } public int size() { return ConcurrentHashMap.this.size(); } public boolean contains(Object o) { return ConcurrentHashMap.this.containsValue(o); } public void clear() { ConcurrentHashMap.this.clear(); } } final class EntrySet extends AbstractSet> { public Iterator> iterator() { return new EntryIterator(); } public boolean contains(Object o) { if (!(o instanceof Map.Entry)) return false; Map.Entry e = (Map.Entry)o; V v = ConcurrentHashMap.this.get(e.getKey()); return v != null && v.equals(e.getValue()); } public boolean remove(Object o) { if (!(o instanceof Map.Entry)) return false; Map.Entry e = (Map.Entry)o; return ConcurrentHashMap.this.remove(e.getKey(), e.getValue()); } public int size() { return ConcurrentHashMap.this.size(); } public void clear() { ConcurrentHashMap.this.clear(); } public Object[] toArray() { // Since we don't ordinarily have distinct Entry objects, we // must pack elements using exportable SimpleEntry Collection> c = new ArrayList>(size()); for (Iterator> i = iterator(); i.hasNext(); ) c.add(new SimpleEntry(i.next())); return c.toArray(); } public T[] toArray(T[] a) { Collection> c = new ArrayList>(size()); for (Iterator> i = iterator(); i.hasNext(); ) c.add(new SimpleEntry(i.next())); return c.toArray(a); } } /** * This duplicates java.util.AbstractMap.SimpleEntry until this class * is made accessible. 
*/ static final class SimpleEntry implements Entry { K key; V value; public SimpleEntry(K key, V value) { this.key = key; this.value = value; } public SimpleEntry(Entry e) { this.key = e.getKey(); this.value = e.getValue(); } public K getKey() { return key; } public V getValue() { return value; } public V setValue(V value) { V oldValue = this.value; this.value = value; return oldValue; } public boolean equals(Object o) { if (!(o instanceof Map.Entry)) return false; Map.Entry e = (Map.Entry)o; return eq(key, e.getKey()) && eq(value, e.getValue()); } public int hashCode() { return ((key == null) ? 0 : key.hashCode()) ^ ((value == null) ? 0 : value.hashCode()); } public String toString() { return key + "=" + value; } static boolean eq(Object o1, Object o2) { return (o1 == null ? o2 == null : o1.equals(o2)); } } /* ---------------- Serialization Support -------------- */ /** * Save the state of the ConcurrentHashMap * instance to a stream (i.e., * serialize it). * @param s the stream * @serialData * the key (Object) and value (Object) * for each key-value mapping, followed by a null pair. * The key-value mappings are emitted in no particular order. */ private void writeObject(java.io.ObjectOutputStream s) throws IOException { s.defaultWriteObject(); for (int k = 0; k < segments.length; ++k) { Segment seg = (Segment)segments[k]; seg.lock(); try { HashEntry[] tab = seg.table; for (int i = 0; i < tab.length; ++i) { for (HashEntry e = (HashEntry)tab[i]; e != null; e = e.next) { s.writeObject(e.key); s.writeObject(e.value); } } } finally { seg.unlock(); } } s.writeObject(null); s.writeObject(null); } /** * Reconstitute the ConcurrentHashMap * instance from a stream (i.e., * deserialize it). * @param s the stream */ private void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException { s.defaultReadObject(); // Initialize each segment to be minimally sized, and let grow. 
for (int i = 0; i < segments.length; ++i) { segments[i].setTable(new HashEntry[1]); } // Read the keys and values, and put the mappings in the table for (;;) { K key = (K) s.readObject(); V value = (V) s.readObject(); if (key == null) break; put(key, value); } } }