root/jsr166/jsr166/src/main/java/util/concurrent/ConcurrentHashMap.java

Comparing jsr166/src/main/java/util/concurrent/ConcurrentHashMap.java (file contents):
Revision 1.33 by dl, Sat Dec 6 00:16:20 2003 UTC vs.
Revision 1.101 by jsr166, Thu Apr 14 01:17:58 2011 UTC

# Line 1 | Line 1
1   /*
2   * Written by Doug Lea with assistance from members of JCP JSR-166
3 < * Expert Group and released to the public domain. Use, modify, and
4 < * redistribute this code in any way without acknowledgement.
3 > * Expert Group and released to the public domain, as explained at
4 > * http://creativecommons.org/publicdomain/zero/1.0/
5   */
6  
7   package java.util.concurrent;
# Line 33 | Line 33 | import java.io.ObjectOutputStream;
33   * removal of only some entries.  Similarly, Iterators and
34   * Enumerations return elements reflecting the state of the hash table
35   * at some point at or since the creation of the iterator/enumeration.
36 < * They do <em>not</em> throw
37 < * {@link ConcurrentModificationException}.  However, iterators are
38 < * designed to be used by only one thread at a time.
36 > * They do <em>not</em> throw {@link ConcurrentModificationException}.
37 > * However, iterators are designed to be used by only one thread at a time.
38   *
39   * <p> The allowed concurrency among update operations is guided by
40   * the optional <tt>concurrencyLevel</tt> constructor argument
41 < * (default 16), which is used as a hint for internal sizing.  The
41 > * (default <tt>16</tt>), which is used as a hint for internal sizing.  The
42   * table is internally partitioned to try to permit the indicated
43   * number of concurrent updates without contention. Because placement
44   * in hash tables is essentially random, the actual concurrency will
# Line 49 | Line 48 | import java.io.ObjectOutputStream;
48   * and a significantly lower value can lead to thread contention. But
49   * overestimates and underestimates within an order of magnitude do
50   * not usually have much noticeable impact. A value of one is
51 < * appropriate when it is known that only one thread will modify
52 < * and all others will only read.
51 > * appropriate when it is known that only one thread will modify and
52 > * all others will only read. Also, resizing this or any other kind of
53 > * hash table is a relatively slow operation, so, when possible, it is
54 > * a good idea to provide estimates of expected table sizes in
55 > * constructors.
56   *
57 < * <p>This class implements all of the <em>optional</em> methods
58 < * of the {@link Map} and {@link Iterator} interfaces.
57 > * <p>This class and its views and iterators implement all of the
58 > * <em>optional</em> methods of the {@link Map} and {@link Iterator}
59 > * interfaces.
60   *
61 < * <p> Like {@link java.util.Hashtable} but unlike {@link
62 < * java.util.HashMap}, this class does NOT allow <tt>null</tt> to be
63 < * used as a key or value.
61 > * <p> Like {@link Hashtable} but unlike {@link HashMap}, this class
62 > * does <em>not</em> allow <tt>null</tt> to be used as a key or value.
63 > *
64 > * <p>This class is a member of the
65 > * <a href="{@docRoot}/../technotes/guides/collections/index.html">
66 > * Java Collections Framework</a>.
67   *
68   * @since 1.5
69   * @author Doug Lea
70   * @param <K> the type of keys maintained by this map
71 < * @param <V> the type of mapped values
71 > * @param <V> the type of mapped values
72   */
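As a hedged illustration of the sizing hints and the optional atomic operations described in the class documentation above, here is a minimal usage sketch. The map name, key/value types, and the particular constructor arguments are invented for the example; only the ConcurrentHashMap and ConcurrentMap APIs themselves come from this class.

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    public class UsageSketch {
        public static void main(String[] args) {
            // Expecting roughly 10000 entries and about 8 concurrently
            // updating threads, so pass sizing hints to the constructor.
            ConcurrentMap<String, Integer> hits =
                new ConcurrentHashMap<String, Integer>(16384, 0.75f, 8);

            hits.putIfAbsent("page.html", 0);         // atomic insert-if-absent
            Integer old = hits.get("page.html");      // lock-free read
            hits.replace("page.html", old, old + 1);  // atomic conditional replace
        }
    }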
73   public class ConcurrentHashMap<K, V> extends AbstractMap<K, V>
74 <        implements ConcurrentMap<K, V>, Cloneable, Serializable {
74 >        implements ConcurrentMap<K, V>, Serializable {
75      private static final long serialVersionUID = 7249069246763182397L;
76  
77      /*
78       * The basic strategy is to subdivide the table among Segments,
79 <     * each of which itself is a concurrently readable hash table.
79 >     * each of which itself is a concurrently readable hash table.  To
80 >     * reduce footprint, all but one segments are constructed only
81 >     * when first needed (see ensureSegment). To maintain visibility
82 >     * in the presence of lazy construction, accesses to segments as
83 >     * well as elements of segment's table must use volatile access,
84 >     * which is done via Unsafe within methods segmentAt etc
85 >     * below. These provide the functionality of AtomicReferenceArrays
86 >     * but reduce the levels of indirection. Additionally,
87 >     * volatile-writes of table elements and entry "next" fields
88 >     * within locked operations use the cheaper "lazySet" forms of
89 >     * writes (via putOrderedObject) because these writes are always
90 >     * followed by lock releases that maintain sequential consistency
91 >     * of table updates.
92 >     *
93 >     * Historical note: The previous version of this class relied
94 >     * heavily on "final" fields, which avoided some volatile reads at
95 >     * the expense of a large initial footprint.  Some remnants of
96 >     * that design (including forced construction of segment 0) exist
97 >     * to ensure serialization compatibility.
98       */
99  
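The strategy comment above notes that the Unsafe-based accessors (segmentAt, entryAt, setEntryAt, shown later in this diff) provide the functionality of AtomicReferenceArray with one less level of indirection. As a rough sketch of that equivalence, assuming nothing beyond the standard AtomicReferenceArray API (the class and method names below are invented for illustration):

    import java.util.concurrent.atomic.AtomicReferenceArray;

    // What entryAt/setEntryAt accomplish, expressed with AtomicReferenceArray
    // instead of Unsafe: get() is a volatile read, lazySet() is the ordered
    // write that the real code performs with putOrderedObject.
    class VolatileTableSketch<E> {
        private final AtomicReferenceArray<E> table;

        VolatileTableSketch(int capacity) {
            table = new AtomicReferenceArray<E>(capacity);
        }

        E entryAt(int i)            { return table.get(i); }
        void setEntryAt(int i, E e) { table.lazySet(i, e); }
    }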
100      /* ---------------- Constants -------------- */
101  
102      /**
103 <     * The default initial number of table slots for this table.
104 <     * Used when not otherwise specified in constructor.
103 >     * The default initial capacity for this table,
104 >     * used when not otherwise specified in a constructor.
105 >     */
106 >    static final int DEFAULT_INITIAL_CAPACITY = 16;
107 >
108 >    /**
109 >     * The default load factor for this table, used when not
110 >     * otherwise specified in a constructor.
111       */
112 <    private static int DEFAULT_INITIAL_CAPACITY = 16;
112 >    static final float DEFAULT_LOAD_FACTOR = 0.75f;
113 >
114 >    /**
115 >     * The default concurrency level for this table, used when not
116 >     * otherwise specified in a constructor.
117 >     */
118 >    static final int DEFAULT_CONCURRENCY_LEVEL = 16;
119  
120      /**
121       * The maximum capacity, used if a higher value is implicitly
122       * specified by either of the constructors with arguments.  MUST
123 <     * be a power of two <= 1<<30 to ensure that entries are indexible
123 >     * be a power of two <= 1<<30 to ensure that entries are indexable
124       * using ints.
125       */
126 <    static final int MAXIMUM_CAPACITY = 1 << 30;
126 >    static final int MAXIMUM_CAPACITY = 1 << 30;
127  
128      /**
129 <     * The default load factor for this table.  Used when not
130 <     * otherwise specified in constructor.
129 >     * The minimum capacity for per-segment tables.  Must be a power
130 >     * of two, at least two to avoid immediate resizing on next use
131 >     * after lazy construction.
132       */
133 <    static final float DEFAULT_LOAD_FACTOR = 0.75f;
133 >    static final int MIN_SEGMENT_TABLE_CAPACITY = 2;
134  
135      /**
136 <     * The default number of concurrency control segments.
137 <     **/
138 <    private static final int DEFAULT_SEGMENTS = 16;
136 >     * The maximum number of segments to allow; used to bound
137 >     * constructor arguments. Must be power of two less than 1 << 24.
138 >     */
139 >    static final int MAX_SEGMENTS = 1 << 16; // slightly conservative
140  
141      /**
142 <     * The maximum number of segments to allow; used to bound ctor arguments.
142 >     * Number of unsynchronized retries in size and containsValue
143 >     * methods before resorting to locking. This is used to avoid
144 >     * unbounded retries if tables undergo continuous modification
145 >     * which would make it impossible to obtain an accurate result.
146       */
147 <    private static final int MAX_SEGMENTS = 1 << 16; // slightly conservative
147 >    static final int RETRIES_BEFORE_LOCK = 2;
148  
149      /* ---------------- Fields -------------- */
150  
151      /**
152       * Mask value for indexing into segments. The upper bits of a
153       * key's hash code are used to choose the segment.
154 <     **/
155 <    private final int segmentMask;
154 >     */
155 >    final int segmentMask;
156  
157      /**
158       * Shift value for indexing within segments.
159 <     **/
160 <    private final int segmentShift;
159 >     */
160 >    final int segmentShift;
161  
162      /**
163 <     * The segments, each of which is a specialized hash table
163 >     * The segments, each of which is a specialized hash table.
164       */
165 <    private final Segment[] segments;
165 >    final Segment<K,V>[] segments;
166  
167 <    private transient Set<K> keySet;
168 <    private transient Set<Map.Entry<K,V>> entrySet;
169 <    private transient Collection<V> values;
167 >    transient Set<K> keySet;
168 >    transient Set<Map.Entry<K,V>> entrySet;
169 >    transient Collection<V> values;
170  
171 <    /* ---------------- Small Utilities -------------- */
171 >    /**
172 >     * ConcurrentHashMap list entry. Note that this is never exported
173 >     * out as a user-visible Map.Entry.
174 >     */
175 >    static final class HashEntry<K,V> {
176 >        final int hash;
177 >        final K key;
178 >        volatile V value;
179 >        volatile HashEntry<K,V> next;
180 >
181 >        HashEntry(int hash, K key, V value, HashEntry<K,V> next) {
182 >            this.hash = hash;
183 >            this.key = key;
184 >            this.value = value;
185 >            this.next = next;
186 >        }
187 >
188 >        /**
189 >         * Sets next field with volatile write semantics.  (See above
190 >         * about use of putOrderedObject.)
191 >         */
192 >        final void setNext(HashEntry<K,V> n) {
193 >            UNSAFE.putOrderedObject(this, nextOffset, n);
194 >        }
195 >
196 >        // Unsafe mechanics
197 >        static final sun.misc.Unsafe UNSAFE;
198 >        static final long nextOffset;
199 >        static {
200 >            try {
201 >                UNSAFE = sun.misc.Unsafe.getUnsafe();
202 >                Class k = HashEntry.class;
203 >                nextOffset = UNSAFE.objectFieldOffset
204 >                    (k.getDeclaredField("next"));
205 >            } catch (Exception e) {
206 >                throw new Error(e);
207 >            }
208 >        }
209 >    }
210  
211      /**
212 <     * Return a hash code for non-null Object x.
213 <     * Uses the same hash code spreader as most other j.u hash tables.
135 <     * @param x the object serving as a key
136 <     * @return the hash code
212 >     * Gets the ith element of given table (if nonnull) with volatile
213 >     * read semantics.
214       */
215 <    private static int hash(Object x) {
216 <        int h = x.hashCode();
217 <        h += ~(h << 9);
218 <        h ^=  (h >>> 14);
219 <        h +=  (h << 4);
143 <        h ^=  (h >>> 10);
144 <        return h;
215 >    @SuppressWarnings("unchecked")
216 >    static final <K,V> HashEntry<K,V> entryAt(HashEntry<K,V>[] tab, int i) {
217 >        return (tab == null) ? null :
218 >            (HashEntry<K,V>) UNSAFE.getObjectVolatile
219 >            (tab, ((long)i << TSHIFT) + TBASE);
220      }
221  
222      /**
223 <     * Return the segment that should be used for key with given hash
223 >     * Sets the ith element of given table, with volatile write
224 >     * semantics. (See above about use of putOrderedObject.)
225       */
226 <    private Segment<K,V> segmentFor(int hash) {
227 <        return (Segment<K,V>) segments[(hash >>> segmentShift) & segmentMask];
226 >    static final <K,V> void setEntryAt(HashEntry<K,V>[] tab, int i,
227 >                                       HashEntry<K,V> e) {
228 >        UNSAFE.putOrderedObject(tab, ((long)i << TSHIFT) + TBASE, e);
229      }
230  
231 <    /* ---------------- Inner Classes -------------- */
231 >    /**
232 >     * Applies a supplemental hash function to a given hashCode, which
233 >     * defends against poor quality hash functions.  This is critical
234 >     * because ConcurrentHashMap uses power-of-two length hash tables,
235 >     * that otherwise encounter collisions for hashCodes that do not
236 >     * differ in lower or upper bits.
237 >     */
238 >    private static int hash(int h) {
239 >        // Spread bits to regularize both segment and index locations,
240 >        // using variant of single-word Wang/Jenkins hash.
241 >        h += (h <<  15) ^ 0xffffcd7d;
242 >        h ^= (h >>> 10);
243 >        h += (h <<   3);
244 >        h ^= (h >>>  6);
245 >        h += (h <<   2) + (h << 14);
246 >        return h ^ (h >>> 16);
247 >    }
248  
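As a hedged, standalone illustration of how the spread hash is then consumed: the upper bits pick a segment and the lower bits pick a bin within that segment's table, using the same expressions that appear later in segmentForHash and entryForHash. The shift and mask values below assume the defaults of 16 segments (segmentShift = 28, segmentMask = 15); the per-segment table length of 8 and the key are arbitrary.

    // Reproduces the spreader above and shows how its result is split into a
    // segment index (upper bits) and a bin index (lower bits).
    public class HashSpreadDemo {
        static int hash(int h) {
            h += (h <<  15) ^ 0xffffcd7d;
            h ^= (h >>> 10);
            h += (h <<   3);
            h ^= (h >>>  6);
            h += (h <<   2) + (h << 14);
            return h ^ (h >>> 16);
        }

        public static void main(String[] args) {
            int h = hash("example-key".hashCode());
            int segmentIndex = (h >>> 28) & 15; // upper bits choose the segment
            int binIndex = (8 - 1) & h;         // lower bits choose the bin within it
            System.out.println("segment=" + segmentIndex + ", bin=" + binIndex);
        }
    }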
249      /**
250       * Segments are specialized versions of hash tables.  This
251       * subclasses from ReentrantLock opportunistically, just to
252       * simplify some locking and avoid separate construction.
253 <     **/
254 <    private static final class Segment<K,V> extends ReentrantLock implements Serializable {
253 >     */
254 >    static final class Segment<K,V> extends ReentrantLock implements Serializable {
255          /*
256 <         * Segments maintain a table of entry lists that are ALWAYS
257 <         * kept in a consistent state, so can be read without locking.
258 <         * Next fields of nodes are immutable (final).  All list
259 <         * additions are performed at the front of each bin. This
260 <         * makes it easy to check changes, and also fast to traverse.
261 <         * When nodes would otherwise be changed, new nodes are
169 <         * created to replace them. This works well for hash tables
170 <         * since the bin lists tend to be short. (The average length
171 <         * is less than two for the default load factor threshold.)
172 <         *
173 <         * Read operations can thus proceed without locking, but rely
174 <         * on a memory barrier to ensure that completed write
175 <         * operations performed by other threads are
176 <         * noticed. Conveniently, the "count" field, tracking the
177 <         * number of elements, can also serve as the volatile variable
178 <         * providing proper read/write barriers. This is convenient
179 <         * because this field needs to be read in many read operations
180 <         * anyway.
181 <         *
182 <         * Implementors note. The basic rules for all this are:
183 <         *
184 <         *   - All unsynchronized read operations must first read the
185 <         *     "count" field, and should not look at table entries if
186 <         *     it is 0.
256 >         * Segments maintain a table of entry lists that are always
257 >         * kept in a consistent state, so can be read (via volatile
258 >         * reads of segments and tables) without locking.  This
259 >         * requires replicating nodes when necessary during table
260 >         * resizing, so the old lists can be traversed by readers
261 >         * still using old version of table.
262           *
263 <         *   - All synchronized write operations should write to
264 <         *     the "count" field after updating. The operations must not
265 <         *     take any action that could even momentarily cause
266 <         *     a concurrent read operation to see inconsistent
267 <         *     data. This is made easier by the nature of the read
268 <         *     operations in Map. For example, no operation
269 <         *     can reveal that the table has grown but the threshold
270 <         *     has not yet been updated, so there are no atomicity
271 <         *     requirements for this with respect to reads.
272 <         *
273 <         * As a guide, all critical volatile reads and writes are marked
274 <         * in code comments.
263 >         * This class defines only mutative methods requiring locking.
264 >         * Except as noted, the methods of this class perform the
265 >         * per-segment versions of ConcurrentHashMap methods.  (Other
266 >         * methods are integrated directly into ConcurrentHashMap
267 >         * methods.) These mutative methods use a form of controlled
268 >         * spinning on contention via methods scanAndLock and
269 >         * scanAndLockForPut. These intersperse tryLocks with
270 >         * traversals to locate nodes.  The main benefit is to absorb
271 >         * cache misses (which are very common for hash tables) while
272 >         * obtaining locks so that traversal is faster once
273 >         * acquired. We do not actually use the found nodes since they
274 >         * must be re-acquired under lock anyway to ensure sequential
275 >         * consistency of updates (and in any case may be undetectably
276 >         * stale), but they will normally be much faster to re-locate.
277 >         * Also, scanAndLockForPut speculatively creates a fresh node
278 >         * to use in put if no node is found.
279           */
280  
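A minimal sketch of the controlled-spinning idea described in the comment above: a bounded number of tryLock attempts, then a fall back to a blocking acquire. This is a simplification, not the actual scanAndLock logic; in particular the real code traverses the bucket list between attempts to absorb cache misses, which is omitted here, and the class and method names are invented.

    import java.util.concurrent.locks.ReentrantLock;

    class SpinThenBlock {
        // Try the lock a bounded number of times, then block.
        static void acquire(ReentrantLock lock, int maxRetries) {
            int retries = 0;
            while (!lock.tryLock()) {
                // (the real code does useful read-only traversal work here)
                if (++retries > maxRetries) {
                    lock.lock(); // stop spinning and block until available
                    break;
                }
            }
        }
    }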
281          private static final long serialVersionUID = 2249069246763182397L;
282  
283          /**
284 <         * The number of elements in this segment's region.
285 <         **/
286 <        transient volatile int count;
284 >         * The maximum number of times to tryLock in a prescan before
285 >         * possibly blocking on acquire in preparation for a locked
286 >         * segment operation. On multiprocessors, using a bounded
287 >         * number of retries maintains cache acquired while locating
288 >         * nodes.
289 >         */
290 >        static final int MAX_SCAN_RETRIES =
291 >            Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1;
292  
293          /**
294 <         * Number of updates; used for checking lack of modifications
295 <         * in bulk-read methods.
294 >         * The per-segment table. Elements are accessed via
295 >         * entryAt/setEntryAt providing volatile semantics.
296           */
297 <        transient int modCount;
297 >        transient volatile HashEntry<K,V>[] table;
298  
299          /**
300 <         * The table is rehashed when its size exceeds this threshold.
301 <         * (The value of this field is always (int)(capacity *
302 <         * loadFactor).)
300 >         * The number of elements. Accessed only either within locks
301 >         * or among other volatile reads that maintain visibility.
302 >         */
303 >        transient int count;
304 >
305 >        /**
306 >         * The total number of insertions and removals in this
307 >         * segment.  Even though this may overflow 32 bits, it
308 >         * provides sufficient accuracy for stability checks in CHM
309 >         * isEmpty() and size() methods.  Accessed only either within
310 >         * locks or among other volatile reads that maintain
311 >         * visibility.
312           */
313 <        private transient int threshold;
313 >        transient int modCount;
314  
315          /**
316 <         * The per-segment table
316 >         * The table is rehashed when its size exceeds this threshold.
317 >         * (The value of this field is always <tt>(int)(capacity *
318 >         * loadFactor)</tt>.)
319           */
320 <        transient HashEntry[] table;
320 >        transient int threshold;
321  
322          /**
323           * The load factor for the hash table.  Even though this value
# Line 230 | Line 325 | public class ConcurrentHashMap<K, V> ext
325           * links to outer object.
326           * @serial
327           */
328 <        private final float loadFactor;
234 <
235 <        Segment(int initialCapacity, float lf) {
236 <            loadFactor = lf;
237 <            setTable(new HashEntry[initialCapacity]);
238 <        }
239 <
240 <        /**
241 <         * Set table to new HashEntry array.
242 <         * Call only while holding lock or in constructor.
243 <         **/
244 <        private void setTable(HashEntry[] newTable) {
245 <            table = newTable;
246 <            threshold = (int)(newTable.length * loadFactor);
247 <            count = count; // write-volatile
248 <        }
249 <
250 <        /* Specialized implementations of map methods */
251 <
252 <        V get(Object key, int hash) {
253 <            if (count != 0) { // read-volatile
254 <                HashEntry[] tab = table;
255 <                int index = hash & (tab.length - 1);
256 <                HashEntry<K,V> e = (HashEntry<K,V>) tab[index];
257 <                while (e != null) {
258 <                    if (e.hash == hash && key.equals(e.key))
259 <                        return e.value;
260 <                    e = e.next;
261 <                }
262 <            }
263 <            return null;
264 <        }
265 <
266 <        boolean containsKey(Object key, int hash) {
267 <            if (count != 0) { // read-volatile
268 <                HashEntry[] tab = table;
269 <                int index = hash & (tab.length - 1);
270 <                HashEntry<K,V> e = (HashEntry<K,V>) tab[index];
271 <                while (e != null) {
272 <                    if (e.hash == hash && key.equals(e.key))
273 <                        return true;
274 <                    e = e.next;
275 <                }
276 <            }
277 <            return false;
278 <        }
328 >        final float loadFactor;
329  
330 <        boolean containsValue(Object value) {
331 <            if (count != 0) { // read-volatile
332 <                HashEntry[] tab = table;
333 <                int len = tab.length;
284 <                for (int i = 0 ; i < len; i++)
285 <                    for (HashEntry<K,V> e = (HashEntry<K,V>)tab[i] ; e != null ; e = e.next)
286 <                        if (value.equals(e.value))
287 <                            return true;
288 <            }
289 <            return false;
330 >        Segment(float lf, int threshold, HashEntry<K,V>[] tab) {
331 >            this.loadFactor = lf;
332 >            this.threshold = threshold;
333 >            this.table = tab;
334          }
335  
336 <        boolean replace(K key, int hash, V oldValue, V newValue) {
337 <            lock();
336 >        final V put(K key, int hash, V value, boolean onlyIfAbsent) {
337 >            HashEntry<K,V> node = tryLock() ? null :
338 >                scanAndLockForPut(key, hash, value);
339 >            V oldValue;
340              try {
341 <                int c = count;
342 <                HashEntry[] tab = table;
343 <                int index = hash & (tab.length - 1);
344 <                HashEntry<K,V> first = (HashEntry<K,V>) tab[index];
345 <                HashEntry<K,V> e = first;
346 <                for (;;) {
347 <                    if (e == null)
348 <                        return false;
349 <                    if (e.hash == hash && key.equals(e.key))
350 <                        break;
351 <                    e = e.next;
352 <                }
353 <
354 <                V v = e.value;
355 <                if (v == null || !oldValue.equals(v))
356 <                    return false;
357 <
358 <                e.value = newValue;
359 <                count = c; // write-volatile
360 <                return true;
361 <                
362 <            } finally {
363 <                unlock();
364 <            }
365 <        }
366 <
321 <        V replace(K key, int hash, V newValue) {
322 <            lock();
323 <            try {
324 <                int c = count;
325 <                HashEntry[] tab = table;
326 <                int index = hash & (tab.length - 1);
327 <                HashEntry<K,V> first = (HashEntry<K,V>) tab[index];
328 <                HashEntry<K,V> e = first;
329 <                for (;;) {
330 <                    if (e == null)
331 <                        return null;
332 <                    if (e.hash == hash && key.equals(e.key))
333 <                        break;
334 <                    e = e.next;
335 <                }
336 <
337 <                V v = e.value;
338 <                e.value = newValue;
339 <                count = c; // write-volatile
340 <                return v;
341 <                
342 <            } finally {
343 <                unlock();
344 <            }
345 <        }
346 <
347 <
348 <        V put(K key, int hash, V value, boolean onlyIfAbsent) {
349 <            lock();
350 <            try {
351 <                int c = count;
352 <                HashEntry[] tab = table;
353 <                int index = hash & (tab.length - 1);
354 <                HashEntry<K,V> first = (HashEntry<K,V>) tab[index];
355 <
356 <                for (HashEntry<K,V> e = first; e != null; e = (HashEntry<K,V>) e.next) {
357 <                    if (e.hash == hash && key.equals(e.key)) {
358 <                        V oldValue = e.value;
359 <                        if (!onlyIfAbsent)
360 <                            e.value = value;
341 >                HashEntry<K,V>[] tab = table;
342 >                int index = (tab.length - 1) & hash;
343 >                HashEntry<K,V> first = entryAt(tab, index);
344 >                for (HashEntry<K,V> e = first;;) {
345 >                    if (e != null) {
346 >                        K k;
347 >                        if ((k = e.key) == key ||
348 >                            (e.hash == hash && key.equals(k))) {
349 >                            oldValue = e.value;
350 >                            if (!onlyIfAbsent)
351 >                                e.value = value;
352 >                            break;
353 >                        }
354 >                        e = e.next;
355 >                    }
356 >                    else {
357 >                        if (node != null)
358 >                            node.setNext(first);
359 >                        else
360 >                            node = new HashEntry<K,V>(hash, key, value, first);
361 >                        int c = count + 1;
362 >                        if (c > threshold && first != null &&
363 >                            tab.length < MAXIMUM_CAPACITY)
364 >                            rehash(node);
365 >                        else
366 >                            setEntryAt(tab, index, node);
367                          ++modCount;
368 <                        count = c; // write-volatile
369 <                        return oldValue;
368 >                        count = c;
369 >                        oldValue = null;
370 >                        break;
371                      }
372                  }
366
367                tab[index] = new HashEntry<K,V>(hash, key, value, first);
368                ++modCount;
369                ++c;
370                count = c; // write-volatile
371                if (c > threshold)
372                    setTable(rehash(tab));
373                return null;
373              } finally {
374                  unlock();
375              }
376 +            return oldValue;
377          }
378  
379 <        private HashEntry[] rehash(HashEntry[] oldTable) {
380 <            int oldCapacity = oldTable.length;
381 <            if (oldCapacity >= MAXIMUM_CAPACITY)
382 <                return oldTable;
383 <
379 >        /**
380 >         * Doubles size of table and repacks entries, also adding the
381 >         * given node to the new table.
382 >         */
383 >        @SuppressWarnings("unchecked")
384 >        private void rehash(HashEntry<K,V> node) {
385              /*
386 <             * Reclassify nodes in each list to new Map.  Because we are
387 <             * using power-of-two expansion, the elements from each bin
388 <             * must either stay at same index, or move with a power of two
389 <             * offset. We eliminate unnecessary node creation by catching
390 <             * cases where old nodes can be reused because their next
391 <             * fields won't change. Statistically, at the default
392 <             * threshold, only about one-sixth of them need cloning when
393 <             * a table doubles. The nodes they replace will be garbage
394 <             * collectable as soon as they are no longer referenced by any
395 <             * reader thread that may be in the midst of traversing table
396 <             * right now.
386 >             * Reclassify nodes in each list to new table.  Because we
387 >             * are using power-of-two expansion, the elements from
388 >             * each bin must either stay at same index, or move with a
389 >             * power of two offset. We eliminate unnecessary node
390 >             * creation by catching cases where old nodes can be
391 >             * reused because their next fields won't change.
392 >             * Statistically, at the default threshold, only about
393 >             * one-sixth of them need cloning when a table
394 >             * doubles. The nodes they replace will be garbage
395 >             * collectable as soon as they are no longer referenced by
396 >             * any reader thread that may be in the midst of
397 >             * concurrently traversing table. Entry accesses use plain
398 >             * array indexing because they are followed by volatile
399 >             * table write.
400               */
401 <
402 <            HashEntry[] newTable = new HashEntry[oldCapacity << 1];
403 <            int sizeMask = newTable.length - 1;
401 >            HashEntry<K,V>[] oldTable = table;
402 >            int oldCapacity = oldTable.length;
403 >            int newCapacity = oldCapacity << 1;
404 >            threshold = (int)(newCapacity * loadFactor);
405 >            HashEntry<K,V>[] newTable =
406 >                (HashEntry<K,V>[]) new HashEntry[newCapacity];
407 >            int sizeMask = newCapacity - 1;
408              for (int i = 0; i < oldCapacity ; i++) {
409 <                // We need to guarantee that any existing reads of old Map can
402 <                //  proceed. So we cannot yet null out each bin.
403 <                HashEntry<K,V> e = (HashEntry<K,V>)oldTable[i];
404 <
409 >                HashEntry<K,V> e = oldTable[i];
410                  if (e != null) {
411                      HashEntry<K,V> next = e.next;
412                      int idx = e.hash & sizeMask;
413 <
409 <                    //  Single node on list
410 <                    if (next == null)
413 >                    if (next == null)   //  Single node on list
414                          newTable[idx] = e;
415 <
413 <                    else {
414 <                        // Reuse trailing consecutive sequence at same slot
415 >                    else { // Reuse consecutive sequence at same slot
416                          HashEntry<K,V> lastRun = e;
417                          int lastIdx = idx;
418                          for (HashEntry<K,V> last = next;
# Line 424 | Line 425 | public class ConcurrentHashMap<K, V> ext
425                              }
426                          }
427                          newTable[lastIdx] = lastRun;
428 <
428 <                        // Clone all remaining nodes
428 >                        // Clone remaining nodes
429                          for (HashEntry<K,V> p = e; p != lastRun; p = p.next) {
430 <                            int k = p.hash & sizeMask;
431 <                            newTable[k] = new HashEntry<K,V>(p.hash,
432 <                                                             p.key,
433 <                                                             p.value,
434 <                                                             (HashEntry<K,V>) newTable[k]);
430 >                            V v = p.value;
431 >                            int h = p.hash;
432 >                            int k = h & sizeMask;
433 >                            HashEntry<K,V> n = newTable[k];
434 >                            newTable[k] = new HashEntry<K,V>(h, p.key, v, n);
435                          }
436                      }
437                  }
438              }
439 <            return newTable;
439 >            int nodeIndex = node.hash & sizeMask; // add the new node
440 >            node.setNext(newTable[nodeIndex]);
441 >            newTable[nodeIndex] = node;
442 >            table = newTable;
443 >        }
444 >
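A hedged worked example of the reclassification rule described in the rehash comment above: with power-of-two expansion, each entry either keeps its bin index or moves by exactly the old capacity, depending on a single extra hash bit. The concrete numbers are arbitrary.

    public class RehashIndexDemo {
        public static void main(String[] args) {
            int oldCapacity = 8, newCapacity = 16;
            int hash = 13;                            // binary 1101
            int oldIndex = hash & (oldCapacity - 1);  // 101  -> 5
            int newIndex = hash & (newCapacity - 1);  // 1101 -> 13 = 5 + oldCapacity
            System.out.println(oldIndex + " -> " + newIndex);
        }
    }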
445 >        /**
446 >         * Scans for a node containing given key while trying to
447 >         * acquire lock, creating and returning one if not found. Upon
448 >         * return, guarantees that lock is held.
449 >         *
450 >         * @return a new node if key not found, else null
451 >         */
452 >        private HashEntry<K,V> scanAndLockForPut(K key, int hash, V value) {
453 >            HashEntry<K,V> first = entryForHash(this, hash);
454 >            HashEntry<K,V> e = first;
455 >            HashEntry<K,V> node = null;
456 >            int retries = -1; // negative while locating node
457 >            while (!tryLock()) {
458 >                HashEntry<K,V> f; // to recheck first below
459 >                if (retries < 0) {
460 >                    if (e == null) {
461 >                        if (node == null) // speculatively create node
462 >                            node = new HashEntry<K,V>(hash, key, value, null);
463 >                        retries = 0;
464 >                    }
465 >                    else if (key.equals(e.key))
466 >                        retries = 0;
467 >                    else
468 >                        e = e.next;
469 >                }
470 >                else if (++retries > MAX_SCAN_RETRIES) {
471 >                    lock();
472 >                    break;
473 >                }
474 >                else if ((retries & 1) == 0 &&
475 >                         (f = entryForHash(this, hash)) != first) {
476 >                    e = first = f; // re-traverse if entry changed
477 >                    retries = -1;
478 >                }
479 >            }
480 >            return node;
481 >        }
482 >
483 >        /**
484 >         * Scans for a node containing the given key while trying to
485 >         * acquire lock for a remove or replace operation. Upon
486 >         * return, guarantees that lock is held.  Note that we must
487 >         * lock even if the key is not found, to ensure sequential
488 >         * consistency of updates.
489 >         */
490 >        private void scanAndLock(Object key, int hash) {
491 >            // similar to but simpler than scanAndLockForPut
492 >            HashEntry<K,V> first = entryForHash(this, hash);
493 >            HashEntry<K,V> e = first;
494 >            int retries = -1;
495 >            while (!tryLock()) {
496 >                HashEntry<K,V> f;
497 >                if (retries < 0) {
498 >                    if (e == null || key.equals(e.key))
499 >                        retries = 0;
500 >                    else
501 >                        e = e.next;
502 >                }
503 >                else if (++retries > MAX_SCAN_RETRIES) {
504 >                    lock();
505 >                    break;
506 >                }
507 >                else if ((retries & 1) == 0 &&
508 >                         (f = entryForHash(this, hash)) != first) {
509 >                    e = first = f;
510 >                    retries = -1;
511 >                }
512 >            }
513          }
514  
515          /**
516           * Remove; match on key only if value null, else match both.
517           */
518 <        V remove(Object key, int hash, Object value) {
519 <            lock();
518 >        final V remove(Object key, int hash, Object value) {
519 >            if (!tryLock())
520 >                scanAndLock(key, hash);
521 >            V oldValue = null;
522 >            try {
523 >                HashEntry<K,V>[] tab = table;
524 >                int index = (tab.length - 1) & hash;
525 >                HashEntry<K,V> e = entryAt(tab, index);
526 >                HashEntry<K,V> pred = null;
527 >                while (e != null) {
528 >                    K k;
529 >                    HashEntry<K,V> next = e.next;
530 >                    if ((k = e.key) == key ||
531 >                        (e.hash == hash && key.equals(k))) {
532 >                        V v = e.value;
533 >                        if (value == null || value == v || value.equals(v)) {
534 >                            if (pred == null)
535 >                                setEntryAt(tab, index, next);
536 >                            else
537 >                                pred.setNext(next);
538 >                            ++modCount;
539 >                            --count;
540 >                            oldValue = v;
541 >                        }
542 >                        break;
543 >                    }
544 >                    pred = e;
545 >                    e = next;
546 >                }
547 >            } finally {
548 >                unlock();
549 >            }
550 >            return oldValue;
551 >        }
552 >
553 >        final boolean replace(K key, int hash, V oldValue, V newValue) {
554 >            if (!tryLock())
555 >                scanAndLock(key, hash);
556 >            boolean replaced = false;
557              try {
558 <                int c = count;
559 <                HashEntry[] tab = table;
560 <                int index = hash & (tab.length - 1);
561 <                HashEntry<K,V> first = (HashEntry<K,V>)tab[index];
562 <
563 <                HashEntry<K,V> e = first;
564 <                for (;;) {
565 <                    if (e == null)
566 <                        return null;
457 <                    if (e.hash == hash && key.equals(e.key))
558 >                HashEntry<K,V> e;
559 >                for (e = entryForHash(this, hash); e != null; e = e.next) {
560 >                    K k;
561 >                    if ((k = e.key) == key ||
562 >                        (e.hash == hash && key.equals(k))) {
563 >                        if (oldValue.equals(e.value)) {
564 >                            e.value = newValue;
565 >                            replaced = true;
566 >                        }
567                          break;
568 <                    e = e.next;
568 >                    }
569                  }
570 +            } finally {
571 +                unlock();
572 +            }
573 +            return replaced;
574 +        }
575  
576 <                V oldValue = e.value;
577 <                if (value != null && !value.equals(oldValue))
578 <                    return null;
579 <
580 <                // All entries following removed node can stay in list, but
581 <                // all preceding ones need to be cloned.
582 <                HashEntry<K,V> newFirst = e.next;
583 <                for (HashEntry<K,V> p = first; p != e; p = p.next)
584 <                    newFirst = new HashEntry<K,V>(p.hash, p.key,
585 <                                                  p.value, newFirst);
586 <                tab[index] = newFirst;
587 <                ++modCount;
588 <                count = c-1; // write-volatile
589 <                return oldValue;
576 >        final V replace(K key, int hash, V value) {
577 >            if (!tryLock())
578 >                scanAndLock(key, hash);
579 >            V oldValue = null;
580 >            try {
581 >                HashEntry<K,V> e;
582 >                for (e = entryForHash(this, hash); e != null; e = e.next) {
583 >                    K k;
584 >                    if ((k = e.key) == key ||
585 >                        (e.hash == hash && key.equals(k))) {
586 >                        oldValue = e.value;
587 >                        e.value = value;
588 >                        break;
589 >                    }
590 >                }
591              } finally {
592                  unlock();
593              }
594 +            return oldValue;
595          }
596  
597 <        void clear() {
597 >        final void clear() {
598              lock();
599              try {
600 <                HashEntry[] tab = table;
600 >                HashEntry<K,V>[] tab = table;
601                  for (int i = 0; i < tab.length ; i++)
602 <                    tab[i] = null;
602 >                    setEntryAt(tab, i, null);
603                  ++modCount;
604 <                count = 0; // write-volatile
604 >                count = 0;
605              } finally {
606                  unlock();
607              }
608          }
609      }
610  
611 +    // Accessing segments
612 +
613      /**
614 <     * ConcurrentHashMap list entry. Note that this is never exported
615 <     * out as a user-visible Map.Entry
614 >     * Gets the jth element of given segment array (if nonnull) with
615 >     * volatile element access semantics via Unsafe.
616       */
617 <    private static class HashEntry<K,V> {
618 <        private final K key;
619 <        private V value;
620 <        private final int hash;
621 <        private final HashEntry<K,V> next;
617 >    @SuppressWarnings("unchecked")
618 >    static final <K,V> Segment<K,V> segmentAt(Segment<K,V>[] ss, int j) {
619 >        long u = (j << SSHIFT) + SBASE;
620 >        return ss == null ? null :
621 >            (Segment<K,V>) UNSAFE.getObjectVolatile(ss, u);
622 >    }
623  
624 <        HashEntry(int hash, K key, V value, HashEntry<K,V> next) {
625 <            this.value = value;
626 <            this.hash = hash;
627 <            this.key = key;
628 <            this.next = next;
624 >    /**
625 >     * Returns the segment for the given index, creating it and
626 >     * recording in segment table (via CAS) if not already present.
627 >     *
628 >     * @param k the index
629 >     * @return the segment
630 >     */
631 >    @SuppressWarnings("unchecked")
632 >    private Segment<K,V> ensureSegment(int k) {
633 >        final Segment<K,V>[] ss = this.segments;
634 >        long u = (k << SSHIFT) + SBASE; // raw offset
635 >        Segment<K,V> seg;
636 >        if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u)) == null) {
637 >            Segment<K,V> proto = ss[0]; // use segment 0 as prototype
638 >            int cap = proto.table.length;
639 >            float lf = proto.loadFactor;
640 >            int threshold = (int)(cap * lf);
641 >            HashEntry<K,V>[] tab = (HashEntry<K,V>[])new HashEntry[cap];
642 >            if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
643 >                == null) { // recheck
644 >                Segment<K,V> s = new Segment<K,V>(lf, threshold, tab);
645 >                while ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
646 >                       == null) {
647 >                    if (UNSAFE.compareAndSwapObject(ss, u, null, seg = s))
648 >                        break;
649 >                }
650 >            }
651          }
652 +        return seg;
653      }
654  
655 +    // Hash-based segment and entry accesses
656 +
657 +    /**
658 +     * Gets the segment for the given hash.
659 +     */
660 +    @SuppressWarnings("unchecked")
661 +    private Segment<K,V> segmentForHash(int h) {
662 +        long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
663 +        return (Segment<K,V>) UNSAFE.getObjectVolatile(segments, u);
664 +    }
665 +
666 +    /**
667 +     * Gets the table entry for the given segment and hash
668 +     */
669 +    @SuppressWarnings("unchecked")
670 +    static final <K,V> HashEntry<K,V> entryForHash(Segment<K,V> seg, int h) {
671 +        HashEntry<K,V>[] tab;
672 +        return (seg == null || (tab = seg.table) == null) ? null :
673 +            (HashEntry<K,V>) UNSAFE.getObjectVolatile
674 +            (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
675 +    }
676  
677      /* ---------------- Public operations -------------- */
678  
679      /**
680 <     * Constructs a new, empty map with the specified initial
681 <     * capacity and the specified load factor.
680 >     * Creates a new, empty map with the specified initial
681 >     * capacity, load factor and concurrency level.
682       *
683       * @param initialCapacity the initial capacity. The implementation
684       * performs internal sizing to accommodate this many elements.
685       * @param loadFactor  the load factor threshold, used to control resizing.
686 +     * Resizing may be performed when the average number of elements per
687 +     * bin exceeds this threshold.
688       * @param concurrencyLevel the estimated number of concurrently
689       * updating threads. The implementation performs internal sizing
690 <     * to try to accommodate this many threads.  
690 >     * to try to accommodate this many threads.
691       * @throws IllegalArgumentException if the initial capacity is
692       * negative or the load factor or concurrencyLevel are
693       * nonpositive.
694       */
695 +    @SuppressWarnings("unchecked")
696      public ConcurrentHashMap(int initialCapacity,
697                               float loadFactor, int concurrencyLevel) {
698          if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0)
699              throw new IllegalArgumentException();
534
700          if (concurrencyLevel > MAX_SEGMENTS)
701              concurrencyLevel = MAX_SEGMENTS;
537
702          // Find power-of-two sizes best matching arguments
703          int sshift = 0;
704          int ssize = 1;
# Line 542 | Line 706 | public class ConcurrentHashMap<K, V> ext
706              ++sshift;
707              ssize <<= 1;
708          }
709 <        segmentShift = 32 - sshift;
710 <        segmentMask = ssize - 1;
547 <        this.segments = new Segment[ssize];
548 <
709 >        this.segmentShift = 32 - sshift;
710 >        this.segmentMask = ssize - 1;
711          if (initialCapacity > MAXIMUM_CAPACITY)
712              initialCapacity = MAXIMUM_CAPACITY;
713          int c = initialCapacity / ssize;
714          if (c * ssize < initialCapacity)
715              ++c;
716 <        int cap = 1;
716 >        int cap = MIN_SEGMENT_TABLE_CAPACITY;
717          while (cap < c)
718              cap <<= 1;
719 <
720 <        for (int i = 0; i < this.segments.length; ++i)
721 <            this.segments[i] = new Segment<K,V>(cap, loadFactor);
719 >        // create segments and segments[0]
720 >        Segment<K,V> s0 =
721 >            new Segment<K,V>(loadFactor, (int)(cap * loadFactor),
722 >                             (HashEntry<K,V>[])new HashEntry[cap]);
723 >        Segment<K,V>[] ss = (Segment<K,V>[])new Segment[ssize];
724 >        UNSAFE.putOrderedObject(ss, SBASE, s0); // ordered write of segments[0]
725 >        this.segments = ss;
726      }
727  
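A hedged worked example of the sizing arithmetic in the constructor above, reproduced as plain code for the arbitrary arguments initialCapacity = 100, loadFactor = 0.75f, concurrencyLevel = 16:

    public class SizingDemo {
        public static void main(String[] args) {
            int concurrencyLevel = 16, initialCapacity = 100;
            float loadFactor = 0.75f;

            int sshift = 0, ssize = 1;
            while (ssize < concurrencyLevel) { ++sshift; ssize <<= 1; } // ssize = 16, sshift = 4

            int segmentShift = 32 - sshift; // 28
            int segmentMask = ssize - 1;    // 15

            int c = initialCapacity / ssize;      // 6
            if (c * ssize < initialCapacity) ++c; // 7
            int cap = 2;                          // MIN_SEGMENT_TABLE_CAPACITY
            while (cap < c) cap <<= 1;            // 8

            System.out.println("segments=" + ssize + ", segmentShift=" + segmentShift
                + ", segmentMask=" + segmentMask + ", per-segment capacity=" + cap
                + ", segments[0] threshold=" + (int)(cap * loadFactor)); // threshold = 6
        }
    }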
728      /**
729 <     * Constructs a new, empty map with the specified initial
730 <     * capacity,  and with default load factor and concurrencyLevel.
729 >     * Creates a new, empty map with the specified initial capacity
730 >     * and load factor and with the default concurrencyLevel (16).
731       *
732       * @param initialCapacity The implementation performs internal
733       * sizing to accommodate this many elements.
734 +     * @param loadFactor  the load factor threshold, used to control resizing.
735 +     * Resizing may be performed when the average number of elements per
736 +     * bin exceeds this threshold.
737 +     * @throws IllegalArgumentException if the initial capacity of
738 +     * elements is negative or the load factor is nonpositive
739 +     *
740 +     * @since 1.6
741 +     */
742 +    public ConcurrentHashMap(int initialCapacity, float loadFactor) {
743 +        this(initialCapacity, loadFactor, DEFAULT_CONCURRENCY_LEVEL);
744 +    }
745 +
746 +    /**
747 +     * Creates a new, empty map with the specified initial capacity,
748 +     * and with default load factor (0.75) and concurrencyLevel (16).
749 +     *
750 +     * @param initialCapacity the initial capacity. The implementation
751 +     * performs internal sizing to accommodate this many elements.
752       * @throws IllegalArgumentException if the initial capacity of
753       * elements is negative.
754       */
755      public ConcurrentHashMap(int initialCapacity) {
756 <        this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_SEGMENTS);
756 >        this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
757      }
758  
759      /**
760 <     * Constructs a new, empty map with a default initial capacity,
761 <     * load factor, and concurrencyLevel.
760 >     * Creates a new, empty map with a default initial capacity (16),
761 >     * load factor (0.75) and concurrencyLevel (16).
762       */
763      public ConcurrentHashMap() {
764 <        this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_SEGMENTS);
764 >        this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
765      }
766  
767      /**
768 <     * Constructs a new map with the same mappings as the given map.  The
769 <     * map is created with a capacity of twice the number of mappings in
770 <     * the given map or 11 (whichever is greater), and a default load factor.
768 >     * Creates a new map with the same mappings as the given map.
769 >     * The map is created with a capacity of 1.5 times the number
770 >     * of mappings in the given map or 16 (whichever is greater),
771 >     * and a default load factor (0.75) and concurrencyLevel (16).
772 >     *
773 >     * @param m the map
774       */
775 <    public <A extends K, B extends V> ConcurrentHashMap(Map<A,B> t) {
776 <        this(Math.max((int) (t.size() / DEFAULT_LOAD_FACTOR) + 1,
777 <                      11),
778 <             DEFAULT_LOAD_FACTOR, DEFAULT_SEGMENTS);
779 <        putAll(t);
775 >    public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
776 >        this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1,
777 >                      DEFAULT_INITIAL_CAPACITY),
778 >             DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
779 >        putAll(m);
780      }
781  
782 <    // inherit Map javadoc
782 >    /**
783 >     * Returns <tt>true</tt> if this map contains no key-value mappings.
784 >     *
785 >     * @return <tt>true</tt> if this map contains no key-value mappings
786 >     */
787      public boolean isEmpty() {
788          /*
789 <         * We need to keep track of per-segment modCounts to avoid ABA
790 <         * problems in which an element in one segment was added and
791 <         * in another removed during traversal, in which case the
792 <         * table was never actually empty at any point. Note the
793 <         * similar use of modCounts in the size() and containsValue()
794 <         * methods, which are the only other methods also susceptible
795 <         * to ABA problems.
789 >         * Sum per-segment modCounts to avoid mis-reporting when
790 >         * elements are concurrently added and removed in one segment
791 >         * while checking another, in which case the table was never
792 >         * actually empty at any point. (The sum ensures accuracy up
793 >         * through at least 1<<31 per-segment modifications before
794 >         * recheck.)  Methods size() and containsValue() use similar
795 >         * constructions for stability checks.
796           */
797 <        int[] mc = new int[segments.length];
798 <        int mcsum = 0;
799 <        for (int i = 0; i < segments.length; ++i) {
800 <            if (segments[i].count != 0)
801 <                return false;
802 <            else
612 <                mcsum += mc[i] = segments[i].modCount;
613 <        }
614 <        // If mcsum happens to be zero, then we know we got a snapshot
615 <        // before any modifications at all were made.  This is
616 <        // probably common enough to bother tracking.
617 <        if (mcsum != 0) {
618 <            for (int i = 0; i < segments.length; ++i) {
619 <                if (segments[i].count != 0 ||
620 <                    mc[i] != segments[i].modCount)
797 >        long sum = 0L;
798 >        final Segment<K,V>[] segments = this.segments;
799 >        for (int j = 0; j < segments.length; ++j) {
800 >            Segment<K,V> seg = segmentAt(segments, j);
801 >            if (seg != null) {
802 >                if (seg.count != 0)
803                      return false;
804 +                sum += seg.modCount;
805 +            }
806 +        }
807 +        if (sum != 0L) { // recheck unless no modifications
808 +            for (int j = 0; j < segments.length; ++j) {
809 +                Segment<K,V> seg = segmentAt(segments, j);
810 +                if (seg != null) {
811 +                    if (seg.count != 0)
812 +                        return false;
813 +                    sum -= seg.modCount;
814 +                }
815              }
816 +            if (sum != 0L)
817 +                return false;
818          }
819          return true;
820      }
821  
822 <    // inherit Map javadoc
822 >    /**
823 >     * Returns the number of key-value mappings in this map.  If the
824 >     * map contains more than <tt>Integer.MAX_VALUE</tt> elements, returns
825 >     * <tt>Integer.MAX_VALUE</tt>.
826 >     *
827 >     * @return the number of key-value mappings in this map
828 >     */
829      public int size() {
830 <        int[] mc = new int[segments.length];
831 <        for (;;) {
832 <            long sum = 0;
833 <            int mcsum = 0;
834 <            for (int i = 0; i < segments.length; ++i) {
835 <                sum += segments[i].count;
836 <                mcsum += mc[i] = segments[i].modCount;
837 <            }
838 <            int check = 0;
839 <            if (mcsum != 0) {
840 <                for (int i = 0; i < segments.length; ++i) {
841 <                    check += segments[i].count;
842 <                    if (mc[i] != segments[i].modCount) {
843 <                        check = -1; // force retry
844 <                        break;
830 >        // Try a few times to get accurate count. On failure due to
831 >        // continuous async changes in table, resort to locking.
832 >        final Segment<K,V>[] segments = this.segments;
833 >        int size;
834 >        boolean overflow; // true if size overflows 32 bits
835 >        long sum;         // sum of modCounts
836 >        long last = 0L;   // previous sum
837 >        int retries = -1; // first iteration isn't retry
838 >        try {
839 >            for (;;) {
840 >                if (retries++ == RETRIES_BEFORE_LOCK) {
841 >                    for (int j = 0; j < segments.length; ++j)
842 >                        ensureSegment(j).lock(); // force creation
843 >                }
844 >                sum = 0L;
845 >                size = 0;
846 >                overflow = false;
847 >                for (int j = 0; j < segments.length; ++j) {
848 >                    Segment<K,V> seg = segmentAt(segments, j);
849 >                    if (seg != null) {
850 >                        sum += seg.modCount;
851 >                        int c = seg.count;
852 >                        if (c < 0 || (size += c) < 0)
853 >                            overflow = true;
854                      }
855                  }
856 +                if (sum == last)
857 +                    break;
858 +                last = sum;
859              }
860 <            if (check == sum) {
861 <                if (sum > Integer.MAX_VALUE)
862 <                    return Integer.MAX_VALUE;
863 <                else
651 <                    return (int)sum;
860 >        } finally {
861 >            if (retries > RETRIES_BEFORE_LOCK) {
862 >                for (int j = 0; j < segments.length; ++j)
863 >                    segmentAt(segments, j).unlock();
864              }
865          }
866 +        return overflow ? Integer.MAX_VALUE : size;
867      }
868  
656
869      /**
870 <     * Returns the value to which the specified key is mapped in this table.
870 >     * Returns the value to which the specified key is mapped,
871 >     * or {@code null} if this map contains no mapping for the key.
872 >     *
873 >     * <p>More formally, if this map contains a mapping from a key
874 >     * {@code k} to a value {@code v} such that {@code key.equals(k)},
875 >     * then this method returns {@code v}; otherwise it returns
876 >     * {@code null}.  (There can be at most one such mapping.)
877       *
878 <     * @param   key   a key in the table.
661 <     * @return  the value to which the key is mapped in this table;
662 <     *          <tt>null</tt> if the key is not mapped to any value in
663 <     *          this table.
664 <     * @throws  NullPointerException  if the key is
665 <     *               <tt>null</tt>.
878 >     * @throws NullPointerException if the specified key is null
879       */
880      public V get(Object key) {
881 <        int hash = hash(key); // throws NullPointerException if key null
882 <        return segmentFor(hash).get(key, hash);
881 >        int hash = hash(key.hashCode());
882 >        for (HashEntry<K,V> e = entryForHash(segmentForHash(hash), hash);
883 >             e != null; e = e.next) {
884 >            K k;
885 >            if ((k = e.key) == key || (e.hash == hash && key.equals(k)))
886 >                return e.value;
887 >        }
888 >        return null;
889      }
890  
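Because this map never stores null keys or values, a null result from get can only mean "no mapping", so the usual get-then-check idiom needs no separate containsKey call. A small usage sketch, not part of this file:

    import java.util.concurrent.ConcurrentHashMap;

    class GetExample {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<String, Integer>();
            map.put("a", 1);

            Integer present = map.get("a");   // 1
            Integer absent  = map.get("b");   // null: unambiguously "no mapping"

            if (present != null)
                System.out.println("a -> " + present);
            // map.get(null) would throw NullPointerException, as documented above.
        }
    }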
891      /**
892       * Tests if the specified object is a key in this table.
893       *
894 <     * @param   key   possible key.
895 <     * @return  <tt>true</tt> if and only if the specified object
896 <     *          is a key in this table, as determined by the
897 <     *          <tt>equals</tt> method; <tt>false</tt> otherwise.
898 <     * @throws  NullPointerException  if the key is
680 <     *               <tt>null</tt>.
894 >     * @param  key   possible key
895 >     * @return <tt>true</tt> if and only if the specified object
896 >     *         is a key in this table, as determined by the
897 >     *         <tt>equals</tt> method; <tt>false</tt> otherwise.
898 >     * @throws NullPointerException if the specified key is null
899       */
900      public boolean containsKey(Object key) {
901 <        int hash = hash(key); // throws NullPointerException if key null
902 <        return segmentFor(hash).containsKey(key, hash);
901 >        int hash = hash(key.hashCode());
902 >        for (HashEntry<K,V> e = entryForHash(segmentForHash(hash), hash);
903 >             e != null; e = e.next) {
904 >            K k;
905 >            if ((k = e.key) == key || (e.hash == hash && key.equals(k)))
906 >                return true;
907 >        }
908 >        return false;
909      }
910  
911      /**
# Line 690 | Line 914 | public class ConcurrentHashMap<K, V> ext
914       * traversal of the hash table, and so is much slower than
915       * method <tt>containsKey</tt>.
916       *
917 <     * @param value value whose presence in this map is to be tested.
917 >     * @param value value whose presence in this map is to be tested
918       * @return <tt>true</tt> if this map maps one or more keys to the
919 <     * specified value.
920 <     * @throws  NullPointerException  if the value is <tt>null</tt>.
919 >     *         specified value
920 >     * @throws NullPointerException if the specified value is null
921       */
922      public boolean containsValue(Object value) {
923 +        // Same idea as size() but using hashes as checksum
924          if (value == null)
925              throw new NullPointerException();
926 <
927 <        int[] mc = new int[segments.length];
928 <        for (;;) {
929 <            int sum = 0;
930 <            int mcsum = 0;
931 <            for (int i = 0; i < segments.length; ++i) {
932 <                int c = segments[i].count;
933 <                mcsum += mc[i] = segments[i].modCount;
934 <                if (segments[i].containsValue(value))
935 <                    return true;
936 <            }
937 <            boolean cleanSweep = true;
938 <            if (mcsum != 0) {
939 <                for (int i = 0; i < segments.length; ++i) {
940 <                    int c = segments[i].count;
941 <                    if (mc[i] != segments[i].modCount) {
942 <                        cleanSweep = false;
943 <                        break;
926 >        final Segment<K,V>[] segments = this.segments;
927 >        boolean found = false;
928 >        long last = 0L;
929 >        int retries = -1;
930 >        try {
931 >            outer: for (;;) {
932 >                if (retries++ == RETRIES_BEFORE_LOCK) {
933 >                    for (int j = 0; j < segments.length; ++j)
934 >                        ensureSegment(j).lock(); // force creation
935 >                }
936 >                long sum = 0L;
937 >                for (int j = 0; j < segments.length; ++j) {
938 >                    HashEntry<K,V>[] tab;
939 >                    Segment<K,V> seg = segmentAt(segments, j);
940 >                    if (seg != null && (tab = seg.table) != null) {
941 >                        for (int i = 0 ; i < tab.length; i++) {
942 >                            HashEntry<K,V> e;
943 >                            for (e = entryAt(tab, i); e != null; e = e.next) {
944 >                                V v = e.value;
945 >                                if (v != null && value.equals(v)) {
946 >                                    found = true;
947 >                                    break outer;
948 >                                }
949 >                                sum += e.hash;
950 >                            }
951 >                        }
952                      }
953                  }
954 +                if (sum == last)
955 +                    break;
956 +                last = sum;
957 +            }
958 +        } finally {
959 +            if (retries > RETRIES_BEFORE_LOCK) {
960 +                for (int j = 0; j < segments.length; ++j)
961 +                    segmentAt(segments, j).unlock();
962              }
722            if (cleanSweep)
723                return false;
963          }
964 +        return found;
965      }
966  
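As the javadoc notes, containsValue must sweep every segment's table, so it is far more expensive than the single hashed lookup performed by containsKey and is best reserved for occasional checks. An illustrative usage sketch:

    import java.util.concurrent.ConcurrentHashMap;

    class ContainsExample {
        public static void main(String[] args) {
            ConcurrentHashMap<String, String> sessions = new ConcurrentHashMap<String, String>();
            sessions.put("s1", "alice");
            sessions.put("s2", "bob");

            boolean byKey   = sessions.containsKey("s1");       // one hashed lookup
            boolean byValue = sessions.containsValue("alice");   // scans every segment's table

            System.out.println(byKey + " " + byValue);           // true true
        }
    }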
967      /**
968       * Legacy method testing if some key maps into the specified value
969       * in this table.  This method is identical in functionality to
970 <     * {@link #containsValue}, and  exists solely to ensure
970 >     * {@link #containsValue}, and exists solely to ensure
971       * full compatibility with class {@link java.util.Hashtable},
972       * which supported this method prior to introduction of the
973       * Java Collections framework.
974  
975 <     * @param      value   a value to search for.
976 <     * @return     <tt>true</tt> if and only if some key maps to the
977 <     *             <tt>value</tt> argument in this table as
978 <     *             determined by the <tt>equals</tt> method;
979 <     *             <tt>false</tt> otherwise.
980 <     * @throws  NullPointerException  if the value is <tt>null</tt>.
975 >     * @param  value a value to search for
976 >     * @return <tt>true</tt> if and only if some key maps to the
977 >     *         <tt>value</tt> argument in this table as
978 >     *         determined by the <tt>equals</tt> method;
979 >     *         <tt>false</tt> otherwise
980 >     * @throws NullPointerException if the specified value is null
981       */
982      public boolean contains(Object value) {
983          return containsValue(value);
984      }
985  
986      /**
987 <     * Maps the specified <tt>key</tt> to the specified
988 <     * <tt>value</tt> in this table. Neither the key nor the
749 <     * value can be <tt>null</tt>. <p>
987 >     * Maps the specified key to the specified value in this table.
988 >     * Neither the key nor the value can be null.
989       *
990 <     * The value can be retrieved by calling the <tt>get</tt> method
990 >     * <p> The value can be retrieved by calling the <tt>get</tt> method
991       * with a key that is equal to the original key.
992       *
993 <     * @param      key     the table key.
994 <     * @param      value   the value.
995 <     * @return     the previous value of the specified key in this table,
996 <     *             or <tt>null</tt> if it did not have one.
997 <     * @throws  NullPointerException  if the key or value is
759 <     *               <tt>null</tt>.
993 >     * @param key key with which the specified value is to be associated
994 >     * @param value value to be associated with the specified key
995 >     * @return the previous value associated with <tt>key</tt>, or
996 >     *         <tt>null</tt> if there was no mapping for <tt>key</tt>
997 >     * @throws NullPointerException if the specified key or value is null
998       */
999      public V put(K key, V value) {
1000          if (value == null)
1001              throw new NullPointerException();
1002 <        int hash = hash(key);
1003 <        return segmentFor(hash).put(key, hash, value, false);
1002 >        int hash = hash(key.hashCode());
1003 >        int j = (hash >>> segmentShift) & segmentMask;
1004 >        Segment<K,V> s = segmentAt(segments, j);
1005 >        if (s == null)
1006 >            s = ensureSegment(j);
1007 >        return s.put(key, hash, value, false);
1008      }
1009  
1010      /**
1011 <     * If the specified key is not already associated
770 <     * with a value, associate it with the given value.
771 <     * This is equivalent to
772 <     * <pre>
773 <     *   if (!map.containsKey(key))
774 <     *      return map.put(key, value);
775 <     *   else
776 <     *      return map.get(key);
777 <     * </pre>
778 <     * Except that the action is performed atomically.
779 <     * @param key key with which the specified value is to be associated.
780 <     * @param value value to be associated with the specified key.
781 <     * @return previous value associated with specified key, or <tt>null</tt>
782 <     *         if there was no mapping for key.  A <tt>null</tt> return can
783 <     *         also indicate that the map previously associated <tt>null</tt>
784 <     *         with the specified key, if the implementation supports
785 <     *         <tt>null</tt> values.
786 <     *
787 <     * @throws UnsupportedOperationException if the <tt>put</tt> operation is
788 <     *            not supported by this map.
789 <     * @throws ClassCastException if the class of the specified key or value
790 <     *            prevents it from being stored in this map.
791 <     * @throws NullPointerException if the specified key or value is
792 <     *            <tt>null</tt>.
1011 >     * {@inheritDoc}
1012       *
1013 <     **/
1013 >     * @return the previous value associated with the specified key,
1014 >     *         or <tt>null</tt> if there was no mapping for the key
1015 >     * @throws NullPointerException if the specified key or value is null
1016 >     */
1017      public V putIfAbsent(K key, V value) {
1018          if (value == null)
1019              throw new NullPointerException();
1020 <        int hash = hash(key);
1021 <        return segmentFor(hash).put(key, hash, value, true);
1020 >        int hash = hash(key.hashCode());
1021 >        int j = (hash >>> segmentShift) & segmentMask;
1022 >        Segment<K,V> s = segmentAt(segments, j);
1023 >        if (s == null)
1024 >            s = ensureSegment(j);
1025 >        return s.put(key, hash, value, true);
1026      }
1027  
802
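putIfAbsent installs a mapping atomically, which makes it the natural tool for race-tolerant lazy initialization: several threads may build a candidate value, but only the first is kept and all callers end up agreeing on it. An illustrative sketch; the LazyCache class and its helper are assumptions, not code from this file:

    import java.util.concurrent.ConcurrentHashMap;

    class LazyCache {
        private final ConcurrentHashMap<String, String> cache =
            new ConcurrentHashMap<String, String>();

        String lookup(String key) {
            String value = cache.get(key);
            if (value == null) {
                String candidate = expensiveCompute(key);         // may run in several threads
                String prev = cache.putIfAbsent(key, candidate);  // only one candidate is installed
                value = (prev != null) ? prev : candidate;        // losers adopt the winner's value
            }
            return value;
        }

        private String expensiveCompute(String key) {
            return key.toUpperCase();   // stand-in for real work
        }
    }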
1028      /**
1029       * Copies all of the mappings from the specified map to this one.
805     *
1030       * These mappings replace any mappings that this map had for any of the
1031 <     * keys currently in the specified Map.
1031 >     * keys currently in the specified map.
1032       *
1033 <     * @param t Mappings to be stored in this map.
1033 >     * @param m mappings to be stored in this map
1034       */
1035 <    public void putAll(Map<? extends K, ? extends V> t) {
1036 <        for (Iterator<Map.Entry<? extends K, ? extends V>> it = (Iterator<Map.Entry<? extends K, ? extends V>>) t.entrySet().iterator(); it.hasNext(); ) {
813 <            Entry<? extends K, ? extends V> e = it.next();
1035 >    public void putAll(Map<? extends K, ? extends V> m) {
1036 >        for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
1037              put(e.getKey(), e.getValue());
815        }
1038      }
1039  
1040      /**
1041 <     * Removes the key (and its corresponding value) from this
1042 <     * table. This method does nothing if the key is not in the table.
1041 >     * Removes the key (and its corresponding value) from this map.
1042 >     * This method does nothing if the key is not in the map.
1043       *
1044 <     * @param   key   the key that needs to be removed.
1045 <     * @return  the value to which the key had been mapped in this table,
1046 <     *          or <tt>null</tt> if the key did not have a mapping.
1047 <     * @throws  NullPointerException  if the key is
826 <     *               <tt>null</tt>.
1044 >     * @param  key the key that needs to be removed
1045 >     * @return the previous value associated with <tt>key</tt>, or
1046 >     *         <tt>null</tt> if there was no mapping for <tt>key</tt>
1047 >     * @throws NullPointerException if the specified key is null
1048       */
1049      public V remove(Object key) {
1050 <        int hash = hash(key);
1051 <        return segmentFor(hash).remove(key, hash, null);
1050 >        int hash = hash(key.hashCode());
1051 >        Segment<K,V> s = segmentForHash(hash);
1052 >        return s == null ? null : s.remove(key, hash, null);
1053      }
1054  
1055      /**
1056 <     * Remove entry for key only if currently mapped to given value.
1057 <     * Acts as
1058 <     * <pre>
837 <     *  if (map.get(key).equals(value)) {
838 <     *     map.remove(key);
839 <     *     return true;
840 <     * } else return false;
841 <     * </pre>
842 <     * except that the action is performed atomically.
843 <     * @param key key with which the specified value is associated.
844 <     * @param value value associated with the specified key.
845 <     * @return true if the value was removed
846 <     * @throws NullPointerException if the specified key is
847 <     *            <tt>null</tt>.
1056 >     * {@inheritDoc}
1057 >     *
1058 >     * @throws NullPointerException if the specified key is null
1059       */
1060      public boolean remove(Object key, Object value) {
1061 <        int hash = hash(key);
1062 <        return segmentFor(hash).remove(key, hash, value) != null;
1061 >        int hash = hash(key.hashCode());
1062 >        Segment<K,V> s;
1063 >        return value != null && (s = segmentForHash(hash)) != null &&
1064 >            s.remove(key, hash, value) != null;
1065      }
1066  
854
1067      /**
1068 <     * Replace entry for key only if currently mapped to given value.
1069 <     * Acts as
1070 <     * <pre>
859 <     *  if (map.get(key).equals(oldValue)) {
860 <     *     map.put(key, newValue);
861 <     *     return true;
862 <     * } else return false;
863 <     * </pre>
864 <     * except that the action is performed atomically.
865 <     * @param key key with which the specified value is associated.
866 <     * @param oldValue value expected to be associated with the specified key.
867 <     * @param newValue value to be associated with the specified key.
868 <     * @return true if the value was replaced
869 <     * @throws NullPointerException if the specified key or values are
870 <     * <tt>null</tt>.
1068 >     * {@inheritDoc}
1069 >     *
1070 >     * @throws NullPointerException if any of the arguments are null
1071       */
1072      public boolean replace(K key, V oldValue, V newValue) {
1073 +        int hash = hash(key.hashCode());
1074          if (oldValue == null || newValue == null)
1075              throw new NullPointerException();
1076 <        int hash = hash(key);
1077 <        return segmentFor(hash).replace(key, hash, oldValue, newValue);
1076 >        Segment<K,V> s = segmentForHash(hash);
1077 >        return s != null && s.replace(key, hash, oldValue, newValue);
1078      }
1079  
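The three-argument replace only succeeds while the mapping still carries the expected value, so it supports optimistic read-modify-write loops without external locking. A sketch of the usual retry loop, illustrative rather than part of this file:

    import java.util.concurrent.ConcurrentHashMap;

    class CounterMap {
        // Atomically add one to the value mapped to key, creating the mapping if absent.
        static void increment(ConcurrentHashMap<String, Integer> map, String key) {
            for (;;) {
                Integer old = map.get(key);
                if (old == null) {
                    if (map.putIfAbsent(key, 1) == null)
                        return;                                  // we installed the first count
                } else if (map.replace(key, old, old + 1)) {
                    return;                                      // no one changed it in between
                }
                // Another thread won the race; re-read and try again.
            }
        }
    }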
1080      /**
1081 <     * Replace entry for key only if currently mapped to some value.
1082 <     * Acts as
1083 <     * <pre>
1084 <     *  if ((map.containsKey(key)) {
1085 <     *     return map.put(key, value);
885 <     * } else return null;
886 <     * </pre>
887 <     * except that the action is performed atomically.
888 <     * @param key key with which the specified value is associated.
889 <     * @param value value to be associated with the specified key.
890 <     * @return previous value associated with specified key, or <tt>null</tt>
891 <     *         if there was no mapping for key.  
892 <     * @throws NullPointerException if the specified key or value is
893 <     *            <tt>null</tt>.
1081 >     * {@inheritDoc}
1082 >     *
1083 >     * @return the previous value associated with the specified key,
1084 >     *         or <tt>null</tt> if there was no mapping for the key
1085 >     * @throws NullPointerException if the specified key or value is null
1086       */
1087      public V replace(K key, V value) {
1088 +        int hash = hash(key.hashCode());
1089          if (value == null)
1090              throw new NullPointerException();
1091 <        int hash = hash(key);
1092 <        return segmentFor(hash).replace(key, hash, value);
1091 >        Segment<K,V> s = segmentForHash(hash);
1092 >        return s == null ? null : s.replace(key, hash, value);
1093      }
1094  
902
1095      /**
1096 <     * Removes all mappings from this map.
1096 >     * Removes all of the mappings from this map.
1097       */
1098      public void clear() {
1099 <        for (int i = 0; i < segments.length; ++i)
1100 <            segments[i].clear();
1101 <    }
1102 <
1103 <
1104 <    /**
913 <     * Returns a shallow copy of this
914 <     * <tt>ConcurrentHashMap</tt> instance: the keys and
915 <     * values themselves are not cloned.
916 <     *
917 <     * @return a shallow copy of this map.
918 <     */
919 <    public Object clone() {
920 <        // We cannot call super.clone, since it would share final
921 <        // segments array, and there's no way to reassign finals.
922 <
923 <        float lf = segments[0].loadFactor;
924 <        int segs = segments.length;
925 <        int cap = (int)(size() / lf);
926 <        if (cap < segs) cap = segs;
927 <        ConcurrentHashMap<K,V> t = new ConcurrentHashMap<K,V>(cap, lf, segs);
928 <        t.putAll(this);
929 <        return t;
1099 >        final Segment<K,V>[] segments = this.segments;
1100 >        for (int j = 0; j < segments.length; ++j) {
1101 >            Segment<K,V> s = segmentAt(segments, j);
1102 >            if (s != null)
1103 >                s.clear();
1104 >        }
1105      }
1106  
1107      /**
1108 <     * Returns a set view of the keys contained in this map.  The set is
1109 <     * backed by the map, so changes to the map are reflected in the set, and
1110 <     * vice-versa.  The set supports element removal, which removes the
1111 <     * corresponding mapping from this map, via the <tt>Iterator.remove</tt>,
1112 <     * <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt>, and
1113 <     * <tt>clear</tt> operations.  It does not support the <tt>add</tt> or
1108 >     * Returns a {@link Set} view of the keys contained in this map.
1109 >     * The set is backed by the map, so changes to the map are
1110 >     * reflected in the set, and vice-versa.  The set supports element
1111 >     * removal, which removes the corresponding mapping from this map,
1112 >     * via the <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
1113 >     * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
1114 >     * operations.  It does not support the <tt>add</tt> or
1115       * <tt>addAll</tt> operations.
1116 <     * The returned <tt>iterator</tt> is a "weakly consistent" iterator that
1117 <     * will never throw {@link java.util.ConcurrentModificationException},
1116 >     *
1117 >     * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
1118 >     * that will never throw {@link ConcurrentModificationException},
1119       * and guarantees to traverse elements as they existed upon
1120       * construction of the iterator, and may (but is not guaranteed to)
1121       * reflect any modifications subsequent to construction.
945     *
946     * @return a set view of the keys contained in this map.
1122       */
1123      public Set<K> keySet() {
1124          Set<K> ks = keySet;
1125          return (ks != null) ? ks : (keySet = new KeySet());
1126      }
1127  
953
1128      /**
1129 <     * Returns a collection view of the values contained in this map.  The
1130 <     * collection is backed by the map, so changes to the map are reflected in
1131 <     * the collection, and vice-versa.  The collection supports element
1132 <     * removal, which removes the corresponding mapping from this map, via the
1133 <     * <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>,
1134 <     * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations.
1135 <     * It does not support the <tt>add</tt> or <tt>addAll</tt> operations.
1136 <     * The returned <tt>iterator</tt> is a "weakly consistent" iterator that
1137 <     * will never throw {@link java.util.ConcurrentModificationException},
1129 >     * Returns a {@link Collection} view of the values contained in this map.
1130 >     * The collection is backed by the map, so changes to the map are
1131 >     * reflected in the collection, and vice-versa.  The collection
1132 >     * supports element removal, which removes the corresponding
1133 >     * mapping from this map, via the <tt>Iterator.remove</tt>,
1134 >     * <tt>Collection.remove</tt>, <tt>removeAll</tt>,
1135 >     * <tt>retainAll</tt>, and <tt>clear</tt> operations.  It does not
1136 >     * support the <tt>add</tt> or <tt>addAll</tt> operations.
1137 >     *
1138 >     * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
1139 >     * that will never throw {@link ConcurrentModificationException},
1140       * and guarantees to traverse elements as they existed upon
1141       * construction of the iterator, and may (but is not guaranteed to)
1142       * reflect any modifications subsequent to construction.
967     *
968     * @return a collection view of the values contained in this map.
1143       */
1144      public Collection<V> values() {
1145          Collection<V> vs = values;
1146          return (vs != null) ? vs : (values = new Values());
1147      }
1148  
975
1149      /**
1150 <     * Returns a collection view of the mappings contained in this map.  Each
1151 <     * element in the returned collection is a <tt>Map.Entry</tt>.  The
1152 <     * collection is backed by the map, so changes to the map are reflected in
1153 <     * the collection, and vice-versa.  The collection supports element
1154 <     * removal, which removes the corresponding mapping from the map, via the
1155 <     * <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>,
1156 <     * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations.
1157 <     * It does not support the <tt>add</tt> or <tt>addAll</tt> operations.
1158 <     * The returned <tt>iterator</tt> is a "weakly consistent" iterator that
1159 <     * will never throw {@link java.util.ConcurrentModificationException},
1150 >     * Returns a {@link Set} view of the mappings contained in this map.
1151 >     * The set is backed by the map, so changes to the map are
1152 >     * reflected in the set, and vice-versa.  The set supports element
1153 >     * removal, which removes the corresponding mapping from the map,
1154 >     * via the <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
1155 >     * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
1156 >     * operations.  It does not support the <tt>add</tt> or
1157 >     * <tt>addAll</tt> operations.
1158 >     *
1159 >     * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
1160 >     * that will never throw {@link ConcurrentModificationException},
1161       * and guarantees to traverse elements as they existed upon
1162       * construction of the iterator, and may (but is not guaranteed to)
1163       * reflect any modifications subsequent to construction.
990     *
991     * @return a collection view of the mappings contained in this map.
1164       */
1165      public Set<Map.Entry<K,V>> entrySet() {
1166          Set<Map.Entry<K,V>> es = entrySet;
1167 <        return (es != null) ? es : (entrySet = (Set<Map.Entry<K,V>>) (Set) new EntrySet());
1167 >        return (es != null) ? es : (entrySet = new EntrySet());
1168      }
1169  
998
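Because these view iterators are weakly consistent, an iteration may overlap arbitrary concurrent updates, and Iterator.remove on a view removes the mapping from the map itself. A usage sketch that prunes entries while other threads may be writing (illustrative only):

    import java.util.Iterator;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    class PruneExample {
        // Remove every entry whose value is below the threshold.
        static void prune(ConcurrentHashMap<String, Integer> scores, int threshold) {
            for (Iterator<Map.Entry<String, Integer>> it = scores.entrySet().iterator();
                 it.hasNext(); ) {
                Map.Entry<String, Integer> e = it.next();
                if (e.getValue() < threshold)
                    it.remove();   // removes the mapping; never throws
                                   // ConcurrentModificationException
            }
        }
    }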
1170      /**
1171       * Returns an enumeration of the keys in this table.
1172       *
1173 <     * @return  an enumeration of the keys in this table.
1174 <     * @see     #keySet
1173 >     * @return an enumeration of the keys in this table
1174 >     * @see #keySet()
1175       */
1176      public Enumeration<K> keys() {
1177          return new KeyIterator();
# Line 1008 | Line 1179 | public class ConcurrentHashMap<K, V> ext
1179  
1180      /**
1181       * Returns an enumeration of the values in this table.
1011     * Use the Enumeration methods on the returned object to fetch the elements
1012     * sequentially.
1182       *
1183 <     * @return  an enumeration of the values in this table.
1184 <     * @see     #values
1183 >     * @return an enumeration of the values in this table
1184 >     * @see #values()
1185       */
1186      public Enumeration<V> elements() {
1187          return new ValueIterator();
# Line 1020 | Line 1189 | public class ConcurrentHashMap<K, V> ext
1189  
1190      /* ---------------- Iterator Support -------------- */
1191  
1192 <    private abstract class HashIterator {
1193 <        private int nextSegmentIndex;
1194 <        private int nextTableIndex;
1195 <        private HashEntry[] currentTable;
1196 <        private HashEntry<K, V> nextEntry;
1192 >    abstract class HashIterator {
1193 >        int nextSegmentIndex;
1194 >        int nextTableIndex;
1195 >        HashEntry<K,V>[] currentTable;
1196 >        HashEntry<K, V> nextEntry;
1197          HashEntry<K, V> lastReturned;
1198  
1199 <        private HashIterator() {
1199 >        HashIterator() {
1200              nextSegmentIndex = segments.length - 1;
1201              nextTableIndex = -1;
1202              advance();
1203          }
1204  
1205 <        public boolean hasMoreElements() { return hasNext(); }
1206 <
1207 <        private void advance() {
1208 <            if (nextEntry != null && (nextEntry = nextEntry.next) != null)
1209 <                return;
1210 <
1211 <            while (nextTableIndex >= 0) {
1212 <                if ( (nextEntry = (HashEntry<K,V>)currentTable[nextTableIndex--]) != null)
1213 <                    return;
1214 <            }
1215 <
1216 <            while (nextSegmentIndex >= 0) {
1217 <                Segment<K,V> seg = (Segment<K,V>)segments[nextSegmentIndex--];
1218 <                if (seg.count != 0) {
1219 <                    currentTable = seg.table;
1051 <                    for (int j = currentTable.length - 1; j >= 0; --j) {
1052 <                        if ( (nextEntry = (HashEntry<K,V>)currentTable[j]) != null) {
1053 <                            nextTableIndex = j - 1;
1054 <                            return;
1055 <                        }
1056 <                    }
1205 >        /**
1206 >         * Set nextEntry to first node of next non-empty table
1207 >         * (in backwards order, to simplify checks).
1208 >         */
1209 >        final void advance() {
1210 >            for (;;) {
1211 >                if (nextTableIndex >= 0) {
1212 >                    if ((nextEntry = entryAt(currentTable,
1213 >                                             nextTableIndex--)) != null)
1214 >                        break;
1215 >                }
1216 >                else if (nextSegmentIndex >= 0) {
1217 >                    Segment<K,V> seg = segmentAt(segments, nextSegmentIndex--);
1218 >                    if (seg != null && (currentTable = seg.table) != null)
1219 >                        nextTableIndex = currentTable.length - 1;
1220                  }
1221 +                else
1222 +                    break;
1223              }
1224          }
1225  
1226 <        public boolean hasNext() { return nextEntry != null; }
1227 <
1228 <        HashEntry<K,V> nextEntry() {
1064 <            if (nextEntry == null)
1226 >        final HashEntry<K,V> nextEntry() {
1227 >            HashEntry<K,V> e = lastReturned = nextEntry;
1228 >            if (e == null)
1229                  throw new NoSuchElementException();
1230 <            lastReturned = nextEntry;
1231 <            advance();
1232 <            return lastReturned;
1230 >            if ((nextEntry = e.next) == null)
1231 >                advance();
1232 >            return e;
1233          }
1234  
1235 <        public void remove() {
1235 >        public final boolean hasNext() { return nextEntry != null; }
1236 >        public final boolean hasMoreElements() { return nextEntry != null; }
1237 >
1238 >        public final void remove() {
1239              if (lastReturned == null)
1240                  throw new IllegalStateException();
1241              ConcurrentHashMap.this.remove(lastReturned.key);
# Line 1076 | Line 1243 | public class ConcurrentHashMap<K, V> ext
1243          }
1244      }
1245  
1246 <    private class KeyIterator extends HashIterator implements Iterator<K>, Enumeration<K> {
1247 <        public K next() { return super.nextEntry().key; }
1248 <        public K nextElement() { return super.nextEntry().key; }
1246 >    final class KeyIterator
1247 >        extends HashIterator
1248 >        implements Iterator<K>, Enumeration<K>
1249 >    {
1250 >        public final K next()        { return super.nextEntry().key; }
1251 >        public final K nextElement() { return super.nextEntry().key; }
1252 >    }
1253 >
1254 >    final class ValueIterator
1255 >        extends HashIterator
1256 >        implements Iterator<V>, Enumeration<V>
1257 >    {
1258 >        public final V next()        { return super.nextEntry().value; }
1259 >        public final V nextElement() { return super.nextEntry().value; }
1260      }
1261  
1084    private class ValueIterator extends HashIterator implements Iterator<V>, Enumeration<V> {
1085        public V next() { return super.nextEntry().value; }
1086        public V nextElement() { return super.nextEntry().value; }
1087    }
1088
1089    
1090
1262      /**
1263 <     * Exported Entry objects must write-through changes in setValue,
1264 <     * even if the nodes have been cloned. So we cannot return
1265 <     * internal HashEntry objects. Instead, the iterator itself acts
1266 <     * as a forwarding pseudo-entry.
1267 <     */
1268 <    private class EntryIterator extends HashIterator implements Map.Entry<K,V>, Iterator<Entry<K,V>> {
1269 <        public Map.Entry<K,V> next() {
1270 <            nextEntry();
1100 <            return this;
1101 <        }
1102 <
1103 <        public K getKey() {
1104 <            if (lastReturned == null)
1105 <                throw new IllegalStateException("Entry was removed");
1106 <            return lastReturned.key;
1107 <        }
1108 <
1109 <        public V getValue() {
1110 <            if (lastReturned == null)
1111 <                throw new IllegalStateException("Entry was removed");
1112 <            return ConcurrentHashMap.this.get(lastReturned.key);
1263 >     * Custom Entry class used by EntryIterator.next(), that relays
1264 >     * setValue changes to the underlying map.
1265 >     */
1266 >    final class WriteThroughEntry
1267 >        extends AbstractMap.SimpleEntry<K,V>
1268 >    {
1269 >        WriteThroughEntry(K k, V v) {
1270 >            super(k,v);
1271          }
1272  
1273 +        /**
1274 +         * Set our entry's value and write through to the map. The
1275 +         * value to return is somewhat arbitrary here. Since a
1276 +         * WriteThroughEntry does not necessarily track asynchronous
1277 +         * changes, the most recent "previous" value could be
1278 +         * different from what we return (or could even have been
1279 +         * removed in which case the put will re-establish). We do not
1280 +         * and cannot guarantee more.
1281 +         */
1282          public V setValue(V value) {
1283 <            if (lastReturned == null)
1284 <                throw new IllegalStateException("Entry was removed");
1285 <            return ConcurrentHashMap.this.put(lastReturned.key, value);
1286 <        }
1120 <
1121 <        public boolean equals(Object o) {
1122 <            if (!(o instanceof Map.Entry))
1123 <                return false;
1124 <            Map.Entry e = (Map.Entry)o;
1125 <            return eq(getKey(), e.getKey()) && eq(getValue(), e.getValue());
1126 <        }
1127 <
1128 <        public int hashCode() {
1129 <            Object k = getKey();
1130 <            Object v = getValue();
1131 <            return ((k == null) ? 0 : k.hashCode()) ^
1132 <                   ((v == null) ? 0 : v.hashCode());
1133 <        }
1134 <
1135 <        public String toString() {
1136 <            return getKey() + "=" + getValue();
1283 >            if (value == null) throw new NullPointerException();
1284 >            V v = super.setValue(value);
1285 >            ConcurrentHashMap.this.put(getKey(), value);
1286 >            return v;
1287          }
1288 +    }
1289  
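In practical terms, the write-through entry means that setValue on an entry handed out by the iterator is relayed to the map via put, while the "previous" value it returns may be stale if other threads updated the mapping in the meantime. A small usage sketch, not part of this file:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    class WriteThroughExample {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<String, Integer>();
            map.put("x", 1);

            for (Map.Entry<String, Integer> e : map.entrySet()) {
                if (e.getKey().equals("x"))
                    e.setValue(2);             // written through to the underlying map
            }

            System.out.println(map.get("x"));  // 2; the value returned by setValue itself
                                               // is only best effort under concurrent updates
        }
    }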
1290 <        private boolean eq(Object o1, Object o2) {
1291 <            return (o1 == null ? o2 == null : o1.equals(o2));
1290 >    final class EntryIterator
1291 >        extends HashIterator
1292 >        implements Iterator<Entry<K,V>>
1293 >    {
1294 >        public Map.Entry<K,V> next() {
1295 >            HashEntry<K,V> e = super.nextEntry();
1296 >            return new WriteThroughEntry(e.key, e.value);
1297          }
1142
1298      }
1299  
1300 <    private class KeySet extends AbstractSet<K> {
1300 >    final class KeySet extends AbstractSet<K> {
1301          public Iterator<K> iterator() {
1302              return new KeyIterator();
1303          }
1304          public int size() {
1305              return ConcurrentHashMap.this.size();
1306          }
1307 +        public boolean isEmpty() {
1308 +            return ConcurrentHashMap.this.isEmpty();
1309 +        }
1310          public boolean contains(Object o) {
1311              return ConcurrentHashMap.this.containsKey(o);
1312          }
# Line 1160 | Line 1318 | public class ConcurrentHashMap<K, V> ext
1318          }
1319      }
1320  
1321 <    private class Values extends AbstractCollection<V> {
1321 >    final class Values extends AbstractCollection<V> {
1322          public Iterator<V> iterator() {
1323              return new ValueIterator();
1324          }
1325          public int size() {
1326              return ConcurrentHashMap.this.size();
1327          }
1328 +        public boolean isEmpty() {
1329 +            return ConcurrentHashMap.this.isEmpty();
1330 +        }
1331          public boolean contains(Object o) {
1332              return ConcurrentHashMap.this.containsValue(o);
1333          }
# Line 1175 | Line 1336 | public class ConcurrentHashMap<K, V> ext
1336          }
1337      }
1338  
1339 <    private class EntrySet extends AbstractSet<Map.Entry<K,V>> {
1339 >    final class EntrySet extends AbstractSet<Map.Entry<K,V>> {
1340          public Iterator<Map.Entry<K,V>> iterator() {
1341              return new EntryIterator();
1342          }
1343          public boolean contains(Object o) {
1344              if (!(o instanceof Map.Entry))
1345                  return false;
1346 <            Map.Entry<K,V> e = (Map.Entry<K,V>)o;
1346 >            Map.Entry<?,?> e = (Map.Entry<?,?>)o;
1347              V v = ConcurrentHashMap.this.get(e.getKey());
1348              return v != null && v.equals(e.getValue());
1349          }
1350          public boolean remove(Object o) {
1351              if (!(o instanceof Map.Entry))
1352                  return false;
1353 <            Map.Entry<K,V> e = (Map.Entry<K,V>)o;
1353 >            Map.Entry<?,?> e = (Map.Entry<?,?>)o;
1354              return ConcurrentHashMap.this.remove(e.getKey(), e.getValue());
1355          }
1356          public int size() {
1357              return ConcurrentHashMap.this.size();
1358          }
1359 +        public boolean isEmpty() {
1360 +            return ConcurrentHashMap.this.isEmpty();
1361 +        }
1362          public void clear() {
1363              ConcurrentHashMap.this.clear();
1364          }
1201        public Object[] toArray() {
1202            // Since we don't ordinarily have distinct Entry objects, we
1203            // must pack elements using exportable SimpleEntry
1204            Collection<Map.Entry<K,V>> c = new ArrayList<Map.Entry<K,V>>(size());
1205            for (Iterator<Map.Entry<K,V>> i = iterator(); i.hasNext(); )
1206                c.add(new SimpleEntry<K,V>(i.next()));
1207            return c.toArray();
1208        }
1209        public <T> T[] toArray(T[] a) {
1210            Collection<Map.Entry<K,V>> c = new ArrayList<Map.Entry<K,V>>(size());
1211            for (Iterator<Map.Entry<K,V>> i = iterator(); i.hasNext(); )
1212                c.add(new SimpleEntry<K,V>(i.next()));
1213            return c.toArray(a);
1214        }
1215
1216    }
1217
1218    /**
1219     * This duplicates java.util.AbstractMap.SimpleEntry until this class
1220     * is made accessible.
1221     */
1222    static class SimpleEntry<K,V> implements Entry<K,V> {
1223        K key;
1224        V value;
1225
1226        public SimpleEntry(K key, V value) {
1227            this.key   = key;
1228            this.value = value;
1229        }
1230
1231        public SimpleEntry(Entry<K,V> e) {
1232            this.key   = e.getKey();
1233            this.value = e.getValue();
1234        }
1235
1236        public K getKey() {
1237            return key;
1238        }
1239
1240        public V getValue() {
1241            return value;
1242        }
1243
1244        public V setValue(V value) {
1245            V oldValue = this.value;
1246            this.value = value;
1247            return oldValue;
1248        }
1249
1250        public boolean equals(Object o) {
1251            if (!(o instanceof Map.Entry))
1252                return false;
1253            Map.Entry e = (Map.Entry)o;
1254            return eq(key, e.getKey()) && eq(value, e.getValue());
1255        }
1256
1257        public int hashCode() {
1258            return ((key   == null)   ? 0 :   key.hashCode()) ^
1259                   ((value == null)   ? 0 : value.hashCode());
1260        }
1261
1262        public String toString() {
1263            return key + "=" + value;
1264        }
1265
1266        private static boolean eq(Object o1, Object o2) {
1267            return (o1 == null ? o2 == null : o1.equals(o2));
1268        }
1365      }
1366  
1367      /* ---------------- Serialization Support -------------- */
1368  
1369      /**
1370 <     * Save the state of the <tt>ConcurrentHashMap</tt>
1371 <     * instance to a stream (i.e.,
1276 <     * serialize it).
1370 >     * Save the state of the <tt>ConcurrentHashMap</tt> instance to a
1371 >     * stream (i.e., serialize it).
1372       * @param s the stream
1373       * @serialData
1374       * the key (Object) and value (Object)
1375       * for each key-value mapping, followed by a null pair.
1376       * The key-value mappings are emitted in no particular order.
1377       */
1378 <    private void writeObject(java.io.ObjectOutputStream s) throws IOException  {
1378 >    private void writeObject(java.io.ObjectOutputStream s) throws IOException {
1379 >        // force all segments for serialization compatibility
1380 >        for (int k = 0; k < segments.length; ++k)
1381 >            ensureSegment(k);
1382          s.defaultWriteObject();
1383  
1384 +        final Segment<K,V>[] segments = this.segments;
1385          for (int k = 0; k < segments.length; ++k) {
1386 <            Segment<K,V> seg = (Segment<K,V>)segments[k];
1386 >            Segment<K,V> seg = segmentAt(segments, k);
1387              seg.lock();
1388              try {
1389 <                HashEntry[] tab = seg.table;
1389 >                HashEntry<K,V>[] tab = seg.table;
1390                  for (int i = 0; i < tab.length; ++i) {
1391 <                    for (HashEntry<K,V> e = (HashEntry<K,V>)tab[i]; e != null; e = e.next) {
1391 >                    HashEntry<K,V> e;
1392 >                    for (e = entryAt(tab, i); e != null; e = e.next) {
1393                          s.writeObject(e.key);
1394                          s.writeObject(e.value);
1395                      }
# Line 1303 | Line 1403 | public class ConcurrentHashMap<K, V> ext
1403      }
1404  
1405      /**
1406 <     * Reconstitute the <tt>ConcurrentHashMap</tt>
1407 <     * instance from a stream (i.e.,
1308 <     * deserialize it).
1406 >     * Reconstitute the <tt>ConcurrentHashMap</tt> instance from a
1407 >     * stream (i.e., deserialize it).
1408       * @param s the stream
1409       */
1410 +    @SuppressWarnings("unchecked")
1411      private void readObject(java.io.ObjectInputStream s)
1412 <        throws IOException, ClassNotFoundException  {
1412 >        throws IOException, ClassNotFoundException {
1413          s.defaultReadObject();
1414  
1415 <        // Initialize each segment to be minimally sized, and let grow.
1416 <        for (int i = 0; i < segments.length; ++i) {
1417 <            segments[i].setTable(new HashEntry[1]);
1415 >        // Re-initialize segments to be minimally sized, and let grow.
1416 >        int cap = MIN_SEGMENT_TABLE_CAPACITY;
1417 >        final Segment<K,V>[] segments = this.segments;
1418 >        for (int k = 0; k < segments.length; ++k) {
1419 >            Segment<K,V> seg = segments[k];
1420 >            if (seg != null) {
1421 >                seg.threshold = (int)(cap * seg.loadFactor);
1422 >                seg.table = (HashEntry<K,V>[]) new HashEntry[cap];
1423 >            }
1424          }
1425  
1426          // Read the keys and values, and put the mappings in the table
# Line 1326 | Line 1432 | public class ConcurrentHashMap<K, V> ext
1432              put(key, value);
1433          }
1434      }
1329 }
1435  
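The writeObject/readObject pair above streams each mapping as key/value objects terminated by a null pair and rebuilds the table with put on the way back in. A round-trip sketch using ordinary object streams; the SerializationExample class and method names are assumptions for illustration:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;
    import java.util.concurrent.ConcurrentHashMap;

    class SerializationExample {
        static ConcurrentHashMap<String, Integer> roundTrip(ConcurrentHashMap<String, Integer> map)
                throws IOException, ClassNotFoundException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            ObjectOutputStream out = new ObjectOutputStream(bytes);
            out.writeObject(map);                      // drives the writeObject above
            out.close();

            ObjectInputStream in = new ObjectInputStream(
                    new ByteArrayInputStream(bytes.toByteArray()));
            @SuppressWarnings("unchecked")
            ConcurrentHashMap<String, Integer> copy =
                    (ConcurrentHashMap<String, Integer>) in.readObject();  // drives readObject above
            in.close();
            return copy;                               // same mappings, freshly sized segments
        }
    }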
1436 +    // Unsafe mechanics
1437 +    private static final sun.misc.Unsafe UNSAFE;
1438 +    private static final long SBASE;
1439 +    private static final int SSHIFT;
1440 +    private static final long TBASE;
1441 +    private static final int TSHIFT;
1442 +
1443 +    static {
1444 +        int ss, ts;
1445 +        try {
1446 +            UNSAFE = sun.misc.Unsafe.getUnsafe();
1447 +            Class tc = HashEntry[].class;
1448 +            Class sc = Segment[].class;
1449 +            TBASE = UNSAFE.arrayBaseOffset(tc);
1450 +            SBASE = UNSAFE.arrayBaseOffset(sc);
1451 +            ts = UNSAFE.arrayIndexScale(tc);
1452 +            ss = UNSAFE.arrayIndexScale(sc);
1453 +        } catch (Exception e) {
1454 +            throw new Error(e);
1455 +        }
1456 +        if ((ss & (ss-1)) != 0 || (ts & (ts-1)) != 0)
1457 +            throw new Error("data type scale not a power of two");
1458 +        SSHIFT = 31 - Integer.numberOfLeadingZeros(ss);
1459 +        TSHIFT = 31 - Integer.numberOfLeadingZeros(ts);
1460 +    }
1461 +
1462 + }
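Since arrayIndexScale reports a power of two, SSHIFT and TSHIFT are simply its base-2 logarithm, and an element's raw offset becomes the array base plus the index shifted left. A stand-alone sketch of that arithmetic; this is illustrative and is not the class's own segmentAt/entryAt code (obtaining an Unsafe instance outside the JDK normally requires reflection, so one is passed in here):

    // Illustrative offset arithmetic for Unsafe-based volatile array element reads.
    class OffsetSketch {
        static Object volatileElementAt(sun.misc.Unsafe unsafe, Object[] array, int i) {
            long base  = unsafe.arrayBaseOffset(Object[].class);
            int  scale = unsafe.arrayIndexScale(Object[].class);    // a power of two
            int  shift = 31 - Integer.numberOfLeadingZeros(scale);  // log2(scale)
            // base + (i << shift) is exactly base + i * scale
            return unsafe.getObjectVolatile(array, ((long) i << shift) + base);
        }
    }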

Diff Legend

Removed lines
+ Added lines
< Changed lines
> Changed lines