root/jsr166/jsr166/src/main/java/util/concurrent/ConcurrentHashMap.java

Comparing jsr166/src/main/java/util/concurrent/ConcurrentHashMap.java (file contents):
Revision 1.92 by dl, Sat Dec 2 20:55:01 2006 UTC vs.
Revision 1.110 by jsr166, Wed Apr 27 14:06:30 2011 UTC

# Line 1 | Line 1
1   /*
2   * Written by Doug Lea with assistance from members of JCP JSR-166
3   * Expert Group and released to the public domain, as explained at
4 < * http://creativecommons.org/licenses/publicdomain
4 > * http://creativecommons.org/publicdomain/zero/1.0/
5   */
6  
7   package java.util.concurrent;
# Line 76 | Line 76 | public class ConcurrentHashMap<K, V> ext
76  
77      /*
78       * The basic strategy is to subdivide the table among Segments,
79 <     * each of which itself is a concurrently readable hash table.
79 >     * each of which itself is a concurrently readable hash table.  To
80 >     * reduce footprint, all but one segment is constructed only
81 >     * when first needed (see ensureSegment). To maintain visibility
82 >     * in the presence of lazy construction, accesses to segments as
83 >     * well as elements of a segment's table must use volatile access,
84 >     * which is done via Unsafe within methods such as segmentAt
85 >     * below. These provide the functionality of AtomicReferenceArrays
86 >     * but reduce the levels of indirection. Additionally,
87 >     * volatile-writes of table elements and entry "next" fields
88 >     * within locked operations use the cheaper "lazySet" forms of
89 >     * writes (via putOrderedObject) because these writes are always
90 >     * followed by lock releases that maintain sequential consistency
91 >     * of table updates.
92 >     *
93 >     * Historical note: The previous version of this class relied
94 >     * heavily on "final" fields, which avoided some volatile reads at
95 >     * the expense of a large initial footprint.  Some remnants of
96 >     * that design (including forced construction of segment 0) exist
97 >     * to ensure serialization compatibility.
98       */
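
As the comment notes, the Unsafe-based helpers below (segmentAt, entryAt, setEntryAt) provide the functionality of AtomicReferenceArrays with one less level of indirection. A minimal standalone sketch of the equivalent behavior, using AtomicReferenceArray rather than the class's actual Unsafe mechanics:

    import java.util.concurrent.atomic.AtomicReferenceArray;

    // Conceptual stand-in for the Unsafe-based table access described above:
    // AtomicReferenceArray gives volatile reads and lazy ("ordered") writes of
    // individual elements, at the cost of an extra object and indirection.
    class VolatileTableSketch<E> {
        private final AtomicReferenceArray<E> table;

        VolatileTableSketch(int capacity) {
            table = new AtomicReferenceArray<E>(capacity);
        }

        E entryAt(int i) {            // volatile-read semantics (getObjectVolatile)
            return table.get(i);
        }

        void setEntryAt(int i, E e) { // ordered-write semantics (putOrderedObject);
            table.lazySet(i, e);      // safe here only when followed by a lock release
        }
    }
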
99  
100      /* ---------------- Constants -------------- */
# Line 108 | Line 126 | public class ConcurrentHashMap<K, V> ext
126      static final int MAXIMUM_CAPACITY = 1 << 30;
127  
128      /**
129 +     * The minimum capacity for per-segment tables.  Must be a power
130 +     * of two, at least two to avoid immediate resizing on next use
131 +     * after lazy construction.
132 +     */
133 +    static final int MIN_SEGMENT_TABLE_CAPACITY = 2;
134 +
135 +    /**
136       * The maximum number of segments to allow; used to bound
137 <     * constructor arguments.
137 >     * constructor arguments. Must be a power of two less than 1 << 24.
138       */
139      static final int MAX_SEGMENTS = 1 << 16; // slightly conservative
140  
# Line 135 | Line 160 | public class ConcurrentHashMap<K, V> ext
160      final int segmentShift;
161  
162      /**
163 <     * The segments, each of which is a specialized hash table
163 >     * The segments, each of which is a specialized hash table.
164       */
165      final Segment<K,V>[] segments;
166  
# Line 143 | Line 168 | public class ConcurrentHashMap<K, V> ext
168      transient Set<Map.Entry<K,V>> entrySet;
169      transient Collection<V> values;
170  
146    /* ---------------- Small Utilities -------------- */
147
148    /**
149     * Applies a supplemental hash function to a given hashCode, which
150     * defends against poor quality hash functions.  This is critical
151     * because ConcurrentHashMap uses power-of-two length hash tables,
152     * that otherwise encounter collisions for hashCodes that do not
153     * differ in lower bits.
154     */
155    private static int hash(int h) {
156        // Spread bits to regularize both segment and index locations,
157        // using variant of Jenkins's shift-based hash.
158        h += ~(h << 13);
159        h ^= h >>> 7;
160        h += h << 3;
161        h ^= h >>> 17;
162        h += h << 5;
163        return h;
164    }
165
166    /**
167     * Returns the segment that should be used for key with given hash
168     * @param hash the hash code for the key
169     * @return the segment
170     */
171    final Segment<K,V> segmentFor(int hash) {
172        return segments[(hash >>> segmentShift) & segmentMask];
173    }
174
175    /* ---------------- Inner Classes -------------- */
176
171      /**
172       * ConcurrentHashMap list entry. Note that this is never exported
173       * out as a user-visible Map.Entry.
180     *
181     * Because the value field is volatile, not final, it is legal wrt
182     * the Java Memory Model for an unsynchronized reader to see null
183     * instead of initial value when read via a data race.  Although a
184     * reordering leading to this is not likely to ever actually
185     * occur, the Segment.readValueUnderLock method is used as a
186     * backup in case a null (pre-initialized) value is ever seen in
187     * an unsynchronized access method.
174       */
175      static final class HashEntry<K,V> {
190        final K key;
176          final int hash;
177 +        final K key;
178          volatile V value;
179 <        final HashEntry<K,V> next;
179 >        volatile HashEntry<K,V> next;
180  
181 <        HashEntry(K key, int hash, HashEntry<K,V> next, V value) {
196 <            this.key = key;
181 >        HashEntry(int hash, K key, V value, HashEntry<K,V> next) {
182              this.hash = hash;
183 <            this.next = next;
183 >            this.key = key;
184              this.value = value;
185 +            this.next = next;
186          }
187  
188 <        @SuppressWarnings("unchecked")
189 <        static final <K,V> HashEntry<K,V>[] newArray(int i) {
190 <            return new HashEntry[i];
191 <        }
188 >        /**
189 >         * Sets next field with volatile write semantics.  (See above
190 >         * about use of putOrderedObject.)
191 >         */
192 >        final void setNext(HashEntry<K,V> n) {
193 >            UNSAFE.putOrderedObject(this, nextOffset, n);
194 >        }
195 >
196 >        // Unsafe mechanics
197 >        static final sun.misc.Unsafe UNSAFE;
198 >        static final long nextOffset;
199 >        static {
200 >            try {
201 >                UNSAFE = sun.misc.Unsafe.getUnsafe();
202 >                Class k = HashEntry.class;
203 >                nextOffset = UNSAFE.objectFieldOffset
204 >                    (k.getDeclaredField("next"));
205 >            } catch (Exception e) {
206 >                throw new Error(e);
207 >            }
208 >        }
209 >    }
210 >
211 >    /**
212 >     * Gets the ith element of given table (if nonnull) with volatile
213 >     * read semantics. Note: This is manually integrated into a few
214 >     * performance-sensitive methods to reduce call overhead.
215 >     */
216 >    @SuppressWarnings("unchecked")
217 >    static final <K,V> HashEntry<K,V> entryAt(HashEntry<K,V>[] tab, int i) {
218 >        return (tab == null) ? null :
219 >            (HashEntry<K,V>) UNSAFE.getObjectVolatile
220 >            (tab, ((long)i << TSHIFT) + TBASE);
221 >    }
222 >
223 >    /**
224 >     * Sets the ith element of given table, with volatile write
225 >     * semantics. (See above about use of putOrderedObject.)
226 >     */
227 >    static final <K,V> void setEntryAt(HashEntry<K,V>[] tab, int i,
228 >                                       HashEntry<K,V> e) {
229 >        UNSAFE.putOrderedObject(tab, ((long)i << TSHIFT) + TBASE, e);
230 >    }
231 >
232 >    /**
233 >     * Applies a supplemental hash function to a given hashCode, which
234 >     * defends against poor quality hash functions.  This is critical
235 >     * because ConcurrentHashMap uses power-of-two length hash tables
236 >     * that otherwise encounter collisions for hashCodes that do not
237 >     * differ in lower or upper bits.
238 >     */
239 >    private static int hash(int h) {
240 >        // Spread bits to regularize both segment and index locations,
241 >        // using variant of single-word Wang/Jenkins hash.
242 >        h += (h <<  15) ^ 0xffffcd7d;
243 >        h ^= (h >>> 10);
244 >        h += (h <<   3);
245 >        h ^= (h >>>  6);
246 >        h += (h <<   2) + (h << 14);
247 >        return h ^ (h >>> 16);
248      }
249  
250      /**
# Line 212 | Line 254 | public class ConcurrentHashMap<K, V> ext
254       */
255      static final class Segment<K,V> extends ReentrantLock implements Serializable {
256          /*
257 <         * Segments maintain a table of entry lists that are ALWAYS
258 <         * kept in a consistent state, so can be read without locking.
259 <         * Next fields of nodes are immutable (final).  All list
260 <         * additions are performed at the front of each bin. This
261 <         * makes it easy to check changes, and also fast to traverse.
262 <         * When nodes would otherwise be changed, new nodes are
221 <         * created to replace them. This works well for hash tables
222 <         * since the bin lists tend to be short. (The average length
223 <         * is less than two for the default load factor threshold.)
224 <         *
225 <         * Read operations can thus proceed without locking, but rely
226 <         * on selected uses of volatiles to ensure that completed
227 <         * write operations performed by other threads are
228 <         * noticed. For most purposes, the "count" field, tracking the
229 <         * number of elements, serves as that volatile variable
230 <         * ensuring visibility.  This is convenient because this field
231 <         * needs to be read in many read operations anyway:
257 >         * Segments maintain a table of entry lists that are always
258 >         * kept in a consistent state, so can be read (via volatile
259 >         * reads of segments and tables) without locking.  This
260 >         * requires replicating nodes when necessary during table
261 >         * resizing, so the old lists can be traversed by readers
262 >         * still using old version of table.
263           *
264 <         *   - All (unsynchronized) read operations must first read the
265 <         *     "count" field, and should not look at table entries if
266 <         *     it is 0.
267 <         *
268 <         *   - All (synchronized) write operations should write to
269 <         *     the "count" field after structurally changing any bin.
270 <         *     The operations must not take any action that could even
271 <         *     momentarily cause a concurrent read operation to see
272 <         *     inconsistent data. This is made easier by the nature of
273 <         *     the read operations in Map. For example, no operation
274 <         *     can reveal that the table has grown but the threshold
275 <         *     has not yet been updated, so there are no atomicity
276 <         *     requirements for this with respect to reads.
277 <         *
278 <         * As a guide, all critical volatile reads and writes to the
279 <         * count field are marked in code comments.
264 >         * This class defines only mutative methods requiring locking.
265 >         * Except as noted, the methods of this class perform the
266 >         * per-segment versions of ConcurrentHashMap methods.  (Other
267 >         * methods are integrated directly into ConcurrentHashMap
268 >         * methods.) These mutative methods use a form of controlled
269 >         * spinning on contention via methods scanAndLock and
270 >         * scanAndLockForPut. These intersperse tryLocks with
271 >         * traversals to locate nodes.  The main benefit is to absorb
272 >         * cache misses (which are very common for hash tables) while
273 >         * obtaining locks so that traversal is faster once
274 >         * acquired. We do not actually use the found nodes since they
275 >         * must be re-acquired under lock anyway to ensure sequential
276 >         * consistency of updates (and in any case may be undetectably
277 >         * stale), but they will normally be much faster to re-locate.
278 >         * Also, scanAndLockForPut speculatively creates a fresh node
279 >         * to use in put if no node is found.
280           */
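
The controlled-spinning idiom described above can be sketched on its own, assuming a plain ReentrantLock and an arbitrary cache-warming traversal supplied by the caller; the real scanAndLock/scanAndLockForPut below additionally re-traverse when the bucket's first node changes:

    import java.util.concurrent.locks.ReentrantLock;

    // Minimal sketch of "tryLock with a bounded prescan, then block".
    class SpinThenBlockSketch {
        static final int MAX_SCAN_RETRIES =
            Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1;
        final ReentrantLock lock = new ReentrantLock();

        void lockWithPrescan(Runnable warmUp) {
            int retries = 0;
            while (!lock.tryLock()) {
                warmUp.run();                // e.g. walk the bucket's list to absorb cache misses
                if (++retries > MAX_SCAN_RETRIES) {
                    lock.lock();             // give up spinning and block
                    break;
                }
            }
            // caller now holds the lock and must unlock() in a finally block
        }
    }
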
281  
282          private static final long serialVersionUID = 2249069246763182397L;
283  
284          /**
285 <         * The number of elements in this segment's region.
285 >         * The maximum number of times to tryLock in a prescan before
286 >         * possibly blocking on acquire in preparation for a locked
287 >         * segment operation. On multiprocessors, using a bounded
288 >         * number of retries maintains the cache state acquired while locating
289 >         * nodes.
290 >         */
291 >        static final int MAX_SCAN_RETRIES =
292 >            Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1;
293 >
294 >        /**
295 >         * The per-segment table. Elements are accessed via
296 >         * entryAt/setEntryAt providing volatile semantics.
297 >         */
298 >        transient volatile HashEntry<K,V>[] table;
299 >
300 >        /**
301 >         * The number of elements. Accessed only either within locks
302 >         * or among other volatile reads that maintain visibility.
303           */
304 <        transient volatile int count;
304 >        transient int count;
305  
306          /**
307 <         * Number of updates that alter the size of the table. This is
308 <         * used during bulk-read methods to make sure they see a
309 <         * consistent snapshot: If modCounts change during a traversal
310 <         * of segments computing size or checking containsValue, then
311 <         * we might have an inconsistent view of state so (usually)
264 <         * must retry.
307 >         * The total number of mutative operations in this segment.
308 >         * Even though this may overflow 32 bits, it provides
309 >         * sufficient accuracy for stability checks in CHM isEmpty()
310 >         * and size() methods.  Accessed only either within locks or
311 >         * among other volatile reads that maintain visibility.
312           */
313          transient int modCount;
314  
# Line 273 | Line 320 | public class ConcurrentHashMap<K, V> ext
320          transient int threshold;
321  
322          /**
276         * The per-segment table.
277         */
278        transient volatile HashEntry<K,V>[] table;
279
280        /**
323           * The load factor for the hash table.  Even though this value
324           * is same for all segments, it is replicated to avoid needing
325           * links to outer object.
# Line 285 | Line 327 | public class ConcurrentHashMap<K, V> ext
327           */
328          final float loadFactor;
329  
330 <        Segment(int initialCapacity, float lf) {
331 <            loadFactor = lf;
332 <            setTable(HashEntry.<K,V>newArray(initialCapacity));
330 >        Segment(float lf, int threshold, HashEntry<K,V>[] tab) {
331 >            this.loadFactor = lf;
332 >            this.threshold = threshold;
333 >            this.table = tab;
334          }
335  
336 <        @SuppressWarnings("unchecked")
337 <        static final <K,V> Segment<K,V>[] newArray(int i) {
338 <            return new Segment[i];
339 <        }
297 <
298 <        /**
299 <         * Sets table to new HashEntry array.
300 <         * Call only while holding lock or in constructor.
301 <         */
302 <        void setTable(HashEntry<K,V>[] newTable) {
303 <            threshold = (int)(newTable.length * loadFactor);
304 <            table = newTable;
305 <        }
306 <
307 <        /**
308 <         * Returns properly casted first entry of bin for given hash.
309 <         */
310 <        HashEntry<K,V> getFirst(int hash) {
311 <            HashEntry<K,V>[] tab = table;
312 <            return tab[hash & (tab.length - 1)];
313 <        }
314 <
315 <        /**
316 <         * Reads value field of an entry under lock. Called if value
317 <         * field ever appears to be null. This is possible only if a
318 <         * compiler happens to reorder a HashEntry initialization with
319 <         * its table assignment, which is legal under memory model
320 <         * but is not known to ever occur.
321 <         */
322 <        V readValueUnderLock(HashEntry<K,V> e) {
323 <            lock();
336 >        final V put(K key, int hash, V value, boolean onlyIfAbsent) {
337 >            HashEntry<K,V> node = tryLock() ? null :
338 >                scanAndLockForPut(key, hash, value);
339 >            V oldValue;
340              try {
325                return e.value;
326            } finally {
327                unlock();
328            }
329        }
330
331        /* Specialized implementations of map methods */
332
333        V get(Object key, int hash) {
334            if (count != 0) { // read-volatile
335                HashEntry<K,V> e = getFirst(hash);
336                while (e != null) {
337                    if (e.hash == hash && key.equals(e.key)) {
338                        V v = e.value;
339                        if (v != null)
340                            return v;
341                        return readValueUnderLock(e); // recheck
342                    }
343                    e = e.next;
344                }
345            }
346            return null;
347        }
348
349        boolean containsKey(Object key, int hash) {
350            if (count != 0) { // read-volatile
351                HashEntry<K,V> e = getFirst(hash);
352                while (e != null) {
353                    if (e.hash == hash && key.equals(e.key))
354                        return true;
355                    e = e.next;
356                }
357            }
358            return false;
359        }
360
361        boolean containsValue(Object value) {
362            if (count != 0) { // read-volatile
341                  HashEntry<K,V>[] tab = table;
342 <                int len = tab.length;
343 <                for (int i = 0 ; i < len; i++) {
344 <                    for (HashEntry<K,V> e = tab[i]; e != null; e = e.next) {
345 <                        V v = e.value;
346 <                        if (v == null) // recheck
347 <                            v = readValueUnderLock(e);
348 <                        if (value.equals(v))
349 <                            return true;
342 >                int index = (tab.length - 1) & hash;
343 >                HashEntry<K,V> first = entryAt(tab, index);
344 >                for (HashEntry<K,V> e = first;;) {
345 >                    if (e != null) {
346 >                        K k;
347 >                        if ((k = e.key) == key ||
348 >                            (e.hash == hash && key.equals(k))) {
349 >                            oldValue = e.value;
350 >                            if (!onlyIfAbsent) {
351 >                                e.value = value;
352 >                                ++modCount;
353 >                            }
354 >                            break;
355 >                        }
356 >                        e = e.next;
357 >                    }
358 >                    else {
359 >                        if (node != null)
360 >                            node.setNext(first);
361 >                        else
362 >                            node = new HashEntry<K,V>(hash, key, value, first);
363 >                        int c = count + 1;
364 >                        if (c > threshold && tab.length < MAXIMUM_CAPACITY)
365 >                            rehash(node);
366 >                        else
367 >                            setEntryAt(tab, index, node);
368 >                        ++modCount;
369 >                        count = c;
370 >                        oldValue = null;
371 >                        break;
372                      }
373                  }
374            }
375            return false;
376        }
377
378        boolean replace(K key, int hash, V oldValue, V newValue) {
379            lock();
380            try {
381                HashEntry<K,V> e = getFirst(hash);
382                while (e != null && (e.hash != hash || !key.equals(e.key)))
383                    e = e.next;
384
385                boolean replaced = false;
386                if (e != null && oldValue.equals(e.value)) {
387                    replaced = true;
388                    e.value = newValue;
389                }
390                return replaced;
391            } finally {
392                unlock();
393            }
394        }
395
396        V replace(K key, int hash, V newValue) {
397            lock();
398            try {
399                HashEntry<K,V> e = getFirst(hash);
400                while (e != null && (e.hash != hash || !key.equals(e.key)))
401                    e = e.next;
402
403                V oldValue = null;
404                if (e != null) {
405                    oldValue = e.value;
406                    e.value = newValue;
407                }
408                return oldValue;
409            } finally {
410                unlock();
411            }
412        }
413
414
415        V put(K key, int hash, V value, boolean onlyIfAbsent) {
416            lock();
417            try {
418                int c = count;
419                if (c++ > threshold) // ensure capacity
420                    rehash();
421                HashEntry<K,V>[] tab = table;
422                int index = hash & (tab.length - 1);
423                HashEntry<K,V> first = tab[index];
424                HashEntry<K,V> e = first;
425                while (e != null && (e.hash != hash || !key.equals(e.key)))
426                    e = e.next;
427
428                V oldValue;
429                if (e != null) {
430                    oldValue = e.value;
431                    if (!onlyIfAbsent)
432                        e.value = value;
433                }
434                else {
435                    oldValue = null;
436                    ++modCount;
437                    tab[index] = new HashEntry<K,V>(key, hash, first, value);
438                    count = c; // write-volatile
439                }
440                return oldValue;
374              } finally {
375                  unlock();
376              }
377 +            return oldValue;
378          }
379  
380 <        void rehash() {
381 <            HashEntry<K,V>[] oldTable = table;
382 <            int oldCapacity = oldTable.length;
383 <            if (oldCapacity >= MAXIMUM_CAPACITY)
384 <                return;
385 <
380 >        /**
381 >         * Doubles the size of the table and repacks entries, also adding
382 >         * the given node to the new table.
383 >         */
384 >        @SuppressWarnings("unchecked")
385 >        private void rehash(HashEntry<K,V> node) {
386              /*
387 <             * Reclassify nodes in each list to new Map.  Because we are
388 <             * using power-of-two expansion, the elements from each bin
389 <             * must either stay at same index, or move with a power of two
390 <             * offset. We eliminate unnecessary node creation by catching
391 <             * cases where old nodes can be reused because their next
392 <             * fields won't change. Statistically, at the default
393 <             * threshold, only about one-sixth of them need cloning when
394 <             * a table doubles. The nodes they replace will be garbage
395 <             * collectable as soon as they are no longer referenced by any
396 <             * reader thread that may be in the midst of traversing table
397 <             * right now.
387 >             * Reclassify nodes in each list to new table.  Because we
388 >             * are using power-of-two expansion, the elements from
389 >             * each bin must either stay at same index, or move with a
390 >             * power of two offset. We eliminate unnecessary node
391 >             * creation by catching cases where old nodes can be
392 >             * reused because their next fields won't change.
393 >             * Statistically, at the default threshold, only about
394 >             * one-sixth of them need cloning when a table
395 >             * doubles. The nodes they replace will be garbage
396 >             * collectable as soon as they are no longer referenced by
397 >             * any reader thread that may be in the midst of
398 >             * concurrently traversing table. Entry accesses use plain
399 >             * concurrently traversing the table. Entry accesses use plain
400 >             * array indexing because they are followed by a volatile
401               */
402 <
403 <            HashEntry<K,V>[] newTable = HashEntry.newArray(oldCapacity<<1);
404 <            threshold = (int)(newTable.length * loadFactor);
405 <            int sizeMask = newTable.length - 1;
402 >            HashEntry<K,V>[] oldTable = table;
403 >            int oldCapacity = oldTable.length;
404 >            int newCapacity = oldCapacity << 1;
405 >            threshold = (int)(newCapacity * loadFactor);
406 >            HashEntry<K,V>[] newTable =
407 >                (HashEntry<K,V>[]) new HashEntry[newCapacity];
408 >            int sizeMask = newCapacity - 1;
409              for (int i = 0; i < oldCapacity ; i++) {
470                // We need to guarantee that any existing reads of old Map can
471                //  proceed. So we cannot yet null out each bin.
410                  HashEntry<K,V> e = oldTable[i];
473
411                  if (e != null) {
412                      HashEntry<K,V> next = e.next;
413                      int idx = e.hash & sizeMask;
414 <
478 <                    //  Single node on list
479 <                    if (next == null)
414 >                    if (next == null)   //  Single node on list
415                          newTable[idx] = e;
416 <
482 <                    else {
483 <                        // Reuse trailing consecutive sequence at same slot
416 >                    else { // Reuse consecutive sequence at same slot
417                          HashEntry<K,V> lastRun = e;
418                          int lastIdx = idx;
419                          for (HashEntry<K,V> last = next;
# Line 493 | Line 426 | public class ConcurrentHashMap<K, V> ext
426                              }
427                          }
428                          newTable[lastIdx] = lastRun;
429 <
497 <                        // Clone all remaining nodes
429 >                        // Clone remaining nodes
430                          for (HashEntry<K,V> p = e; p != lastRun; p = p.next) {
431 <                            int k = p.hash & sizeMask;
431 >                            V v = p.value;
432 >                            int h = p.hash;
433 >                            int k = h & sizeMask;
434                              HashEntry<K,V> n = newTable[k];
435 <                            newTable[k] = new HashEntry<K,V>(p.key, p.hash,
502 <                                                             n, p.value);
435 >                            newTable[k] = new HashEntry<K,V>(h, p.key, v, n);
436                          }
437                      }
438                  }
439              }
440 +            int nodeIndex = node.hash & sizeMask; // add the new node
441 +            node.setNext(newTable[nodeIndex]);
442 +            newTable[nodeIndex] = node;
443              table = newTable;
444          }
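
The "reuse trailing run" trick that rehash relies on can be isolated as follows (hypothetical Node class, not the HashEntry code itself): the longest suffix of a bucket's list whose nodes all land at the same new index is linked into the new table unchanged, and only the nodes before that suffix are cloned.

    // Standalone sketch of reclassifying one old bucket during a doubling.
    class LastRunSketch {
        static final class Node<K,V> {
            final int hash; final K key; final V value; Node<K,V> next;
            Node(int hash, K key, V value, Node<K,V> next) {
                this.hash = hash; this.key = key; this.value = value; this.next = next;
            }
        }

        static <K,V> void transfer(Node<K,V> e, Node<K,V>[] newTable, int sizeMask) {
            Node<K,V> lastRun = e;
            int lastIdx = e.hash & sizeMask;
            for (Node<K,V> last = e.next; last != null; last = last.next) {
                int k = last.hash & sizeMask;
                if (k != lastIdx) {          // run broken; restart it here
                    lastIdx = k;
                    lastRun = last;
                }
            }
            newTable[lastIdx] = lastRun;     // reuse the whole trailing run as-is
            for (Node<K,V> p = e; p != lastRun; p = p.next) {
                int k = p.hash & sizeMask;   // clone only the preceding nodes
                newTable[k] = new Node<K,V>(p.hash, p.key, p.value, newTable[k]);
            }
        }
    }
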
445  
446          /**
447 +         * Scans for a node containing given key while trying to
448 +         * acquire lock, creating and returning one if not found. Upon
449 +         * return, guarantees that lock is held. Unlike in most
450 +         * methods, calls to method equals are not screened: Since
451 +         * traversal speed doesn't matter, we might as well help warm
452 +         * up the associated code and accesses as well.
453 +         *
454 +         * @return a new node if key not found, else null
455 +         */
456 +        private HashEntry<K,V> scanAndLockForPut(K key, int hash, V value) {
457 +            HashEntry<K,V> first = entryForHash(this, hash);
458 +            HashEntry<K,V> e = first;
459 +            HashEntry<K,V> node = null;
460 +            int retries = -1; // negative while locating node
461 +            while (!tryLock()) {
462 +                HashEntry<K,V> f; // to recheck first below
463 +                if (retries < 0) {
464 +                    if (e == null) {
465 +                        if (node == null) // speculatively create node
466 +                            node = new HashEntry<K,V>(hash, key, value, null);
467 +                        retries = 0;
468 +                    }
469 +                    else if (key.equals(e.key))
470 +                        retries = 0;
471 +                    else
472 +                        e = e.next;
473 +                }
474 +                else if (++retries > MAX_SCAN_RETRIES) {
475 +                    lock();
476 +                    break;
477 +                }
478 +                else if ((retries & 1) == 0 &&
479 +                         (f = entryForHash(this, hash)) != first) {
480 +                    e = first = f; // re-traverse if entry changed
481 +                    retries = -1;
482 +                }
483 +            }
484 +            return node;
485 +        }
486 +
487 +        /**
488 +         * Scans for a node containing the given key while trying to
489 +         * acquire lock for a remove or replace operation. Upon
490 +         * return, guarantees that lock is held.  Note that we must
491 +         * lock even if the key is not found, to ensure sequential
492 +         * consistency of updates.
493 +         */
494 +        private void scanAndLock(Object key, int hash) {
495 +            // similar to but simpler than scanAndLockForPut
496 +            HashEntry<K,V> first = entryForHash(this, hash);
497 +            HashEntry<K,V> e = first;
498 +            int retries = -1;
499 +            while (!tryLock()) {
500 +                HashEntry<K,V> f;
501 +                if (retries < 0) {
502 +                    if (e == null || key.equals(e.key))
503 +                        retries = 0;
504 +                    else
505 +                        e = e.next;
506 +                }
507 +                else if (++retries > MAX_SCAN_RETRIES) {
508 +                    lock();
509 +                    break;
510 +                }
511 +                else if ((retries & 1) == 0 &&
512 +                         (f = entryForHash(this, hash)) != first) {
513 +                    e = first = f;
514 +                    retries = -1;
515 +                }
516 +            }
517 +        }
518 +
519 +        /**
520           * Remove; match on key only if value null, else match both.
521           */
522 <        V remove(Object key, int hash, Object value) {
523 <            lock();
522 >        final V remove(Object key, int hash, Object value) {
523 >            if (!tryLock())
524 >                scanAndLock(key, hash);
525 >            V oldValue = null;
526              try {
516                int c = count - 1;
527                  HashEntry<K,V>[] tab = table;
528 <                int index = hash & (tab.length - 1);
529 <                HashEntry<K,V> first = tab[index];
530 <                HashEntry<K,V> e = first;
531 <                while (e != null && (e.hash != hash || !key.equals(e.key)))
532 <                    e = e.next;
528 >                int index = (tab.length - 1) & hash;
529 >                HashEntry<K,V> e = entryAt(tab, index);
530 >                HashEntry<K,V> pred = null;
531 >                while (e != null) {
532 >                    K k;
533 >                    HashEntry<K,V> next = e.next;
534 >                    if ((k = e.key) == key ||
535 >                        (e.hash == hash && key.equals(k))) {
536 >                        V v = e.value;
537 >                        if (value == null || value == v || value.equals(v)) {
538 >                            if (pred == null)
539 >                                setEntryAt(tab, index, next);
540 >                            else
541 >                                pred.setNext(next);
542 >                            ++modCount;
543 >                            --count;
544 >                            oldValue = v;
545 >                        }
546 >                        break;
547 >                    }
548 >                    pred = e;
549 >                    e = next;
550 >                }
551 >            } finally {
552 >                unlock();
553 >            }
554 >            return oldValue;
555 >        }
556  
557 <                V oldValue = null;
558 <                if (e != null) {
559 <                    V v = e.value;
560 <                    if (value == null || value.equals(v)) {
561 <                        oldValue = v;
562 <                        // All entries following removed node can stay
563 <                        // in list, but all preceding ones need to be
564 <                        // cloned.
557 >        final boolean replace(K key, int hash, V oldValue, V newValue) {
558 >            if (!tryLock())
559 >                scanAndLock(key, hash);
560 >            boolean replaced = false;
561 >            try {
562 >                HashEntry<K,V> e;
563 >                for (e = entryForHash(this, hash); e != null; e = e.next) {
564 >                    K k;
565 >                    if ((k = e.key) == key ||
566 >                        (e.hash == hash && key.equals(k))) {
567 >                        if (oldValue.equals(e.value)) {
568 >                            e.value = newValue;
569 >                            ++modCount;
570 >                            replaced = true;
571 >                        }
572 >                        break;
573 >                    }
574 >                }
575 >            } finally {
576 >                unlock();
577 >            }
578 >            return replaced;
579 >        }
580 >
581 >        final V replace(K key, int hash, V value) {
582 >            if (!tryLock())
583 >                scanAndLock(key, hash);
584 >            V oldValue = null;
585 >            try {
586 >                HashEntry<K,V> e;
587 >                for (e = entryForHash(this, hash); e != null; e = e.next) {
588 >                    K k;
589 >                    if ((k = e.key) == key ||
590 >                        (e.hash == hash && key.equals(k))) {
591 >                        oldValue = e.value;
592 >                        e.value = value;
593                          ++modCount;
594 <                        HashEntry<K,V> newFirst = e.next;
534 <                        for (HashEntry<K,V> p = first; p != e; p = p.next)
535 <                            newFirst = new HashEntry<K,V>(p.key, p.hash,
536 <                                                          newFirst, p.value);
537 <                        tab[index] = newFirst;
538 <                        count = c; // write-volatile
594 >                        break;
595                      }
596                  }
541                return oldValue;
597              } finally {
598                  unlock();
599              }
600 +            return oldValue;
601          }
602  
603 <        void clear() {
604 <            if (count != 0) {
605 <                lock();
606 <                try {
607 <                    HashEntry<K,V>[] tab = table;
608 <                    for (int i = 0; i < tab.length ; i++)
609 <                        tab[i] = null;
610 <                    ++modCount;
611 <                    count = 0; // write-volatile
612 <                } finally {
613 <                    unlock();
603 >        final void clear() {
604 >            lock();
605 >            try {
606 >                HashEntry<K,V>[] tab = table;
607 >                for (int i = 0; i < tab.length ; i++)
608 >                    setEntryAt(tab, i, null);
609 >                ++modCount;
610 >                count = 0;
611 >            } finally {
612 >                unlock();
613 >            }
614 >        }
615 >    }
616 >
617 >    // Accessing segments
618 >
619 >    /**
620 >     * Gets the jth element of given segment array (if nonnull) with
621 >     * volatile element access semantics via Unsafe. (The null check
622 >     * can trigger harmlessly only during deserialization.) Note:
623 >     * because each element of segments array is set only once (using
624 >     * fully ordered writes), some performance-sensitive methods rely
625 >     * on this method only as a recheck upon null reads.
626 >     */
627 >    @SuppressWarnings("unchecked")
628 >    static final <K,V> Segment<K,V> segmentAt(Segment<K,V>[] ss, int j) {
629 >        long u = (j << SSHIFT) + SBASE;
630 >        return ss == null ? null :
631 >            (Segment<K,V>) UNSAFE.getObjectVolatile(ss, u);
632 >    }
633 >
634 >    /**
635 >     * Returns the segment for the given index, creating it and
636 >     * recording in segment table (via CAS) if not already present.
637 >     *
638 >     * @param k the index
639 >     * @return the segment
640 >     */
641 >    @SuppressWarnings("unchecked")
642 >    private Segment<K,V> ensureSegment(int k) {
643 >        final Segment<K,V>[] ss = this.segments;
644 >        long u = (k << SSHIFT) + SBASE; // raw offset
645 >        Segment<K,V> seg;
646 >        if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u)) == null) {
647 >            Segment<K,V> proto = ss[0]; // use segment 0 as prototype
648 >            int cap = proto.table.length;
649 >            float lf = proto.loadFactor;
650 >            int threshold = (int)(cap * lf);
651 >            HashEntry<K,V>[] tab = (HashEntry<K,V>[])new HashEntry[cap];
652 >            if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
653 >                == null) { // recheck
654 >                Segment<K,V> s = new Segment<K,V>(lf, threshold, tab);
655 >                while ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
656 >                       == null) {
657 >                    if (UNSAFE.compareAndSwapObject(ss, u, null, seg = s))
658 >                        break;
659                  }
660              }
661          }
662 +        return seg;
663      }
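
The lazy-create-then-CAS-publish pattern used by ensureSegment, re-expressed as a small sketch against an AtomicReferenceArray (an assumed stand-in for the Unsafe-accessed segments array): read volatilely, construct a candidate only if the slot is still empty, and let compareAndSet decide which thread's instance wins.

    import java.util.concurrent.Callable;
    import java.util.concurrent.atomic.AtomicReferenceArray;

    // Sketch of race-tolerant lazy slot initialization; losing threads simply
    // drop their candidate and use the published winner.
    class LazySlotSketch<T> {
        private final AtomicReferenceArray<T> slots;

        LazySlotSketch(int size) { slots = new AtomicReferenceArray<T>(size); }

        T ensure(int k, Callable<T> factory) throws Exception {
            T existing = slots.get(k);                 // volatile read
            if (existing == null) {
                T candidate = factory.call();          // speculative construction
                while ((existing = slots.get(k)) == null) {
                    if (slots.compareAndSet(k, null, candidate)) {
                        existing = candidate;          // this thread published it
                        break;
                    }
                }
            }
            return existing;
        }
    }
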
664  
665 +    // Hash-based segment and entry accesses
666  
667 +    /**
668 +     * Gets the segment for the given hash code.
669 +     */
670 +    @SuppressWarnings("unchecked")
671 +    private Segment<K,V> segmentForHash(int h) {
672 +        long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
673 +        return (Segment<K,V>) UNSAFE.getObjectVolatile(segments, u);
674 +    }
675 +
676 +    /**
677 +     * Gets the table entry for the given segment and hash code.
678 +     */
679 +    @SuppressWarnings("unchecked")
680 +    static final <K,V> HashEntry<K,V> entryForHash(Segment<K,V> seg, int h) {
681 +        HashEntry<K,V>[] tab;
682 +        return (seg == null || (tab = seg.table) == null) ? null :
683 +            (HashEntry<K,V>) UNSAFE.getObjectVolatile
684 +            (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
685 +    }
686  
687      /* ---------------- Public operations -------------- */
688  
# Line 580 | Line 702 | public class ConcurrentHashMap<K, V> ext
702       * negative or the load factor or concurrencyLevel are
703       * nonpositive.
704       */
705 +    @SuppressWarnings("unchecked")
706      public ConcurrentHashMap(int initialCapacity,
707                               float loadFactor, int concurrencyLevel) {
708          if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0)
709              throw new IllegalArgumentException();
587
710          if (concurrencyLevel > MAX_SEGMENTS)
711              concurrencyLevel = MAX_SEGMENTS;
590
712          // Find power-of-two sizes best matching arguments
713          int sshift = 0;
714          int ssize = 1;
# Line 595 | Line 716 | public class ConcurrentHashMap<K, V> ext
716              ++sshift;
717              ssize <<= 1;
718          }
719 <        segmentShift = 32 - sshift;
720 <        segmentMask = ssize - 1;
600 <        this.segments = Segment.newArray(ssize);
601 <
719 >        this.segmentShift = 32 - sshift;
720 >        this.segmentMask = ssize - 1;
721          if (initialCapacity > MAXIMUM_CAPACITY)
722              initialCapacity = MAXIMUM_CAPACITY;
723          int c = initialCapacity / ssize;
724          if (c * ssize < initialCapacity)
725              ++c;
726 <        int cap = 1;
726 >        int cap = MIN_SEGMENT_TABLE_CAPACITY;
727          while (cap < c)
728              cap <<= 1;
729 <
730 <        for (int i = 0; i < this.segments.length; ++i)
731 <            this.segments[i] = new Segment<K,V>(cap, loadFactor);
729 >        // create segments and segments[0]
730 >        Segment<K,V> s0 =
731 >            new Segment<K,V>(loadFactor, (int)(cap * loadFactor),
732 >                             (HashEntry<K,V>[])new HashEntry[cap]);
733 >        Segment<K,V>[] ss = (Segment<K,V>[])new Segment[ssize];
734 >        UNSAFE.putOrderedObject(ss, SBASE, s0); // ordered write of segments[0]
735 >        this.segments = ss;
736      }
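
The power-of-two sizing performed by this constructor can be reproduced in isolation; for example, with the default concurrencyLevel of 16 and an initialCapacity of 100, the computation below yields 16 segments, segmentShift 28, segmentMask 15, and an initial per-segment capacity of 8:

    // Standalone demonstration of the constructor's sizing arithmetic.
    public class SizingDemo {
        public static void main(String[] args) {
            int concurrencyLevel = 16, initialCapacity = 100; // example arguments
            int sshift = 0, ssize = 1;
            while (ssize < concurrencyLevel) { ++sshift; ssize <<= 1; }
            int segmentShift = 32 - sshift;
            int segmentMask = ssize - 1;
            int c = initialCapacity / ssize;
            if (c * ssize < initialCapacity) ++c;
            int cap = 2;                                      // MIN_SEGMENT_TABLE_CAPACITY
            while (cap < c) cap <<= 1;
            System.out.println("segments=" + ssize + " segmentShift=" + segmentShift
                               + " segmentMask=" + segmentMask + " perSegmentCapacity=" + cap);
        }
    }
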
737  
738      /**
# Line 672 | Line 795 | public class ConcurrentHashMap<K, V> ext
795       * @return <tt>true</tt> if this map contains no key-value mappings
796       */
797      public boolean isEmpty() {
675        final Segment<K,V>[] segments = this.segments;
798          /*
799 <         * We keep track of per-segment modCounts to avoid ABA
800 <         * problems in which an element in one segment was added and
801 <         * in another removed during traversal, in which case the
802 <         * table was never actually empty at any point. Note the
803 <         * similar use of modCounts in the size() and containsValue()
804 <         * methods, which are the only other methods also susceptible
805 <         * to ABA problems.
799 >         * Sum per-segment modCounts to avoid mis-reporting when
800 >         * elements are concurrently added and removed in one segment
801 >         * while checking another, in which case the table was never
802 >         * actually empty at any point. (The sum ensures accuracy up
803 >         * through at least 1<<31 per-segment modifications before
804 >         * recheck.)  Methods size() and containsValue() use similar
805 >         * constructions for stability checks.
806           */
807 <        int[] mc = new int[segments.length];
808 <        int mcsum = 0;
809 <        for (int i = 0; i < segments.length; ++i) {
810 <            if (segments[i].count != 0)
811 <                return false;
812 <            else
691 <                mcsum += mc[i] = segments[i].modCount;
692 <        }
693 <        // If mcsum happens to be zero, then we know we got a snapshot
694 <        // before any modifications at all were made.  This is
695 <        // probably common enough to bother tracking.
696 <        if (mcsum != 0) {
697 <            for (int i = 0; i < segments.length; ++i) {
698 <                if (segments[i].count != 0 ||
699 <                    mc[i] != segments[i].modCount)
807 >        long sum = 0L;
808 >        final Segment<K,V>[] segments = this.segments;
809 >        for (int j = 0; j < segments.length; ++j) {
810 >            Segment<K,V> seg = segmentAt(segments, j);
811 >            if (seg != null) {
812 >                if (seg.count != 0)
813                      return false;
814 +                sum += seg.modCount;
815              }
816          }
817 +        if (sum != 0L) { // recheck unless no modifications
818 +            for (int j = 0; j < segments.length; ++j) {
819 +                Segment<K,V> seg = segmentAt(segments, j);
820 +                if (seg != null) {
821 +                    if (seg.count != 0)
822 +                        return false;
823 +                    sum -= seg.modCount;
824 +                }
825 +            }
826 +            if (sum != 0L)
827 +                return false;
828 +        }
829          return true;
830      }
831  
# Line 711 | Line 837 | public class ConcurrentHashMap<K, V> ext
837       * @return the number of key-value mappings in this map
838       */
839      public int size() {
714        final Segment<K,V>[] segments = this.segments;
715        long sum = 0;
716        long check = 0;
717        int[] mc = new int[segments.length];
840          // Try a few times to get accurate count. On failure due to
841          // continuous async changes in table, resort to locking.
842 <        for (int k = 0; k < RETRIES_BEFORE_LOCK; ++k) {
843 <            check = 0;
844 <            sum = 0;
845 <            int mcsum = 0;
846 <            for (int i = 0; i < segments.length; ++i) {
847 <                sum += segments[i].count;
848 <                mcsum += mc[i] = segments[i].modCount;
849 <            }
850 <            if (mcsum != 0) {
851 <                for (int i = 0; i < segments.length; ++i) {
852 <                    check += segments[i].count;
853 <                    if (mc[i] != segments[i].modCount) {
854 <                        check = -1; // force retry
855 <                        break;
842 >        final Segment<K,V>[] segments = this.segments;
843 >        int size;
844 >        boolean overflow; // true if size overflows 32 bits
845 >        long sum;         // sum of modCounts
846 >        long last = 0L;   // previous sum
847 >        int retries = -1; // first iteration isn't retry
848 >        try {
849 >            for (;;) {
850 >                if (retries++ == RETRIES_BEFORE_LOCK) {
851 >                    for (int j = 0; j < segments.length; ++j)
852 >                        ensureSegment(j).lock(); // force creation
853 >                }
854 >                sum = 0L;
855 >                size = 0;
856 >                overflow = false;
857 >                for (int j = 0; j < segments.length; ++j) {
858 >                    Segment<K,V> seg = segmentAt(segments, j);
859 >                    if (seg != null) {
860 >                        sum += seg.modCount;
861 >                        int c = seg.count;
862 >                        if (c < 0 || (size += c) < 0)
863 >                            overflow = true;
864                      }
865                  }
866 +                if (sum == last)
867 +                    break;
868 +                last = sum;
869 +            }
870 +        } finally {
871 +            if (retries > RETRIES_BEFORE_LOCK) {
872 +                for (int j = 0; j < segments.length; ++j)
873 +                    segmentAt(segments, j).unlock();
874              }
737            if (check == sum)
738                break;
875          }
876 <        if (check != sum) { // Resort to locking all segments
741 <            sum = 0;
742 <            for (int i = 0; i < segments.length; ++i)
743 <                segments[i].lock();
744 <            for (int i = 0; i < segments.length; ++i)
745 <                sum += segments[i].count;
746 <            for (int i = 0; i < segments.length; ++i)
747 <                segments[i].unlock();
748 <        }
749 <        if (sum > Integer.MAX_VALUE)
750 <            return Integer.MAX_VALUE;
751 <        else
752 <            return (int)sum;
876 >        return overflow ? Integer.MAX_VALUE : size;
877      }
878  
879      /**
# Line 764 | Line 888 | public class ConcurrentHashMap<K, V> ext
888       * @throws NullPointerException if the specified key is null
889       */
890      public V get(Object key) {
891 <        int hash = hash(key.hashCode());
892 <        return segmentFor(hash).get(key, hash);
891 >        Segment<K,V> s; // manually integrate access methods to reduce overhead
892 >        HashEntry<K,V>[] tab;
893 >        int h = hash(key.hashCode());
894 >        long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
895 >        if ((s = (Segment<K,V>)UNSAFE.getObjectVolatile(segments, u)) != null &&
896 >            (tab = s.table) != null) {
897 >            for (HashEntry<K,V> e = (HashEntry<K,V>) UNSAFE.getObjectVolatile
898 >                     (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
899 >                 e != null; e = e.next) {
900 >                K k;
901 >                if ((k = e.key) == key || (e.hash == h && key.equals(k)))
902 >                    return e.value;
903 >            }
904 >        }
905 >        return null;
906      }
907  
908      /**
# Line 777 | Line 914 | public class ConcurrentHashMap<K, V> ext
914       *         <tt>equals</tt> method; <tt>false</tt> otherwise.
915       * @throws NullPointerException if the specified key is null
916       */
917 +    @SuppressWarnings("unchecked")
918      public boolean containsKey(Object key) {
919 <        int hash = hash(key.hashCode());
920 <        return segmentFor(hash).containsKey(key, hash);
919 >        Segment<K,V> s; // same as get() except no need for volatile value read
920 >        HashEntry<K,V>[] tab;
921 >        int h = hash(key.hashCode());
922 >        long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
923 >        if ((s = (Segment<K,V>)UNSAFE.getObjectVolatile(segments, u)) != null &&
924 >            (tab = s.table) != null) {
925 >            for (HashEntry<K,V> e = (HashEntry<K,V>) UNSAFE.getObjectVolatile
926 >                     (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
927 >                 e != null; e = e.next) {
928 >                K k;
929 >                if ((k = e.key) == key || (e.hash == h && key.equals(k)))
930 >                    return true;
931 >            }
932 >        }
933 >        return false;
934      }
935  
936      /**
# Line 794 | Line 945 | public class ConcurrentHashMap<K, V> ext
945       * @throws NullPointerException if the specified value is null
946       */
947      public boolean containsValue(Object value) {
948 +        // Same idea as size()
949          if (value == null)
950              throw new NullPointerException();
799
800        // See explanation of modCount use above
801
951          final Segment<K,V>[] segments = this.segments;
803        int[] mc = new int[segments.length];
804
805        // Try a few times without locking
806        for (int k = 0; k < RETRIES_BEFORE_LOCK; ++k) {
807            int sum = 0;
808            int mcsum = 0;
809            for (int i = 0; i < segments.length; ++i) {
810                int c = segments[i].count;
811                mcsum += mc[i] = segments[i].modCount;
812                if (segments[i].containsValue(value))
813                    return true;
814            }
815            boolean cleanSweep = true;
816            if (mcsum != 0) {
817                for (int i = 0; i < segments.length; ++i) {
818                    int c = segments[i].count;
819                    if (mc[i] != segments[i].modCount) {
820                        cleanSweep = false;
821                        break;
822                    }
823                }
824            }
825            if (cleanSweep)
826                return false;
827        }
828        // Resort to locking all segments
829        for (int i = 0; i < segments.length; ++i)
830            segments[i].lock();
952          boolean found = false;
953 +        long last = 0L;   // previous sum
954 +        int retries = -1;
955          try {
956 <            for (int i = 0; i < segments.length; ++i) {
957 <                if (segments[i].containsValue(value)) {
958 <                    found = true;
959 <                    break;
956 >            outer: for (;;) {
957 >                if (retries++ == RETRIES_BEFORE_LOCK) {
958 >                    for (int j = 0; j < segments.length; ++j)
959 >                        ensureSegment(j).lock(); // force creation
960                  }
961 +                long sum = 0L;
962 +                for (int j = 0; j < segments.length; ++j) {
963 +                    HashEntry<K,V>[] tab;
964 +                    Segment<K,V> seg = segmentAt(segments, j);
965 +                    if (seg != null && (tab = seg.table) != null) {
966 +                        for (int i = 0 ; i < tab.length; i++) {
967 +                            HashEntry<K,V> e;
968 +                            for (e = entryAt(tab, i); e != null; e = e.next) {
969 +                                V v = e.value;
970 +                                if (v != null && value.equals(v)) {
971 +                                    found = true;
972 +                                    break outer;
973 +                                }
974 +                            }
975 +                        }
976 +                        sum += seg.modCount;
977 +                    }
978 +                }
979 +                if (retries > 0 && sum == last)
980 +                    break;
981 +                last = sum;
982              }
983          } finally {
984 <            for (int i = 0; i < segments.length; ++i)
985 <                segments[i].unlock();
984 >            if (retries > RETRIES_BEFORE_LOCK) {
985 >                for (int j = 0; j < segments.length; ++j)
986 >                    segmentAt(segments, j).unlock();
987 >            }
988          }
989          return found;
990      }
# Line 850 | Line 996 | public class ConcurrentHashMap<K, V> ext
996       * full compatibility with class {@link java.util.Hashtable},
997       * which supported this method prior to introduction of the
998       * Java Collections framework.
999 <
999 >     *
1000       * @param  value a value to search for
1001       * @return <tt>true</tt> if and only if some key maps to the
1002       *         <tt>value</tt> argument in this table as
# Line 875 | Line 1021 | public class ConcurrentHashMap<K, V> ext
1021       *         <tt>null</tt> if there was no mapping for <tt>key</tt>
1022       * @throws NullPointerException if the specified key or value is null
1023       */
1024 +    @SuppressWarnings("unchecked")
1025      public V put(K key, V value) {
1026 +        Segment<K,V> s;
1027          if (value == null)
1028              throw new NullPointerException();
1029          int hash = hash(key.hashCode());
1030 <        return segmentFor(hash).put(key, hash, value, false);
1030 >        int j = (hash >>> segmentShift) & segmentMask;
1031 >        if ((s = (Segment<K,V>)UNSAFE.getObject          // nonvolatile; recheck
1032 >             (segments, (j << SSHIFT) + SBASE)) == null) //  in ensureSegment
1033 >            s = ensureSegment(j);
1034 >        return s.put(key, hash, value, false);
1035      }
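
A brief usage example of the public operations discussed here (values are illustrative only):

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    public class UsageDemo {
        public static void main(String[] args) {
            ConcurrentMap<String, Integer> map = new ConcurrentHashMap<String, Integer>();
            map.put("a", 1);                  // plain put; replaces any previous value
            map.putIfAbsent("a", 2);          // no-op: "a" is already mapped to 1
            map.replace("a", 1, 3);           // conditional replace succeeds -> 3
            map.remove("a", 99);              // value mismatch; nothing is removed
            System.out.println(map.get("a")); // prints 3
        }
    }
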
1036  
1037      /**
# Line 889 | Line 1041 | public class ConcurrentHashMap<K, V> ext
1041       *         or <tt>null</tt> if there was no mapping for the key
1042       * @throws NullPointerException if the specified key or value is null
1043       */
1044 +    @SuppressWarnings("unchecked")
1045      public V putIfAbsent(K key, V value) {
1046 +        Segment<K,V> s;
1047          if (value == null)
1048              throw new NullPointerException();
1049          int hash = hash(key.hashCode());
1050 <        return segmentFor(hash).put(key, hash, value, true);
1050 >        int j = (hash >>> segmentShift) & segmentMask;
1051 >        if ((s = (Segment<K,V>)UNSAFE.getObject
1052 >             (segments, (j << SSHIFT) + SBASE)) == null)
1053 >            s = ensureSegment(j);
1054 >        return s.put(key, hash, value, true);
1055      }
1056  
1057      /**
# Line 918 | Line 1076 | public class ConcurrentHashMap<K, V> ext
1076       * @throws NullPointerException if the specified key is null
1077       */
1078      public V remove(Object key) {
1079 <        int hash = hash(key.hashCode());
1080 <        return segmentFor(hash).remove(key, hash, null);
1079 >        int hash = hash(key.hashCode());
1080 >        Segment<K,V> s = segmentForHash(hash);
1081 >        return s == null ? null : s.remove(key, hash, null);
1082      }
1083  
1084      /**
# Line 929 | Line 1088 | public class ConcurrentHashMap<K, V> ext
1088       */
1089      public boolean remove(Object key, Object value) {
1090          int hash = hash(key.hashCode());
1091 <        if (value == null)
1092 <            return false;
1093 <        return segmentFor(hash).remove(key, hash, value) != null;
1091 >        Segment<K,V> s;
1092 >        return value != null && (s = segmentForHash(hash)) != null &&
1093 >            s.remove(key, hash, value) != null;
1094      }
1095  
1096      /**
# Line 940 | Line 1099 | public class ConcurrentHashMap<K, V> ext
1099       * @throws NullPointerException if any of the arguments are null
1100       */
1101      public boolean replace(K key, V oldValue, V newValue) {
1102 +        int hash = hash(key.hashCode());
1103          if (oldValue == null || newValue == null)
1104              throw new NullPointerException();
1105 <        int hash = hash(key.hashCode());
1106 <        return segmentFor(hash).replace(key, hash, oldValue, newValue);
1105 >        Segment<K,V> s = segmentForHash(hash);
1106 >        return s != null && s.replace(key, hash, oldValue, newValue);
1107      }
1108  
1109      /**
# Line 954 | Line 1114 | public class ConcurrentHashMap<K, V> ext
1114       * @throws NullPointerException if the specified key or value is null
1115       */
1116      public V replace(K key, V value) {
1117 +        int hash = hash(key.hashCode());
1118          if (value == null)
1119              throw new NullPointerException();
1120 <        int hash = hash(key.hashCode());
1121 <        return segmentFor(hash).replace(key, hash, value);
1120 >        Segment<K,V> s = segmentForHash(hash);
1121 >        return s == null ? null : s.replace(key, hash, value);
1122      }
1123  
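The remove() and replace() variants above all share one shape: locate the segment for the hash; if that segment has never been constructed, no key hashing to it was ever inserted, so the operation can return immediately. A stand-alone toy illustration, not part of the patch (HashMap stands in for Segment, and the shift/mask values are assumed defaults):

    import java.util.HashMap;
    import java.util.Map;

    public class LazySegmentsDemo {
        @SuppressWarnings("unchecked")
        private final Map<String,String>[] segments = new HashMap[16];

        String remove(String key) {
            int j = (key.hashCode() >>> 28) & 15;      // segment index from high bits
            Map<String,String> s = segments[j];
            return (s == null) ? null : s.remove(key); // null segment => no mapping
        }
    }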
1124      /**
1125       * Removes all of the mappings from this map.
1126       */
1127      public void clear() {
1128 <        for (int i = 0; i < segments.length; ++i)
1129 <            segments[i].clear();
1128 >        final Segment<K,V>[] segments = this.segments;
1129 >        for (int j = 0; j < segments.length; ++j) {
1130 >            Segment<K,V> s = segmentAt(segments, j);
1131 >            if (s != null)
1132 >                s.clear();
1133 >        }
1134      }
1135  
1136      /**
# Line 1035 | Line 1200 | public class ConcurrentHashMap<K, V> ext
1200       * Returns an enumeration of the keys in this table.
1201       *
1202       * @return an enumeration of the keys in this table
1203 <     * @see #keySet
1203 >     * @see #keySet()
1204       */
1205      public Enumeration<K> keys() {
1206          return new KeyIterator();
# Line 1045 | Line 1210 | public class ConcurrentHashMap<K, V> ext
1210       * Returns an enumeration of the values in this table.
1211       *
1212       * @return an enumeration of the values in this table
1213 <     * @see #values
1213 >     * @see #values()
1214       */
1215      public Enumeration<V> elements() {
1216          return new ValueIterator();
# Line 1066 | Line 1231 | public class ConcurrentHashMap<K, V> ext
1231              advance();
1232          }
1233  
1234 <        public boolean hasMoreElements() { return hasNext(); }
1235 <
1234 >        /**
1235 >         * Sets nextEntry to first node of next non-empty table
1236 >         * (in backwards order, to simplify checks).
1237 >         */
1238          final void advance() {
1239 <            if (nextEntry != null && (nextEntry = nextEntry.next) != null)
1240 <                return;
1241 <
1242 <            while (nextTableIndex >= 0) {
1243 <                if ( (nextEntry = currentTable[nextTableIndex--]) != null)
1244 <                    return;
1245 <            }
1246 <
1247 <            while (nextSegmentIndex >= 0) {
1248 <                Segment<K,V> seg = segments[nextSegmentIndex--];
1082 <                if (seg.count != 0) {
1083 <                    currentTable = seg.table;
1084 <                    for (int j = currentTable.length - 1; j >= 0; --j) {
1085 <                        if ( (nextEntry = currentTable[j]) != null) {
1086 <                            nextTableIndex = j - 1;
1087 <                            return;
1088 <                        }
1089 <                    }
1239 >            for (;;) {
1240 >                if (nextTableIndex >= 0) {
1241 >                    if ((nextEntry = entryAt(currentTable,
1242 >                                             nextTableIndex--)) != null)
1243 >                        break;
1244 >                }
1245 >                else if (nextSegmentIndex >= 0) {
1246 >                    Segment<K,V> seg = segmentAt(segments, nextSegmentIndex--);
1247 >                    if (seg != null && (currentTable = seg.table) != null)
1248 >                        nextTableIndex = currentTable.length - 1;
1249                  }
1250 +                else
1251 +                    break;
1252              }
1253          }
1254  
1255 <        public boolean hasNext() { return nextEntry != null; }
1256 <
1257 <        HashEntry<K,V> nextEntry() {
1097 <            if (nextEntry == null)
1255 >        final HashEntry<K,V> nextEntry() {
1256 >            HashEntry<K,V> e = nextEntry;
1257 >            if (e == null)
1258                  throw new NoSuchElementException();
1259 <            lastReturned = nextEntry;
1260 <            advance();
1261 <            return lastReturned;
1259 >            lastReturned = e; // cannot assign until after null check
1260 >            if ((nextEntry = e.next) == null)
1261 >                advance();
1262 >            return e;
1263          }
1264  
1265 <        public void remove() {
1265 >        public final boolean hasNext() { return nextEntry != null; }
1266 >        public final boolean hasMoreElements() { return nextEntry != null; }
1267 >
1268 >        public final void remove() {
1269              if (lastReturned == null)
1270                  throw new IllegalStateException();
1271              ConcurrentHashMap.this.remove(lastReturned.key);
# Line 1110 | Line 1274 | public class ConcurrentHashMap<K, V> ext
1274      }
1275  
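Illustrative sketch, not part of the patch, of the traversal order described in the advance() comment above: segments are visited from the highest index down, and within each segment's table the buckets are also scanned backwards, stopping at the first non-null chain. Node is a stand-in for HashEntry.

    public class IterationOrderDemo {
        static final class Node {
            final Object key, value;
            Node next;
            Node(Object k, Object v) { key = k; value = v; }
        }

        static Node firstNonNull(Node[][] tables) {
            for (int seg = tables.length - 1; seg >= 0; --seg) {
                Node[] tab = tables[seg];
                if (tab == null)
                    continue;                 // lazily constructed segment, skip
                for (int i = tab.length - 1; i >= 0; --i) {
                    Node e = tab[i];
                    if (e != null)
                        return e;             // head of the next chain to walk
                }
            }
            return null;                      // nothing left to iterate
        }
    }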
1276      final class KeyIterator
1277 <        extends HashIterator
1278 <        implements Iterator<K>, Enumeration<K>
1277 >        extends HashIterator
1278 >        implements Iterator<K>, Enumeration<K>
1279      {
1280 <        public K next()        { return super.nextEntry().key; }
1281 <        public K nextElement() { return super.nextEntry().key; }
1280 >        public final K next()        { return super.nextEntry().key; }
1281 >        public final K nextElement() { return super.nextEntry().key; }
1282      }
1283  
1284      final class ValueIterator
1285 <        extends HashIterator
1286 <        implements Iterator<V>, Enumeration<V>
1285 >        extends HashIterator
1286 >        implements Iterator<V>, Enumeration<V>
1287      {
1288 <        public V next()        { return super.nextEntry().value; }
1289 <        public V nextElement() { return super.nextEntry().value; }
1288 >        public final V next()        { return super.nextEntry().value; }
1289 >        public final V nextElement() { return super.nextEntry().value; }
1290      }
1291  
1292      /**
# Line 1130 | Line 1294 | public class ConcurrentHashMap<K, V> ext
1294       * setValue changes to the underlying map.
1295       */
1296      final class WriteThroughEntry
1297 <        extends AbstractMap.SimpleEntry<K,V>
1297 >        extends AbstractMap.SimpleEntry<K,V>
1298      {
1299          WriteThroughEntry(K k, V v) {
1300              super(k,v);
1301          }
1302  
1303          /**
1304 <         * Set our entry's value and write through to the map. The
1304 >         * Sets our entry's value and writes through to the map. The
1305           * value to return is somewhat arbitrary here. Since a
1306           * WriteThroughEntry does not necessarily track asynchronous
1307           * changes, the most recent "previous" value could be
# Line 1145 | Line 1309 | public class ConcurrentHashMap<K, V> ext
1309           * removed in which case the put will re-establish). We do not
1310           * and cannot guarantee more.
1311           */
1312 <        public V setValue(V value) {
1312 >        public V setValue(V value) {
1313              if (value == null) throw new NullPointerException();
1314              V v = super.setValue(value);
1315              ConcurrentHashMap.this.put(getKey(), value);
# Line 1154 | Line 1318 | public class ConcurrentHashMap<K, V> ext
1318      }
1319  
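Usage sketch, not part of the patch: per the WriteThroughEntry contract above, calling setValue on an entry obtained from entrySet() writes through to the underlying map.

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public class WriteThroughDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String,Integer> map =
                new ConcurrentHashMap<String,Integer>();
            map.put("hits", 1);
            for (Map.Entry<String,Integer> e : map.entrySet()) {
                if (e.getKey().equals("hits"))
                    e.setValue(2);                 // re-puts ("hits", 2) into map
            }
            System.out.println(map.get("hits"));   // prints 2
        }
    }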
1320      final class EntryIterator
1321 <        extends HashIterator
1322 <        implements Iterator<Entry<K,V>>
1321 >        extends HashIterator
1322 >        implements Iterator<Entry<K,V>>
1323      {
1324          public Map.Entry<K,V> next() {
1325              HashEntry<K,V> e = super.nextEntry();
# Line 1170 | Line 1334 | public class ConcurrentHashMap<K, V> ext
1334          public int size() {
1335              return ConcurrentHashMap.this.size();
1336          }
1337 +        public boolean isEmpty() {
1338 +            return ConcurrentHashMap.this.isEmpty();
1339 +        }
1340          public boolean contains(Object o) {
1341              return ConcurrentHashMap.this.containsKey(o);
1342          }
# Line 1188 | Line 1355 | public class ConcurrentHashMap<K, V> ext
1355          public int size() {
1356              return ConcurrentHashMap.this.size();
1357          }
1358 +        public boolean isEmpty() {
1359 +            return ConcurrentHashMap.this.isEmpty();
1360 +        }
1361          public boolean contains(Object o) {
1362              return ConcurrentHashMap.this.containsValue(o);
1363          }
# Line 1216 | Line 1386 | public class ConcurrentHashMap<K, V> ext
1386          public int size() {
1387              return ConcurrentHashMap.this.size();
1388          }
1389 +        public boolean isEmpty() {
1390 +            return ConcurrentHashMap.this.isEmpty();
1391 +        }
1392          public void clear() {
1393              ConcurrentHashMap.this.clear();
1394          }
# Line 1224 | Line 1397 | public class ConcurrentHashMap<K, V> ext
1397      /* ---------------- Serialization Support -------------- */
1398  
1399      /**
1400 <     * Save the state of the <tt>ConcurrentHashMap</tt> instance to a
1401 <     * stream (i.e., serialize it).
1400 >     * Saves the state of the <tt>ConcurrentHashMap</tt> instance to a
1401 >     * stream (i.e., serializes it).
1402       * @param s the stream
1403       * @serialData
1404       * the key (Object) and value (Object)
1405       * for each key-value mapping, followed by a null pair.
1406       * The key-value mappings are emitted in no particular order.
1407       */
1408 <    private void writeObject(java.io.ObjectOutputStream s) throws IOException  {
1408 >    private void writeObject(java.io.ObjectOutputStream s) throws IOException {
1409 >        // force all segments for serialization compatibility
1410 >        for (int k = 0; k < segments.length; ++k)
1411 >            ensureSegment(k);
1412          s.defaultWriteObject();
1413  
1414 +        final Segment<K,V>[] segments = this.segments;
1415          for (int k = 0; k < segments.length; ++k) {
1416 <            Segment<K,V> seg = segments[k];
1416 >            Segment<K,V> seg = segmentAt(segments, k);
1417              seg.lock();
1418              try {
1419                  HashEntry<K,V>[] tab = seg.table;
1420                  for (int i = 0; i < tab.length; ++i) {
1421 <                    for (HashEntry<K,V> e = tab[i]; e != null; e = e.next) {
1421 >                    HashEntry<K,V> e;
1422 >                    for (e = entryAt(tab, i); e != null; e = e.next) {
1423                          s.writeObject(e.key);
1424                          s.writeObject(e.value);
1425                      }
# Line 1255 | Line 1433 | public class ConcurrentHashMap<K, V> ext
1433      }
1434  
1435      /**
1436 <     * Reconstitute the <tt>ConcurrentHashMap</tt> instance from a
1437 <     * stream (i.e., deserialize it).
1436 >     * Reconstitutes the <tt>ConcurrentHashMap</tt> instance from a
1437 >     * stream (i.e., deserializes it).
1438       * @param s the stream
1439       */
1440 +    @SuppressWarnings("unchecked")
1441      private void readObject(java.io.ObjectInputStream s)
1442 <        throws IOException, ClassNotFoundException  {
1442 >        throws IOException, ClassNotFoundException {
1443          s.defaultReadObject();
1444  
1445 <        // Initialize each segment to be minimally sized, and let grow.
1446 <        for (int i = 0; i < segments.length; ++i) {
1447 <            segments[i].setTable(new HashEntry[1]);
1445 >        // Re-initialize segments to be minimally sized, and let grow.
1446 >        int cap = MIN_SEGMENT_TABLE_CAPACITY;
1447 >        final Segment<K,V>[] segments = this.segments;
1448 >        for (int k = 0; k < segments.length; ++k) {
1449 >            Segment<K,V> seg = segments[k];
1450 >            if (seg != null) {
1451 >                seg.threshold = (int)(cap * seg.loadFactor);
1452 >                seg.table = (HashEntry<K,V>[]) new HashEntry[cap];
1453 >            }
1454          }
1455  
1456          // Read the keys and values, and put the mappings in the table
# Line 1277 | Line 1462 | public class ConcurrentHashMap<K, V> ext
1462              put(key, value);
1463          }
1464      }
1465 +
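Sketch, not part of the patch, of the serialized form documented above: after the default field data, the stream carries (key, value) object pairs terminated by a null pair. The read loop below mirrors the shape of readObject(); the class and method names are invented for illustration.

    import java.io.IOException;
    import java.io.ObjectInputStream;
    import java.util.HashMap;
    import java.util.Map;

    public class PairStreamSketch {
        static Map<Object,Object> readPairs(ObjectInputStream s)
                throws IOException, ClassNotFoundException {
            Map<Object,Object> m = new HashMap<Object,Object>();
            for (;;) {
                Object key = s.readObject();
                Object value = s.readObject();
                if (key == null)          // the terminating null pair
                    break;
                m.put(key, value);
            }
            return m;
        }
    }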
1466 +    // Unsafe mechanics
1467 +    private static final sun.misc.Unsafe UNSAFE;
1468 +    private static final long SBASE;
1469 +    private static final int SSHIFT;
1470 +    private static final long TBASE;
1471 +    private static final int TSHIFT;
1472 +
1473 +    static {
1474 +        int ss, ts;
1475 +        try {
1476 +            UNSAFE = sun.misc.Unsafe.getUnsafe();
1477 +            Class tc = HashEntry[].class;
1478 +            Class sc = Segment[].class;
1479 +            TBASE = UNSAFE.arrayBaseOffset(tc);
1480 +            SBASE = UNSAFE.arrayBaseOffset(sc);
1481 +            ts = UNSAFE.arrayIndexScale(tc);
1482 +            ss = UNSAFE.arrayIndexScale(sc);
1483 +        } catch (Exception e) {
1484 +            throw new Error(e);
1485 +        }
1486 +        if ((ss & (ss-1)) != 0 || (ts & (ts-1)) != 0)
1487 +            throw new Error("data type scale not a power of two");
1488 +        SSHIFT = 31 - Integer.numberOfLeadingZeros(ss);
1489 +        TSHIFT = 31 - Integer.numberOfLeadingZeros(ts);
1490 +    }
1491 +
1492   }
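Illustrative sketch, not part of the patch: why the static initializer above derives SSHIFT and TSHIFT from the array index scale. When the scale is a power of two (which the initializer verifies), base + (i << shift) equals base + i * scale, so element offsets can be computed with a shift. The base and scale values below are assumed examples, not the real platform constants.

    public class OffsetMathDemo {
        public static void main(String[] args) {
            long base = 16L;                       // example array base offset
            int scale = 4;                         // example index scale (power of two)
            int shift = 31 - Integer.numberOfLeadingZeros(scale);  // == 2 here
            for (int i = 0; i < 4; i++) {
                long byShift = base + ((long) i << shift);
                long byScale = base + (long) i * scale;
                System.out.println(byShift == byScale);  // always true
            }
        }
    }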
