
Comparing jsr166/src/main/java/util/concurrent/ConcurrentHashMap.java (file contents):
Revision 1.98 by jsr166, Tue Mar 15 19:47:03 2011 UTC vs.
Revision 1.99 by dl, Tue Apr 12 22:52:07 2011 UTC

# Line 1 | Line 1
1   /*
2   * Written by Doug Lea with assistance from members of JCP JSR-166
3   * Expert Group and released to the public domain, as explained at
4 < * http://creativecommons.org/publicdomain/zero/1.0/
4 > * http://creativecommons.org/publicdomain/zero/1.0/
5   */
6  
7   package java.util.concurrent;
# Line 76 | Line 76 | public class ConcurrentHashMap<K, V> ext
76  
77      /*
78       * The basic strategy is to subdivide the table among Segments,
79 <     * each of which itself is a concurrently readable hash table.
79 >     * each of which itself is a concurrently readable hash table.  To
80 >     * reduce footprint, all but one segments are constructed only
81 >     * when first needed (see ensureSegment). To maintain visibility
82 >     * in the presence of lazy construction, accesses to segments as
83 >     * well as elements of segment's table must use volatile access,
84 >     * which is done via Unsafe within methods segmentAt etc
85 >     * below. These provide the functionality of AtomicReferenceArrays
86 >     * but reduce the levels of indirection. Additionally,
87 >     * volatile-writes of table elements and entry "next" fields
88 >     * within locked operations use the cheaper "lazySet" forms of
89 >     * writes (via putOrderedObject) because these writes are always
90 >     * followed by lock releases that maintain sequential consistency
91 >     * of table updates.
92 >     *
93 >     * Historical note: The previous version of this class relied
94 >     * heavily on "final" fields, which avoided some volatile reads at
95 >     * the expense of a large initial footprint.  Some remnants of
96 >     * that design (including forced construction of segment 0) exist
97 >     * to ensure serialization compatibility.
98       */
99  
100      /* ---------------- Constants -------------- */
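A minimal sketch (illustrative only, not part of the patch) of the AtomicReferenceArray form that the strategy comment above contrasts with: it gives the same volatile-read / ordered-write element semantics that entryAt and setEntryAt below obtain via Unsafe, at the cost of one extra level of indirection through the wrapper object.

import java.util.concurrent.atomic.AtomicReferenceArray;

// Hypothetical wrapper class, named here only for illustration.
class TableCells<E> {
    final AtomicReferenceArray<E> cells;   // extra object + pointer hop vs. a raw E[]

    TableCells(int capacity) { cells = new AtomicReferenceArray<E>(capacity); }

    E getVolatile(int i)        { return cells.get(i); }      // like UNSAFE.getObjectVolatile on tab[i]
    void putOrdered(int i, E e) { cells.lazySet(i, e); }      // like UNSAFE.putOrderedObject on tab[i]
}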
# Line 108 | Line 126 | public class ConcurrentHashMap<K, V> ext
126      static final int MAXIMUM_CAPACITY = 1 << 30;
127  
128      /**
129 +     * The minimum capacity for per-segment tables.  Must be a power
130 +     * of two, at least two to avoid immediate resizing on next use
131 +     * after lazy construction.
132 +     */
133 +    static final int MIN_SEGMENT_TABLE_CAPACITY = 2;
134 +
135 +    /**
136       * The maximum number of segments to allow; used to bound
137 <     * constructor arguments.
137 >     * constructor arguments. Must be a power of two less than 1 << 24.
138       */
139      static final int MAX_SEGMENTS = 1 << 16; // slightly conservative
140  
# Line 135 | Line 160 | public class ConcurrentHashMap<K, V> ext
160      final int segmentShift;
161  
162      /**
163 <     * The segments, each of which is a specialized hash table
163 >     * The segments, each of which is a specialized hash table.
164       */
165      final Segment<K,V>[] segments;
166  
# Line 143 | Line 168 | public class ConcurrentHashMap<K, V> ext
168      transient Set<Map.Entry<K,V>> entrySet;
169      transient Collection<V> values;
170  
171 <    /* ---------------- Small Utilities -------------- */
171 >    /**
172 >     * ConcurrentHashMap list entry. Note that this is never exported
173 >     * out as a user-visible Map.Entry.
174 >     */
175 >    static final class HashEntry<K,V> {
176 >        final int hash;
177 >        final K key;
178 >        volatile V value;
179 >        volatile HashEntry<K,V> next;
180 >
181 >        HashEntry(int hash, K key, V value, HashEntry<K,V> next) {
182 >            this.hash = hash;
183 >            this.key = key;
184 >            this.value = value;
185 >            this.next = next;
186 >        }
187 >
188 >        /**
189 >         * Sets next field with volatile write semantics.  (See above
190 >         * about use of putOrderedObject.)
191 >         */
192 >        final void setNext(HashEntry<K,V> n) {
193 >            UNSAFE.putOrderedObject(this, nextOffset, n);
194 >        }
195 >
196 >        // Unsafe mechanics
197 >        static final sun.misc.Unsafe UNSAFE;
198 >        static final long nextOffset;
199 >        static {
200 >            try {
201 >                UNSAFE = sun.misc.Unsafe.getUnsafe();
202 >                Class k = HashEntry.class;
203 >                nextOffset = UNSAFE.objectFieldOffset
204 >                    (k.getDeclaredField("next"));
205 >            } catch (Exception e) {
206 >                throw new Error(e);
207 >            }
208 >        }
209 >    }
210 >
211 >    /**
212 >     * Gets the ith element of given table (if nonnull) with volatile
213 >     * read semantics.
214 >     */
215 >    @SuppressWarnings("unchecked")
216 >    static final <K,V> HashEntry<K,V> entryAt(HashEntry<K,V>[] tab, int i) {
217 >        return tab == null? null :
218 >            (HashEntry<K,V>) UNSAFE.getObjectVolatile
219 >            (tab, ((long)i << TSHIFT) + TBASE);
220 >    }
221 >
222 >    /**
223 >     * Sets the ith element of given table, with volatile write
224 >     * semantics. (See above about use of putOrderedObject.)
225 >     */
226 >    static final <K,V> void setEntryAt(HashEntry<K,V>[] tab, int i,
227 >                                       HashEntry<K,V> e) {
228 >        UNSAFE.putOrderedObject(tab, ((long)i << TSHIFT) + TBASE, e);
229 >    }
230  
231      /**
232       * Applies a supplemental hash function to a given hashCode, which
# Line 164 | Line 247 | public class ConcurrentHashMap<K, V> ext
247      }
248  
249      /**
167     * Returns the segment that should be used for key with given hash
168     * @param hash the hash code for the key
169     * @return the segment
170     */
171    final Segment<K,V> segmentFor(int hash) {
172        return segments[(hash >>> segmentShift) & segmentMask];
173    }
174
175    /* ---------------- Inner Classes -------------- */
176
177    /**
178     * ConcurrentHashMap list entry. Note that this is never exported
179     * out as a user-visible Map.Entry.
180     *
181     * Because the value field is volatile, not final, it is legal wrt
182     * the Java Memory Model for an unsynchronized reader to see null
183     * instead of initial value when read via a data race.  Although a
184     * reordering leading to this is not likely to ever actually
185     * occur, the Segment.readValueUnderLock method is used as a
186     * backup in case a null (pre-initialized) value is ever seen in
187     * an unsynchronized access method.
188     */
189    static final class HashEntry<K,V> {
190        final K key;
191        final int hash;
192        volatile V value;
193        final HashEntry<K,V> next;
194
195        HashEntry(K key, int hash, HashEntry<K,V> next, V value) {
196            this.key = key;
197            this.hash = hash;
198            this.next = next;
199            this.value = value;
200        }
201
202        @SuppressWarnings("unchecked")
203        static final <K,V> HashEntry<K,V>[] newArray(int i) {
204            return new HashEntry[i];
205        }
206    }
207
208    /**
250       * Segments are specialized versions of hash tables.  This
251       * subclasses from ReentrantLock opportunistically, just to
252       * simplify some locking and avoid separate construction.
253       */
254      static final class Segment<K,V> extends ReentrantLock implements Serializable {
255          /*
256 <         * Segments maintain a table of entry lists that are ALWAYS
257 <         * kept in a consistent state, so can be read without locking.
258 <         * Next fields of nodes are immutable (final).  All list
259 <         * additions are performed at the front of each bin. This
260 <         * makes it easy to check changes, and also fast to traverse.
261 <         * When nodes would otherwise be changed, new nodes are
221 <         * created to replace them. This works well for hash tables
222 <         * since the bin lists tend to be short. (The average length
223 <         * is less than two for the default load factor threshold.)
224 <         *
225 <         * Read operations can thus proceed without locking, but rely
226 <         * on selected uses of volatiles to ensure that completed
227 <         * write operations performed by other threads are
228 <         * noticed. For most purposes, the "count" field, tracking the
229 <         * number of elements, serves as that volatile variable
230 <         * ensuring visibility.  This is convenient because this field
231 <         * needs to be read in many read operations anyway:
232 <         *
233 <         *   - All (unsynchronized) read operations must first read the
234 <         *     "count" field, and should not look at table entries if
235 <         *     it is 0.
236 <         *
237 <         *   - All (synchronized) write operations should write to
238 <         *     the "count" field after structurally changing any bin.
239 <         *     The operations must not take any action that could even
240 <         *     momentarily cause a concurrent read operation to see
241 <         *     inconsistent data. This is made easier by the nature of
242 <         *     the read operations in Map. For example, no operation
243 <         *     can reveal that the table has grown but the threshold
244 <         *     has not yet been updated, so there are no atomicity
245 <         *     requirements for this with respect to reads.
256 >         * Segments maintain a table of entry lists that are always
257 >         * kept in a consistent state, so can be read (via volatile
258 >         * reads of segments and tables) without locking.  This
259 >         * requires replicating nodes when necessary during table
260 >         * resizing, so the old lists can be traversed by readers
261 >         * still using old version of table.
262           *
263 <         * As a guide, all critical volatile reads and writes to the
264 <         * count field are marked in code comments.
263 >         * This class defines only mutative methods requiring locking.
264 >         * Except as noted, the methods of this class perform the
265 >         * per-segment versions of ConcurrentHashMap methods.  (Other
266 >         * methods are integrated directly into ConcurrentHashMap
267 >         * methods.) These mutative methods use a form of controlled
268 >         * spinning on contention via methods scanAndLock and
269 >         * scanAndLockForPut. These intersperse tryLocks with
270 >         * traversals to locate nodes.  The main benefit is to absorb
271 >         * cache misses (which are very common for hash tables) while
272 >         * obtaining locks so that traversal is faster once
273 >         * acquired. We do not actually use the found nodes since they
274 >         * must be re-acquired under lock anyway to ensure sequential
275 >         * consistency of updates (and in any case may be undetectably
276 >         * stale), but they will normally be much faster to re-locate.
277 >         * Also, scanAndLockForPut speculatively creates a fresh node
278 >         * to use in put if no node is found.
279           */
280  
281          private static final long serialVersionUID = 2249069246763182397L;
282  
283          /**
284 <         * The number of elements in this segment's region.
284 >         * The maximum number of times to tryLock in a prescan before
285 >         * possibly blocking on acquire in preparation for a locked
286 >         * segment operation. On multiprocessors, using a bounded
287 >         * number of retries maintains cache acquired while locating
288 >         * nodes.
289           */
290 <        transient volatile int count;
290 >        static final int MAX_SCAN_RETRIES =
291 >            Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1;
292  
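The following is a stripped-down sketch (not part of the patch; class and method names are illustrative) of the bounded spin-then-block acquisition pattern that MAX_SCAN_RETRIES governs; the real scanAndLock and scanAndLockForPut below additionally traverse the bin list between tryLock attempts so the relevant cache lines are already loaded once the lock is held.

import java.util.concurrent.locks.ReentrantLock;

class SpinThenBlock {
    static final int MAX_SCAN_RETRIES =
        Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1;

    // Returns with the lock held, as scanAndLock/scanAndLockForPut guarantee.
    static void acquire(ReentrantLock lock) {
        int retries = 0;
        while (!lock.tryLock()) {          // cheap, non-blocking attempts first
            // ... the real code walks the hash bin here to warm caches ...
            if (++retries > MAX_SCAN_RETRIES) {
                lock.lock();               // stop spinning and block
                break;
            }
        }
    }
}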
293          /**
294 <         * Number of updates that alter the size of the table. This is
295 <         * used during bulk-read methods to make sure they see a
296 <         * consistent snapshot: If modCounts change during a traversal
297 <         * of segments computing size or checking containsValue, then
298 <         * we might have an inconsistent view of state so (usually)
299 <         * must retry.
294 >         * The per-segment table. Elements are accessed via
295 >         * entryAt/setEntryAt providing volatile semantics.
296 >         */
297 >        transient volatile HashEntry<K,V>[] table;
298 >
299 >        /**
300 >         * The number of elements. Accessed only either within locks
301 >         * or among other volatile reads that maintain visibility.
302 >         */
303 >        transient int count;
304 >
305 >        /**
306 >         * The total number of insertions and removals in this
307 >         * segment.  Even though this may overflow 32 bits, it
308 >         * provides sufficient accuracy for stability checks in CHM
309 >         * isEmpty() and size() methods.  Accessed only either within
310 >         * locks or among other volatile reads that maintain
311 >         * visibility.
312           */
313          transient int modCount;
314  
# Line 273 | Line 320 | public class ConcurrentHashMap<K, V> ext
320          transient int threshold;
321  
322          /**
276         * The per-segment table.
277         */
278        transient volatile HashEntry<K,V>[] table;
279
280        /**
323           * The load factor for the hash table.  Even though this value
324           * is same for all segments, it is replicated to avoid needing
325           * links to outer object.
# Line 285 | Line 327 | public class ConcurrentHashMap<K, V> ext
327           */
328          final float loadFactor;
329  
330 <        Segment(int initialCapacity, float lf) {
331 <            loadFactor = lf;
332 <            setTable(HashEntry.<K,V>newArray(initialCapacity));
333 <        }
292 <
293 <        @SuppressWarnings("unchecked")
294 <        static final <K,V> Segment<K,V>[] newArray(int i) {
295 <            return new Segment[i];
330 >        Segment(float lf, int threshold, HashEntry<K,V>[] tab) {
331 >            this.loadFactor = lf;
332 >            this.threshold = threshold;
333 >            this.table = tab;
334          }
335  
336 <        /**
337 <         * Sets table to new HashEntry array.
338 <         * Call only while holding lock or in constructor.
339 <         */
302 <        void setTable(HashEntry<K,V>[] newTable) {
303 <            threshold = (int)(newTable.length * loadFactor);
304 <            table = newTable;
305 <        }
306 <
307 <        /**
308 <         * Returns properly casted first entry of bin for given hash.
309 <         */
310 <        HashEntry<K,V> getFirst(int hash) {
311 <            HashEntry<K,V>[] tab = table;
312 <            return tab[hash & (tab.length - 1)];
313 <        }
314 <
315 <        /**
316 <         * Reads value field of an entry under lock. Called if value
317 <         * field ever appears to be null. This is possible only if a
318 <         * compiler happens to reorder a HashEntry initialization with
319 <         * its table assignment, which is legal under memory model
320 <         * but is not known to ever occur.
321 <         */
322 <        V readValueUnderLock(HashEntry<K,V> e) {
323 <            lock();
336 >        final V put(K key, int hash, V value, boolean onlyIfAbsent) {
337 >            HashEntry<K,V> node = tryLock() ? null :
338 >                scanAndLockForPut(key, hash, value);
339 >            V oldValue;
340              try {
325                return e.value;
326            } finally {
327                unlock();
328            }
329        }
330
331        /* Specialized implementations of map methods */
332
333        V get(Object key, int hash) {
334            if (count != 0) { // read-volatile
335                HashEntry<K,V> e = getFirst(hash);
336                while (e != null) {
337                    if (e.hash == hash && key.equals(e.key)) {
338                        V v = e.value;
339                        if (v != null)
340                            return v;
341                        return readValueUnderLock(e); // recheck
342                    }
343                    e = e.next;
344                }
345            }
346            return null;
347        }
348
349        boolean containsKey(Object key, int hash) {
350            if (count != 0) { // read-volatile
351                HashEntry<K,V> e = getFirst(hash);
352                while (e != null) {
353                    if (e.hash == hash && key.equals(e.key))
354                        return true;
355                    e = e.next;
356                }
357            }
358            return false;
359        }
360
361        boolean containsValue(Object value) {
362            if (count != 0) { // read-volatile
341                  HashEntry<K,V>[] tab = table;
342 <                int len = tab.length;
343 <                for (int i = 0 ; i < len; i++) {
344 <                    for (HashEntry<K,V> e = tab[i]; e != null; e = e.next) {
345 <                        V v = e.value;
346 <                        if (v == null) // recheck
347 <                            v = readValueUnderLock(e);
348 <                        if (value.equals(v))
349 <                            return true;
342 >                int index = (tab.length - 1) & hash;
343 >                HashEntry<K,V> first = entryAt(tab, index);
344 >                for (HashEntry<K,V> e = first;;) {
345 >                    if (e != null) {
346 >                        K k;
347 >                        if ((k = e.key) == key ||
348 >                            (e.hash == hash && key.equals(k))) {
349 >                            oldValue = e.value;
350 >                            if (!onlyIfAbsent)
351 >                                e.value = value;
352 >                            break;
353 >                        }
354 >                        e = e.next;
355 >                    }
356 >                    else {
357 >                        if (node != null)
358 >                            node.setNext(first);
359 >                        else
360 >                            node = new HashEntry<K,V>(hash, key, value, first);
361 >                        int c = count + 1;
362 >                        if (c > threshold && first != null &&
363 >                            tab.length < MAXIMUM_CAPACITY)
364 >                            rehash(node);
365 >                        else
366 >                            setEntryAt(tab, index, node);
367 >                        ++modCount;
368 >                        count = c;
369 >                        oldValue = null;
370 >                        break;
371                      }
372                  }
374            }
375            return false;
376        }
377
378        boolean replace(K key, int hash, V oldValue, V newValue) {
379            lock();
380            try {
381                HashEntry<K,V> e = getFirst(hash);
382                while (e != null && (e.hash != hash || !key.equals(e.key)))
383                    e = e.next;
384
385                boolean replaced = false;
386                if (e != null && oldValue.equals(e.value)) {
387                    replaced = true;
388                    e.value = newValue;
389                }
390                return replaced;
391            } finally {
392                unlock();
393            }
394        }
395
396        V replace(K key, int hash, V newValue) {
397            lock();
398            try {
399                HashEntry<K,V> e = getFirst(hash);
400                while (e != null && (e.hash != hash || !key.equals(e.key)))
401                    e = e.next;
402
403                V oldValue = null;
404                if (e != null) {
405                    oldValue = e.value;
406                    e.value = newValue;
407                }
408                return oldValue;
409            } finally {
410                unlock();
411            }
412        }
413
414
415        V put(K key, int hash, V value, boolean onlyIfAbsent) {
416            lock();
417            try {
418                int c = count;
419                if (c++ > threshold) // ensure capacity
420                    rehash();
421                HashEntry<K,V>[] tab = table;
422                int index = hash & (tab.length - 1);
423                HashEntry<K,V> first = tab[index];
424                HashEntry<K,V> e = first;
425                while (e != null && (e.hash != hash || !key.equals(e.key)))
426                    e = e.next;
427
428                V oldValue;
429                if (e != null) {
430                    oldValue = e.value;
431                    if (!onlyIfAbsent)
432                        e.value = value;
433                }
434                else {
435                    oldValue = null;
436                    ++modCount;
437                    tab[index] = new HashEntry<K,V>(key, hash, first, value);
438                    count = c; // write-volatile
439                }
440                return oldValue;
373              } finally {
374                  unlock();
375              }
376 +            return oldValue;
377          }
378  
379 <        void rehash() {
380 <            HashEntry<K,V>[] oldTable = table;
381 <            int oldCapacity = oldTable.length;
382 <            if (oldCapacity >= MAXIMUM_CAPACITY)
383 <                return;
384 <
379 >        /**
380 >         * Doubles size of table and repacks entries, also adding the
381 >         * given node to new table
382 >         */
383 >        @SuppressWarnings("unchecked")
384 >        private void rehash(HashEntry<K,V> node) {
385              /*
386 <             * Reclassify nodes in each list to new Map.  Because we are
387 <             * using power-of-two expansion, the elements from each bin
388 <             * must either stay at same index, or move with a power of two
389 <             * offset. We eliminate unnecessary node creation by catching
390 <             * cases where old nodes can be reused because their next
391 <             * fields won't change. Statistically, at the default
392 <             * threshold, only about one-sixth of them need cloning when
393 <             * a table doubles. The nodes they replace will be garbage
394 <             * collectable as soon as they are no longer referenced by any
395 <             * reader thread that may be in the midst of traversing table
396 <             * right now.
386 >             * Reclassify nodes in each list to new table.  Because we
387 >             * are using power-of-two expansion, the elements from
388 >             * each bin must either stay at same index, or move with a
389 >             * power of two offset. We eliminate unnecessary node
390 >             * creation by catching cases where old nodes can be
391 >             * reused because their next fields won't change.
392 >             * Statistically, at the default threshold, only about
393 >             * one-sixth of them need cloning when a table
394 >             * doubles. The nodes they replace will be garbage
395 >             * collectable as soon as they are no longer referenced by
396 >             * any reader thread that may be in the midst of
397 >             * concurrently traversing table. Entry accesses use plain
398 >             * array indexing because they are followed by volatile
399 >             * table write.
400               */
401 <
402 <            HashEntry<K,V>[] newTable = HashEntry.newArray(oldCapacity<<1);
403 <            threshold = (int)(newTable.length * loadFactor);
404 <            int sizeMask = newTable.length - 1;
401 >            HashEntry<K,V>[] oldTable = table;
402 >            int oldCapacity = oldTable.length;
403 >            int newCapacity = oldCapacity << 1;
404 >            threshold = (int)(newCapacity * loadFactor);
405 >            HashEntry<K,V>[] newTable =
406 >                (HashEntry<K,V>[]) new HashEntry[newCapacity];
407 >            int sizeMask = newCapacity - 1;
408              for (int i = 0; i < oldCapacity ; i++) {
470                // We need to guarantee that any existing reads of old Map can
471                //  proceed. So we cannot yet null out each bin.
409                  HashEntry<K,V> e = oldTable[i];
473
410                  if (e != null) {
411                      HashEntry<K,V> next = e.next;
412                      int idx = e.hash & sizeMask;
413 <
478 <                    //  Single node on list
479 <                    if (next == null)
413 >                    if (next == null)   //  Single node on list
414                          newTable[idx] = e;
415 <
482 <                    else {
483 <                        // Reuse trailing consecutive sequence at same slot
415 >                    else { // Reuse consecutive sequence at same slot
416                          HashEntry<K,V> lastRun = e;
417                          int lastIdx = idx;
418                          for (HashEntry<K,V> last = next;
# Line 493 | Line 425 | public class ConcurrentHashMap<K, V> ext
425                              }
426                          }
427                          newTable[lastIdx] = lastRun;
428 <
497 <                        // Clone all remaining nodes
428 >                        // Clone remaining nodes
429                          for (HashEntry<K,V> p = e; p != lastRun; p = p.next) {
430 <                            int k = p.hash & sizeMask;
430 >                            V v = p.value;
431 >                            int h = p.hash;
432 >                            int k = h & sizeMask;
433                              HashEntry<K,V> n = newTable[k];
434 <                            newTable[k] = new HashEntry<K,V>(p.key, p.hash,
502 <                                                             n, p.value);
434 >                            newTable[k] = new HashEntry<K,V>(h, p.key, v, n);
435                          }
436                      }
437                  }
438              }
439 +            int nodeIndex = node.hash & sizeMask; // add the new node
440 +            node.setNext(newTable[nodeIndex]);
441 +            newTable[nodeIndex] = node;
442              table = newTable;
443          }
444  
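A tiny self-contained check (illustrative only, not part of the patch) of the index property the rehash comment above relies on: because capacity doubles by powers of two, an entry at old index (h & (oldCapacity-1)) ends up either at that same index or at that index plus oldCapacity, depending on a single additional hash bit.

class RehashIndexDemo {
    public static void main(String[] args) {
        int oldCapacity = 4, newCapacity = 8;          // example sizes
        for (int h = 0; h < 16; h++) {
            int oldIdx = h & (oldCapacity - 1);
            int newIdx = h & (newCapacity - 1);
            // holds for every hash: stay put, or move by exactly oldCapacity
            assert newIdx == oldIdx || newIdx == oldIdx + oldCapacity;
            System.out.println("hash=" + h + " old=" + oldIdx + " new=" + newIdx);
        }
    }
}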
445          /**
446 +         * Scans for a node containing given key while trying to
447 +         * acquire lock, creating and returning one if not found. Upon
448 +         * return, guarantees that lock is held.
449 +         *
450 +         * @return a new node if key not found, else null
451 +         */
452 +        private HashEntry<K,V> scanAndLockForPut(K key, int hash, V value) {
453 +            HashEntry<K,V> first = entryForHash(this, hash);
454 +            HashEntry<K,V> e = first;
455 +            HashEntry<K,V> node = null;
456 +            int retries = -1; // negative while locating node
457 +            while (!tryLock()) {
458 +                HashEntry<K,V> f; // to recheck first below
459 +                if (retries < 0) {
460 +                    if (e == null) {
461 +                        if (node == null) // speculatively create node
462 +                            node = new HashEntry<K,V>(hash, key, value, null);
463 +                        retries = 0;
464 +                    }
465 +                    else if (key.equals(e.key))
466 +                        retries = 0;
467 +                    else
468 +                        e = e.next;
469 +                }
470 +                else if (++retries > MAX_SCAN_RETRIES) {
471 +                    lock();
472 +                    break;
473 +                }
474 +                else if ((retries & 1) == 0 &&
475 +                         (f = entryForHash(this, hash)) != first) {
476 +                    e = first = f; // re-traverse if entry changed
477 +                    retries = -1;
478 +                }
479 +            }
480 +            return node;
481 +        }
482 +
483 +        /**
484 +         * Scans for a node containing the given key while trying to
485 +         * acquire lock for a remove or replace operation. Upon
486 +         * return, guarantees that lock is held.  Note that we must
487 +         * lock even if the key is not found, to ensure sequential
488 +         * consistency of updates.
489 +         */
490 +        private void scanAndLock(Object key, int hash) {
491 +            // similar to but simpler than scanAndLockForPut
492 +            HashEntry<K,V> first = entryForHash(this, hash);
493 +            HashEntry<K,V> e = first;
494 +            int retries = -1;
495 +            while (!tryLock()) {
496 +                HashEntry<K,V> f;
497 +                if (retries < 0) {
498 +                    if (e == null || key.equals(e.key))
499 +                        retries = 0;
500 +                    else
501 +                        e = e.next;
502 +                }
503 +                else if (++retries > MAX_SCAN_RETRIES) {
504 +                    lock();
505 +                    break;
506 +                }
507 +                else if ((retries & 1) == 0 &&
508 +                         (f = entryForHash(this, hash)) != first) {
509 +                    e = first = f;
510 +                    retries = -1;
511 +                }
512 +            }
513 +        }
514 +
515 +        /**
516           * Remove; match on key only if value null, else match both.
517           */
518 <        V remove(Object key, int hash, Object value) {
519 <            lock();
518 >        final V remove(Object key, int hash, Object value) {
519 >            if (!tryLock())
520 >                scanAndLock(key, hash);
521 >            V oldValue = null;
522              try {
516                int c = count - 1;
523                  HashEntry<K,V>[] tab = table;
524 <                int index = hash & (tab.length - 1);
525 <                HashEntry<K,V> first = tab[index];
526 <                HashEntry<K,V> e = first;
527 <                while (e != null && (e.hash != hash || !key.equals(e.key)))
528 <                    e = e.next;
524 >                int index = (tab.length - 1) & hash;
525 >                HashEntry<K,V> e = entryAt(tab, index);
526 >                HashEntry<K,V> pred = null;
527 >                while (e != null) {
528 >                    K k;
529 >                    HashEntry<K,V> next = e.next;
530 >                    if ((k = e.key) == key ||
531 >                        (e.hash == hash && key.equals(k))) {
532 >                        V v = e.value;
533 >                        if (value == null || value == v || value.equals(v)) {
534 >                            if (pred == null)
535 >                                setEntryAt(tab, index, next);
536 >                            else
537 >                                pred.setNext(next);
538 >                            ++modCount;
539 >                            --count;
540 >                            oldValue = v;
541 >                        }
542 >                        break;
543 >                    }
544 >                    pred = e;
545 >                    e = next;
546 >                }
547 >            } finally {
548 >                unlock();
549 >            }
550 >            return oldValue;
551 >        }
552  
553 <                V oldValue = null;
554 <                if (e != null) {
555 <                    V v = e.value;
556 <                    if (value == null || value.equals(v)) {
557 <                        oldValue = v;
558 <                        // All entries following removed node can stay
559 <                        // in list, but all preceding ones need to be
560 <                        // cloned.
561 <                        ++modCount;
562 <                        HashEntry<K,V> newFirst = e.next;
563 <                        for (HashEntry<K,V> p = first; p != e; p = p.next)
564 <                            newFirst = new HashEntry<K,V>(p.key, p.hash,
565 <                                                          newFirst, p.value);
566 <                        tab[index] = newFirst;
567 <                        count = c; // write-volatile
553 >        final boolean replace(K key, int hash, V oldValue, V newValue) {
554 >            if (!tryLock())
555 >                scanAndLock(key, hash);
556 >            boolean replaced = false;
557 >            try {
558 >                HashEntry<K,V> e;
559 >                for (e = entryForHash(this, hash); e != null; e = e.next) {
560 >                    K k;
561 >                    if ((k = e.key) == key ||
562 >                        (e.hash == hash && key.equals(k))) {
563 >                        if (oldValue.equals(e.value)) {
564 >                            e.value = newValue;
565 >                            replaced = true;
566 >                        }
567 >                        break;
568                      }
569                  }
570 <                return oldValue;
570 >            } finally {
571 >                unlock();
572 >            }
573 >            return replaced;
574 >        }
575 >
576 >        final V replace(K key, int hash, V value) {
577 >            if (!tryLock())
578 >                scanAndLock(key, hash);
579 >            V oldValue = null;
580 >            try {
581 >                HashEntry<K,V> e;
582 >                for (e = entryForHash(this, hash); e != null; e = e.next) {
583 >                    K k;
584 >                    if ((k = e.key) == key ||
585 >                        (e.hash == hash && key.equals(k))) {
586 >                        oldValue = e.value;
587 >                        e.value = value;
588 >                        break;
589 >                    }
590 >                }
591 >            } finally {
592 >                unlock();
593 >            }
594 >            return oldValue;
595 >        }
596 >
597 >        final void clear() {
598 >            lock();
599 >            try {
600 >                HashEntry<K,V>[] tab = table;
601 >                for (int i = 0; i < tab.length ; i++)
602 >                    setEntryAt(tab, i, null);
603 >                ++modCount;
604 >                count = 0;
605              } finally {
606                  unlock();
607              }
608          }
609 +    }
610 +
611 +    // Accessing segments
612  
613 <        void clear() {
614 <            if (count != 0) {
615 <                lock();
616 <                try {
617 <                    HashEntry<K,V>[] tab = table;
618 <                    for (int i = 0; i < tab.length ; i++)
619 <                        tab[i] = null;
620 <                    ++modCount;
621 <                    count = 0; // write-volatile
622 <                } finally {
623 <                    unlock();
613 >    /**
614 >     * Gets the jth element of given segment array (if nonnull) with
615 >     * volatile element access semantics via Unsafe.
616 >     */
617 >    @SuppressWarnings("unchecked")
618 >    static final <K,V> Segment<K,V> segmentAt(Segment<K,V>[] ss, int j) {
619 >        long u = (j << SSHIFT) + SBASE;
620 >        return ss == null ? null :
621 >            (Segment<K,V>) UNSAFE.getObjectVolatile(ss, u);
622 >    }
623 >
624 >    /**
625 >     * Returns the segment for the given index, creating it and
626 >     * recording in segment table (via CAS) if not already present.
627 >     *
628 >     * @param k the index
629 >     * @return the segment
630 >     */
631 >    @SuppressWarnings("unchecked")
632 >    private Segment<K,V> ensureSegment(int k) {
633 >        final Segment<K,V>[] ss = this.segments;
634 >        long u = (k << SSHIFT) + SBASE; // raw offset
635 >        Segment<K,V> seg;
636 >        if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u)) == null) {
637 >            Segment<K,V> proto = ss[0]; // use segment 0 as prototype
638 >            int cap = proto.table.length;
639 >            float lf = proto.loadFactor;
640 >            int threshold = (int)(cap * lf);
641 >            HashEntry<K,V>[] tab = (HashEntry<K,V>[])new HashEntry[cap];
642 >            if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
643 >                == null) { // recheck
644 >                Segment<K,V> s = new Segment<K,V>(lf, threshold, tab);
645 >                while ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
646 >                       == null) {
647 >                    if (UNSAFE.compareAndSwapObject(ss, u, null, seg = s))
648 >                        break;
649                  }
650              }
651          }
652 +        return seg;
653      }
654  
655 +    // Hash-based segment and entry accesses
656  
657 +    /**
658 +     * Get the segment for the given hash
659 +     */
660 +    @SuppressWarnings("unchecked")
661 +    private Segment<K,V> segmentForHash(int h) {
662 +        long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
663 +        return (Segment<K,V>) UNSAFE.getObjectVolatile(segments, u);
664 +    }
665 +
666 +    /**
667 +     * Gets the table entry for the given segment and hash
668 +     */
669 +    @SuppressWarnings("unchecked")
670 +    static final <K,V> HashEntry<K,V> entryForHash(Segment<K,V> seg, int h) {
671 +        HashEntry<K,V>[] tab;
672 +        return seg == null || (tab = seg.table) == null? null :
673 +            (HashEntry<K,V>) UNSAFE.getObjectVolatile
674 +            (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
675 +    }
676  
677      /* ---------------- Public operations -------------- */
678  
# Line 580 | Line 692 | public class ConcurrentHashMap<K, V> ext
692       * negative or the load factor or concurrencyLevel are
693       * nonpositive.
694       */
695 +    @SuppressWarnings("unchecked")
696      public ConcurrentHashMap(int initialCapacity,
697                               float loadFactor, int concurrencyLevel) {
698          if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0)
699              throw new IllegalArgumentException();
587
700          if (concurrencyLevel > MAX_SEGMENTS)
701              concurrencyLevel = MAX_SEGMENTS;
590
702          // Find power-of-two sizes best matching arguments
703          int sshift = 0;
704          int ssize = 1;
# Line 595 | Line 706 | public class ConcurrentHashMap<K, V> ext
706              ++sshift;
707              ssize <<= 1;
708          }
709 <        segmentShift = 32 - sshift;
710 <        segmentMask = ssize - 1;
600 <        this.segments = Segment.newArray(ssize);
601 <
709 >        this.segmentShift = 32 - sshift;
710 >        this.segmentMask = ssize - 1;
711          if (initialCapacity > MAXIMUM_CAPACITY)
712              initialCapacity = MAXIMUM_CAPACITY;
713          int c = initialCapacity / ssize;
714          if (c * ssize < initialCapacity)
715              ++c;
716 <        int cap = 1;
716 >        int cap = MIN_SEGMENT_TABLE_CAPACITY;
717          while (cap < c)
718              cap <<= 1;
719 <
720 <        for (int i = 0; i < this.segments.length; ++i)
721 <            this.segments[i] = new Segment<K,V>(cap, loadFactor);
719 >        // create segments and segments[0]
720 >        Segment<K,V> s0 =
721 >            new Segment<K,V>(loadFactor, (int)(cap * loadFactor),
722 >                             (HashEntry<K,V>[])new HashEntry[cap]);
723 >        Segment<K,V>[] ss = (Segment<K,V>[])new Segment[ssize];
724 >        UNSAFE.putOrderedObject(ss, SBASE, s0); // ordered write of segments[0]
725 >        this.segments = ss;
726      }
727  
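A worked example (illustrative only, not part of the patch) of the routing arithmetic this constructor sets up: with the default concurrencyLevel of 16, the loop above yields sshift = 4 and ssize = 16, hence segmentShift = 28 and segmentMask = 15, so put() and segmentForHash() select a segment from the top four hash bits and a bin from the low bits.

class SegmentRoutingDemo {
    public static void main(String[] args) {
        int segmentShift = 28, segmentMask = 15;     // values for ssize == 16
        int tableLength = 2;                         // MIN_SEGMENT_TABLE_CAPACITY
        int hash = 0xA3C159E7;                       // an already-spread hash value
        int segmentIndex = (hash >>> segmentShift) & segmentMask;  // top 4 bits -> 10
        int binIndex = (tableLength - 1) & hash;                   // low bit    -> 1
        System.out.println("segment=" + segmentIndex + ", bin=" + binIndex);
    }
}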
728      /**
# Line 672 | Line 785 | public class ConcurrentHashMap<K, V> ext
785       * @return <tt>true</tt> if this map contains no key-value mappings
786       */
787      public boolean isEmpty() {
675        final Segment<K,V>[] segments = this.segments;
788          /*
789 <         * We keep track of per-segment modCounts to avoid ABA
790 <         * problems in which an element in one segment was added and
791 <         * in another removed during traversal, in which case the
792 <         * table was never actually empty at any point. Note the
793 <         * similar use of modCounts in the size() and containsValue()
794 <         * methods, which are the only other methods also susceptible
795 <         * to ABA problems.
789 >         * Sum per-segment modCounts to avoid mis-reporting when
790 >         * elements are concurrently added and removed in one segment
791 >         * while checking another, in which case the table was never
792 >         * actually empty at any point. (The sum ensures accuracy up
793 >         * through at least 1<<31 per-segment modifications before
794 >         * recheck.)  Methods size() and containsValue() use similar
795 >         * constructions for stability checks.
796           */
797 <        int[] mc = new int[segments.length];
798 <        int mcsum = 0;
799 <        for (int i = 0; i < segments.length; ++i) {
800 <            if (segments[i].count != 0)
801 <                return false;
802 <            else
691 <                mcsum += mc[i] = segments[i].modCount;
692 <        }
693 <        // If mcsum happens to be zero, then we know we got a snapshot
694 <        // before any modifications at all were made.  This is
695 <        // probably common enough to bother tracking.
696 <        if (mcsum != 0) {
697 <            for (int i = 0; i < segments.length; ++i) {
698 <                if (segments[i].count != 0 ||
699 <                    mc[i] != segments[i].modCount)
797 >        long sum = 0L;
798 >        final Segment<K,V>[] segments = this.segments;
799 >        for (int j = 0; j < segments.length; ++j) {
800 >            Segment<K,V> seg = segmentAt(segments, j);
801 >            if (seg != null) {
802 >                if (seg.count != 0)
803                      return false;
804 +                sum += seg.modCount;
805              }
806          }
807 +        if (sum != 0L) { // recheck unless no modifications
808 +            for (int j = 0; j < segments.length; ++j) {
809 +                Segment<K,V> seg = segmentAt(segments, j);
810 +                if (seg != null) {
811 +                    if (seg.count != 0)
812 +                        return false;
813 +                    sum -= seg.modCount;
814 +                }
815 +            }
816 +            if (sum != 0L)
817 +                return false;
818 +        }
819          return true;
820      }
821  
# Line 711 | Line 827 | public class ConcurrentHashMap<K, V> ext
827       * @return the number of key-value mappings in this map
828       */
829      public int size() {
714        final Segment<K,V>[] segments = this.segments;
715        long sum = 0;
716        long check = 0;
717        int[] mc = new int[segments.length];
830          // Try a few times to get accurate count. On failure due to
831          // continuous async changes in table, resort to locking.
832 <        for (int k = 0; k < RETRIES_BEFORE_LOCK; ++k) {
833 <            check = 0;
834 <            sum = 0;
835 <            int mcsum = 0;
836 <            for (int i = 0; i < segments.length; ++i) {
837 <                sum += segments[i].count;
838 <                mcsum += mc[i] = segments[i].modCount;
839 <            }
840 <            if (mcsum != 0) {
841 <                for (int i = 0; i < segments.length; ++i) {
842 <                    check += segments[i].count;
843 <                    if (mc[i] != segments[i].modCount) {
844 <                        check = -1; // force retry
845 <                        break;
832 >        final Segment<K,V>[] segments = this.segments;
833 >        int size;
834 >        boolean overflow; // true if size overflows 32 bits
835 >        long sum;         // sum of modCounts
836 >        long last = 0L;   // previous sum
837 >        int retries = -1; // first iteration isn't retry
838 >        try {
839 >            for (;;) {
840 >                if (retries++ == RETRIES_BEFORE_LOCK) {
841 >                    for (int j = 0; j < segments.length; ++j)
842 >                        ensureSegment(j).lock(); // force creation
843 >                }
844 >                sum = 0L;
845 >                size = 0;
846 >                overflow = false;
847 >                for (int j = 0; j < segments.length; ++j) {
848 >                    Segment<K,V> seg = segmentAt(segments, j);
849 >                    if (seg != null) {
850 >                        sum += seg.modCount;
851 >                        int c = seg.count;
852 >                        if (c < 0 || (size += c) < 0)
853 >                            overflow = true;
854                      }
855                  }
856 +                if (sum == last)
857 +                    break;
858 +                last = sum;
859 +            }
860 +        } finally {
861 +            if (retries > RETRIES_BEFORE_LOCK) {
862 +                for (int j = 0; j < segments.length; ++j)
863 +                    segmentAt(segments, j).unlock();
864              }
737            if (check == sum)
738                break;
865          }
866 <        if (check != sum) { // Resort to locking all segments
741 <            sum = 0;
742 <            for (int i = 0; i < segments.length; ++i)
743 <                segments[i].lock();
744 <            for (int i = 0; i < segments.length; ++i)
745 <                sum += segments[i].count;
746 <            for (int i = 0; i < segments.length; ++i)
747 <                segments[i].unlock();
748 <        }
749 <        if (sum > Integer.MAX_VALUE)
750 <            return Integer.MAX_VALUE;
751 <        else
752 <            return (int)sum;
866 >        return overflow ? Integer.MAX_VALUE : size;
867      }
868  
869      /**
# Line 765 | Line 879 | public class ConcurrentHashMap<K, V> ext
879       */
880      public V get(Object key) {
881          int hash = hash(key.hashCode());
882 <        return segmentFor(hash).get(key, hash);
882 >        for (HashEntry<K,V> e = entryForHash(segmentForHash(hash), hash);
883 >             e != null; e = e.next) {
884 >            K k;
885 >            if ((k = e.key) == key || (e.hash == hash && key.equals(k)))
886 >                return e.value;
887 >        }
888 >        return null;
889      }
890  
891      /**
# Line 779 | Line 899 | public class ConcurrentHashMap<K, V> ext
899       */
900      public boolean containsKey(Object key) {
901          int hash = hash(key.hashCode());
902 <        return segmentFor(hash).containsKey(key, hash);
902 >        for (HashEntry<K,V> e = entryForHash(segmentForHash(hash), hash);
903 >             e != null; e = e.next) {
904 >            K k;
905 >            if ((k = e.key) == key || (e.hash == hash && key.equals(k)))
906 >                return true;
907 >        }
908 >        return false;
909      }
910  
911      /**
# Line 794 | Line 920 | public class ConcurrentHashMap<K, V> ext
920       * @throws NullPointerException if the specified value is null
921       */
922      public boolean containsValue(Object value) {
923 +        // Same idea as size() but using hashes as checksum
924          if (value == null)
925              throw new NullPointerException();
799
800        // See explanation of modCount use above
801
926          final Segment<K,V>[] segments = this.segments;
803        int[] mc = new int[segments.length];
804
805        // Try a few times without locking
806        for (int k = 0; k < RETRIES_BEFORE_LOCK; ++k) {
807            int sum = 0;
808            int mcsum = 0;
809            for (int i = 0; i < segments.length; ++i) {
810                int c = segments[i].count;
811                mcsum += mc[i] = segments[i].modCount;
812                if (segments[i].containsValue(value))
813                    return true;
814            }
815            boolean cleanSweep = true;
816            if (mcsum != 0) {
817                for (int i = 0; i < segments.length; ++i) {
818                    int c = segments[i].count;
819                    if (mc[i] != segments[i].modCount) {
820                        cleanSweep = false;
821                        break;
822                    }
823                }
824            }
825            if (cleanSweep)
826                return false;
827        }
828        // Resort to locking all segments
829        for (int i = 0; i < segments.length; ++i)
830            segments[i].lock();
927          boolean found = false;
928 +        long last = 0L;
929 +        int retries = -1;
930          try {
931 <            for (int i = 0; i < segments.length; ++i) {
932 <                if (segments[i].containsValue(value)) {
933 <                    found = true;
934 <                    break;
931 >            outer: for (;;) {
932 >                if (retries++ == RETRIES_BEFORE_LOCK) {
933 >                    for (int j = 0; j < segments.length; ++j)
934 >                        ensureSegment(j).lock(); // force creation
935                  }
936 +                long sum = 0L;
937 +                for (int j = 0; j < segments.length; ++j) {
938 +                    HashEntry<K,V>[] tab;
939 +                    Segment<K,V> seg = segmentAt(segments, j);
940 +                    if (seg != null && (tab = seg.table) != null) {
941 +                        for (int i = 0 ; i < tab.length; i++) {
942 +                            HashEntry<K,V> e;
943 +                            for (e = entryAt(tab, i); e != null; e = e.next) {
944 +                                V v = e.value;
945 +                                if (v != null && value.equals(v)) {
946 +                                    found = true;
947 +                                    break outer;
948 +                                }
949 +                                sum += e.hash;
950 +                            }
951 +                        }
952 +                    }
953 +                }
954 +                if (sum == last)
955 +                    break;
956 +                last = sum;
957              }
958          } finally {
959 <            for (int i = 0; i < segments.length; ++i)
960 <                segments[i].unlock();
959 >            if (retries > RETRIES_BEFORE_LOCK) {
960 >                for (int j = 0; j < segments.length; ++j)
961 >                    segmentAt(segments, j).unlock();
962 >            }
963          }
964          return found;
965      }
# Line 876 | Line 997 | public class ConcurrentHashMap<K, V> ext
997       * @throws NullPointerException if the specified key or value is null
998       */
999      public V put(K key, V value) {
1000 +        Segment<K,V> s;
1001          if (value == null)
1002              throw new NullPointerException();
1003          int hash = hash(key.hashCode());
1004 <        return segmentFor(hash).put(key, hash, value, false);
1004 >        int j = (hash >>> segmentShift) & segmentMask;
1005 >        return ((s = segmentAt(segments, j)) == null? ensureSegment(j) : s).
1006 >            put(key, hash, value, false);
1007      }
1008  
1009      /**
# Line 890 | Line 1014 | public class ConcurrentHashMap<K, V> ext
1014       * @throws NullPointerException if the specified key or value is null
1015       */
1016      public V putIfAbsent(K key, V value) {
1017 +        Segment<K,V> s;
1018          if (value == null)
1019              throw new NullPointerException();
1020          int hash = hash(key.hashCode());
1021 <        return segmentFor(hash).put(key, hash, value, true);
1021 >        int j = (hash >>> segmentShift) & segmentMask;
1022 >        return ((s = segmentAt(segments, j)) == null? ensureSegment(j) : s).
1023 >            put(key, hash, value, true);
1024      }
1025  
1026      /**
# Line 919 | Line 1046 | public class ConcurrentHashMap<K, V> ext
1046       */
1047      public V remove(Object key) {
1048          int hash = hash(key.hashCode());
1049 <        return segmentFor(hash).remove(key, hash, null);
1049 >        Segment<K,V> s = segmentForHash(hash);
1050 >        return s == null ? null : s.remove(key, hash, null);
1051      }
1052  
1053      /**
# Line 929 | Line 1057 | public class ConcurrentHashMap<K, V> ext
1057       */
1058      public boolean remove(Object key, Object value) {
1059          int hash = hash(key.hashCode());
1060 <        if (value == null)
1061 <            return false;
1062 <        return segmentFor(hash).remove(key, hash, value) != null;
1060 >        Segment<K,V> s;
1061 >        return value != null && (s = segmentForHash(hash)) != null &&
1062 >            s.remove(key, hash, value) != null;
1063      }
1064  
1065      /**
# Line 940 | Line 1068 | public class ConcurrentHashMap<K, V> ext
1068       * @throws NullPointerException if any of the arguments are null
1069       */
1070      public boolean replace(K key, V oldValue, V newValue) {
1071 +        int hash = hash(key.hashCode());
1072          if (oldValue == null || newValue == null)
1073              throw new NullPointerException();
1074 <        int hash = hash(key.hashCode());
1075 <        return segmentFor(hash).replace(key, hash, oldValue, newValue);
1074 >        Segment<K,V> s = segmentForHash(hash);
1075 >        return s != null && s.replace(key, hash, oldValue, newValue);
1076      }
1077  
1078      /**
# Line 954 | Line 1083 | public class ConcurrentHashMap<K, V> ext
1083       * @throws NullPointerException if the specified key or value is null
1084       */
1085      public V replace(K key, V value) {
1086 +        int hash = hash(key.hashCode());
1087          if (value == null)
1088              throw new NullPointerException();
1089 <        int hash = hash(key.hashCode());
1090 <        return segmentFor(hash).replace(key, hash, value);
1089 >        Segment<K,V> s = segmentForHash(hash);
1090 >        return s == null ? null : s.replace(key, hash, value);
1091      }
1092  
1093      /**
1094       * Removes all of the mappings from this map.
1095       */
1096      public void clear() {
1097 <        for (int i = 0; i < segments.length; ++i)
1098 <            segments[i].clear();
1097 >        final Segment<K,V>[] segments = this.segments;
1098 >        for (int j = 0; j < segments.length; ++j) {
1099 >            Segment<K,V> s = segmentAt(segments, j);
1100 >            if (s != null)
1101 >                s.clear();
1102 >        }
1103      }
1104  
1105      /**
# Line 1066 | Line 1200 | public class ConcurrentHashMap<K, V> ext
1200              advance();
1201          }
1202  
1203 <        public boolean hasMoreElements() { return hasNext(); }
1204 <
1203 >        /**
1204 >         * Set nextEntry to first node of next non-empty table
1205 >         * (in backwards order, to simplify checks).
1206 >         */
1207          final void advance() {
1208 <            if (nextEntry != null && (nextEntry = nextEntry.next) != null)
1209 <                return;
1210 <
1211 <            while (nextTableIndex >= 0) {
1212 <                if ( (nextEntry = currentTable[nextTableIndex--]) != null)
1077 <                    return;
1078 <            }
1079 <
1080 <            while (nextSegmentIndex >= 0) {
1081 <                Segment<K,V> seg = segments[nextSegmentIndex--];
1082 <                if (seg.count != 0) {
1083 <                    currentTable = seg.table;
1084 <                    for (int j = currentTable.length - 1; j >= 0; --j) {
1085 <                        if ( (nextEntry = currentTable[j]) != null) {
1086 <                            nextTableIndex = j - 1;
1087 <                            return;
1088 <                        }
1089 <                    }
1208 >            for (;;) {
1209 >                if (nextTableIndex >= 0) {
1210 >                    if ((nextEntry = entryAt(currentTable,
1211 >                                             nextTableIndex--)) != null)
1212 >                        break;
1213                  }
1214 +                else if (nextSegmentIndex >= 0) {
1215 +                    Segment<K,V> seg = segmentAt(segments, nextSegmentIndex--);
1216 +                    if (seg != null && (currentTable = seg.table) != null)
1217 +                        nextTableIndex = currentTable.length - 1;
1218 +                }
1219 +                else
1220 +                    break;
1221              }
1222          }
1223  
1224 <        public boolean hasNext() { return nextEntry != null; }
1225 <
1226 <        HashEntry<K,V> nextEntry() {
1097 <            if (nextEntry == null)
1224 >        final HashEntry<K,V> nextEntry() {
1225 >            HashEntry<K,V> e = lastReturned = nextEntry;
1226 >            if (e == null)
1227                  throw new NoSuchElementException();
1228 <            lastReturned = nextEntry;
1229 <            advance();
1230 <            return lastReturned;
1228 >            if ((nextEntry = e.next) == null)
1229 >                advance();
1230 >            return e;
1231          }
1232  
1233 <        public void remove() {
1233 >        public final boolean hasNext() { return nextEntry != null; }
1234 >        public final boolean hasMoreElements() { return nextEntry != null; }
1235 >
1236 >        public final void remove() {
1237              if (lastReturned == null)
1238                  throw new IllegalStateException();
1239              ConcurrentHashMap.this.remove(lastReturned.key);
# Line 1113 | Line 1245 | public class ConcurrentHashMap<K, V> ext
1245          extends HashIterator
1246          implements Iterator<K>, Enumeration<K>
1247      {
1248 <        public K next()        { return super.nextEntry().key; }
1249 <        public K nextElement() { return super.nextEntry().key; }
1248 >        public final K next()        { return super.nextEntry().key; }
1249 >        public final K nextElement() { return super.nextEntry().key; }
1250      }
1251  
1252      final class ValueIterator
1253          extends HashIterator
1254          implements Iterator<V>, Enumeration<V>
1255      {
1256 <        public V next()        { return super.nextEntry().value; }
1257 <        public V nextElement() { return super.nextEntry().value; }
1256 >        public final V next()        { return super.nextEntry().value; }
1257 >        public final V nextElement() { return super.nextEntry().value; }
1258      }
1259  
1260      /**
# Line 1242 | Line 1374 | public class ConcurrentHashMap<K, V> ext
1374       * The key-value mappings are emitted in no particular order.
1375       */
1376      private void writeObject(java.io.ObjectOutputStream s) throws IOException {
1377 +        // force all segments for serialization compatibility
1378 +        for (int k = 0; k < segments.length; ++k)
1379 +            ensureSegment(k);
1380          s.defaultWriteObject();
1381  
1382 +        final Segment<K,V>[] segments = this.segments;
1383          for (int k = 0; k < segments.length; ++k) {
1384 <            Segment<K,V> seg = segments[k];
1384 >            Segment<K,V> seg = segmentAt(segments, k);
1385              seg.lock();
1386              try {
1387                  HashEntry<K,V>[] tab = seg.table;
1388                  for (int i = 0; i < tab.length; ++i) {
1389 <                    for (HashEntry<K,V> e = tab[i]; e != null; e = e.next) {
1389 >                    HashEntry<K,V> e;
1390 >                    for (e = entryAt(tab, i); e != null; e = e.next) {
1391                          s.writeObject(e.key);
1392                          s.writeObject(e.value);
1393                      }
# Line 1268 | Line 1405 | public class ConcurrentHashMap<K, V> ext
1405       * stream (i.e., deserialize it).
1406       * @param s the stream
1407       */
1408 +    @SuppressWarnings("unchecked")
1409      private void readObject(java.io.ObjectInputStream s)
1410          throws IOException, ClassNotFoundException {
1411          s.defaultReadObject();
1412  
1413 <        // Initialize each segment to be minimally sized, and let grow.
1414 <        for (int i = 0; i < segments.length; ++i) {
1415 <            segments[i].setTable(new HashEntry[1]);
1413 >        // Re-initialize segments to be minimally sized, and let grow.
1414 >        int cap = MIN_SEGMENT_TABLE_CAPACITY;
1415 >        final Segment<K,V>[] segments = this.segments;
1416 >        for (int k = 0; k < segments.length; ++k) {
1417 >            Segment<K,V> seg = segments[k];
1418 >            if (seg != null) {
1419 >                seg.threshold = (int)(cap * seg.loadFactor);
1420 >                seg.table = (HashEntry<K,V>[]) new HashEntry[cap];
1421 >            }
1422          }
1423  
1424          // Read the keys and values, and put the mappings in the table
# Line 1286 | Line 1430 | public class ConcurrentHashMap<K, V> ext
1430              put(key, value);
1431          }
1432      }
1433 +
1434 +    // Unsafe mechanics
1435 +    private static final sun.misc.Unsafe UNSAFE;
1436 +    private static final long SBASE;
1437 +    private static final int SSHIFT;
1438 +    private static final long TBASE;
1439 +    private static final int TSHIFT;
1440 +
1441 +    static {
1442 +        int ss, ts;
1443 +        try {
1444 +            UNSAFE = sun.misc.Unsafe.getUnsafe();
1445 +            Class tc = HashEntry[].class;
1446 +            Class sc = Segment[].class;
1447 +            TBASE = UNSAFE.arrayBaseOffset(tc);
1448 +            SBASE = UNSAFE.arrayBaseOffset(sc);
1449 +            ts = UNSAFE.arrayIndexScale(tc);
1450 +            ss = UNSAFE.arrayIndexScale(sc);
1451 +        } catch (Exception e) {
1452 +            throw new Error(e);
1453 +        }
1454 +        if ((ss & (ss-1)) != 0 || (ts & (ts-1)) != 0)
1455 +            throw new Error("data type scale not a power of two");
1456 +        SSHIFT = 31 - Integer.numberOfLeadingZeros(ss);
1457 +        TSHIFT = 31 - Integer.numberOfLeadingZeros(ts);
1458 +    }
1459 +
1460   }
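To make the offset arithmetic above concrete: on one typical 64-bit HotSpot VM with compressed oops (the exact values are VM-dependent and assumed here only for illustration), arrayBaseOffset(HashEntry[].class) is 16 and arrayIndexScale is 4, so TSHIFT becomes 31 - numberOfLeadingZeros(4) = 2 and element i of a table sits at raw offset ((long)i << 2) + 16. Only the shift-plus-base decomposition, not these particular numbers, is relied upon by entryAt, setEntryAt, segmentAt and ensureSegment.

class OffsetDemo {
    public static void main(String[] args) {
        long base = 16L;                                        // assumed arrayBaseOffset
        int scale = 4;                                          // assumed arrayIndexScale
        int shift = 31 - Integer.numberOfLeadingZeros(scale);   // == 2, mirrors TSHIFT
        int i = 5;
        System.out.println("element " + i + " offset = " + (((long) i << shift) + base)); // 36
    }
}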

Diff Legend

  (old line number only)   Removed lines
  +                        Added lines
  <                        Changed lines (old revision)
  >                        Changed lines (new revision)