
Comparing jsr166/src/main/java/util/concurrent/ConcurrentHashMap.java (file contents):
Revision 1.3 by dl, Thu May 29 13:49:24 2003 UTC vs.
Revision 1.4 by dl, Fri Jun 6 14:17:16 2003 UTC

# Line 13 | Line 13 | import java.io.ObjectInputStream;
13   import java.io.ObjectOutputStream;
14  
15   /**
16 < * A version of Hashtable supporting
17 < * concurrency for both retrievals and updates.
16 > * A hash table supporting full concurrency of retrievals and
17 > * adjustable expected concurrency for updates. This class obeys the
18 > * same functional specification as
19 > * <tt>java.util.Hashtable</tt>. However, even though all operations
20 > * are thread-safe, retrieval operations do <em>not</em> entail
21 > * locking, and there is <em>not</em> any support for locking the
22 > * entire table in a way that prevents all access.  This class is
23 > * fully interoperable with Hashtable in programs that rely on its
24 > * thread safety but not on its synchronization details.
25 > *  
26 > * <p> Retrieval operations (including <tt>get</tt>) ordinarily
27 > * overlap with update operations (including <tt>put</tt> and
28 > * <tt>remove</tt>). Retrievals reflect the results of the most
29 > * recently <em>completed</em> update operations holding upon their
30 > * onset.  For aggregate operations such as <tt>putAll</tt> and
31 > * <tt>clear</tt>, concurrent retrievals may reflect insertion or
32 > * removal of only some entries.  Similarly, Iterators and
33 > * Enumerations return elements reflecting the state of the hash table
34 > * at some point at or since the creation of the iterator/enumeration.
35 > * They do <em>not</em> throw ConcurrentModificationException.
36 > * However, Iterators are designed to be used by only one thread at a
37 > * time.
38   *
39 < * <dl>
40 < * <dt> Retrievals
39 > * <p> The allowed concurrency among update operations is controlled
40 > * by the optional <tt>segments</tt> constructor argument (default
41 > * 16). The table is divided into this many independent parts, each of
42 > * which can be updated concurrently. Because placement in hash tables
43 > * is essentially random, the actual concurrency will vary. As a rough
44 > * rule of thumb, you should choose at least as many segments as you
45 > * expect concurrent threads. However, using more segments than you
46 > * need can waste space and time. Using a value of 1 for
47 > * <tt>segments</tt> results in a table that is concurrently readable
48 > * but can only be updated by one thread at a time.
49   *
50 < * <dd> Retrievals may overlap updates.  Successful retrievals using
51 < * get(key) and containsKey(key) usually run without
24 < * locking. Unsuccessful retrievals (i.e., when the key is not
25 < * present) do involve brief locking.  Because
26 < * retrieval operations can ordinarily overlap with update operations
27 < * (i.e., put, remove, and their derivatives), retrievals can only be
28 < * guaranteed to return the results of the most recently
29 < * <em>completed</em> operations holding upon their onset. Retrieval
30 < * operations may or may not return results reflecting in-progress
31 < * writing operations.  However, the retrieval operations do always
32 < * return consistent results -- either those holding before any single
33 < * modification or after it, but never a nonsense result.  For
34 < * aggregate operations such as putAll and clear, concurrent reads may
35 < * reflect insertion or removal of only some entries.  <p>
36 < *
37 < * Iterators and Enumerations (i.e., those returned by
38 < * keySet().iterator(), entrySet().iterator(), values().iterator(),
39 < * keys(), and elements()) return elements reflecting the state of the
40 < * hash table at some point at or since the creation of the
41 < * iterator/enumeration.  They will return at most one instance of
42 < * each element (via next()/nextElement()), but might or might not
43 < * reflect puts and removes that have been processed since
44 < * construction of the Iterator.  They do <em>not</em> throw
45 < * ConcurrentModificationException.  However, these iterators are
46 < * designed to be used by only one thread at a time. Passing an
47 < * iterator across multiple threads may lead to unpredictable traversal
48 < * if the table is being concurrently modified.  <p>
49 < *
50 < *
51 < * <dt> Updates
52 < *
53 < * <dd> This class supports a hard-wired preset <em>concurrency
54 < * level</em> of 32. This allows a maximum of 32 put and/or remove
55 < * operations to proceed concurrently. This level is an upper bound on
56 < * concurrency, not a guarantee, since it interacts with how
57 < * well-strewn elements are across bins of the table. (The preset
58 < * value in part reflects the fact that even on large multiprocessors,
59 < * factors other than synchronization tend to be bottlenecks when more
60 < * than 32 threads concurrently attempt updates.)
61 < * Additionally, operations triggering internal resizing and clearing
62 < * do not execute concurrently with any operation.
63 < * <p>
64 < *
65 < * There is <em>NOT</em> any support for locking the entire table to
66 < * prevent updates.
67 < *
68 < * </dl>
69 < *
70 < *
71 < * This class may be used as a direct replacement for
72 < * java.util.Hashtable in any application that does not rely
73 < * on the ability to lock the entire table to prevent updates.
74 < * Like Hashtable but unlike java.util.HashMap,
75 < * this class does NOT allow <tt>null</tt> to be used as a key or
76 < * value.
77 < * <p>
50 > * <p> Like Hashtable but unlike java.util.HashMap, this class does
51 > * NOT allow <tt>null</tt> to be used as a key or value.
52   *
53   **/
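
As a quick illustration of the behavior described above, the sketch below exercises the three-argument constructor, a non-blocking retrieval, and the atomic putIfAbsent. The map name and argument values are assumptions chosen for the example, not taken from this revision.

    // Hypothetical usage sketch; 16 segments allows roughly 16 concurrent writers.
    ConcurrentHashMap<String, String> cache =
        new ConcurrentHashMap<String, String>(64, 0.75f, 16);
    cache.put("home", "/index.html");          // locks only the segment "home" hashes to
    String page = cache.get("home");           // retrieval; never blocks
    cache.putIfAbsent("about", "/about.html"); // atomic check-then-insert
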
54   public class ConcurrentHashMap<K, V> extends AbstractMap<K, V>
55          implements ConcurrentMap<K, V>, Cloneable, Serializable {
56  
57      /*
58 <      The basic strategy is an optimistic-style scheme based on
59 <      the guarantee that the hash table and its lists are always
60 <      kept in a consistent enough state to be read without locking:
87 <
88 <      * Read operations first proceed without locking, by traversing the
89 <         apparently correct list of the apparently correct bin. If an
90 <         entry is found, but not invalidated (value field null), it is
91 <         returned. If not found, operations must recheck (after a memory
92 <         barrier) to make sure they are using both the right list and
93 <         the right table (which can change under resizes). If
94 <         invalidated, reads must acquire main update lock to wait out
95 <         the update, and then re-traverse.
96 <
97 <      * All list additions are at the front of each bin, making it easy
98 <         to check changes, and also fast to traverse.  Entry next
99 <         pointers are never assigned. Remove() builds new nodes when
100 <         necessary to preserve this.
101 <
102 <      * Remove() (also clear()) invalidates removed nodes to alert read
103 <         operations that they must wait out the full modifications.
104 <
105 <      * Locking for puts, removes (and, when necessary gets, etc)
106 <        is controlled by Segments, each covering a portion of the
107 <        table. During operations requiring global exclusivity (mainly
108 <        resize and clear), ALL of these locks are acquired at once.
109 <        Note that these segments are NOT contiguous -- they are based
110 <        on the least 5 bits of hashcodes. This ensures that the same
111 <        segment controls the same slots before and after resizing, which
112 <        is necessary for supporting concurrent retrievals. This
113 <        comes at the price of a mismatch of logical vs physical locality,
114 <        but this seems not to be a performance problem in practice.
115 <
116 <    */
117 <
118 <    /**
119 <     * The hash table data.
120 <     */
121 <    private transient Entry<K,V>[] table;
122 <
58 >     * The basic strategy is to subdivide the table among Segments,
59 >     * each of which itself is a concurrently readable hash table.
60 >     */
61  
62 +    /* ---------------- Constants -------------- */
63 +    
64      /**
65 <     * The number of concurrency control segments.
66 <     * The value can be at most 32 since ints are used
67 <     * as bitsets over segments. Empirically, it doesn't
68 <     * seem to pay to decrease it either, so the value should be at least 32.
129 <     * In other words, do not redefine this :-)
130 <     **/
131 <    private static final int CONCURRENCY_LEVEL = 32;
65 >     * The default initial number of table slots for this table (16).
66 >     * Used when not otherwise specified in constructor.
67 >     */
68 >    static int DEFAULT_INITIAL_CAPACITY = 16;
69  
70      /**
71 <     * Mask value for indexing into segments
72 <     **/
73 <    private static final int SEGMENT_MASK = CONCURRENCY_LEVEL - 1;
71 >     * The maximum capacity, used if a higher value is implicitly
72 >     * specified by either of the constructors with arguments.  MUST
73 >     * be a power of two <= 1<<30.
74 >     */
75 >    static final int MAXIMUM_CAPACITY = 1 << 30;
76 >  
77 >    /**
78 >     * The default load factor for this table.  Used when not
79 >     * otherwise specified in constructor.
80 >     */
81 >    static final float DEFAULT_LOAD_FACTOR = 0.75f;
82  
83      /**
84 <     * Bookkeeping for each concurrency control segment.
140 <     * Each segment contains a local count of the number of
141 <     * elements in its region.
142 <     * However, the main use of a Segment is for its lock.
84 >     * The default number of concurrency control segments.
85       **/
86 <    private final static class Segment extends ReentrantLock {
145 <        /**
146 <         * The number of elements in this segment's region.
147 <         **/
148 <        private int count;
86 >    private static final int DEFAULT_SEGMENTS = 16;
87  
88 <        /**
151 <         * Get the count under synch.
152 <         **/
153 <        private int getCount() {
154 <            lock();
155 <            try {
156 <                return count;
157 <            }
158 <            finally {
159 <                unlock();
160 <            }
161 <        }
162 <
163 <    }
88 >    /* ---------------- Fields -------------- */
89  
90      /**
91 <     * The array of concurrency control segments.
91 >     * Mask value for indexing into segments. The lower bits of a
92 >     * key's hash code are used to choose the segment, and the
93 >     * remaining bits are used as the placement hashcode within
94 >     * the segment.
95       **/
96 <    private transient final Segment[] segments = new Segment[CONCURRENCY_LEVEL];
169 <
96 >    private final int segmentMask;
97  
98      /**
99 <     * The default initial number of table slots for this table (32).
173 <     * Used when not otherwise specified in constructor.
99 >     * Shift value for indexing within segments.
100       **/
101 <    public static int DEFAULT_INITIAL_CAPACITY = 32;
176 <
101 >    private final int segmentShift;
102  
103      /**
104 <     * The minimum capacity, used if a lower value is implicitly specified
180 <     * by either of the constructors with arguments.
181 <     * MUST be a power of two.
104 >     * The segments, each of which is a specialized hash table
105       */
106 <    private static final int MINIMUM_CAPACITY = 32;
106 >    private final Segment<K,V>[] segments;
107 >
108 >    private transient Set<K> keySet = null;
109 >    private transient Set/*<Map.Entry<K,V>>*/ entrySet = null;
110 >    private transient Collection<V> values = null;
111 >
112 >    /* ---------------- Small Utilities -------------- */
113  
114      /**
115 <     * The maximum capacity, used if a higher value is implicitly specified
116 <     * by either of the constructors with arguments.
188 <     * MUST be a power of two <= 1<<30.
115 >     * Return a hash code for non-null Object x.  
116 >     * Uses the same hash code spreader as most other j.u hash tables.
117       */
118 <    private static final int MAXIMUM_CAPACITY = 1 << 30;
118 >    private static int hash(Object x) {
119 >        int h = x.hashCode();
120 >        h += ~(h << 9);
121 >        h ^=  (h >>> 14);
122 >        h +=  (h << 4);
123 >        h ^=  (h >>> 10);
124 >        return h;
125 >    }
126  
127 <    /**
128 <     * The default load factor for this table (0.75)
194 <     * Used when not otherwise specified in constructor.
127 >    /**
128 >     * Check for equality of non-null references x and y.
129       **/
130 <    public static final float DEFAULT_LOAD_FACTOR = 0.75f;
130 >    private static boolean eq(Object x, Object y) {
131 >        return x == y || x.equals(y);
132 >    }
133  
134      /**
135 <     * The load factor for the hash table.
200 <     *
201 <     * @serial
135 >     * Return index for hash code h in table of given length.
136       */
137 <    private final float loadFactor;
137 >    private static int indexFor(int h, int length) {
138 >        return h & (length-1);
139 >    }
140  
141      /**
142 <     * Per-segment resize threshold.
207 <     *
208 <     * @serial
142 >     * Return the segment that should be used for key with given hash
143       */
144 <    private int threshold;
145 <
144 >    private Segment<K,V> segmentFor(int hash) {
145 >        return segments[hash & segmentMask];
146 >    }
147  
148      /**
149 <     * Number of segments voting for resize. The table is
150 <     * doubled when 1/4 of the segments reach threshold.
151 <     * Volatile but updated without synch since this is just a heuristic.
152 <     **/
153 <    private transient volatile int votesForResize;
149 >     * Strip the segment index from hash code to use as a per-segment hash.
150 >     */
151 >    private int segmentHashFor(int hash) {
152 >        return hash >>> segmentShift;
153 >    }
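
To make the bit split concrete: with the default of 16 segments the constructor further down computes segmentShift = 4 and segmentMask = 0xF, so a spread hash divides as in this hypothetical trace (the names and values are illustrative):

    // Assuming 16 segments, so segmentShift == 4 and segmentMask == 0xF.
    int h = hash(key);          // spread the key's hashCode()
    int segIndex = h & 0xF;     // segmentFor: low 4 bits choose one of the 16 segments
    int segHash  = h >>> 4;     // segmentHashFor: remaining bits are the per-segment hash
    // Within the chosen segment, indexFor(segHash, table.length) then picks the bin.
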
154  
155 +    /* ---------------- Inner Classes -------------- */
156  
157      /**
158 <     * Return the number of set bits in w.
159 <     * For a derivation of this algorithm, see
160 <     * "Algorithms and data structures with applications to
225 <     *  graphics and geometry", by Jurg Nievergelt and Klaus Hinrichs,
226 <     *  Prentice Hall, 1993.
227 <     * See also notes by Torsten Sillke at
228 <     * http://www.mathematik.uni-bielefeld.de/~sillke/PROBLEMS/bitcount
158 >     * Segments are specialized versions of hash tables.  This
159 >     * subclasses from ReentrantLock opportunistically, just to
160 >     * simplify some locking and avoid separate construction.
161       **/
162 <    private static int bitcount(int w) {
163 <        w -= (0xaaaaaaaa & w) >>> 1;
164 <        w = (w & 0x33333333) + ((w >>> 2) & 0x33333333);
165 <        w = (w + (w >>> 4)) & 0x0f0f0f0f;
166 <        w += w >>> 8;
167 <        w += w >>> 16;
168 <        return w & 0xff;
169 <    }
162 >    private final static class Segment<K,V> extends ReentrantLock implements Serializable {
163 >        /*
164 >         * Segments maintain a table of entry lists that are ALWAYS
165 >         * kept in a consistent state, so can be read without locking.
166 >         * Next fields of nodes are immutable (final).  All list
167 >         * additions are performed at the front of each bin. This
168 >         * makes it easy to check changes, and also fast to traverse.
169 >         * When nodes would otherwise be changed, new nodes are
170 >         * created to replace them. This works well for hash tables
171 >         * since the bin lists tend to be short. (The average length
172 >         * is less than two for the default load factor threshold.)
173 >         *
174 >         * Read operations can thus proceed without locking, but rely
175 >         * on a memory barrier to ensure that completed write
176 >         * operations performed by other threads are
177 >         * noticed. Conveniently, the "count" field, tracking the
178 >         * number of elements, can also serve as the volatile variable
179 >         * providing proper read/write barriers. This is convenient
180 >         * because this field needs to be read in many read operations
181 >         * anyway. The use of volatiles for this purpose is only
182 >         * guaranteed to work in accord with requirements in
183 >         * multithreaded environments when run on JVMs conforming to
184 >         * the clarified JSR133 memory model specification.  This is true
185 >         * for hotspot as of release 1.4.
186 >         *
187 >         * Implementors note. The basic rules for all this are:
188 >         *
189 >         *   - All unsynchronized read operations must first read the
190 >         *     "count" field, and should not look at table entries if
191 >         *     it is 0.
192 >         *    
193 >         *   - All synchronized write operations should write to
194 >         *     the "count" field after updating. The operations must not
195 >         *     take any action that could even momentarily cause
196 >         *     a concurrent read operation to see inconsistent
197 >         *     data. This is made easier by the nature of the read
198 >         *     operations in Map. For example, no operation
199 >         *     can reveal that the table has grown but the threshold
200 >         *     has not yet been updated, so there are no atomicity
201 >         *     requirements for this with respect to reads.
202 >         *
203 >         * As a guide, all critical volatile reads and writes are marked
204 >         * in code comments.
205 >         */
206 >        
207 >        /**
208 >         * The number of elements in this segment's region.
209 >         **/
210 >        transient volatile int count;
211  
212 <    /**
213 <     * Returns the appropriate capacity (power of two) for the specified
214 <     * initial capacity argument.
215 <     */
216 <    private int p2capacity(int initialCapacity) {
217 <        int cap = initialCapacity;
212 >        /**
213 >         * The table is rehashed when its size exceeds this threshold.
214 >         * (The value of this field is always (int)(capacity *
215 >         * loadFactor).)
216 >         */
217 >        transient private int threshold;
218  
219 <        // Compute the appropriate capacity
220 <        int result;
221 <        if (cap > MAXIMUM_CAPACITY || cap < 0) {
222 <            result = MAXIMUM_CAPACITY;
223 <        } else {
224 <            result = MINIMUM_CAPACITY;
225 <            while (result < cap)
226 <                result <<= 1;
219 >        /**
220 >         * The per-segment table
221 >         */
222 >        transient HashEntry<K,V>[] table;
223 >
224 >        /**
225 >         * The load factor for the hash table.  Even though this value
226 >         * is the same for all segments, it is replicated to avoid needing
227 >         * links to outer object.
228 >         * @serial
229 >         */
230 >        private final float loadFactor;
231 >
232 >        Segment(int initialCapacity, float lf) {
233 >            loadFactor = lf;
234 >            setTable(new HashEntry<K,V>[initialCapacity]);
235 >        }
236 >
237 >        /**
238 >         * Set table to new HashEntry array.
239 >         * Call only while holding lock or in constructor.
240 >         **/
241 >        private void setTable(HashEntry<K,V>[] newTable) {
242 >            table = newTable;
243 >            threshold = (int)(newTable.length * loadFactor);
244 >            count = count; // write-volatile
245 >        }    
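
The "count = count" self-assignment above is the write-volatile barrier referred to in the strategy comment: because count is volatile, a reader that starts by reading it is guaranteed under the JSR-133 rules cited above to also see the table writes made before that volatile write. A stripped-down, hypothetical sketch of the same pairing, separate from this class:

    class PiggybackSketch {                  // illustrative only; not part of ConcurrentHashMap
        static class Node {
            final String key; final String value;
            Node(String key, String value) { this.key = key; this.value = value; }
        }
        private volatile int count;
        private Node[] table = new Node[8];

        void write(int i, Node n) {          // done while holding a lock in the real code
            table[i] = n;
            count = count + 1;               // write-volatile: publishes the table update
        }

        Node read(int i) {
            if (count != 0)                  // read-volatile: makes the prior writes visible
                return table[i];
            return null;
        }
    }
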
246 >
247 >        /* Specialized implementations of map methods */
248 >        
249 >        V get(K key, int hash) {
250 >            if (count != 0) { // read-volatile
251 >                HashEntry<K,V>[] tab = table;
252 >                int index = indexFor(hash, tab.length);
253 >                HashEntry<K,V> e = tab[index];
254 >                while (e != null) {
255 >                    if (e.hash == hash && eq(key, e.key))
256 >                        return e.value;
257 >                    e = e.next;
258 >                }
259 >            }
260 >            return null;
261 >        }
262 >
263 >        boolean containsKey(Object key, int hash) {
264 >            if (count != 0) { // read-volatile
265 >                HashEntry<K,V>[] tab = table;
266 >                int index = indexFor(hash, tab.length);
267 >                HashEntry<K,V> e = tab[index];
268 >                while (e != null) {
269 >                    if (e.hash == hash && eq(key, e.key))
270 >                        return true;
271 >                    e = e.next;
272 >                }
273 >            }
274 >            return false;
275 >        }
276 >        
277 >        boolean containsValue(Object value) {
278 >            if (count != 0) { // read-volatile
279 >                HashEntry<K,V> tab[] = table;
280 >                int len = tab.length;
281 >                for (int i = 0 ; i < len; i++)
282 >                    for (HashEntry<K,V> e = tab[i] ; e != null ; e = e.next)
283 >                        if (value.equals(e.value))
284 >                            return true;
285 >            }
286 >            return false;
287 >        }
288 >
289 >        V put(K key, int hash, V value, boolean onlyIfAbsent) {
290 >            lock();
291 >            try {
292 >                HashEntry<K,V>[] tab = table;
293 >                int index = indexFor(hash, tab.length);
294 >                HashEntry<K,V> first = tab[index];
295 >                
296 >                for (HashEntry<K,V> e = first; e != null; e = e.next) {
297 >                    if (e.hash == hash && eq(key, e.key)) {
298 >                        V oldValue = e.value;
299 >                        if (!onlyIfAbsent)
300 >                            e.value = value;
301 >                        count = count; // write-volatile
302 >                        return oldValue;
303 >                    }
304 >                }
305 >                
306 >                tab[index] = new HashEntry<K,V>(hash, key, value, first);
307 >                if (++count > threshold) // write-volatile
308 >                    rehash();
309 >                return null;
310 >            }
311 >            finally {
312 >                unlock();
313 >            }
314 >        }
315 >
316 >        private void rehash() {
317 >            HashEntry<K,V>[] oldTable = table;
318 >            int oldCapacity = oldTable.length;
319 >            if (oldCapacity >= MAXIMUM_CAPACITY)
320 >                return;
321 >
322 >            /*
323 >             * Reclassify nodes in each list to new Map.  Because we are
324 >             * using power-of-two expansion, the elements from each bin
325 >             * must either stay at same index, or move with a power of two
326 >             * offset. We eliminate unnecessary node creation by catching
327 >             * cases where old nodes can be reused because their next
328 >             * fields won't change. Statistically, at the default
329 >             * threshold, only about one-sixth of them need cloning when
330 >             * a table doubles. The nodes they replace will be garbage
331 >             * collectable as soon as they are no longer referenced by any
332 >             * reader thread that may be in the midst of traversing table
333 >             * right now.
334 >             */
335 >            
336 >            HashEntry<K,V>[] newTable = new HashEntry<K,V>[oldCapacity << 1];
337 >            int sizeMask = newTable.length - 1;
338 >            for (int i = 0; i < oldCapacity ; i++) {
339 >                // We need to guarantee that any existing reads of old Map can
340 >                //  proceed. So we cannot yet null out each bin.  
341 >                HashEntry<K,V> e = oldTable[i];
342 >                
343 >                if (e != null) {
344 >                    HashEntry<K,V> next = e.next;
345 >                    int idx = e.hash & sizeMask;
346 >                    
347 >                    //  Single node on list
348 >                    if (next == null)
349 >                        newTable[idx] = e;
350 >                    
351 >                    else {    
352 >                        // Reuse trailing consecutive sequence at same slot
353 >                        HashEntry<K,V> lastRun = e;
354 >                        int lastIdx = idx;
355 >                        for (HashEntry<K,V> last = next;
356 >                             last != null;
357 >                             last = last.next) {
358 >                            int k = last.hash & sizeMask;
359 >                            if (k != lastIdx) {
360 >                                lastIdx = k;
361 >                                lastRun = last;
362 >                            }
363 >                        }
364 >                        newTable[lastIdx] = lastRun;
365 >                        
366 >                        // Clone all remaining nodes
367 >                        for (HashEntry<K,V> p = e; p != lastRun; p = p.next) {
368 >                            int k = p.hash & sizeMask;
369 >                            newTable[k] = new HashEntry<K,V>(p.hash, p.key,
370 >                                                             p.value, newTable[k]);
371 >                        }
372 >                    }
373 >                }
374 >            }
375 >            setTable(newTable);
376 >        }
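
A small, self-contained trace of the index arithmetic behind the trailing-run reuse above, using invented hash values: under power-of-two doubling each entry either keeps its bin index or moves up by exactly oldCapacity, so the longest tail of the list that agrees on its new index can be relinked unchanged and only the nodes ahead of it are cloned.

    // Hypothetical values; demonstrates only the old-index/new-index split.
    public class RehashIndexTrace {
        public static void main(String[] args) {
            int oldCapacity = 4;
            int sizeMask = (oldCapacity << 1) - 1;     // 7 after doubling
            int[] binList = { 1, 9, 5, 13 };           // a bin: every hash has (h & 3) == 1
            for (int j = 0; j < binList.length; ++j) {
                int h = binList[j];
                System.out.println("hash " + h + ": old bin " + (h & (oldCapacity - 1))
                                   + ", new bin " + (h & sizeMask));
            }
            // New bins print as 1, 1, 5, 5: the trailing run (5, 13) is reused as-is,
            // while the first two nodes are cloned into new bin 1.
        }
    }
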
377 >        
378 >        V remove(Object key, int hash, Object value) {
379 >            lock();
380 >            try {
381 >                HashEntry[] tab = table;
382 >                int index = indexFor(hash, tab.length);
383 >                HashEntry<K,V> first = tab[index];
384 >                
385 >                HashEntry<K,V> e = first;
386 >                while (true) {
387 >                    if (e == null)
388 >                        return null;
389 >                    if (e.hash == hash && eq(key, e.key))
390 >                        break;
391 >                    e = e.next;
392 >                }
393 >
394 >                V oldValue = e.value;
395 >                if (value != null && !value.equals(oldValue))
396 >                    return null;
397 >                
398 >                // All entries following removed node can stay in list, but
399 >                // all preceding ones need to be cloned.
400 >                HashEntry<K,V> newFirst = e.next;
401 >                for (HashEntry<K,V> p = first; p != e; p = p.next)
402 >                    newFirst = new HashEntry<K,V>(p.hash, p.key, p.value, newFirst);
403 >                tab[index] = newFirst;
404 >                
405 >                count--; // write-volatile
406 >                return e.value;
407 >            }
408 >            finally {
409 >                unlock();
410 >            }
411 >        }
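
To picture the cloning step above with an invented bin list: suppose the bin holds a -> b -> c -> d and c is removed. Since next fields are final, d can be kept as-is, while a and b are re-created so they no longer reach c; readers already traversing the old chain still see c until they finish.

    // Hypothetical trace of remove(c):
    //   before:   tab[index] -> a -> b -> c -> d
    //   newFirst = d; clone a (next = d); then clone b (next = cloned a)
    //   after:    tab[index] -> b' -> a' -> d
    // The cloned predecessors come out in reverse order, which is harmless for a hash bin.
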
412 >
413 >        void clear() {
414 >            lock();
415 >            try {
416 >                HashEntry<K,V> tab[] = table;
417 >                for (int i = 0; i < tab.length ; i++)
418 >                    tab[i] = null;
419 >                count = 0; // write-volatile
420 >            }
421 >            finally {
422 >                unlock();
423 >            }
424          }
255        return result;
425      }
426  
427      /**
428 <     * Return hash code for Object x. Since we are using power-of-two
260 <     * tables, it is worth the effort to improve hashcode via
261 <     * the same multiplicative scheme as used in IdentityHashMap.
428 >     * ConcurrentReaderHashMap list entry.
429       */
430 <    private static int hash(Object x) {
431 <        int h = x.hashCode();
432 <        // Multiply by 127 (quickly, via shifts), and mix in some high
433 <        // bits to help guard against bunching of codes that are
434 <        // consecutive or equally spaced.
435 <        return ((h << 7) - h + (h >>> 9) + (h >>> 17));
436 <    }
430 >    private static class HashEntry<K,V> implements Entry<K,V> {
431 >        private final K key;
432 >        private V value;
433 >        private final int hash;
434 >        private final HashEntry<K,V> next;
435 >
436 >        HashEntry(int hash, K key, V value, HashEntry<K,V> next) {
437 >            this.value = value;
438 >            this.hash = hash;
439 >            this.key = key;
440 >            this.next = next;
441 >        }
442  
443 +        public K getKey() {
444 +            return key;
445 +        }
446  
447 <    /**
448 <     * Check for equality of non-null references x and y.
449 <     **/
275 <    private boolean eq(Object x, Object y) {
276 <        return x == y || x.equals(y);
277 <    }
447 >        public V getValue() {
448 >            return value;
449 >        }
450  
451 <    /** Create table array and set the per-segment threshold **/
452 <    private Entry<K,V>[] newTable(int capacity) {
453 <        threshold = (int)(capacity * loadFactor / CONCURRENCY_LEVEL) + 1;
454 <        return new Entry<K,V>[capacity];
451 >        public V setValue(V newValue) {
452 >            // We aren't required to, and don't provide any
453 >            // visibility barriers for setting value.
454 >            if (newValue == null)
455 >                throw new NullPointerException();
456 >            V oldValue = this.value;
457 >            this.value = newValue;
458 >            return oldValue;
459 >        }
460 >
461 >        public boolean equals(Object o) {
462 >            if (!(o instanceof Entry))
463 >                return false;
464 >            Entry<K,V> e = (Entry)o;
465 >            return (key.equals(e.getKey()) && value.equals(e.getValue()));
466 >        }
467 >    
468 >        public int hashCode() {
469 >            return  key.hashCode() ^ value.hashCode();
470 >        }
471 >
472 >        public String toString() {
473 >            return key + "=" + value;
474 >        }
475      }
476  
477 +    
478 +    /* ---------------- Public operations -------------- */
479 +
480      /**
481       * Constructs a new, empty map with the specified initial
482       * capacity and the specified load factor.
483       *
484 <     * @param initialCapacity the initial capacity.
485 <     *  The actual initial capacity is rounded up to the nearest power of two.
484 >     * @param initialCapacity the initial capacity.  The actual
485 >     * initial capacity is rounded up to the nearest power of two.
486       * @param loadFactor  the load factor threshold, used to control resizing.
487 <     *   This value is used in an approximate way: When at least
488 <     *   a quarter of the segments of the table reach per-segment threshold, or
489 <     *   one of the segments itself exceeds overall threshold,
490 <     *   the table is doubled.
491 <     *   This will on average cause resizing when the table-wide
492 <     *   load factor is slightly less than the threshold. If you'd like
493 <     *   to avoid resizing, you can set this to a ridiculously large
494 <     *   value.
495 <     * @throws IllegalArgumentException  if the load factor is nonpositive.
496 <     */
497 <    public ConcurrentHashMap(int initialCapacity, float loadFactor) {
498 <        if (!(loadFactor > 0))
499 <            throw new IllegalArgumentException("Illegal Load factor: "+ loadFactor);
500 <        this.loadFactor = loadFactor;
501 <        for (int i = 0; i < segments.length; ++i)
502 <            segments[i] = new Segment();
503 <        int cap = p2capacity(initialCapacity);
504 <        table = newTable(cap);
487 >     * @param segments the number of concurrently accessible segments. the
488 >     * actual number of segments is rounded to the next power of two.
489 >     * @throws IllegalArgumentException if the initial capacity is
490 >     * negative or the load factor or number of segments are
491 >     * nonpositive.
492 >     */
493 >    public ConcurrentHashMap(int initialCapacity, float loadFactor, int segments) {
494 >        if (!(loadFactor > 0) || initialCapacity < 0 || segments <= 0)
495 >            throw new IllegalArgumentException();
496 >
497 >        // Find power-of-two sizes best matching arguments
498 >        int sshift = 0;
499 >        int ssize = 1;
500 >        while (ssize < segments) {
501 >            ++sshift;
502 >            ssize <<= 1;
503 >        }
504 >        segmentShift = sshift;
505 >        segmentMask = ssize-1;
506 >        this.segments = new Segment<K,V>[ssize];
507 >
508 >        if (initialCapacity > MAXIMUM_CAPACITY)
509 >            initialCapacity = MAXIMUM_CAPACITY;
510 >        int c = initialCapacity / ssize;
511 >        if (c * ssize < initialCapacity)
512 >            ++c;
513 >        int cap = 1;
514 >        while (cap < c)
515 >            cap <<= 1;
516 >
517 >        for (int i = 0; i < this.segments.length; ++i)
518 >            this.segments[i] = new Segment<K,V>(cap, loadFactor);
519      }
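
A hand trace of the sizing arithmetic above for one made-up argument set (the values 100, 0.75f, and 10 are assumptions, not taken from this revision):

    // new ConcurrentHashMap(100, 0.75f, 10):
    //   ssize grows 1 -> 2 -> 4 -> 8 -> 16, so sshift = 4, segmentShift = 4, segmentMask = 15
    //   c = ceil(100 / 16) = 7, rounded up to the next power of two: cap = 8
    //   result: 16 segments x 8 slots = 128 slots total; per-segment threshold (int)(8 * 0.75) = 6
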
520  
521      /**
522       * Constructs a new, empty map with the specified initial
523 <     * capacity and default load factor.
523 >     * capacity, and with default load factor and segments.
524       *
525 <     * @param   initialCapacity   the initial capacity of the
526 <     *                            ConcurrentHashMap.
527 <     * @throws    IllegalArgumentException if the initial maximum number
528 <     *              of elements is less
320 <     *              than zero.
525 >     * @param initialCapacity the initial capacity of the
526 >     * ConcurrentHashMap.
527 >     * @throws IllegalArgumentException if the initial capacity is
528 >     * negative.
529       */
530      public ConcurrentHashMap(int initialCapacity) {
531 <        this(initialCapacity, DEFAULT_LOAD_FACTOR);
531 >        this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_SEGMENTS);
532      }
533  
534      /**
535 <     * Constructs a new, empty map with a default initial capacity
536 <     * and default load factor.
535 >     * Constructs a new, empty map with a default initial capacity,
536 >     * load factor, and number of segments.
537       */
538      public ConcurrentHashMap() {
539 <        this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR);
539 >        this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_SEGMENTS);
540      }
541  
542      /**
543       * Constructs a new map with the same mappings as the given map.  The
544       * map is created with a capacity of twice the number of mappings in
545 <     * the given map or 32 (whichever is greater), and a default load factor.
545 >     * the given map or 11 (whichever is greater), and a default load factor.
546       */
547      public <A extends K, B extends V> ConcurrentHashMap(Map<A,B> t) {
548          this(Math.max((int) (t.size() / DEFAULT_LOAD_FACTOR) + 1,
549 <            MINIMUM_CAPACITY),
550 <            DEFAULT_LOAD_FACTOR);
551 <            putAll(t);
549 >                      11),
550 >             DEFAULT_LOAD_FACTOR, DEFAULT_SEGMENTS);
551 >        putAll(t);
552      }
553  
554 <    /**
347 <     * Returns the number of key-value mappings in this map.
348 <     *
349 <     * @return the number of key-value mappings in this map.
350 <     */
554 >    // inherit Map javadoc
555      public int size() {
556          int c = 0;
557          for (int i = 0; i < segments.length; ++i)
558 <            c += segments[i].getCount();
558 >            c += segments[i].count;
559          return c;
560      }
561  
562 <    /**
359 <     * Returns <tt>true</tt> if this map contains no key-value mappings.
360 <     *
361 <     * @return <tt>true</tt> if this map contains no key-value mappings.
362 <     */
562 >    // inherit Map javadoc
563      public boolean isEmpty() {
564          for (int i = 0; i < segments.length; ++i)
565 <            if (segments[i].getCount() != 0)
565 >            if (segments[i].count != 0)
566                  return false;
567          return true;
568      }
569  
370
570      /**
571       * Returns the value to which the specified key is mapped in this table.
572       *
# Line 379 | Line 578 | public class ConcurrentHashMap<K, V> ext
578       *               <code>null</code>.
579       * @see     #put(Object, Object)
580       */
581 <    public V get(K key) {
582 <        int hash = hash(key);     // throws null pointer exception if key null
583 <
385 <        // Try first without locking...
386 <        Entry<K,V>[] tab = table;
387 <        int index = hash & (tab.length - 1);
388 <        Entry<K,V> first = tab[index];
389 <        Entry<K,V> e;
390 <
391 <        for (e = first; e != null; e = e.next) {
392 <            if (e.hash == hash && eq(key, e.key)) {
393 <                V value = e.value;
394 <                if (value != null)
395 <                    return value;
396 <                else
397 <                    break;
398 <            }
399 <        }
400 <
401 <        // Recheck under synch if key apparently not there or interference
402 <        Segment seg = segments[hash & SEGMENT_MASK];
403 <        seg.lock();
404 <        try {
405 <            tab = table;
406 <            index = hash & (tab.length - 1);
407 <            Entry<K,V> newFirst = tab[index];
408 <            if (e != null || first != newFirst) {
409 <                for (e = newFirst; e != null; e = e.next) {
410 <                    if (e.hash == hash && eq(key, e.key))
411 <                        return e.value;
412 <                }
413 <            }
414 <            return null;
415 <        }
416 <        finally {
417 <            seg.unlock();
418 <        }
581 >    public V get(K key) {
582 >        int hash = hash(key); // throws NullPointerException if key null
583 >        return segmentFor(hash).get(key, segmentHashFor(hash));
584      }
585  
586      /**
587       * Tests if the specified object is a key in this table.
588 <     *
588 >     *
589       * @param   key   possible key.
590 <     * @return  <code>true</code> if and only if the specified object
591 <     *          is a key in this table, as determined by the
590 >     * @return  <code>true</code> if and only if the specified object
591 >     *          is a key in this table, as determined by the
592       *          <tt>equals</tt> method; <code>false</code> otherwise.
593       * @exception  NullPointerException  if the key is
594       *               <code>null</code>.
595       * @see     #contains(Object)
596       */
597      public boolean containsKey(Object key) {
598 <        // Annoyingly, for now, duplicate get, since can't call
599 <        // because different signatures.
600 <
436 <        int hash = hash(key);     // throws null pointer exception if key null
598 >        int hash = hash(key); // throws NullPointerException if key null
599 >        return segmentFor(hash).containsKey(key, segmentHashFor(hash));
600 >    }
601  
602 <        // Try first without locking...
603 <        Entry<K,V>[] tab = table;
604 <        int index = hash & (tab.length - 1);
605 <        Entry<K,V> first = tab[index];
606 <        Entry<K,V> e;
607 <
608 <        for (e = first; e != null; e = e.next) {
609 <            if (e.hash == hash && eq(key, e.key)) {
610 <                V value = e.value;
611 <                if (value != null)
612 <                    return true;
613 <                else
614 <                    break;
615 <            }
452 <        }
602 >    /**
603 >     * Returns <tt>true</tt> if this map maps one or more keys to the
604 >     * specified value. Note: This method requires a full internal
605 >     * traversal of the hash table, and so is much slower than
606 >     * method <tt>containsKey</tt>.
607 >     *
608 >     * @param value value whose presence in this map is to be tested.
609 >     * @return <tt>true</tt> if this map maps one or more keys to the
610 >     * specified value.  
611 >     * @exception  NullPointerException  if the value is <code>null</code>.
612 >     */
613 >    public boolean containsValue(Object value) {
614 >        if (value == null)
615 >            throw new NullPointerException();
616  
617 <        // Recheck under synch if key apparently not there or interference
618 <        Segment seg = segments[hash & SEGMENT_MASK];
619 <        seg.lock();
457 <        try {
458 <            tab = table;
459 <            index = hash & (tab.length - 1);
460 <            Entry<K,V> newFirst = tab[index];
461 <            if (e != null || first != newFirst) {
462 <                for (e = newFirst; e != null; e = e.next) {
463 <                    if (e.hash == hash && eq(key, e.key))
464 <                        return true;
465 <                }
466 <            }
467 <            return false;
468 <        }
469 <        finally {
470 <            seg.unlock();
617 >        for (int i = 0; i < segments.length; ++i) {
618 >            if (segments[i].containsValue(value))
619 >                return true;
620          }
621 +        return false;
622 +    }
623 +    /**
624 +     * Tests if some key maps into the specified value in this table.
625 +     * This operation is more expensive than the <code>containsKey</code>
626 +     * method.<p>
627 +     *
628 +     * Note that this method is identical in functionality to containsValue,
629 +     * (which is part of the Map interface in the collections framework).
630 +     *
631 +     * @param      value   a value to search for.
632 +     * @return     <code>true</code> if and only if some key maps to the
633 +     *             <code>value</code> argument in this table as
634 +     *             determined by the <tt>equals</tt> method;
635 +     *             <code>false</code> otherwise.
636 +     * @exception  NullPointerException  if the value is <code>null</code>.
637 +     * @see        #containsKey(Object)
638 +     * @see        #containsValue(Object)
639 +     * @see        Map
640 +     */
641 +    public boolean contains(Object value) {
642 +        return containsValue(value);
643      }
473
644  
645      /**
646 <     * Maps the specified <code>key</code> to the specified
647 <     * <code>value</code> in this table. Neither the key nor the
648 <     * value can be <code>null</code>. (Note that this policy is
479 <     * the same as for java.util.Hashtable, but unlike java.util.HashMap,
480 <     * which does accept nulls as valid keys and values.)<p>
646 >     * Maps the specified <code>key</code> to the specified
647 >     * <code>value</code> in this table. Neither the key nor the
648 >     * value can be <code>null</code>. <p>
649       *
650 <     * The value can be retrieved by calling the <code>get</code> method
651 <     * with a key that is equal to the original key.
650 >     * The value can be retrieved by calling the <code>get</code> method
651 >     * with a key that is equal to the original key.
652       *
653       * @param      key     the table key.
654       * @param      value   the value.
# Line 491 | Line 659 | public class ConcurrentHashMap<K, V> ext
659       * @see     Object#equals(Object)
660       * @see     #get(Object)
661       */
662 <    public V put(K key, V value) {
663 <        if (value == null)
662 >    public V put(K key, V value) {
663 >        if (value == null)
664              throw new NullPointerException();
665 <
666 <        int hash = hash(key);
499 <        Segment seg = segments[hash & SEGMENT_MASK];
500 <        int segcount;
501 <        Entry<K,V>[] tab;
502 <        int votes;
503 <
504 <        seg.lock();
505 <        try {
506 <            tab = table;
507 <            int index = hash & (tab.length-1);
508 <            Entry<K,V> first = tab[index];
509 <
510 <            for (Entry<K,V> e = first; e != null; e = e.next) {
511 <                if (e.hash == hash && eq(key, e.key)) {
512 <                    V oldValue = e.value;
513 <                    e.value = value;
514 <                    return oldValue;
515 <                }
516 <            }
517 <
518 <            //  Add to front of list
519 <            Entry<K,V> newEntry = new Entry<K,V>(hash, key, value, first);
520 <            tab[index] = newEntry;
521 <
522 <            if ((segcount = ++seg.count) < threshold)
523 <                return null;
524 <
525 <            int bit = (1 << (hash & SEGMENT_MASK));
526 <            votes = votesForResize;
527 <            if ((votes & bit) == 0)
528 <                votes = votesForResize |= bit;
529 <        }
530 <        finally {
531 <            seg.unlock();
532 <        }
533 <
534 <        // Attempt resize if 1/4 segs vote,
535 <        // or if this seg itself reaches the overall threshold.
536 <        // (The latter check is just a safeguard to avoid pathological cases.)
537 <        if (bitcount(votes) >= CONCURRENCY_LEVEL / 4  ||
538 <        segcount > (threshold * CONCURRENCY_LEVEL))
539 <            resize(tab);
540 <
541 <        return null;
665 >        int hash = hash(key);
666 >        return segmentFor(hash).put(key, segmentHashFor(hash), value, false);
667      }
668  
669 <    public V putIfAbsent(K key, V value) {
670 <        if (value == null)
669 >    /**
670 >     * If the specified key is not already associated
671 >     * with a value, associate it with the given value.
672 >     * This is equivalent to
673 >     * <pre>
674 >     *   if (!map.containsKey(key)) return map.put(key, value);
675 >     *   else return map.get(key);
676 >     * </pre>
677 >     * Except that the action is performed atomically.
678 >     * @param key key with which the specified value is to be associated.
679 >     * @param value value to be associated with the specified key.
680 >     * @return previous value associated with specified key, or <tt>null</tt>
681 >     *         if there was no mapping for key.  A <tt>null</tt> return can
682 >     *         also indicate that the map previously associated <tt>null</tt>
683 >     *         with the specified key, if the implementation supports
684 >     *         <tt>null</tt> values.
685 >     *
686 >     * @throws NullPointerException this map does not permit <tt>null</tt>
687 >     *            keys or values, and the specified key or value is
688 >     *            <tt>null</tt>.
689 >     *
690 >     **/
691 >    public V putIfAbsent(K key, V value) {
692 >        if (value == null)
693              throw new NullPointerException();
694 <
695 <        int hash = hash(key);
549 <        Segment seg = segments[hash & SEGMENT_MASK];
550 <        int segcount;
551 <        Entry<K,V>[] tab;
552 <        int votes;
553 <
554 <        seg.lock();
555 <        try {
556 <            tab = table;
557 <            int index = hash & (tab.length-1);
558 <            Entry<K,V> first = tab[index];
559 <
560 <            for (Entry<K,V> e = first; e != null; e = e.next) {
561 <                if (e.hash == hash && eq(key, e.key)) {
562 <                    V oldValue = e.value;
563 <                    return oldValue;
564 <                }
565 <            }
566 <
567 <            //  Add to front of list
568 <            Entry<K,V> newEntry = new Entry<K,V>(hash, key, value, first);
569 <            tab[index] = newEntry;
570 <
571 <            if ((segcount = ++seg.count) < threshold)
572 <                return null;
573 <
574 <            int bit = (1 << (hash & SEGMENT_MASK));
575 <            votes = votesForResize;
576 <            if ((votes & bit) == 0)
577 <                votes = votesForResize |= bit;
578 <        }
579 <        finally {
580 <            seg.unlock();
581 <        }
582 <
583 <        // Attempt resize if 1/4 segs vote,
584 <        // or if this seg itself reaches the overall threshold.
585 <        // (The latter check is just a safeguard to avoid pathological cases.)
586 <        if (bitcount(votes) >= CONCURRENCY_LEVEL / 4  ||
587 <        segcount > (threshold * CONCURRENCY_LEVEL))
588 <            resize(tab);
589 <
590 <        return value;
694 >        int hash = hash(key);
695 >        return segmentFor(hash).put(key, segmentHashFor(hash), value, true);
696      }
697  
593    /**
594     * Gather all locks in order to call rehash, by
595     * recursing within synch blocks for each segment index.
596     * @param index the current segment. initially call value must be 0
597     * @param assumedTab the state of table on first call to resize. If
598     * this changes on any call, the attempt is aborted because the
599     * table has already been resized by another thread.
600     */
601    private void resize(Entry<K,V>[] assumedTab) {
602        boolean ok = true;
603        int lastlocked = 0;
604        for (int i = 0; i < segments.length; ++i) {
605            segments[i].lock();
606            lastlocked = i;
607            if (table != assumedTab) {
608                ok = false;
609                break;
610            }
611        }
612        try {
613            if (ok)
614                rehash();
615        }
616        finally {
617            for (int i = lastlocked; i >= 0; --i)
618                segments[i].unlock();
619        }
620    }
698  
699      /**
700 <     * Rehashes the contents of this map into a new table
701 <     * with a larger capacity.
700 >     * Copies all of the mappings from the specified map to this one.
701 >     *
702 >     * These mappings replace any mappings that this map had for any of the
703 >     * keys currently in the specified Map.
704 >     *
705 >     * @param t Mappings to be stored in this map.
706       */
707 <    private void rehash() {
708 <        votesForResize = 0; // reset
709 <
710 <        Entry<K,V>[] oldTable = table;
711 <        int oldCapacity = oldTable.length;
631 <
632 <        if (oldCapacity >= MAXIMUM_CAPACITY) {
633 <            threshold = Integer.MAX_VALUE; // avoid retriggering
634 <            return;
707 >    public <K1 extends K, V1 extends V> void putAll(Map<K1,V1> t) {
708 >        Iterator<Map.Entry<K1,V1>> it = t.entrySet().iterator();
709 >        while (it.hasNext()) {
710 >            Entry<K,V> e = (Entry) it.next();
711 >            put(e.getKey(), e.getValue());
712          }
636
637        int newCapacity = oldCapacity << 1;
638        Entry<K,V>[] newTable = newTable(newCapacity);
639        int mask = newCapacity - 1;
640
641        /*
642         * Reclassify nodes in each list to new Map.  Because we are
643         * using power-of-two expansion, the elements from each bin
644         * must either stay at same index, or move to
645         * oldCapacity+index. We also eliminate unnecessary node
646         * creation by catching cases where old nodes can be reused
647         * because their next fields won't change. Statistically, at
648         * the default threshold, only about one-sixth of them need
649         * cloning. (The nodes they replace will be garbage
650         * collectable as soon as they are no longer referenced by any
651         * reader thread that may be in the midst of traversing table
652         * right now.)
653         */
654
655        for (int i = 0; i < oldCapacity ; i++) {
656            // We need to guarantee that any existing reads of old Map can
657            //  proceed. So we cannot yet null out each bin.
658            Entry<K,V> e = oldTable[i];
659
660            if (e != null) {
661                int idx = e.hash & mask;
662                Entry<K,V> next = e.next;
663
664                //  Single node on list
665                if (next == null)
666                    newTable[idx] = e;
667
668                else {
669                    // Reuse trailing consecutive sequence of all same bit
670                    Entry<K,V> lastRun = e;
671                    int lastIdx = idx;
672                    for (Entry<K,V> last = next; last != null; last = last.next) {
673                        int k = last.hash & mask;
674                        if (k != lastIdx) {
675                            lastIdx = k;
676                            lastRun = last;
677                        }
678                    }
679                    newTable[lastIdx] = lastRun;
680
681                    // Clone all remaining nodes
682                    for (Entry<K,V> p = e; p != lastRun; p = p.next) {
683                        int k = p.hash & mask;
684                        newTable[k] = new Entry<K,V>(p.hash, p.key,
685                        p.value, newTable[k]);
686                    }
687                }
688            }
689        }
690
691        table = newTable;
713      }
714  
694
715      /**
716 <     * Removes the key (and its corresponding value) from this
716 >     * Removes the key (and its corresponding value) from this
717       * table. This method does nothing if the key is not in the table.
718       *
719       * @param   key   the key that needs to be removed.
# Line 703 | Line 723 | public class ConcurrentHashMap<K, V> ext
723       *               <code>null</code>.
724       */
725      public V remove(Object key) {
726 <        return remove(key, null);
726 >        int hash = hash(key);
727 >        return segmentFor(hash).remove(key, segmentHashFor(hash), null);
728      }
729  
709
730      /**
731       * Removes the (key, value) pair from this
732       * table. This method does nothing if the key is not in the table,
# Line 721 | Line 741 | public class ConcurrentHashMap<K, V> ext
741       * @exception  NullPointerException  if the key is
742       *               <code>null</code>.
743       */
744 <    private V remove(Object key, V value) {
745 <        /*
746 <          Find the entry, then
727 <            1. Set value field to null, to force get() to retry
728 <            2. Rebuild the list without this entry.
729 <               All entries following removed node can stay in list, but
730 <               all preceding ones need to be cloned.  Traversals rely
731 <               on this strategy to ensure that elements will not be
732 <              repeated during iteration.
733 <         */
734 <
744 >    public V remove(Object key, Object value) {
745 >        if (value == null)
746 >            return null;
747          int hash = hash(key);
748 <        Segment seg = segments[hash & SEGMENT_MASK];
737 <
738 <        seg.lock();
739 <        try {
740 <            Entry<K,V>[] tab = table;
741 <            int index = hash & (tab.length-1);
742 <            Entry<K,V> first = tab[index];
743 <            Entry<K,V> e = first;
744 <
745 <            for (;;) {
746 <                if (e == null)
747 <                    return null;
748 <                if (e.hash == hash && eq(key, e.key))
749 <                    break;
750 <                e = e.next;
751 <            }
752 <
753 <            V oldValue = e.value;
754 <            if (value != null && !value.equals(oldValue))
755 <                return null;
756 <
757 <            e.value = null;
758 <
759 <            Entry<K,V> head = e.next;
760 <            for (Entry<K,V> p = first; p != e; p = p.next)
761 <                head = new Entry<K,V>(p.hash, p.key, p.value, head);
762 <            tab[index] = head;
763 <            seg.count--;
764 <            return oldValue;
765 <        }
766 <        finally {
767 <            seg.unlock();
768 <        }
769 <    }
770 <
771 <
772 <    /**
773 <     * Returns <tt>true</tt> if this map maps one or more keys to the
774 <     * specified value. Note: This method requires a full internal
775 <     * traversal of the hash table, and so is much slower than
776 <     * method <tt>containsKey</tt>.
777 <     *
778 <     * @param value value whose presence in this map is to be tested.
779 <     * @return <tt>true</tt> if this map maps one or more keys to the
780 <     * specified value.
781 <     * @exception  NullPointerException  if the value is <code>null</code>.
782 <     */
783 <    public boolean containsValue(Object value) {
784 <
785 <        if (value == null) throw new NullPointerException();
786 <
787 <        for (int s = 0; s < segments.length; ++s) {
788 <            Segment seg = segments[s];
789 <            Entry<K,V>[] tab;
790 <            seg.lock();
791 <            try {
792 <                tab = table;
793 <            }
794 <            finally {
795 <                seg.unlock();
796 <            }
797 <            for (int i = s; i < tab.length; i+= segments.length) {
798 <                for (Entry<K,V> e = tab[i]; e != null; e = e.next)
799 <                    if (value.equals(e.value))
800 <                        return true;
801 <            }
802 <        }
803 <        return false;
804 <    }
805 <
806 <    /**
807 <     * Tests if some key maps into the specified value in this table.
808 <     * This operation is more expensive than the <code>containsKey</code>
809 <     * method.<p>
810 <     *
811 <     * Note that this method is identical in functionality to containsValue
812 <     * (which is part of the Map interface in the collections framework).
813 <     *
814 <     * @param      value   a value to search for.
815 <     * @return     <code>true</code> if and only if some key maps to the
816 <     *             <code>value</code> argument in this table as
817 <     *             determined by the <tt>equals</tt> method;
818 <     *             <code>false</code> otherwise.
819 <     * @exception  NullPointerException  if the value is <code>null</code>.
820 <     * @see        #containsKey(Object)
821 <     * @see        #containsValue(Object)
822 <     * @see        Map
823 <     */
824 <    public boolean contains(V value) {
825 <        return containsValue(value);
826 <    }
827 <
828 <    /**
829 <     * Copies all of the mappings from the specified map to this one.
830 <     *
831 <     * These mappings replace any mappings that this map had for any of the
832 <     * keys currently in the specified Map.
833 <     *
834 <     * @param t Mappings to be stored in this map.
835 <     */
836 <    public <A extends K, B extends V> void putAll(Map<A, B> t) {
837 <        int n = t.size();
838 <        if (n == 0)
839 <            return;
840 <
841 <        // Expand enough to hold at least n elements without resizing.
842 <        // We can only resize table by factor of two at a time.
843 <        // It is faster to rehash with fewer elements, so do it now.
844 <        for(;;) {
845 <            Entry<K,V>[] tab;
846 <            int max;
847 <            // must synch on some segment. pick 0.
848 <            segments[0].lock();
849 <            try {
850 <                tab = table;
851 <                max = threshold * CONCURRENCY_LEVEL;
852 <            }
853 <            finally {
854 <                segments[0].unlock();
855 <            }
856 <            if (n < max)
857 <                break;
858 <            resize(tab);
859 <        }
860 <
861 <        for (Iterator<Map.Entry<A,B>> it = t.entrySet().iterator(); it.hasNext();) {
862 <            Map.Entry<A,B> entry = (Map.Entry<A,B>) it.next();
863 <            put(entry.getKey(), entry.getValue());
864 <        }
748 >        return segmentFor(hash).remove(key, segmentHashFor(hash), value);
749      }
750  
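A minimal usage sketch of the two-argument remove above (the map and key names are illustrative only); note that in this revision it returns the removed value, or null if the key was absent or was mapped to a different value:

    ConcurrentHashMap<String, String> map = new ConcurrentHashMap<String, String>();
    map.put("state", "RUNNING");
    // Removes the entry only while "state" is still mapped to "RUNNING"; if a
    // concurrent update has changed the value, the call returns null and the
    // entry is left in place.
    String removed = map.remove("state", "RUNNING");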
751      /**
752       * Removes all mappings from this map.
753       */
754      public void clear() {
755 <        // We don't need all locks at once so long as locks
756 <        //   are obtained in low to high order
873 <        for (int s = 0; s < segments.length; ++s) {
874 <            Segment seg = segments[s];
875 <            seg.lock();
876 <            try {
877 <                Entry<K,V>[] tab = table;
878 <                for (int i = s; i < tab.length; i+= segments.length) {
879 <                    for (Entry<K,V> e = tab[i]; e != null; e = e.next)
880 <                        e.value = null;
881 <                    tab[i] = null;
882 <                    seg.count = 0;
883 <                }
884 <            }
885 <            finally {
886 <                seg.unlock();
887 <            }
888 <        }
755 >        for (int i = 0; i < segments.length; ++i)
756 >            segments[i].clear();
757      }
758  
759 +
760      /**
761       * Returns a shallow copy of this
762       * <tt>ConcurrentHashMap</tt> instance: the keys and
# Line 896 | Line 765 | public class ConcurrentHashMap<K, V> ext
765       * @return a shallow copy of this map.
766       */
767      public Object clone() {
768 <        // We cannot call super.clone, since it would share final segments array,
769 <        // and there's no way to reassign finals.
901 <        return new ConcurrentHashMap<K,V>(this);
902 <    }
768 >        // We cannot call super.clone, since it would share final
769 >        // segments array, and there's no way to reassign finals.
770  
771 <    // Views
772 <
773 <    private transient Set<K> keySet = null;
774 <    private transient Set<Map.Entry<K,V>> entrySet = null;
775 <    private transient Collection<V> values = null;
771 >        float lf = segments[0].loadFactor;
772 >        int segs = segments.length;
773 >        int cap = (int)(size() / lf);
774 >        if (cap < segs) cap = segs;
775 >        ConcurrentHashMap t = new ConcurrentHashMap(cap, lf, segs);
776 >        t.putAll(this);
777 >        return t;
778 >    }
779  
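A rough sketch of the shallow-copy behavior described above (illustrative names; the cast is needed because clone() returns Object):

    ConcurrentHashMap<String, StringBuffer> original =
        new ConcurrentHashMap<String, StringBuffer>();
    original.put("log", new StringBuffer("start"));
    ConcurrentHashMap<String, StringBuffer> copy =
        (ConcurrentHashMap<String, StringBuffer>) original.clone();
    // Keys and values are shared, so mutating a value object is visible
    // through both maps...
    original.get("log").append(" +more");
    // ...but the mappings themselves are independent: removing from the copy
    // leaves the original intact.
    copy.remove("log");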
780      /**
781       * Returns a set view of the keys contained in this map.  The set is
# Line 923 | Line 793 | public class ConcurrentHashMap<K, V> ext
793          return (ks != null)? ks : (keySet = new KeySet());
794      }
795  
926    private class KeySet extends AbstractSet<K> {
927        public Iterator<K> iterator() {
928            return new KeyIterator();
929        }
930        public int size() {
931            return ConcurrentHashMap.this.size();
932        }
933        public boolean contains(Object o) {
934            return ConcurrentHashMap.this.containsKey(o);
935        }
936        public boolean remove(Object o) {
937            return ConcurrentHashMap.this.remove(o) != null;
938        }
939        public void clear() {
940            ConcurrentHashMap.this.clear();
941        }
942    }
796  
797      /**
798       * Returns a collection view of the values contained in this map.  The
# Line 957 | Line 810 | public class ConcurrentHashMap<K, V> ext
810          return (vs != null)? vs : (values = new Values());
811      }
812  
960    private class Values extends AbstractCollection<V> {
961        public Iterator<V> iterator() {
962            return new ValueIterator();
963        }
964        public int size() {
965            return ConcurrentHashMap.this.size();
966        }
967        public boolean contains(Object o) {
968            return ConcurrentHashMap.this.containsValue(o);
969        }
970        public void clear() {
971            ConcurrentHashMap.this.clear();
972        }
973    }
813  
814      /**
815       * Returns a collection view of the mappings contained in this map.  Each
# Line 989 | Line 828 | public class ConcurrentHashMap<K, V> ext
828          return (es != null) ? es : (entrySet = new EntrySet());
829      }
830  
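A brief sketch of how these collection views behave, assuming the usual backed-view semantics described in the surrounding javadoc (names illustrative): each view reads and writes through to the map.

    ConcurrentHashMap<String, String> map = new ConcurrentHashMap<String, String>();
    map.put("a", "1");
    map.put("b", "2");
    map.keySet().remove("a");                    // also removes the ("a", "1") mapping
    boolean hasTwo = map.values().contains("2"); // true: the view reads through to the map
    int remaining = map.size();                  // 1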
992    private class EntrySet extends AbstractSet<Map.Entry<K,V>> {
993        public Iterator<Map.Entry<K,V>> iterator() {
994            return new EntryIterator();
995        }
996        public boolean contains(Map.Entry<K,V> entry) {
997            V v = ConcurrentHashMap.this.get(entry.getKey());
998            return v != null && v.equals(entry.getValue());
999        }
1000        public boolean remove(Map.Entry<K,V> e) {
1001            return ConcurrentHashMap.this.remove(e.getKey(), e.getValue()) != null;
1002        }
1003        public int size() {
1004            return ConcurrentHashMap.this.size();
1005        }
1006        public void clear() {
1007            ConcurrentHashMap.this.clear();
1008        }
1009    }
831  
832      /**
833       * Returns an enumeration of the keys in this table.
# Line 1017 | Line 838 | public class ConcurrentHashMap<K, V> ext
838       * @see     #keySet()
839       * @see     Map
840       */
841 <    public Enumeration keys() {
841 >    public Enumeration<K> keys() {
842          return new KeyIterator();
843      }
844  
# Line 1032 | Line 853 | public class ConcurrentHashMap<K, V> ext
853       * @see     #values()
854       * @see     Map
855       */
856 <    public Enumeration elements() {
856 >    public Enumeration<V> elements() {
857          return new ValueIterator();
858      }
859  
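A short sketch of the legacy Hashtable-style enumeration accessors above, which in this revision are typed as Enumeration&lt;K&gt; and Enumeration&lt;V&gt; (names illustrative):

    ConcurrentHashMap<String, String> props = new ConcurrentHashMap<String, String>();
    props.put("host", "localhost");
    props.put("port", "8080");
    for (Enumeration<String> keys = props.keys(); keys.hasMoreElements(); ) {
        String key = keys.nextElement();
        System.out.println(key + " -> " + props.get(key));
    }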
860 <    /**
861 <     * ConcurrentHashMap collision list entry.
862 <     */
863 <    private static class Entry<K,V> implements Map.Entry<K,V> {
864 <        /*
865 <           The use of volatile for value field ensures that
866 <           we can detect status changes without synchronization.
867 <           The other fields are never changed, and are
1047 <           marked as final.
1048 <         */
1049 <
1050 <        private final K key;
1051 <        private volatile V value;
1052 <        private final int hash;
1053 <        private final Entry<K,V> next;
1054 <
1055 <        Entry(int hash, K key, V value, Entry<K,V> next) {
1056 <            this.value = value;
1057 <            this.hash = hash;
1058 <            this.key = key;
1059 <            this.next = next;
1060 <        }
1061 <
1062 <        // Map.Entry Ops
1063 <
1064 <        public K getKey() {
1065 <            return key;
1066 <        }
1067 <
1068 <        /**
1069 <         * Get the value.  Note: In an entrySet or entrySet.iterator,
1070 <         * unless you can guarantee lack of concurrent modification,
1071 <         * <tt>getValue</tt> <em>might</em> return null, reflecting the
1072 <         * fact that the entry has been concurrently removed. However,
1073 <         * there are no assurances that concurrent removals will be
1074 <         * reflected using this method.
1075 <         *
1076 <         * @return     the current value, or null if the entry has been
1077 <         * detectably removed.
1078 <         **/
1079 <        public V getValue() {
1080 <            return value;
1081 <        }
1082 <
1083 <        /**
1084 <         * Set the value of this entry.  Note: In an entrySet or
1085 <         * entrySet.iterator, unless you can guarantee lack of concurrent
1086 <         * modification, <tt>setValue</tt> is not strictly guaranteed to
1087 <         * actually replace the value field obtained via the <tt>get</tt>
1088 <         * operation of the underlying hash table in multithreaded
1089 <         * applications.  If iterator-wide synchronization is not used,
1090 <         * and any other concurrent <tt>put</tt> or <tt>remove</tt>
1091 <         * operations occur, sometimes even to <em>other</em> entries,
1092 <         * then this change is not guaranteed to be reflected in the hash
1093 <         * table. (It might, or it might not. There are no assurances
1094 <         * either way.)
1095 <         *
1096 <         * @param      value   the new value.
1097 <         * @return     the previous value, or null if entry has been detectably
1098 <         * removed.
1099 <         * @exception  NullPointerException  if the value is <code>null</code>.
1100 <         *
1101 <         **/
1102 <        public V setValue(V value) {
1103 <            if (value == null)
1104 <                throw new NullPointerException();
1105 <            V oldValue = this.value;
1106 <            this.value = value;
1107 <            return oldValue;
1108 <        }
1109 <
1110 <        public boolean equals(Object o) {
1111 <            if (!(o instanceof Map.Entry))
1112 <                return false;
1113 <            Map.Entry e = (Map.Entry)o;
1114 <            return (key.equals(e.getKey()) && value.equals(e.getValue()));
1115 <        }
1116 <
1117 <        public int hashCode() {
1118 <            return  key.hashCode() ^ value.hashCode();
1119 <        }
1120 <
1121 <        public String toString() {
1122 <            return key + "=" + value;
1123 <        }
1124 <
1125 <    }
1126 <
1127 <    private abstract class HashIterator<T> implements Iterator<T>, Enumeration {
1128 <        private final Entry<K,V>[] tab;      // snapshot of table
1129 <        private int index;                   // current slot
1130 <        Entry<K,V> entry = null;             // current node of slot
1131 <        K currentKey;                        // key for current node
1132 <        V currentValue;                      // value for current node
1133 <        private Entry lastReturned = null;   // last node returned by next
860 >    /* ---------------- Iterator Support -------------- */
861 >    
862 >    private abstract class HashIterator {
863 >        private int nextSegmentIndex;
864 >        private int nextTableIndex;
865 >        private HashEntry<K, V>[] currentTable;
866 >        private HashEntry<K, V> nextEntry;
867 >        private HashEntry<K, V> lastReturned;
868  
869          private HashIterator() {
870 <            // force all segments to synch
871 <            for (int i = 0; i < segments.length; ++i) {
872 <                segments[i].lock();
1139 <                segments[i].unlock();
1140 <            }
1141 <            tab = table;
1142 <            index = tab.length - 1;
870 >            nextSegmentIndex = segments.length-1;
871 >            nextTableIndex = -1;
872 >            advance();
873          }
874  
875          public boolean hasMoreElements() { return hasNext(); }
1146        public Object nextElement() { return next(); }
876  
877 <        public boolean hasNext() {
878 <           /*
879 <             currentKey and currentValue are set here to ensure that next()
880 <             returns normally if hasNext() returns true. This avoids
881 <             surprises especially when the final element is removed during
882 <             traversal -- instead, we just ignore the removal during
883 <             current traversal.
884 <            */
885 <
886 <            while (true) {
887 <                if (entry != null) {
888 <                    V v = entry.value;
889 <                    if (v != null) {
890 <                        currentKey = entry.key;
891 <                        currentValue = v;
892 <                        return true;
877 >        private void advance() {
878 >            if (nextEntry != null && (nextEntry = nextEntry.next) != null)
879 >                return;
880 >                
881 >            while (nextTableIndex >= 0) {
882 >                if ( (nextEntry = currentTable[nextTableIndex--]) != null)
883 >                    return;
884 >            }
885 >                
886 >            while (nextSegmentIndex >= 0) {
887 >                Segment<K,V> seg = segments[nextSegmentIndex--];
888 >                if (seg.count != 0) {
889 >                    currentTable = seg.table;
890 >                    for (int j = currentTable.length-1; j >= 0; --j) {
891 >                        if ( (nextEntry = currentTable[j]) != null) {
892 >                            nextTableIndex = j-1;
893 >                            return;
894 >                        }
895                      }
1165                    else
1166                        entry = entry.next;
1167                }
1168
1169                while (entry == null && index >= 0)
1170                    entry = tab[index--];
1171
1172                if (entry == null) {
1173                    currentKey = null;
1174                    currentValue = null;
1175                    return false;
896                  }
897              }
898          }
899  
900 <        abstract T returnValueOfNext();
900 >        public boolean hasNext() { return nextEntry != null; }
901  
902 <        public T next() {
903 <            if (currentKey == null && !hasNext())
902 >        HashEntry<K,V> nextEntry() {
903 >            if (nextEntry == null)
904                  throw new NoSuchElementException();
905 <
906 <            T result = returnValueOfNext();
907 <            lastReturned = entry;
1188 <            currentKey = null;
1189 <            currentValue = null;
1190 <            entry = entry.next;
1191 <            return result;
905 >            lastReturned = nextEntry;
906 >            advance();
907 >            return lastReturned;
908          }
909  
910          public void remove() {
# Line 1197 | Line 913 | public class ConcurrentHashMap<K, V> ext
913              ConcurrentHashMap.this.remove(lastReturned.key);
914              lastReturned = null;
915          }
916 +    }
917  
918 +    private class KeyIterator extends HashIterator implements Iterator<K>, Enumeration<K> {
919 +        public K next() { return super.nextEntry().key; }
920 +        public K nextElement() { return super.nextEntry().key; }
921      }
922  
923 <    private class KeyIterator extends HashIterator<K> {
924 <        K returnValueOfNext() { return currentKey; }
925 <        public K next() { return super.next(); }
923 >    private class ValueIterator extends HashIterator implements Iterator<V>, Enumeration<V> {
924 >        public V next() { return super.nextEntry().value; }
925 >        public V nextElement() { return super.nextEntry().value; }
926      }
927  
928 <    private class ValueIterator extends HashIterator<V> {
929 <        V returnValueOfNext() { return currentValue; }
1210 <        public V next() { return super.next(); }
928 >    private class EntryIterator extends HashIterator implements Iterator<Entry<K,V>> {
929 >        public Map.Entry<K,V> next() { return super.nextEntry(); }
930      }
931  
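A small sketch of single-threaded iterator use (illustrative names): these iterators do not throw ConcurrentModificationException, and their remove() delegates to the map's own remove, so entries can be dropped mid-traversal.

    ConcurrentHashMap<String, Integer> counts = new ConcurrentHashMap<String, Integer>();
    counts.put("x", Integer.valueOf(0));
    counts.put("y", Integer.valueOf(5));
    Iterator<Map.Entry<String, Integer>> it = counts.entrySet().iterator();
    while (it.hasNext()) {
        Map.Entry<String, Integer> e = it.next();
        if (e.getValue().intValue() == 0)
            it.remove();            // removes "x" without invalidating the iterator
    }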
932 <    private class EntryIterator extends HashIterator<Map.Entry<K,V>> {
933 <        Map.Entry<K,V> returnValueOfNext() { return entry; }
934 <        public Map.Entry<K,V> next() { return super.next(); }
932 >    private class KeySet extends AbstractSet<K> {
933 >        public Iterator<K> iterator() {
934 >            return new KeyIterator();
935 >        }
936 >        public int size() {
937 >            return ConcurrentHashMap.this.size();
938 >        }
939 >        public boolean contains(Object o) {
940 >            return ConcurrentHashMap.this.containsKey(o);
941 >        }
942 >        public boolean remove(Object o) {
943 >            return ConcurrentHashMap.this.remove(o) != null;
944 >        }
945 >        public void clear() {
946 >            ConcurrentHashMap.this.clear();
947 >        }
948      }
949  
950 +    private class Values extends AbstractCollection<V> {
951 +        public Iterator<V> iterator() {
952 +            return new ValueIterator();
953 +        }
954 +        public int size() {
955 +            return ConcurrentHashMap.this.size();
956 +        }
957 +        public boolean contains(Object o) {
958 +            return ConcurrentHashMap.this.containsValue(o);
959 +        }
960 +        public void clear() {
961 +            ConcurrentHashMap.this.clear();
962 +        }
963 +    }
964 +
965 +    private class EntrySet extends AbstractSet {
966 +        public Iterator<Map.Entry<K,V>> iterator() {
967 +            return new EntryIterator();
968 +        }
969 +        public boolean contains(Object o) {
970 +            if (!(o instanceof Map.Entry))
971 +                return false;
972 +            Map.Entry<K,V> e = (Map.Entry<K,V>)o;
973 +            V v = ConcurrentHashMap.this.get(e.getKey());
974 +            return v != null && v.equals(e.getValue());
975 +        }
976 +        public boolean remove(Object o) {
977 +            if (!(o instanceof Map.Entry))
978 +                return false;
979 +            Map.Entry<K,V> e = (Map.Entry<K,V>)o;
980 +            return ConcurrentHashMap.this.remove(e.getKey(), e.getValue()) != null;
981 +        }
982 +        public int size() {
983 +            return ConcurrentHashMap.this.size();
984 +        }
985 +        public void clear() {
986 +            ConcurrentHashMap.this.clear();
987 +        }
988 +    }
989 +
990 +    /* ---------------- Serialization Support -------------- */
991 +
992      /**
993       * Save the state of the <tt>ConcurrentHashMap</tt>
994       * instance to a stream (i.e.,
995       * serialize it).
1222     *
996       * @serialData
1224     * An estimate of the table size, followed by
997       * the key (Object) and value (Object)
998       * for each key-value mapping, followed by a null pair.
999       * The key-value mappings are emitted in no particular order.
1000       */
1001      private void writeObject(java.io.ObjectOutputStream s) throws IOException  {
1230        // Write out the loadfactor, and any hidden stuff
1002          s.defaultWriteObject();
1003  
1233        // Write out capacity estimate. It is OK if this
1234        // changes during the write, since it is only used by
1235        // readObject to set initial capacity, to avoid needless resizings.
1236
1237        int cap;
1238        segments[0].lock();
1239        try {
1240            cap = table.length;
1241        }
1242        finally {
1243            segments[0].unlock();
1244        }
1245        s.writeInt(cap);
1246
1247        // Write out keys and values (alternating)
1004          for (int k = 0; k < segments.length; ++k) {
1005 <            Segment seg = segments[k];
1250 <            Entry[] tab;
1005 >            Segment<K,V> seg = segments[k];
1006              seg.lock();
1007              try {
1008 <                tab = table;
1008 >                HashEntry<K,V>[] tab = seg.table;
1009 >                for (int i = 0; i < tab.length; ++i) {
1010 >                    for (HashEntry<K,V> e = tab[i]; e != null; e = e.next) {
1011 >                        s.writeObject(e.key);
1012 >                        s.writeObject(e.value);
1013 >                    }
1014 >                }
1015              }
1016              finally {
1017                  seg.unlock();
1018              }
1258            for (int i = k; i < tab.length; i+= segments.length) {
1259                for (Entry e = tab[i]; e != null; e = e.next) {
1260                    s.writeObject(e.key);
1261                    s.writeObject(e.value);
1262                }
1263            }
1019          }
1265
1020          s.writeObject(null);
1021          s.writeObject(null);
1022      }
# Line 1274 | Line 1028 | public class ConcurrentHashMap<K, V> ext
1028       */
1029      private void readObject(java.io.ObjectInputStream s)
1030          throws IOException, ClassNotFoundException  {
1277
1278        // Read in the threshold, loadfactor, and any hidden stuff
1031          s.defaultReadObject();
1032  
1033 <        int cap = s.readInt();
1034 <        table = newTable(cap);
1035 <        for (int i = 0; i < segments.length; ++i)
1036 <            segments[i] = new Segment();
1285 <
1033 >        // Initialize each segment to be minimally sized, and let grow.
1034 >        for (int i = 0; i < segments.length; ++i) {
1035 >            segments[i].setTable((HashEntry<K,V>[]) new HashEntry[1]);
1036 >        }
1037  
1038          // Read the keys and values, and put the mappings in the table
1039          while (true) {
# Line 1294 | Line 1045 | public class ConcurrentHashMap<K, V> ext
1045          }
1046      }
1047   }
1048 +        
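A sketch of the serialized form described above in use (java.io imports and exception handling omitted for brevity; names illustrative): writing the map emits alternating keys and values terminated by a null pair, and reading it back rebuilds an equivalent map.

    ConcurrentHashMap<String, String> map = new ConcurrentHashMap<String, String>();
    map.put("k", "v");
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    ObjectOutputStream out = new ObjectOutputStream(bytes);
    out.writeObject(map);
    out.close();
    ObjectInputStream in =
        new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()));
    ConcurrentHashMap<String, String> copy =
        (ConcurrentHashMap<String, String>) in.readObject();
    String v = copy.get("k");            // "v"
    in.close();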

Diff Legend

Removed lines (old revision only)
+ Added lines (new revision only)
< Changed lines (old revision)
> Changed lines (new revision)