   1      /*
   2       * Written by Doug Lea with assistance from members of JCP JSR-166
   3       * Expert Group and released to the public domain, as explained at
   4  <    * http://creativecommons.org/licenses/publicdomain
   4  >    * http://creativecommons.org/publicdomain/zero/1.0/
   5       */
   6
   7      package java.util.concurrent;
  62       * does <em>not</em> allow <tt>null</tt> to be used as a key or value.
  63       *
  64       * <p>This class is a member of the
  65  <    * <a href="{@docRoot}/../guide/collections/index.html">
  65  >    * <a href="{@docRoot}/../technotes/guides/collections/index.html">
  66       * Java Collections Framework</a>.
  67       *
  68       * @since 1.5
  76
  77      /*
  78       * The basic strategy is to subdivide the table among Segments,
  79  <    * each of which itself is a concurrently readable hash table.
  79  >    * each of which itself is a concurrently readable hash table. To
  80  >    * reduce footprint, all but one of the segments are constructed
  81  >    * only when first needed (see ensureSegment). To maintain visibility
  82  >    * in the presence of lazy construction, accesses to segments as
  83  >    * well as elements of segment's table must use volatile access,
  84  >    * which is done via Unsafe within methods segmentAt etc.
  85  >    * below. These provide the functionality of AtomicReferenceArrays
  86  >    * but reduce the levels of indirection. Additionally,
  87  >    * volatile writes of table elements and entry "next" fields
  88  >    * within locked operations use the cheaper "lazySet" forms of
  89  >    * writes (via putOrderedObject) because these writes are always
  90  >    * followed by lock releases that maintain sequential consistency
  91  >    * of table updates.
  92  >    *
  93  >    * Historical note: The previous version of this class relied
  94  >    * heavily on "final" fields, which avoided some volatile reads at
  95  >    * the expense of a large initial footprint. Some remnants of
  96  >    * that design (including forced construction of segment 0) exist
  97  >    * to ensure serialization compatibility.
  98       */
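
Functionally, the Unsafe-based accessors this patch introduces behave like
operations on a java.util.concurrent.atomic.AtomicReferenceArray; a minimal
sketch of that equivalence (illustrative stand-in only, not code the patch
adds — the patch avoids the extra indirection this class carries):

    import java.util.concurrent.atomic.AtomicReferenceArray;

    // Illustrative stand-ins for the entryAt/setEntryAt accessors below: an
    // AtomicReferenceArray gives the same volatile-read and lazySet
    // (ordered-write) semantics that the patch obtains via
    // UNSAFE.getObjectVolatile/putOrderedObject.
    final class VolatileTable<E> {
        private final AtomicReferenceArray<E> tab;

        VolatileTable(int capacity) { tab = new AtomicReferenceArray<E>(capacity); }

        E entryAt(int i)            { return tab.get(i); }    // volatile read
        void setEntryAt(int i, E e) { tab.lazySet(i, e); }    // ordered write
        // lazySet is safe here only because, as in the patch, every such
        // write is followed by a lock release that publishes it.
    }
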
  99
 100      /* ---------------- Constants -------------- */
 126      static final int MAXIMUM_CAPACITY = 1 << 30;
 127
 128      /**
 129  +    * The minimum capacity for per-segment tables. Must be a power
 130  +    * of two, at least two to avoid immediate resizing on next use
 131  +    * after lazy construction.
 132  +    */
 133  +   static final int MIN_SEGMENT_TABLE_CAPACITY = 2;
 134  +
 135  +   /**
 136       * The maximum number of segments to allow; used to bound
 137  <    * constructor arguments.
 137  >    * constructor arguments. Must be power of two less than 1 << 24.
 138       */
 139      static final int MAX_SEGMENTS = 1 << 16; // slightly conservative
 140
 160      final int segmentShift;
 161
 162      /**
 163  <    * The segments, each of which is a specialized hash table
 163  >    * The segments, each of which is a specialized hash table.
 164       */
 165      final Segment<K,V>[] segments;
 166
 168      transient Set<Map.Entry<K,V>> entrySet;
 169      transient Collection<V> values;
 170
 146  -   /* ---------------- Small Utilities -------------- */
 147  -
 148  -   /**
 149  -    * Returns a hash code for non-null Object x.
 150  -    * Uses the same hash code spreader as most other java.util hash tables.
 151  -    * @param x the object serving as a key
 152  -    * @return the hash code
 153  -    */
 154  -   static int hash(Object x) {
 155  -       int h = x.hashCode();
 156  -       h += ~(h << 9);
 157  -       h ^= (h >>> 14);
 158  -       h += (h << 4);
 159  -       h ^= (h >>> 10);
 160  -       return h;
 161  -   }
 162  -
 163  -   /**
 164  -    * Returns the segment that should be used for key with given hash
 165  -    * @param hash the hash code for the key
 166  -    * @return the segment
 167  -    */
 168  -   final Segment<K,V> segmentFor(int hash) {
 169  -       return segments[(hash >>> segmentShift) & segmentMask];
 170  -   }
 171  -
 172  -   /* ---------------- Inner Classes -------------- */
 173  -
 171      /**
 172       * ConcurrentHashMap list entry. Note that this is never exported
 173       * out as a user-visible Map.Entry.
 177  -    *
 178  -    * Because the value field is volatile, not final, it is legal wrt
 179  -    * the Java Memory Model for an unsynchronized reader to see null
 180  -    * instead of initial value when read via a data race. Although a
 181  -    * reordering leading to this is not likely to ever actually
 182  -    * occur, the Segment.readValueUnderLock method is used as a
 183  -    * backup in case a null (pre-initialized) value is ever seen in
 184  -    * an unsynchronized access method.
 174       */
 175      static final class HashEntry<K,V> {
 187  -       final K key;
 176          final int hash;
 177  +       final K key;
 178          volatile V value;
 179  <       final HashEntry<K,V> next;
 179  >       volatile HashEntry<K,V> next;
 180
 181  <       HashEntry(K key, int hash, HashEntry<K,V> next, V value) {
 193  <           this.key = key;
 181  >       HashEntry(int hash, K key, V value, HashEntry<K,V> next) {
 182              this.hash = hash;
 183  <           this.next = next;
 183  >           this.key = key;
 184              this.value = value;
 185  +           this.next = next;
 186          }
 187
 188  <       @SuppressWarnings("unchecked")
 189  <       static final <K,V> HashEntry<K,V>[] newArray(int i) {
 190  <           return new HashEntry[i];
 191  <       }
 188  >       /**
 189  >        * Sets next field with volatile write semantics. (See above
 190  >        * about use of putOrderedObject.)
 191  >        */
 192  >       final void setNext(HashEntry<K,V> n) {
 193  >           UNSAFE.putOrderedObject(this, nextOffset, n);
 194  >       }
 195  >
 196  >       // Unsafe mechanics
 197  >       static final sun.misc.Unsafe UNSAFE;
 198  >       static final long nextOffset;
 199  >       static {
 200  >           try {
 201  >               UNSAFE = sun.misc.Unsafe.getUnsafe();
 202  >               Class<?> k = HashEntry.class;
 203  >               nextOffset = UNSAFE.objectFieldOffset
 204  >                   (k.getDeclaredField("next"));
 205  >           } catch (Exception e) {
 206  >               throw new Error(e);
 207  >           }
 208  >       }
 209  >   }
 210  >
 211  >   /**
 212  >    * Gets the ith element of given table (if nonnull) with volatile
 213  >    * read semantics. Note: This is manually integrated into a few
 214  >    * performance-sensitive methods to reduce call overhead.
 215  >    */
 216  >   @SuppressWarnings("unchecked")
 217  >   static final <K,V> HashEntry<K,V> entryAt(HashEntry<K,V>[] tab, int i) {
 218  >       return (tab == null) ? null :
 219  >           (HashEntry<K,V>) UNSAFE.getObjectVolatile
 220  >           (tab, ((long)i << TSHIFT) + TBASE);
 221  >   }
 222  >
 223  >   /**
 224  >    * Sets the ith element of given table, with volatile write
 225  >    * semantics. (See above about use of putOrderedObject.)
 226  >    */
 227  >   static final <K,V> void setEntryAt(HashEntry<K,V>[] tab, int i,
 228  >                                      HashEntry<K,V> e) {
 229  >       UNSAFE.putOrderedObject(tab, ((long)i << TSHIFT) + TBASE, e);
 230  >   }
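
The offset arithmetic ((long)i << TSHIFT) + TBASE depends on constants these
hunks do not show. In the full file they are derived in a static initializer
from Unsafe's array layout queries, roughly along these lines (a sketch, not
the exact lines of the patch):

    // arrayBaseOffset gives the byte offset of element 0; arrayIndexScale
    // the byte stride per element. When the scale is a power of two,
    // element i lives at TBASE + (i << TSHIFT).
    Class<?> tc = HashEntry[].class;
    int TBASE = UNSAFE.arrayBaseOffset(tc);
    int scale = UNSAFE.arrayIndexScale(tc);
    if ((scale & (scale - 1)) != 0)              // must be a power of two
        throw new Error("data type scale not a power of two");
    int TSHIFT = 31 - Integer.numberOfLeadingZeros(scale);
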
 231  >
 232  >   /**
 233  >    * Applies a supplemental hash function to a given hashCode, which
 234  >    * defends against poor quality hash functions. This is critical
 235  >    * because ConcurrentHashMap uses power-of-two length hash tables,
 236  >    * that otherwise encounter collisions for hashCodes that do not
 237  >    * differ in lower or upper bits.
 238  >    */
 239  >   private static int hash(int h) {
 240  >       // Spread bits to regularize both segment and index locations,
 241  >       // using variant of single-word Wang/Jenkins hash.
 242  >       h += (h << 15) ^ 0xffffcd7d;
 243  >       h ^= (h >>> 10);
 244  >       h += (h << 3);
 245  >       h ^= (h >>> 6);
 246  >       h += (h << 2) + (h << 14);
 247  >       return h ^ (h >>> 16);
 248      }
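
Both index computations later in the patch consume different bits of this
spread hash: the segment index takes the high bits, the per-segment table
index the low bits. A minimal sketch, with the shift/mask parameters passed
in as stand-ins for the fields used below:

    // Hypothetical helpers showing how the spread hash is consumed.
    static int segmentIndexFor(int h, int segmentShift, int segmentMask) {
        return (h >>> segmentShift) & segmentMask;   // high bits -> segment
    }
    static int tableIndexFor(int h, int tableLength) {
        return (tableLength - 1) & h;                // low bits -> bin
    }

With the default 16 segments, segmentShift is 28 and segmentMask is 15, so
the top four bits pick the segment.
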
 249
 250      /**
 254       */
 255      static final class Segment<K,V> extends ReentrantLock implements Serializable {
 256          /*
 257  <        * Segments maintain a table of entry lists that are ALWAYS
 258  <        * kept in a consistent state, so can be read without locking.
 259  <        * Next fields of nodes are immutable (final). All list
 260  <        * additions are performed at the front of each bin. This
 261  <        * makes it easy to check changes, and also fast to traverse.
 262  <        * When nodes would otherwise be changed, new nodes are
 218  <        * created to replace them. This works well for hash tables
 219  <        * since the bin lists tend to be short. (The average length
 220  <        * is less than two for the default load factor threshold.)
 221  <        *
 222  <        * Read operations can thus proceed without locking, but rely
 223  <        * on selected uses of volatiles to ensure that completed
 224  <        * write operations performed by other threads are
 225  <        * noticed. For most purposes, the "count" field, tracking the
 226  <        * number of elements, serves as that volatile variable
 227  <        * ensuring visibility. This is convenient because this field
 228  <        * needs to be read in many read operations anyway:
 229  <        *
 230  <        * - All (unsynchronized) read operations must first read the
 231  <        *   "count" field, and should not look at table entries if
 232  <        *   it is 0.
 257  >        * Segments maintain a table of entry lists that are always
 258  >        * kept in a consistent state, so can be read (via volatile
 259  >        * reads of segments and tables) without locking. This
 260  >        * requires replicating nodes when necessary during table
 261  >        * resizing, so the old lists can be traversed by readers
 262  >        * still using old version of table.
 263           *
 264  <        * - All (synchronized) write operations should write to
 265  <        *   the "count" field after structurally changing any bin.
 266  <        *   The operations must not take any action that could even
 267  <        *   momentarily cause a concurrent read operation to see
 268  <        *   inconsistent data. This is made easier by the nature of
 269  <        *   the read operations in Map. For example, no operation
 270  <        *   can reveal that the table has grown but the threshold
 271  <        *   has not yet been updated, so there are no atomicity
 272  <        *   requirements for this with respect to reads.
 273  <        *
 274  <        * As a guide, all critical volatile reads and writes to the
 275  <        * count field are marked in code comments.
 264  >        * This class defines only mutative methods requiring locking.
 265  >        * Except as noted, the methods of this class perform the
 266  >        * per-segment versions of ConcurrentHashMap methods. (Other
 267  >        * methods are integrated directly into ConcurrentHashMap
 268  >        * methods.) These mutative methods use a form of controlled
 269  >        * spinning on contention via methods scanAndLock and
 270  >        * scanAndLockForPut. These intersperse tryLocks with
 271  >        * traversals to locate nodes. The main benefit is to absorb
 272  >        * cache misses (which are very common for hash tables) while
 273  >        * obtaining locks so that traversal is faster once
 274  >        * acquired. We do not actually use the found nodes since they
 275  >        * must be re-acquired under lock anyway to ensure sequential
 276  >        * consistency of updates (and in any case may be undetectably
 277  >        * stale), but they will normally be much faster to re-locate.
 278  >        * Also, scanAndLockForPut speculatively creates a fresh node
 279  >        * to use in put if no node is found.
 280           */
 281
 282          private static final long serialVersionUID = 2249069246763182397L;
 283
 284          /**
 285  <        * The number of elements in this segment's region.
 285  >        * The maximum number of times to tryLock in a prescan before
 286  >        * possibly blocking on acquire in preparation for a locked
 287  >        * segment operation. On multiprocessors, using a bounded
 288  >        * number of retries maintains cache acquired while locating
 289  >        * nodes.
 290           */
 291  <       transient volatile int count;
 291  >       static final int MAX_SCAN_RETRIES =
 292  >           Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1;
 293
 294          /**
 295  <        * Number of updates that alter the size of the table. This is
 296  <        * used during bulk-read methods to make sure they see a
 297  <        * consistent snapshot: If modCounts change during a traversal
 298  <        * of segments computing size or checking containsValue, then
 299  <        * we might have an inconsistent view of state so (usually)
 300  <        * must retry.
 295  >        * The per-segment table. Elements are accessed via
 296  >        * entryAt/setEntryAt providing volatile semantics.
 297  >        */
 298  >       transient volatile HashEntry<K,V>[] table;
 299  >
 300  >       /**
 301  >        * The number of elements. Accessed only either within locks
 302  >        * or among other volatile reads that maintain visibility.
 303  >        */
 304  >       transient int count;
 305  >
 306  >       /**
 307  >        * The total number of mutative operations in this segment.
 308  >        * Even though this may overflow 32 bits, it provides
 309  >        * sufficient accuracy for stability checks in CHM isEmpty()
 310  >        * and size() methods. Accessed only either within locks or
 311  >        * among other volatile reads that maintain visibility.
 312           */
 313          transient int modCount;
 314
 320          transient int threshold;
 321
 322          /**
 273  -        * The per-segment table.
 274  -        */
 275  -       transient volatile HashEntry<K,V>[] table;
 276  -
 277  -       /**
 323           * The load factor for the hash table. Even though this value
 324           * is same for all segments, it is replicated to avoid needing
 325           * links to outer object.
 327           */
 328          final float loadFactor;
 329
 330  <       Segment(int initialCapacity, float lf) {
 331  <           loadFactor = lf;
 332  <           setTable(HashEntry.<K,V>newArray(initialCapacity));
 333  <       }
 289  <
 290  <       @SuppressWarnings("unchecked")
 291  <       static final <K,V> Segment<K,V>[] newArray(int i) {
 292  <           return new Segment[i];
 293  <       }
 294  <
 295  <       /**
 296  <        * Sets table to new HashEntry array.
 297  <        * Call only while holding lock or in constructor.
 298  <        */
 299  <       void setTable(HashEntry<K,V>[] newTable) {
 300  <           threshold = (int)(newTable.length * loadFactor);
 301  <           table = newTable;
 302  <       }
 303  <
 304  <       /**
 305  <        * Returns properly casted first entry of bin for given hash.
 306  <        */
 307  <       HashEntry<K,V> getFirst(int hash) {
 308  <           HashEntry<K,V>[] tab = table;
 309  <           return tab[hash & (tab.length - 1)];
 330  >       Segment(float lf, int threshold, HashEntry<K,V>[] tab) {
 331  >           this.loadFactor = lf;
 332  >           this.threshold = threshold;
 333  >           this.table = tab;
 334          }
 335
 336  <       /**
 337  <        * Reads value field of an entry under lock. Called if value
 338  <        * field ever appears to be null. This is possible only if a
 339  <        * compiler happens to reorder a HashEntry initialization with
 316  <        * its table assignment, which is legal under memory model
 317  <        * but is not known to ever occur.
 318  <        */
 319  <       V readValueUnderLock(HashEntry<K,V> e) {
 320  <           lock();
 336  >       final V put(K key, int hash, V value, boolean onlyIfAbsent) {
 337  >           HashEntry<K,V> node = tryLock() ? null :
 338  >               scanAndLockForPut(key, hash, value);
 339  >           V oldValue;
 340              try {
 322  -               return e.value;
 323  -           } finally {
 324  -               unlock();
 325  -           }
 326  -       }
 327  -
 328  -       /* Specialized implementations of map methods */
 329  -
 330  -       V get(Object key, int hash) {
 331  -           if (count != 0) { // read-volatile
 332  -               HashEntry<K,V> e = getFirst(hash);
 333  -               while (e != null) {
 334  -                   if (e.hash == hash && key.equals(e.key)) {
 335  -                       V v = e.value;
 336  -                       if (v != null)
 337  -                           return v;
 338  -                       return readValueUnderLock(e); // recheck
 339  -                   }
 340  -                   e = e.next;
 341  -               }
 342  -           }
 343  -           return null;
 344  -       }
 345  -
 346  -       boolean containsKey(Object key, int hash) {
 347  -           if (count != 0) { // read-volatile
 348  -               HashEntry<K,V> e = getFirst(hash);
 349  -               while (e != null) {
 350  -                   if (e.hash == hash && key.equals(e.key))
 351  -                       return true;
 352  -                   e = e.next;
 353  -               }
 354  -           }
 355  -           return false;
 356  -       }
 357  -
 358  -       boolean containsValue(Object value) {
 359  -           if (count != 0) { // read-volatile
 341                  HashEntry<K,V>[] tab = table;
 342  <               int len = tab.length;
 343  <               for (int i = 0 ; i < len; i++) {
 344  <                   for (HashEntry<K,V> e = tab[i]; e != null; e = e.next) {
 345  <                       V v = e.value;
 346  <                       if (v == null) // recheck
 347  <                           v = readValueUnderLock(e);
 348  <                       if (value.equals(v))
 349  <                           return true;
 342  >               int index = (tab.length - 1) & hash;
 343  >               HashEntry<K,V> first = entryAt(tab, index);
 344  >               for (HashEntry<K,V> e = first;;) {
 345  >                   if (e != null) {
 346  >                       K k;
 347  >                       if ((k = e.key) == key ||
 348  >                           (e.hash == hash && key.equals(k))) {
 349  >                           oldValue = e.value;
 350  >                           if (!onlyIfAbsent) {
 351  >                               e.value = value;
 352  >                               ++modCount;
 353  >                           }
 354  >                           break;
 355  >                       }
 356  >                       e = e.next;
 357  >                   }
 358  >                   else {
 359  >                       if (node != null)
 360  >                           node.setNext(first);
 361  >                       else
 362  >                           node = new HashEntry<K,V>(hash, key, value, first);
 363  >                       int c = count + 1;
 364  >                       if (c > threshold && tab.length < MAXIMUM_CAPACITY)
 365  >                           rehash(node);
 366  >                       else
 367  >                           setEntryAt(tab, index, node);
 368  >                       ++modCount;
 369  >                       count = c;
 370  >                       oldValue = null;
 371  >                       break;
 372                      }
 373                  }
 371  -           }
 372  -           return false;
 373  -       }
 374  -
 375  -       boolean replace(K key, int hash, V oldValue, V newValue) {
 376  -           lock();
 377  -           try {
 378  -               HashEntry<K,V> e = getFirst(hash);
 379  -               while (e != null && (e.hash != hash || !key.equals(e.key)))
 380  -                   e = e.next;
 381  -
 382  -               boolean replaced = false;
 383  -               if (e != null && oldValue.equals(e.value)) {
 384  -                   replaced = true;
 385  -                   e.value = newValue;
 386  -               }
 387  -               return replaced;
 388  -           } finally {
 389  -               unlock();
 390  -           }
 391  -       }
 392  -
 393  -       V replace(K key, int hash, V newValue) {
 394  -           lock();
 395  -           try {
 396  -               HashEntry<K,V> e = getFirst(hash);
 397  -               while (e != null && (e.hash != hash || !key.equals(e.key)))
 398  -                   e = e.next;
 399  -
 400  -               V oldValue = null;
 401  -               if (e != null) {
 402  -                   oldValue = e.value;
 403  -                   e.value = newValue;
 404  -               }
 405  -               return oldValue;
 406  -           } finally {
 407  -               unlock();
 408  -           }
 409  -       }
 410  -
 411  -
 412  -       V put(K key, int hash, V value, boolean onlyIfAbsent) {
 413  -           lock();
 414  -           try {
 415  -               int c = count;
 416  -               if (c++ > threshold) // ensure capacity
 417  -                   rehash();
 418  -               HashEntry<K,V>[] tab = table;
 419  -               int index = hash & (tab.length - 1);
 420  -               HashEntry<K,V> first = tab[index];
 421  -               HashEntry<K,V> e = first;
 422  -               while (e != null && (e.hash != hash || !key.equals(e.key)))
 423  -                   e = e.next;
 424  -
 425  -               V oldValue;
 426  -               if (e != null) {
 427  -                   oldValue = e.value;
 428  -                   if (!onlyIfAbsent)
 429  -                       e.value = value;
 430  -               }
 431  -               else {
 432  -                   oldValue = null;
 433  -                   ++modCount;
 434  -                   tab[index] = new HashEntry<K,V>(key, hash, first, value);
 435  -                   count = c; // write-volatile
 436  -               }
 437  -               return oldValue;
 374              } finally {
 375                  unlock();
 376              }
 377  +           return oldValue;
 378          }
 379
 380  <       void rehash() {
 381  <           HashEntry<K,V>[] oldTable = table;
 382  <           int oldCapacity = oldTable.length;
 383  <           if (oldCapacity >= MAXIMUM_CAPACITY)
 384  <               return;
 385  <
 380  >       /**
 381  >        * Doubles size of table and repacks entries, also adding the
 382  >        * given node to new table
 383  >        */
 384  >       @SuppressWarnings("unchecked")
 385  >       private void rehash(HashEntry<K,V> node) {
 386              /*
 387  <            * Reclassify nodes in each list to new Map. Because we are
 388  <            * using power-of-two expansion, the elements from each bin
 389  <            * must either stay at same index, or move with a power of two
 390  <            * offset. We eliminate unnecessary node creation by catching
 391  <            * cases where old nodes can be reused because their next
 392  <            * fields won't change. Statistically, at the default
 393  <            * threshold, only about one-sixth of them need cloning when
 394  <            * a table doubles. The nodes they replace will be garbage
 395  <            * collectable as soon as they are no longer referenced by any
 396  <            * reader thread that may be in the midst of traversing table
 397  <            * right now.
 387  >            * Reclassify nodes in each list to new table. Because we
 388  >            * are using power-of-two expansion, the elements from
 389  >            * each bin must either stay at same index, or move with a
 390  >            * power of two offset. We eliminate unnecessary node
 391  >            * creation by catching cases where old nodes can be
 392  >            * reused because their next fields won't change.
 393  >            * Statistically, at the default threshold, only about
 394  >            * one-sixth of them need cloning when a table
 395  >            * doubles. The nodes they replace will be garbage
 396  >            * collectable as soon as they are no longer referenced by
 397  >            * any reader thread that may be in the midst of
 398  >            * concurrently traversing table. Entry accesses use plain
 399  >            * array indexing because they are followed by volatile
 400  >            * table write.
 401               */
 402  <
 403  <           HashEntry<K,V>[] newTable = HashEntry.newArray(oldCapacity<<1);
 404  <           threshold = (int)(newTable.length * loadFactor);
 405  <           int sizeMask = newTable.length - 1;
 402  >           HashEntry<K,V>[] oldTable = table;
 403  >           int oldCapacity = oldTable.length;
 404  >           int newCapacity = oldCapacity << 1;
 405  >           threshold = (int)(newCapacity * loadFactor);
 406  >           HashEntry<K,V>[] newTable =
 407  >               (HashEntry<K,V>[]) new HashEntry<?,?>[newCapacity];
 408  >           int sizeMask = newCapacity - 1;
 409              for (int i = 0; i < oldCapacity ; i++) {
 467  -               // We need to guarantee that any existing reads of old Map can
 468  -               //   proceed. So we cannot yet null out each bin.
 410                  HashEntry<K,V> e = oldTable[i];
 470  -
 411                  if (e != null) {
 412                      HashEntry<K,V> next = e.next;
 413                      int idx = e.hash & sizeMask;
 414  <
 475  <                   // Single node on list
 476  <                   if (next == null)
 414  >                   if (next == null)   //  Single node on list
 415                          newTable[idx] = e;
 416  <
 479  <                   else {
 480  <                       // Reuse trailing consecutive sequence at same slot
 416  >                   else { // Reuse consecutive sequence at same slot
 417                          HashEntry<K,V> lastRun = e;
 418                          int lastIdx = idx;
 419                          for (HashEntry<K,V> last = next;
 426                              }
 427                          }
 428                          newTable[lastIdx] = lastRun;
 429  <
 494  <                       // Clone all remaining nodes
 429  >                       // Clone remaining nodes
 430                          for (HashEntry<K,V> p = e; p != lastRun; p = p.next) {
 431  <                           int k = p.hash & sizeMask;
 431  >                           V v = p.value;
 432  >                           int h = p.hash;
 433  >                           int k = h & sizeMask;
 434                              HashEntry<K,V> n = newTable[k];
 435  <                           newTable[k] = new HashEntry<K,V>(p.key, p.hash,
 499  <                                                            n, p.value);
 435  >                           newTable[k] = new HashEntry<K,V>(h, p.key, v, n);
 436                          }
 437                      }
 438                  }
 439              }
 440  +           int nodeIndex = node.hash & sizeMask; // add the new node
 441  +           node.setNext(newTable[nodeIndex]);
 442  +           newTable[nodeIndex] = node;
 443              table = newTable;
 444          }
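
The "reuse consecutive sequence" trick relies on a property of power-of-two
doubling: each node's new index is either its old index i or i + oldCapacity,
so the trailing run of nodes that all map to the same new index can be moved
as a unit without cloning. A small self-contained sketch of the idea
(simplified node type and hypothetical names, not the patch's code):

    // Simplified demonstration of the lastRun optimization used in rehash().
    final class Node {
        final int hash; final Node next;
        Node(int hash, Node next) { this.hash = hash; this.next = next; }
    }

    static Node[] doubleTable(Node[] old) {
        Node[] newTab = new Node[old.length * 2];
        int sizeMask = newTab.length - 1;
        for (Node e : old) {
            if (e == null) continue;
            // Find the trailing run whose members all land in the same new bin.
            Node lastRun = e;
            int lastIdx = e.hash & sizeMask;
            for (Node q = e.next; q != null; q = q.next) {
                int k = q.hash & sizeMask;       // either i or i + old.length
                if (k != lastIdx) { lastIdx = k; lastRun = q; }
            }
            newTab[lastIdx] = lastRun;           // reused, not cloned
            // Clone only the nodes before lastRun; readers can still
            // traverse the old list meanwhile.
            for (Node p = e; p != lastRun; p = p.next) {
                int k = p.hash & sizeMask;
                newTab[k] = new Node(p.hash, newTab[k]);
            }
        }
        return newTab;
    }
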
 445
 446          /**
 447  +        * Scans for a node containing given key while trying to
 448  +        * acquire lock, creating and returning one if not found. Upon
 449  +        * return, guarantees that lock is held. Unlike in most
 450  +        * methods, calls to method equals are not screened: Since
 451  +        * traversal speed doesn't matter, we might as well help warm
 452  +        * up the associated code and accesses as well.
 453  +        *
 454  +        * @return a new node if key not found, else null
 455  +        */
 456  +       private HashEntry<K,V> scanAndLockForPut(K key, int hash, V value) {
 457  +           HashEntry<K,V> first = entryForHash(this, hash);
 458  +           HashEntry<K,V> e = first;
 459  +           HashEntry<K,V> node = null;
 460  +           int retries = -1; // negative while locating node
 461  +           while (!tryLock()) {
 462  +               HashEntry<K,V> f; // to recheck first below
 463  +               if (retries < 0) {
 464  +                   if (e == null) {
 465  +                       if (node == null) // speculatively create node
 466  +                           node = new HashEntry<K,V>(hash, key, value, null);
 467  +                       retries = 0;
 468  +                   }
 469  +                   else if (key.equals(e.key))
 470  +                       retries = 0;
 471  +                   else
 472  +                       e = e.next;
 473  +               }
 474  +               else if (++retries > MAX_SCAN_RETRIES) {
 475  +                   lock();
 476  +                   break;
 477  +               }
 478  +               else if ((retries & 1) == 0 &&
 479  +                        (f = entryForHash(this, hash)) != first) {
 480  +                   e = first = f; // re-traverse if entry changed
 481  +                   retries = -1;
 482  +               }
 483  +           }
 484  +           return node;
 485  +       }
 486  +
 487  +       /**
 488  +        * Scans for a node containing the given key while trying to
 489  +        * acquire lock for a remove or replace operation. Upon
 490  +        * return, guarantees that lock is held. Note that we must
 491  +        * lock even if the key is not found, to ensure sequential
 492  +        * consistency of updates.
 493  +        */
 494  +       private void scanAndLock(Object key, int hash) {
 495  +           // similar to but simpler than scanAndLockForPut
 496  +           HashEntry<K,V> first = entryForHash(this, hash);
 497  +           HashEntry<K,V> e = first;
 498  +           int retries = -1;
 499  +           while (!tryLock()) {
 500  +               HashEntry<K,V> f;
 501  +               if (retries < 0) {
 502  +                   if (e == null || key.equals(e.key))
 503  +                       retries = 0;
 504  +                   else
 505  +                       e = e.next;
 506  +               }
 507  +               else if (++retries > MAX_SCAN_RETRIES) {
 508  +                   lock();
 509  +                   break;
 510  +               }
 511  +               else if ((retries & 1) == 0 &&
 512  +                        (f = entryForHash(this, hash)) != first) {
 513  +                   e = first = f;
 514  +                   retries = -1;
 515  +               }
 516  +           }
 517  +       }
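
Stripped of the node bookkeeping, both scan methods follow the same bounded
try-then-block pattern on a ReentrantLock. A minimal sketch, with
doUsefulPrefetchWork() as a stand-in for the bin traversal:

    import java.util.concurrent.locks.ReentrantLock;

    // Spin with tryLock() a bounded number of times, doing cache-warming
    // work between attempts, then fall back to a blocking lock().
    final class PrescanLocker {
        void lockWithPrescan(ReentrantLock lock, int maxScanRetries) {
            int retries = 0;
            while (!lock.tryLock()) {
                if (++retries > maxScanRetries) {
                    lock.lock();          // give up spinning; block
                    break;
                }
                doUsefulPrefetchWork();   // e.g. walk the bin to warm caches
            }
            // lock is held here on all paths
        }
        private void doUsefulPrefetchWork() { /* placeholder */ }
    }
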
 518  +
 519  +       /**
 520           * Remove; match on key only if value null, else match both.
 521           */
 522  <       V remove(Object key, int hash, Object value) {
 523  <           lock();
 522  >       final V remove(Object key, int hash, Object value) {
 523  >           if (!tryLock())
 524  >               scanAndLock(key, hash);
 525  >           V oldValue = null;
 526              try {
 513  -               int c = count - 1;
 527                  HashEntry<K,V>[] tab = table;
 528  <               int index = hash & (tab.length - 1);
 529  <               HashEntry<K,V> first = tab[index];
 530  <               HashEntry<K,V> e = first;
 531  <               while (e != null && (e.hash != hash || !key.equals(e.key)))
 532  <                   e = e.next;
 528  >               int index = (tab.length - 1) & hash;
 529  >               HashEntry<K,V> e = entryAt(tab, index);
 530  >               HashEntry<K,V> pred = null;
 531  >               while (e != null) {
 532  >                   K k;
 533  >                   HashEntry<K,V> next = e.next;
 534  >                   if ((k = e.key) == key ||
 535  >                       (e.hash == hash && key.equals(k))) {
 536  >                       V v = e.value;
 537  >                       if (value == null || value == v || value.equals(v)) {
 538  >                           if (pred == null)
 539  >                               setEntryAt(tab, index, next);
 540  >                           else
 541  >                               pred.setNext(next);
 542  >                           ++modCount;
 543  >                           --count;
 544  >                           oldValue = v;
 545  >                       }
 546  >                       break;
 547  >                   }
 548  >                   pred = e;
 549  >                   e = next;
 550  >               }
 551  >           } finally {
 552  >               unlock();
 553  >           }
 554  >           return oldValue;
 555  >       }
 556
 557  <           V oldValue = null;
 558  <           if (e != null) {
 559  <               V v = e.value;
 560  <               if (value == null || value.equals(v)) {
 561  <                   oldValue = v;
 562  <                   // All entries following removed node can stay
 563  <                   // in list, but all preceding ones need to be
 564  <                   // cloned.
 557  >       final boolean replace(K key, int hash, V oldValue, V newValue) {
 558  >           if (!tryLock())
 559  >               scanAndLock(key, hash);
 560  >           boolean replaced = false;
 561  >           try {
 562  >               HashEntry<K,V> e;
 563  >               for (e = entryForHash(this, hash); e != null; e = e.next) {
 564  >                   K k;
 565  >                   if ((k = e.key) == key ||
 566  >                       (e.hash == hash && key.equals(k))) {
 567  >                       if (oldValue.equals(e.value)) {
 568  >                           e.value = newValue;
 569  >                           ++modCount;
 570  >                           replaced = true;
 571  >                       }
 572  >                       break;
 573  >                   }
 574  >               }
 575  >           } finally {
 576  >               unlock();
 577  >           }
 578  >           return replaced;
 579  >       }
 580  >
 581  >       final V replace(K key, int hash, V value) {
 582  >           if (!tryLock())
 583  >               scanAndLock(key, hash);
 584  >           V oldValue = null;
 585  >           try {
 586  >               HashEntry<K,V> e;
 587  >               for (e = entryForHash(this, hash); e != null; e = e.next) {
 588  >                   K k;
 589  >                   if ((k = e.key) == key ||
 590  >                       (e.hash == hash && key.equals(k))) {
 591  >                       oldValue = e.value;
 592  >                       e.value = value;
 593                          ++modCount;
 594  <                   HashEntry<K,V> newFirst = e.next;
 531  <                   for (HashEntry<K,V> p = first; p != e; p = p.next)
 532  <                       newFirst = new HashEntry<K,V>(p.key, p.hash,
 533  <                                                     newFirst, p.value);
 534  <                   tab[index] = newFirst;
 535  <                   count = c; // write-volatile
 594  >                       break;
 595                      }
 596                  }
 538  -               return oldValue;
 597              } finally {
 598                  unlock();
 599              }
 600  +           return oldValue;
 601          }
 602
 603  <       void clear() {
 604  <           if (count != 0) {
 605  <               lock();
 606  <               try {
 607  <                   HashEntry<K,V>[] tab = table;
 608  <                   for (int i = 0; i < tab.length ; i++)
 609  <                       tab[i] = null;
 610  <                   ++modCount;
 611  <                   count = 0; // write-volatile
 612  <               } finally {
 613  <                   unlock();
 603  >       final void clear() {
 604  >           lock();
 605  >           try {
 606  >               HashEntry<K,V>[] tab = table;
 607  >               for (int i = 0; i < tab.length ; i++)
 608  >                   setEntryAt(tab, i, null);
 609  >               ++modCount;
 610  >               count = 0;
 611  >           } finally {
 612  >               unlock();
 613  >           }
 614  >       }
 615  >   }
 616  >
 617  >   // Accessing segments
 618  >
 619  >   /**
 620  >    * Gets the jth element of given segment array (if nonnull) with
 621  >    * volatile element access semantics via Unsafe. (The null check
 622  >    * can trigger harmlessly only during deserialization.) Note:
 623  >    * because each element of segments array is set only once (using
 624  >    * fully ordered writes), some performance-sensitive methods rely
 625  >    * on this method only as a recheck upon null reads.
 626  >    */
 627  >   @SuppressWarnings("unchecked")
 628  >   static final <K,V> Segment<K,V> segmentAt(Segment<K,V>[] ss, int j) {
 629  >       long u = (j << SSHIFT) + SBASE;
 630  >       return ss == null ? null :
 631  >           (Segment<K,V>) UNSAFE.getObjectVolatile(ss, u);
 632  >   }
 633  >
 634  >   /**
 635  >    * Returns the segment for the given index, creating it and
 636  >    * recording in segment table (via CAS) if not already present.
 637  >    *
 638  >    * @param k the index
 639  >    * @return the segment
 640  >    */
 641  >   @SuppressWarnings("unchecked")
 642  >   private Segment<K,V> ensureSegment(int k) {
 643  >       final Segment<K,V>[] ss = this.segments;
 644  >       long u = (k << SSHIFT) + SBASE; // raw offset
 645  >       Segment<K,V> seg;
 646  >       if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u)) == null) {
 647  >           Segment<K,V> proto = ss[0]; // use segment 0 as prototype
 648  >           int cap = proto.table.length;
 649  >           float lf = proto.loadFactor;
 650  >           int threshold = (int)(cap * lf);
 651  >           HashEntry<K,V>[] tab = (HashEntry<K,V>[])new HashEntry<?,?>[cap];
 652  >           if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
 653  >               == null) { // recheck
 654  >               Segment<K,V> s = new Segment<K,V>(lf, threshold, tab);
 655  >               while ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
 656  >                      == null) {
 657  >                   if (UNSAFE.compareAndSwapObject(ss, u, null, seg = s))
 658  >                       break;
 659                  }
 660              }
 661          }
 662  +       return seg;
 663      }
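
ensureSegment's publish step (volatile read, recheck, then CAS in a loop) can
be expressed without Unsafe; a sketch of the same pattern over an
AtomicReferenceArray (an illustrative stand-in, not the patch's code):

    import java.util.concurrent.atomic.AtomicReferenceArray;

    // Same race-safe lazy-publication shape as ensureSegment: construct off
    // to the side, then CAS from null so exactly one thread's instance wins
    // and every caller returns the winner. The losing thread's candidate is
    // simply garbage-collected.
    static <T> T ensureAt(AtomicReferenceArray<T> arr, int k, T candidate) {
        T cur = arr.get(k);                       // volatile read
        while (cur == null) {
            if (arr.compareAndSet(k, null, candidate))
                return candidate;                 // we published it
            cur = arr.get(k);                     // someone else won; reread
        }
        return cur;
    }
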
 664
 665  +   // Hash-based segment and entry accesses
 666
 667  +   /**
 668  +    * Gets the segment for the given hash code.
 669  +    */
 670  +   @SuppressWarnings("unchecked")
 671  +   private Segment<K,V> segmentForHash(int h) {
 672  +       long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
 673  +       return (Segment<K,V>) UNSAFE.getObjectVolatile(segments, u);
 674  +   }
 675  +
 676  +   /**
 677  +    * Gets the table entry for the given segment and hash code.
 678  +    */
 679  +   @SuppressWarnings("unchecked")
 680  +   static final <K,V> HashEntry<K,V> entryForHash(Segment<K,V> seg, int h) {
 681  +       HashEntry<K,V>[] tab;
 682  +       return (seg == null || (tab = seg.table) == null) ? null :
 683  +           (HashEntry<K,V>) UNSAFE.getObjectVolatile
 684  +           (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
 685  +   }
 686
 687      /* ---------------- Public operations -------------- */
 688
 702       * negative or the load factor or concurrencyLevel are
 703       * nonpositive.
 704       */
 705  +   @SuppressWarnings("unchecked")
 706      public ConcurrentHashMap(int initialCapacity,
 707                               float loadFactor, int concurrencyLevel) {
 708          if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0)
 709              throw new IllegalArgumentException();
 584  -
 710          if (concurrencyLevel > MAX_SEGMENTS)
 711              concurrencyLevel = MAX_SEGMENTS;
 587  -
 712          // Find power-of-two sizes best matching arguments
 713          int sshift = 0;
 714          int ssize = 1;
 716              ++sshift;
 717              ssize <<= 1;
 718          }
 719  <       segmentShift = 32 - sshift;
 720  <       segmentMask = ssize - 1;
 597  <       this.segments = Segment.newArray(ssize);
 598  <
 719  >       this.segmentShift = 32 - sshift;
 720  >       this.segmentMask = ssize - 1;
 721          if (initialCapacity > MAXIMUM_CAPACITY)
 722              initialCapacity = MAXIMUM_CAPACITY;
 723          int c = initialCapacity / ssize;
 724          if (c * ssize < initialCapacity)
 725              ++c;
 726  <       int cap = 1;
 726  >       int cap = MIN_SEGMENT_TABLE_CAPACITY;
 727          while (cap < c)
 728              cap <<= 1;
 729  <
 730  <       for (int i = 0; i < this.segments.length; ++i)
 731  <           this.segments[i] = new Segment<K,V>(cap, loadFactor);
 729  >       // create segments and segments[0]
 730  >       Segment<K,V> s0 =
 731  >           new Segment<K,V>(loadFactor, (int)(cap * loadFactor),
 732  >                            (HashEntry<K,V>[])new HashEntry<?,?>[cap]);
 733  >       Segment<K,V>[] ss = (Segment<K,V>[])new Segment<?,?>[ssize];
 734  >       UNSAFE.putOrderedObject(ss, SBASE, s0); // ordered write of segments[0]
 735  >       this.segments = ss;
 736      }
 737
 738      /**
 739       * Creates a new, empty map with the specified initial capacity
 740  <    * and load factor and with the default concurrencyLevel
 615  <    * (<tt>16</tt>).
 740  >    * and load factor and with the default concurrencyLevel (16).
 741       *
 742       * @param initialCapacity The implementation performs internal
 743       * sizing to accommodate this many elements.
 746       * bin exceeds this threshold.
 747       * @throws IllegalArgumentException if the initial capacity of
 748       * elements is negative or the load factor is nonpositive
 749  +    *
 750  +    * @since 1.6
 751       */
 752      public ConcurrentHashMap(int initialCapacity, float loadFactor) {
 753          this(initialCapacity, loadFactor, DEFAULT_CONCURRENCY_LEVEL);
 755
 756      /**
 757       * Creates a new, empty map with the specified initial capacity,
 758  <    * and with default load factor (<tt>0.75f</tt>)
 632  <    * and concurrencyLevel (<tt>16</tt>).
 758  >    * and with default load factor (0.75) and concurrencyLevel (16).
 759       *
 760       * @param initialCapacity the initial capacity. The implementation
 761       * performs internal sizing to accommodate this many elements.
 767      }
 768
 769      /**
 770  <    * Creates a new, empty map with a default initial capacity
 771  <    * (<tt>16</tt>), load factor
 646  <    * (<tt>0.75f</tt>), and concurrencyLevel
 647  <    * (<tt>16</tt>).
 770  >    * Creates a new, empty map with a default initial capacity (16),
 771  >    * load factor (0.75) and concurrencyLevel (16).
 772       */
 773      public ConcurrentHashMap() {
 774          this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
 775      }
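
To make the sizing arithmetic above concrete, a short usage sketch (the
numbers follow from the constructor's rounding rules):

    // concurrencyLevel 8 rounds ssize up to 8 segments (sshift = 3, so
    // segmentShift = 29, segmentMask = 7); initialCapacity 100 gives
    // ceil(100/8) = 13 entries per segment, rounded up to a 16-slot table.
    ConcurrentHashMap<String, Integer> map =
        new ConcurrentHashMap<String, Integer>(100, 0.75f, 8);
    map.put("answer", 42);
    Integer v = map.get("answer");

Only segments[0] is built eagerly; the other seven appear on first use via
ensureSegment.
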
 776
 777      /**
 778  <    * Creates a new map with the same mappings as the given map. The
 779  <    * map is created with a capacity of 1.5 times the number of
 780  <    * mappings in the given map or <tt>16</tt>
 781  <    * (whichever is greater), and a default load factor
 782  <    * (<tt>0.75f</tt>) and concurrencyLevel
 659  <    * (<tt>16</tt>).
 778  >    * Creates a new map with the same mappings as the given map.
 779  >    * The map is created with a capacity of 1.5 times the number
 780  >    * of mappings in the given map or 16 (whichever is greater),
 781  >    * and a default load factor (0.75) and concurrencyLevel (16).
 782  >    *
 783       * @param m the map
 784       */
 785      public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
 795       * @return <tt>true</tt> if this map contains no key-value mappings
 796       */
 797      public boolean isEmpty() {
 675  -       final Segment<K,V>[] segments = this.segments;
 798          /*
 799  <        * We keep track of per-segment modCounts to avoid ABA
 800  <        * problems in which an element in one segment was added and
 801  <        * in another removed during traversal, in which case the
 802  <        * table was never actually empty at any point. Note the
 803  <        * similar use of modCounts in the size() and containsValue()
 804  <        * methods, which are the only other methods also susceptible
 805  <        * to ABA problems.
 799  >        * Sum per-segment modCounts to avoid mis-reporting when
 800  >        * elements are concurrently added and removed in one segment
 801  >        * while checking another, in which case the table was never
 802  >        * actually empty at any point. (The sum ensures accuracy up
 803  >        * through at least 1<<31 per-segment modifications before
 804  >        * recheck.) Methods size() and containsValue() use similar
 805  >        * constructions for stability checks.
 806           */
 807  <       int[] mc = new int[segments.length];
 808  <       int mcsum = 0;
 809  <       for (int i = 0; i < segments.length; ++i) {
 810  <           if (segments[i].count != 0)
 811  <               return false;
 812  <           else
 691  <               mcsum += mc[i] = segments[i].modCount;
 692  <       }
 693  <       // If mcsum happens to be zero, then we know we got a snapshot
 694  <       // before any modifications at all were made. This is
 695  <       // probably common enough to bother tracking.
 696  <       if (mcsum != 0) {
 697  <           for (int i = 0; i < segments.length; ++i) {
 698  <               if (segments[i].count != 0 ||
 699  <                   mc[i] != segments[i].modCount)
 807  >       long sum = 0L;
 808  >       final Segment<K,V>[] segments = this.segments;
 809  >       for (int j = 0; j < segments.length; ++j) {
 810  >           Segment<K,V> seg = segmentAt(segments, j);
 811  >           if (seg != null) {
 812  >               if (seg.count != 0)
 813                      return false;
 814  +               sum += seg.modCount;
 815  +           }
 816  +       }
 817  +       if (sum != 0L) { // recheck unless no modifications
 818  +           for (int j = 0; j < segments.length; ++j) {
 819  +               Segment<K,V> seg = segmentAt(segments, j);
 820  +               if (seg != null) {
 821  +                   if (seg.count != 0)
 822  +                       return false;
 823  +                   sum -= seg.modCount;
 824  +               }
 825              }
 826  +           if (sum != 0L)
 827  +               return false;
 828          }
 829          return true;
 830      }
 837       * @return the number of key-value mappings in this map
 838       */
 839      public int size() {
 714  -       final Segment<K,V>[] segments = this.segments;
 715  -       long sum = 0;
 716  -       long check = 0;
 717  -       int[] mc = new int[segments.length];
 840          // Try a few times to get accurate count. On failure due to
 841          // continuous async changes in table, resort to locking.
 842  <       for (int k = 0; k < RETRIES_BEFORE_LOCK; ++k) {
 843  <           check = 0;
 844  <           sum = 0;
 845  <           int mcsum = 0;
 846  <           for (int i = 0; i < segments.length; ++i) {
 847  <               sum += segments[i].count;
 848  <               mcsum += mc[i] = segments[i].modCount;
 849  <           }
 850  <           if (mcsum != 0) {
 851  <               for (int i = 0; i < segments.length; ++i) {
 852  <                   check += segments[i].count;
 853  <                   if (mc[i] != segments[i].modCount) {
 732  <                       check = -1; // force retry
 733  <                       break;
 734  <                   }
 842  >       final Segment<K,V>[] segments = this.segments;
 843  >       final int segmentCount = segments.length;
 844  >
 845  >       long previousSum = 0L;
 846  >       for (int retries = -1; retries < RETRIES_BEFORE_LOCK; retries++) {
 847  >           long sum = 0L;    // sum of modCounts
 848  >           long size = 0L;
 849  >           for (int i = 0; i < segmentCount; i++) {
 850  >               Segment<K,V> segment = segmentAt(segments, i);
 851  >               if (segment != null) {
 852  >                   sum += segment.modCount;
 853  >                   size += segment.count;
 854                  }
 855              }
 856  <           if (check == sum)
 857  <               break;
 856  >           if (sum == previousSum)
 857  >               return ((size >>> 31) == 0) ? (int) size : Integer.MAX_VALUE;
 858  >           previousSum = sum;
 859  >       }
 860  >
 861  >       long size = 0L;
 862  >       for (int i = 0; i < segmentCount; i++) {
 863  >           Segment<K,V> segment = ensureSegment(i);
 864  >           segment.lock();
 865  >           size += segment.count;
 866          }
 867  <       if (check != sum) { // Resort to locking all segments
 868  <           sum = 0;
 869  <           for (int i = 0; i < segments.length; ++i)
 743  <               segments[i].lock();
 744  <           for (int i = 0; i < segments.length; ++i)
 745  <               sum += segments[i].count;
 746  <           for (int i = 0; i < segments.length; ++i)
 747  <               segments[i].unlock();
 748  <       }
 749  <       if (sum > Integer.MAX_VALUE)
 750  <           return Integer.MAX_VALUE;
 751  <       else
 752  <           return (int)sum;
 867  >       for (int i = 0; i < segmentCount; i++)
 868  >           segments[i].unlock();
 869  >       return ((size >>> 31) == 0) ? (int) size : Integer.MAX_VALUE;
 870      }
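
The return expression ((size >>> 31) == 0) ? (int) size : Integer.MAX_VALUE
clamps the long sum to the int range that Map.size() requires; an equivalent,
more explicit form for reference:

    // A non-negative long fits in int iff all bits above bit 30 are zero,
    // i.e. (size >>> 31) == 0; otherwise saturate at Integer.MAX_VALUE.
    static int clampToInt(long size) {
        return (size > Integer.MAX_VALUE) ? Integer.MAX_VALUE : (int) size;
    }
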
 871
 872      /**
 873  <    * Returns the value to which this map maps the specified key, or
 874  <    * <tt>null</tt> if the map contains no mapping for the key.
 875  <    *
 876  <    * @param key key whose associated value is to be returned
 877  <    * @return the value associated with <tt>key</tt> in this map, or
 878  <    *         <tt>null</tt> if there is no mapping for <tt>key</tt>
 873  >    * Returns the value to which the specified key is mapped,
 874  >    * or {@code null} if this map contains no mapping for the key.
 875  >    *
 876  >    * <p>More formally, if this map contains a mapping from a key
 877  >    * {@code k} to a value {@code v} such that {@code key.equals(k)},
 878  >    * then this method returns {@code v}; otherwise it returns
 879  >    * {@code null}. (There can be at most one such mapping.)
 880  >    *
 881       * @throws NullPointerException if the specified key is null
 882       */
 883      public V get(Object key) {
 884  <       int hash = hash(key); // throws NullPointerException if key null
 885  <       return segmentFor(hash).get(key, hash);
 884  >       Segment<K,V> s; // manually integrate access methods to reduce overhead
 885  >       HashEntry<K,V>[] tab;
 886  >       int h = hash(key.hashCode());
 887  >       long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
 888  >       if ((s = (Segment<K,V>)UNSAFE.getObjectVolatile(segments, u)) != null &&
 889  >           (tab = s.table) != null) {
 890  >           for (HashEntry<K,V> e = (HashEntry<K,V>) UNSAFE.getObjectVolatile
 891  >                    (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
 892  >                e != null; e = e.next) {
 893  >               K k;
 894  >               if ((k = e.key) == key || (e.hash == h && key.equals(k)))
 895  >                   return e.value;
 896  >           }
 897  >       }
 898  >       return null;
 899      }
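
Note that the @throws NullPointerException contract survives the rewrite only
implicitly: hash(key.hashCode()) dereferences the key, so a null key still
fails fast:

    ConcurrentHashMap<String, String> m = new ConcurrentHashMap<String, String>();
    m.get(null);   // throws NullPointerException from key.hashCode()
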
 900
 901      /**
 907       * <tt>equals</tt> method; <tt>false</tt> otherwise.
 908       * @throws NullPointerException if the specified key is null
 909       */
 910  +   @SuppressWarnings("unchecked")
 911      public boolean containsKey(Object key) {
 912  <       int hash = hash(key); // throws NullPointerException if key null
 913  <       return segmentFor(hash).containsKey(key, hash);
 912  >       Segment<K,V> s; // same as get() except no need for volatile value read
 913  >       HashEntry<K,V>[] tab;
 914  >       int h = hash(key.hashCode());
 915  >       long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
 916  >       if ((s = (Segment<K,V>)UNSAFE.getObjectVolatile(segments, u)) != null &&
 917  >           (tab = s.table) != null) {
 918  >           for (HashEntry<K,V> e = (HashEntry<K,V>) UNSAFE.getObjectVolatile
 919  >                    (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
 920  >                e != null; e = e.next) {
 921  >               K k;
 922  >               if ((k = e.key) == key || (e.hash == h && key.equals(k)))
 923  >                   return true;
 924  >           }
 925  >       }
 926  >       return false;
 927      }
 928
 929      /**
 938       * @throws NullPointerException if the specified value is null
 939       */
 940      public boolean containsValue(Object value) {
 941  +       // Same idea as size()
 942          if (value == null)
 943              throw new NullPointerException();
 797  -
 798  -       // See explanation of modCount use above
 799  -
 944          final Segment<K,V>[] segments = this.segments;
 945  <       int[] mc = new int[segments.length];
 946  <
 803  <       // Try a few times without locking
 804  <       for (int k = 0; k < RETRIES_BEFORE_LOCK; ++k) {
 805  <           int sum = 0;
 806  <           int mcsum = 0;
 807  <           for (int i = 0; i < segments.length; ++i) {
 808  <               int c = segments[i].count;
 809  <               mcsum += mc[i] = segments[i].modCount;
 810  <               if (segments[i].containsValue(value))
 811  <                   return true;
 812  <           }
 813  <           boolean cleanSweep = true;
 814  <           if (mcsum != 0) {
 815  <               for (int i = 0; i < segments.length; ++i) {
 816  <                   int c = segments[i].count;
 817  <                   if (mc[i] != segments[i].modCount) {
 818  <                       cleanSweep = false;
 819  <                       break;
 820  <                   }
 821  <               }
 822  <           }
 823  <           if (cleanSweep)
 824  <               return false;
 825  <       }
 826  <       // Resort to locking all segments
 827  <       for (int i = 0; i < segments.length; ++i)
 828  <           segments[i].lock();
 829  <       boolean found = false;
 945  >       long previousSum = 0L;
 946  >       int lockCount = 0;
 947          try {
 948  <           for (int i = 0; i < segments.length; ++i) {
 949  <               if (segments[i].containsValue(value)) {
 950  <                   found = true;
 951  <                   break;
 948  >           for (int retries = -1; ; retries++) {
 949  >               long sum = 0L;    // sum of modCounts
 950  >               for (int j = 0; j < segments.length; j++) {
 951  >                   Segment<K,V> segment;
 952  >                   if (retries == RETRIES_BEFORE_LOCK) {
 953  >                       segment = ensureSegment(j);
 954  >                       segment.lock();
 955  >                       lockCount++;
 956  >                   } else {
 957  >                       segment = segmentAt(segments, j);
 958  >                       if (segment == null)
 959  >                           continue;
 960  >                   }
 961  >                   HashEntry<K,V>[] tab = segment.table;
 962  >                   if (tab != null) {
 963  >                       for (int i = 0 ; i < tab.length; i++) {
 964  >                           HashEntry<K,V> e;
 965  >                           for (e = entryAt(tab, i); e != null; e = e.next) {
 966  >                               V v = e.value;
 967  >                               if (v != null && value.equals(v))
 968  >                                   return true;
 969  >                           }
 970  >                       }
 971  >                       sum += segment.modCount;
 972  >                   }
 973              }
 974  +           if ((retries >= 0 && sum == previousSum) || lockCount > 0)
 975  +               return false;
 976  +           previousSum = sum;
 977          }
 978      } finally {
 979  <       for (int i = 0; i < segments.length; ++i)
 980  <           segments[i].unlock();
 979  >       for (int j = 0; j < lockCount; j++)
 980  >           segments[j].unlock();
 981      }
 841  -   return found;
 982      }
 984      /**
 988       * full compatibility with class {@link java.util.Hashtable},
 989       * which supported this method prior to introduction of the
 990       * Java Collections framework.
 991  <
 991  >    *
 992       * @param value a value to search for
 993       * @return <tt>true</tt> if and only if some key maps to the
 994       *         <tt>value</tt> argument in this table as
1013       *         <tt>null</tt> if there was no mapping for <tt>key</tt>
1014       * @throws NullPointerException if the specified key or value is null
1015       */
1016  +   @SuppressWarnings("unchecked")
1017      public V put(K key, V value) {
1018  +       Segment<K,V> s;
1019          if (value == null)
1020              throw new NullPointerException();
1021  <       int hash = hash(key);
1022  <       return segmentFor(hash).put(key, hash, value, false);
1021  >       int hash = hash(key.hashCode());
1022  >       int j = (hash >>> segmentShift) & segmentMask;
1023  >       if ((s = (Segment<K,V>)UNSAFE.getObject          // nonvolatile; recheck
1024  >            (segments, (j << SSHIFT) + SBASE)) == null) //  in ensureSegment
1025  >           s = ensureSegment(j);
1026  >       return s.put(key, hash, value, false);
1027      }
1028
1029      /**
1033       * or <tt>null</tt> if there was no mapping for the key
1034       * @throws NullPointerException if the specified key or value is null
1035       */
1036  +   @SuppressWarnings("unchecked")
1037      public V putIfAbsent(K key, V value) {
1038  +       Segment<K,V> s;
1039          if (value == null)
1040              throw new NullPointerException();
1041  <       int hash = hash(key);
1042  <       return segmentFor(hash).put(key, hash, value, true);
1041  >       int hash = hash(key.hashCode());
1042  >       int j = (hash >>> segmentShift) & segmentMask;
1043  >       if ((s = (Segment<K,V>)UNSAFE.getObject
1044  >            (segments, (j << SSHIFT) + SBASE)) == null)
1045  >           s = ensureSegment(j);
1046  >       return s.put(key, hash, value, true);
1047      }
1048
1049      /**
1054       * @param m mappings to be stored in this map
1055       */
1056      public void putAll(Map<? extends K, ? extends V> m) {
1057  <       for (Iterator<? extends Map.Entry<? extends K, ? extends V>> it = (Iterator<? extends Map.Entry<? extends K, ? extends V>>) m.entrySet().iterator(); it.hasNext(); ) {
 906  <           Entry<? extends K, ? extends V> e = it.next();
1057  >       for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
1058              put(e.getKey(), e.getValue());
 908  -       }
1059      }
1060
1061      /**
1064       *
1065       * @param key the key that needs to be removed
1066       * @return the previous value associated with <tt>key</tt>, or
1067  <    *         <tt>null</tt> if there was no mapping for <tt>key</tt>.
1067  >    *         <tt>null</tt> if there was no mapping for <tt>key</tt>
1068       * @throws NullPointerException if the specified key is null
1069       */
1070      public V remove(Object key) {
1071  <       int hash = hash(key);
1072  <       return segmentFor(hash).remove(key, hash, null);
1071  >       int hash = hash(key.hashCode());
1072  >       Segment<K,V> s = segmentForHash(hash);
1073  >       return s == null ? null : s.remove(key, hash, null);
1074      }
1075
1076      /**
1079       * @throws NullPointerException if the specified key is null
1080       */
1081      public boolean remove(Object key, Object value) {
1082  <       if (value == null)
1083  <           return false;
1084  <       int hash = hash(key);
1085  <       return segmentFor(hash).remove(key, hash, value) != null;
1082  >       int hash = hash(key.hashCode());
1083  >       Segment<K,V> s;
1084  >       return value != null && (s = segmentForHash(hash)) != null &&
1085  >           s.remove(key, hash, value) != null;
1086      }
1087
1088      /**
1091       * @throws NullPointerException if any of the arguments are null
1092       */
1093      public boolean replace(K key, V oldValue, V newValue) {
1094  +       int hash = hash(key.hashCode());
1095          if (oldValue == null || newValue == null)
1096              throw new NullPointerException();
1097  <       int hash = hash(key);
1098  <       return segmentFor(hash).replace(key, hash, oldValue, newValue);
1097  >       Segment<K,V> s = segmentForHash(hash);
1098  >       return s != null && s.replace(key, hash, oldValue, newValue);
1099      }
1100
1101      /**
1106       * @throws NullPointerException if the specified key or value is null
1107       */
1108      public V replace(K key, V value) {
1109  +       int hash = hash(key.hashCode());
1110          if (value == null)
1111              throw new NullPointerException();
1112  <       int hash = hash(key);
1113  <       return segmentFor(hash).replace(key, hash, value);
1112  >       Segment<K,V> s = segmentForHash(hash);
1113  >       return s == null ? null : s.replace(key, hash, value);
1114      }
1115
1116      /**
1117       * Removes all of the mappings from this map.
1118       */
1119      public void clear() {
1120  <       for (int i = 0; i < segments.length; ++i)
1121  <           segments[i].clear();
1120  >       final Segment<K,V>[] segments = this.segments;
1121  >       for (int j = 0; j < segments.length; ++j) {
1122  >           Segment<K,V> s = segmentAt(segments, j);
1123  >           if (s != null)
1124  >               s.clear();
1125  >       }
1126      }
1127
1128 |
|
/** |
1192 |
|
* Returns an enumeration of the keys in this table. |
1193 |
|
* |
1194 |
|
* @return an enumeration of the keys in this table |
1195 |
< |
* @see #keySet |
1195 |
> |
* @see #keySet() |
1196 |
|
*/ |
1197 |
|
public Enumeration<K> keys() { |
1198 |
|
return new KeyIterator(); |
1202 |
|
* Returns an enumeration of the values in this table. |
1203 |
|
* |
1204 |
|
* @return an enumeration of the values in this table |
1205 |
< |
* @see #values |
1205 |
> |
* @see #values() |
1206 |
|
*/ |
1207 |
|
public Enumeration<V> elements() { |
1208 |
|
return new ValueIterator(); |
1223 |
|
advance(); |
1224 |
|
} |
1225 |
|
|
1226 |
< |
public boolean hasMoreElements() { return hasNext(); } |
1227 |
< |
|
1226 |
> |
/** |
1227 |
> |
* Sets nextEntry to first node of next non-empty table |
1228 |
> |
* (in backwards order, to simplify checks). |
1229 |
> |
*/ |
1230 |
|
final void advance() { |
1231 |
< |
if (nextEntry != null && (nextEntry = nextEntry.next) != null) |
1232 |
< |
return; |
1233 |
< |
|
1234 |
< |
while (nextTableIndex >= 0) { |
1235 |
< |
if ( (nextEntry = currentTable[nextTableIndex--]) != null) |
1236 |
< |
return; |
1237 |
< |
} |
1238 |
< |
|
1239 |
< |
while (nextSegmentIndex >= 0) { |
1240 |
< |
Segment<K,V> seg = segments[nextSegmentIndex--]; |
1082 |
< |
if (seg.count != 0) { |
1083 |
< |
currentTable = seg.table; |
1084 |
< |
for (int j = currentTable.length - 1; j >= 0; --j) { |
1085 |
< |
if ( (nextEntry = currentTable[j]) != null) { |
1086 |
< |
nextTableIndex = j - 1; |
1087 |
< |
return; |
1088 |
< |
} |
1089 |
< |
} |
1231 |
> |
for (;;) { |
1232 |
> |
if (nextTableIndex >= 0) { |
1233 |
> |
if ((nextEntry = entryAt(currentTable, |
1234 |
> |
nextTableIndex--)) != null) |
1235 |
> |
break; |
1236 |
> |
} |
1237 |
> |
else if (nextSegmentIndex >= 0) { |
1238 |
> |
Segment<K,V> seg = segmentAt(segments, nextSegmentIndex--); |
1239 |
> |
if (seg != null && (currentTable = seg.table) != null) |
1240 |
> |
nextTableIndex = currentTable.length - 1; |
1241 |
|
} |
1242 |
+ |
else |
1243 |
+ |
break; |
1244 |
|
} |
1245 |
|
} |
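To make the traversal order concrete, here is a simplified, single-threaded sketch (plain arrays, no volatile reads; only the null checks for lazily constructed segments are kept) of the backwards scan that advance() performs over segments and their tables:

    import java.util.ArrayList;
    import java.util.List;

    public class BackwardScanSketch {
        static final class Node {
            final String key; final Node next;
            Node(String key, Node next) { this.key = key; this.next = next; }
        }

        // Mirrors advance(): walk segments backwards; within each segment's
        // table walk buckets backwards; within a bucket follow next pointers.
        static List<String> scan(Node[][] segments) {
            List<String> out = new ArrayList<String>();
            for (int s = segments.length - 1; s >= 0; --s) {
                Node[] tab = segments[s];
                if (tab == null)
                    continue; // lazily constructed segment not present yet
                for (int i = tab.length - 1; i >= 0; --i)
                    for (Node e = tab[i]; e != null; e = e.next)
                        out.add(e.key);
            }
            return out;
        }

        public static void main(String[] args) {
            Node[][] segs = {
                { new Node("a", null), null },
                null,
                { null, new Node("b", new Node("c", null)) },
            };
            System.out.println(scan(segs)); // [b, c, a]
        }
    }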
1246 |
|
|
1247 |
< |
public boolean hasNext() { return nextEntry != null; } |
1248 |
< |
|
1249 |
< |
HashEntry<K,V> nextEntry() { |
1097 |
< |
if (nextEntry == null) |
1247 |
> |
final HashEntry<K,V> nextEntry() { |
1248 |
> |
HashEntry<K,V> e = nextEntry; |
1249 |
> |
if (e == null) |
1250 |
|
throw new NoSuchElementException(); |
1251 |
< |
lastReturned = nextEntry; |
1252 |
< |
advance(); |
1253 |
< |
return lastReturned; |
1251 |
> |
lastReturned = e; // cannot assign until after null check |
1252 |
> |
if ((nextEntry = e.next) == null) |
1253 |
> |
advance(); |
1254 |
> |
return e; |
1255 |
|
} |
1256 |
|
|
1257 |
< |
public void remove() { |
1257 |
> |
public final boolean hasNext() { return nextEntry != null; } |
1258 |
> |
public final boolean hasMoreElements() { return nextEntry != null; } |
1259 |
> |
|
1260 |
> |
public final void remove() { |
1261 |
|
if (lastReturned == null) |
1262 |
|
throw new IllegalStateException(); |
1263 |
|
ConcurrentHashMap.this.remove(lastReturned.key); |
1265 |
|
} |
1266 |
|
} |
1267 |
|
|
1268 |
< |
final class KeyIterator extends HashIterator implements Iterator<K>, Enumeration<K> { |
1269 |
< |
public K next() { return super.nextEntry().key; } |
1270 |
< |
public K nextElement() { return super.nextEntry().key; } |
1271 |
< |
} |
1272 |
< |
|
1273 |
< |
final class ValueIterator extends HashIterator implements Iterator<V>, Enumeration<V> { |
1274 |
< |
public V next() { return super.nextEntry().value; } |
1275 |
< |
public V nextElement() { return super.nextEntry().value; } |
1268 |
> |
final class KeyIterator |
1269 |
> |
extends HashIterator |
1270 |
> |
implements Iterator<K>, Enumeration<K> |
1271 |
> |
{ |
1272 |
> |
public final K next() { return super.nextEntry().key; } |
1273 |
> |
public final K nextElement() { return super.nextEntry().key; } |
1274 |
> |
} |
1275 |
> |
|
1276 |
> |
final class ValueIterator |
1277 |
> |
extends HashIterator |
1278 |
> |
implements Iterator<V>, Enumeration<V> |
1279 |
> |
{ |
1280 |
> |
public final V next() { return super.nextEntry().value; } |
1281 |
> |
public final V nextElement() { return super.nextEntry().value; } |
1282 |
|
} |
1283 |
|
|
1122 |
– |
|
1123 |
– |
|
1284 |
|
/** |
1285 |
< |
* Entry iterator. Exported Entry objects must write-through |
1286 |
< |
* changes in setValue, even if the nodes have been cloned. So we |
1287 |
< |
* cannot return internal HashEntry objects. Instead, the iterator |
1288 |
< |
* itself acts as a forwarding pseudo-entry. |
1289 |
< |
*/ |
1290 |
< |
final class EntryIterator extends HashIterator implements Map.Entry<K,V>, Iterator<Entry<K,V>> { |
1291 |
< |
public Map.Entry<K,V> next() { |
1292 |
< |
nextEntry(); |
1133 |
< |
return this; |
1134 |
< |
} |
1135 |
< |
|
1136 |
< |
public K getKey() { |
1137 |
< |
if (lastReturned == null) |
1138 |
< |
throw new IllegalStateException("Entry was removed"); |
1139 |
< |
return lastReturned.key; |
1140 |
< |
} |
1141 |
< |
|
1142 |
< |
public V getValue() { |
1143 |
< |
if (lastReturned == null) |
1144 |
< |
throw new IllegalStateException("Entry was removed"); |
1145 |
< |
return ConcurrentHashMap.this.get(lastReturned.key); |
1285 |
> |
* Custom Entry class used by EntryIterator.next(), which relays |
1286 |
> |
* setValue changes to the underlying map. |
1287 |
> |
*/ |
1288 |
> |
final class WriteThroughEntry |
1289 |
> |
extends AbstractMap.SimpleEntry<K,V> |
1290 |
> |
{ |
1291 |
> |
WriteThroughEntry(K k, V v) { |
1292 |
> |
super(k,v); |
1293 |
|
} |
1294 |
|
|
1295 |
+ |
/** |
1296 |
+ |
* Sets our entry's value and writes through to the map. The |
1297 |
+ |
* value to return is somewhat arbitrary here. Since a |
1298 |
+ |
* WriteThroughEntry does not necessarily track asynchronous |
1299 |
+ |
* changes, the most recent "previous" value could be |
1300 |
+ |
* different from what we return (or could even have been |
1301 |
+ |
* removed, in which case the put will re-establish the |
1302 |
+ |
* mapping). We do not and cannot guarantee more. |
1303 |
+ |
*/ |
1304 |
|
public V setValue(V value) { |
1305 |
< |
if (lastReturned == null) |
1306 |
< |
throw new IllegalStateException("Entry was removed"); |
1307 |
< |
return ConcurrentHashMap.this.put(lastReturned.key, value); |
1308 |
< |
} |
1153 |
< |
|
1154 |
< |
public boolean equals(Object o) { |
1155 |
< |
// If not acting as entry, just use default. |
1156 |
< |
if (lastReturned == null) |
1157 |
< |
return super.equals(o); |
1158 |
< |
if (!(o instanceof Map.Entry)) |
1159 |
< |
return false; |
1160 |
< |
Map.Entry<?,?> e = (Map.Entry<?,?>)o; |
1161 |
< |
return eq(getKey(), e.getKey()) && eq(getValue(), e.getValue()); |
1162 |
< |
} |
1163 |
< |
|
1164 |
< |
public int hashCode() { |
1165 |
< |
// If not acting as entry, just use default. |
1166 |
< |
if (lastReturned == null) |
1167 |
< |
return super.hashCode(); |
1168 |
< |
|
1169 |
< |
Object k = getKey(); |
1170 |
< |
Object v = getValue(); |
1171 |
< |
return ((k == null) ? 0 : k.hashCode()) ^ |
1172 |
< |
((v == null) ? 0 : v.hashCode()); |
1173 |
< |
} |
1174 |
< |
|
1175 |
< |
public String toString() { |
1176 |
< |
// If not acting as entry, just use default. |
1177 |
< |
if (lastReturned == null) |
1178 |
< |
return super.toString(); |
1179 |
< |
else |
1180 |
< |
return getKey() + "=" + getValue(); |
1305 |
> |
if (value == null) throw new NullPointerException(); |
1306 |
> |
V v = super.setValue(value); |
1307 |
> |
ConcurrentHashMap.this.put(getKey(), value); |
1308 |
> |
return v; |
1309 |
|
} |
1310 |
+ |
} |
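An illustration of the write-through behavior documented above (map contents are hypothetical): setValue on an entry obtained during iteration is relayed to the map via put:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public class WriteThroughDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Integer> m =
                new ConcurrentHashMap<String, Integer>();
            m.put("x", 1);
            // setValue updates both the entry and the underlying map.
            for (Map.Entry<String, Integer> e : m.entrySet())
                e.setValue(e.getValue() + 1);
            System.out.println(m.get("x")); // 2
        }
    }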
1311 |
|
|
1312 |
< |
boolean eq(Object o1, Object o2) { |
1313 |
< |
return (o1 == null ? o2 == null : o1.equals(o2)); |
1312 |
> |
final class EntryIterator |
1313 |
> |
extends HashIterator |
1314 |
> |
implements Iterator<Entry<K,V>> |
1315 |
> |
{ |
1316 |
> |
public Map.Entry<K,V> next() { |
1317 |
> |
HashEntry<K,V> e = super.nextEntry(); |
1318 |
> |
return new WriteThroughEntry(e.key, e.value); |
1319 |
|
} |
1186 |
– |
|
1320 |
|
} |
1321 |
|
|
1322 |
|
final class KeySet extends AbstractSet<K> { |
1326 |
|
public int size() { |
1327 |
|
return ConcurrentHashMap.this.size(); |
1328 |
|
} |
1329 |
+ |
public boolean isEmpty() { |
1330 |
+ |
return ConcurrentHashMap.this.isEmpty(); |
1331 |
+ |
} |
1332 |
|
public boolean contains(Object o) { |
1333 |
|
return ConcurrentHashMap.this.containsKey(o); |
1334 |
|
} |
1338 |
|
public void clear() { |
1339 |
|
ConcurrentHashMap.this.clear(); |
1340 |
|
} |
1205 |
– |
public Object[] toArray() { |
1206 |
– |
Collection<K> c = new ArrayList<K>(); |
1207 |
– |
for (Iterator<K> i = iterator(); i.hasNext(); ) |
1208 |
– |
c.add(i.next()); |
1209 |
– |
return c.toArray(); |
1210 |
– |
} |
1211 |
– |
public <T> T[] toArray(T[] a) { |
1212 |
– |
Collection<K> c = new ArrayList<K>(); |
1213 |
– |
for (Iterator<K> i = iterator(); i.hasNext(); ) |
1214 |
– |
c.add(i.next()); |
1215 |
– |
return c.toArray(a); |
1216 |
– |
} |
1341 |
|
} |
1342 |
|
|
1343 |
|
final class Values extends AbstractCollection<V> { |
1347 |
|
public int size() { |
1348 |
|
return ConcurrentHashMap.this.size(); |
1349 |
|
} |
1350 |
+ |
public boolean isEmpty() { |
1351 |
+ |
return ConcurrentHashMap.this.isEmpty(); |
1352 |
+ |
} |
1353 |
|
public boolean contains(Object o) { |
1354 |
|
return ConcurrentHashMap.this.containsValue(o); |
1355 |
|
} |
1356 |
|
public void clear() { |
1357 |
|
ConcurrentHashMap.this.clear(); |
1358 |
|
} |
1232 |
– |
public Object[] toArray() { |
1233 |
– |
Collection<V> c = new ArrayList<V>(); |
1234 |
– |
for (Iterator<V> i = iterator(); i.hasNext(); ) |
1235 |
– |
c.add(i.next()); |
1236 |
– |
return c.toArray(); |
1237 |
– |
} |
1238 |
– |
public <T> T[] toArray(T[] a) { |
1239 |
– |
Collection<V> c = new ArrayList<V>(); |
1240 |
– |
for (Iterator<V> i = iterator(); i.hasNext(); ) |
1241 |
– |
c.add(i.next()); |
1242 |
– |
return c.toArray(a); |
1243 |
– |
} |
1359 |
|
} |
1360 |
|
|
1361 |
|
final class EntrySet extends AbstractSet<Map.Entry<K,V>> { |
1378 |
|
public int size() { |
1379 |
|
return ConcurrentHashMap.this.size(); |
1380 |
|
} |
1381 |
+ |
public boolean isEmpty() { |
1382 |
+ |
return ConcurrentHashMap.this.isEmpty(); |
1383 |
+ |
} |
1384 |
|
public void clear() { |
1385 |
|
ConcurrentHashMap.this.clear(); |
1386 |
|
} |
1269 |
– |
public Object[] toArray() { |
1270 |
– |
// Since we don't ordinarily have distinct Entry objects, we |
1271 |
– |
// must pack elements using exportable SimpleEntry |
1272 |
– |
Collection<Map.Entry<K,V>> c = new ArrayList<Map.Entry<K,V>>(size()); |
1273 |
– |
for (Iterator<Map.Entry<K,V>> i = iterator(); i.hasNext(); ) |
1274 |
– |
c.add(new AbstractMap.SimpleEntry<K,V>(i.next())); |
1275 |
– |
return c.toArray(); |
1276 |
– |
} |
1277 |
– |
public <T> T[] toArray(T[] a) { |
1278 |
– |
Collection<Map.Entry<K,V>> c = new ArrayList<Map.Entry<K,V>>(size()); |
1279 |
– |
for (Iterator<Map.Entry<K,V>> i = iterator(); i.hasNext(); ) |
1280 |
– |
c.add(new AbstractMap.SimpleEntry<K,V>(i.next())); |
1281 |
– |
return c.toArray(a); |
1282 |
– |
} |
1283 |
– |
|
1387 |
|
} |
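These key, value, and entry collections are views backed by the map, so removal through a view removes the underlying mapping. A minimal sketch (hypothetical contents):

    import java.util.concurrent.ConcurrentHashMap;

    public class ViewRemovalDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Integer> m =
                new ConcurrentHashMap<String, Integer>();
            m.put("a", 1);
            m.put("b", 2);
            // Removing through the key-set view removes the mapping itself.
            m.keySet().remove("a");
            System.out.println(m); // {b=2}
        }
    }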
1388 |
|
|
1389 |
|
/* ---------------- Serialization Support -------------- */ |
1390 |
|
|
1391 |
|
/** |
1392 |
< |
* Save the state of the <tt>ConcurrentHashMap</tt> instance to a |
1393 |
< |
* stream (i.e., serialize it). |
1392 |
> |
* Saves the state of the <tt>ConcurrentHashMap</tt> instance to a |
1393 |
> |
* stream (i.e., serializes it). |
1394 |
|
* @param s the stream |
1395 |
|
* @serialData |
1396 |
|
* the key (Object) and value (Object) |
1397 |
|
* for each key-value mapping, followed by a null pair. |
1398 |
|
* The key-value mappings are emitted in no particular order. |
1399 |
|
*/ |
1400 |
< |
private void writeObject(java.io.ObjectOutputStream s) throws IOException { |
1400 |
> |
private void writeObject(java.io.ObjectOutputStream s) throws IOException { |
1401 |
> |
// force construction of all segments for serialization compatibility |
1402 |
> |
for (int k = 0; k < segments.length; ++k) |
1403 |
> |
ensureSegment(k); |
1404 |
|
s.defaultWriteObject(); |
1405 |
|
|
1406 |
+ |
final Segment<K,V>[] segments = this.segments; |
1407 |
|
for (int k = 0; k < segments.length; ++k) { |
1408 |
< |
Segment<K,V> seg = segments[k]; |
1408 |
> |
Segment<K,V> seg = segmentAt(segments, k); |
1409 |
|
seg.lock(); |
1410 |
|
try { |
1411 |
|
HashEntry<K,V>[] tab = seg.table; |
1412 |
|
for (int i = 0; i < tab.length; ++i) { |
1413 |
< |
for (HashEntry<K,V> e = tab[i]; e != null; e = e.next) { |
1413 |
> |
HashEntry<K,V> e; |
1414 |
> |
for (e = entryAt(tab, i); e != null; e = e.next) { |
1415 |
|
s.writeObject(e.key); |
1416 |
|
s.writeObject(e.value); |
1417 |
|
} |
1425 |
|
} |
1426 |
|
|
1427 |
|
/** |
1428 |
< |
* Reconstitute the <tt>ConcurrentHashMap</tt> instance from a |
1429 |
< |
* stream (i.e., deserialize it). |
1428 |
> |
* Reconstitutes the <tt>ConcurrentHashMap</tt> instance from a |
1429 |
> |
* stream (i.e., deserializes it). |
1430 |
|
* @param s the stream |
1431 |
|
*/ |
1432 |
+ |
@SuppressWarnings("unchecked") |
1433 |
|
private void readObject(java.io.ObjectInputStream s) |
1434 |
< |
throws IOException, ClassNotFoundException { |
1434 |
> |
throws IOException, ClassNotFoundException { |
1435 |
|
s.defaultReadObject(); |
1436 |
|
|
1437 |
< |
// Initialize each segment to be minimally sized, and let grow. |
1438 |
< |
for (int i = 0; i < segments.length; ++i) { |
1439 |
< |
segments[i].setTable(new HashEntry[1]); |
1437 |
> |
// Re-initialize segments to be minimally sized, and let grow. |
1438 |
> |
int cap = MIN_SEGMENT_TABLE_CAPACITY; |
1439 |
> |
final Segment<K,V>[] segments = this.segments; |
1440 |
> |
for (int k = 0; k < segments.length; ++k) { |
1441 |
> |
Segment<K,V> seg = segments[k]; |
1442 |
> |
if (seg != null) { |
1443 |
> |
seg.threshold = (int)(cap * seg.loadFactor); |
1444 |
> |
seg.table = (HashEntry<K,V>[]) new HashEntry<?,?>[cap]; |
1445 |
> |
} |
1446 |
|
} |
1447 |
|
|
1448 |
|
// Read the keys and values, and put the mappings in the table |
1454 |
|
put(key, value); |
1455 |
|
} |
1456 |
|
} |
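A round-trip sketch of the serialized form described above, in which each key/value pair is written in turn and terminated by a null pair (names are illustrative):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;
    import java.util.concurrent.ConcurrentHashMap;

    public class SerializationRoundTrip {
        public static void main(String[] args) throws Exception {
            ConcurrentHashMap<String, Integer> m =
                new ConcurrentHashMap<String, Integer>();
            m.put("k", 42);

            // writeObject emits each key/value pair, then a null pair.
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            ObjectOutputStream out = new ObjectOutputStream(bytes);
            out.writeObject(m);
            out.close();

            // readObject re-initializes minimally sized segments, then puts
            // pairs back until the terminating null key is read.
            ObjectInputStream in = new ObjectInputStream(
                new ByteArrayInputStream(bytes.toByteArray()));
            @SuppressWarnings("unchecked")
            ConcurrentHashMap<String, Integer> copy =
                (ConcurrentHashMap<String, Integer>) in.readObject();
            System.out.println(copy.get("k")); // 42
        }
    }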
1457 |
+ |
|
1458 |
+ |
// Unsafe mechanics |
1459 |
+ |
private static final sun.misc.Unsafe UNSAFE; |
1460 |
+ |
private static final long SBASE; |
1461 |
+ |
private static final int SSHIFT; |
1462 |
+ |
private static final long TBASE; |
1463 |
+ |
private static final int TSHIFT; |
1464 |
+ |
|
1465 |
+ |
static { |
1466 |
+ |
int ss, ts; |
1467 |
+ |
try { |
1468 |
+ |
UNSAFE = sun.misc.Unsafe.getUnsafe(); |
1469 |
+ |
Class<?> tc = HashEntry[].class; |
1470 |
+ |
Class<?> sc = Segment[].class; |
1471 |
+ |
TBASE = UNSAFE.arrayBaseOffset(tc); |
1472 |
+ |
SBASE = UNSAFE.arrayBaseOffset(sc); |
1473 |
+ |
ts = UNSAFE.arrayIndexScale(tc); |
1474 |
+ |
ss = UNSAFE.arrayIndexScale(sc); |
1475 |
+ |
} catch (Exception e) { |
1476 |
+ |
throw new Error(e); |
1477 |
+ |
} |
1478 |
+ |
if ((ss & (ss-1)) != 0 || (ts & (ts-1)) != 0) |
1479 |
+ |
throw new Error("data type scale not a power of two"); |
1480 |
+ |
SSHIFT = 31 - Integer.numberOfLeadingZeros(ss); |
1481 |
+ |
TSHIFT = 31 - Integer.numberOfLeadingZeros(ts); |
1482 |
+ |
} |
1483 |
+ |
|
1484 |
|
} |
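For concreteness (index scales are VM-dependent, so the numbers below are an assumption): with 4-byte array elements, arrayIndexScale returns 4, giving a shift of 31 - numberOfLeadingZeros(4) = 2, so element i lives at offset (i << shift) + base. The same computation, stand-alone:

    public class ShiftFromScale {
        // Computes the shift such that (i << shift) == i * scale, for any
        // power-of-two scale, as the static initializer above does.
        static int shiftFor(int scale) {
            if ((scale & (scale - 1)) != 0)
                throw new IllegalArgumentException("scale not a power of two");
            return 31 - Integer.numberOfLeadingZeros(scale);
        }

        public static void main(String[] args) {
            long base = 16;          // hypothetical arrayBaseOffset value
            int shift = shiftFor(4); // 4-byte references -> shift of 2
            int i = 3;
            long offset = ((long) i << shift) + base;
            System.out.println(shift + " " + offset); // 2 28
        }
    }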