40 |
|
* <li><b>Optimistic Reading.</b> Method {@link #tryOptimisticRead} |
41 |
|
* returns a non-zero stamp only if the lock is not currently held |
42 |
|
* in write mode. Method {@link #validate} returns true if the lock |
43 |
< |
* has not since been acquired in write mode. This mode can be |
44 |
< |
* thought of as an extremely weak version of a read-lock, that can |
45 |
< |
* be broken by a writer at any time. The use of optimistic mode |
46 |
< |
* for short read-only code segments often reduces contention and |
47 |
< |
* improves throughput. However, its use is inherently fragile. |
48 |
< |
* Optimistic read sections should only read fields and hold them in |
49 |
< |
* local variables for later use after validation. Fields read while |
50 |
< |
* in optimistic mode may be wildly inconsistent, so usage applies |
51 |
< |
* only when you are familiar enough with data representations to |
52 |
< |
* check consistency and/or repeatedly invoke method {@code |
53 |
< |
* validate()}. For example, such steps are typically required when |
54 |
< |
* first reading an object or array reference, and then accessing |
55 |
< |
* one of its fields, elements or methods. </li> |
43 |
> |
* has not been acquired in write mode since obtaining a given |
44 |
> |
* stamp. This mode can be thought of as an extremely weak version |
45 |
> |
* of a read-lock, that can be broken by a writer at any time. The |
46 |
> |
* use of optimistic mode for short read-only code segments often |
47 |
> |
* reduces contention and improves throughput. However, its use is |
48 |
> |
* inherently fragile. Optimistic read sections should only read |
49 |
> |
* fields and hold them in local variables for later use after |
50 |
> |
* validation. Fields read while in optimistic mode may be wildly |
51 |
> |
* inconsistent, so usage applies only when you are familiar enough |
52 |
> |
* with data representations to check consistency and/or repeatedly |
53 |
> |
* invoke method {@code validate()}. For example, such steps are |
54 |
> |
* typically required when first reading an object or array |
55 |
> |
* reference, and then accessing one of its fields, elements or |
56 |
> |
* methods. </li> |
57 |
|
* |
58 |
|
* </ul> |
59 |
|
* |
233 |
|
* |
234 |
|
* Nearly all of these mechanics are carried out in methods |
235 |
|
* acquireWrite and acquireRead, that, as typical of such code, |
236 |
< |
* sprawl out because actions and retries rely on consitent sets |
236 |
> |
* sprawl out because actions and retries rely on consistent sets |
237 |
|
* of locally cached reads. |
238 |
|
* |
239 |
|
* As noted in Boehm's paper (above), sequence validation (mainly |
332 |
|
* @return a stamp that can be used to unlock or convert mode |
333 |
|
*/ |
334 |
|
public long writeLock() { |
335 |
< |
long s, next; // bypass acquireWrite in fully onlocked case only |
335 |
> |
long s, next; // bypass acquireWrite in fully unlocked case only |
336 |
|
return ((((s = state) & ABITS) == 0L && |
337 |
|
U.compareAndSwapLong(this, STATE, s, next = s + WBIT)) ? |
338 |
|
next : acquireWrite(false, 0L)); |
404 |
|
* @return a stamp that can be used to unlock or convert mode |
405 |
|
*/ |
406 |
|
public long readLock() { |
407 |
< |
long s, next; // bypass acquireRead on fully onlocked case only |
407 |
> |
long s, next; // bypass acquireRead on fully unlocked case only |
408 |
|
return ((((s = state) & ABITS) == 0L && |
409 |
|
U.compareAndSwapLong(this, STATE, s, next = s + RUNIT)) ? |
410 |
|
next : acquireRead(false, 0L)); |
491 |
|
* Returns true if the lock has not been exclusively acquired |
492 |
|
* since issuance of the given stamp. Always returns false if the |
493 |
|
* stamp is zero. Always returns true if the stamp represents a |
494 |
< |
* currently held lock. |
494 |
> |
* currently held lock. Invoking this method with a value not |
495 |
> |
* obtained from {@link #tryOptimisticRead} or a locking method |
496 |
> |
* for this lock has no defined effect or result. |
497 |
|
* |
498 |
|
* @return true if the lock has not been exclusively acquired |
499 |
|
* since issuance of the given stamp; else false |
1036 |
|
return cancelWaiter(node, null, false); |
1037 |
|
node.thread = Thread.currentThread(); |
1038 |
|
if (node.prev == p && p.status == WAITING && // recheck |
1039 |
< |
(p != whead || (state & ABITS) != 0L)) { |
1039 |
> |
(p != whead || (state & ABITS) != 0L)) |
1040 |
|
U.park(false, time); |
1038 |
– |
if (interruptible && Thread.interrupted()) |
1039 |
– |
return cancelWaiter(node, null, true); |
1040 |
– |
} |
1041 |
|
node.thread = null; |
1042 |
+ |
if (interruptible && Thread.interrupted()) |
1043 |
+ |
return cancelWaiter(node, null, true); |
1044 |
|
} |
1045 |
|
} |
1046 |
|
} |
1103 |
|
node.cowait = p.cowait, node)) { |
1104 |
|
node.thread = Thread.currentThread(); |
1105 |
|
for (long time;;) { |
1106 |
+ |
if (interruptible && Thread.interrupted()) |
1107 |
+ |
return cancelWaiter(node, p, true); |
1108 |
|
if (deadline == 0L) |
1109 |
|
time = 0L; |
1110 |
|
else if ((time = deadline - System.nanoTime()) <= 0L) |
1119 |
|
if (node.thread == null) // must recheck |
1120 |
|
break; |
1121 |
|
U.park(false, time); |
1118 |
– |
if (interruptible && Thread.interrupted()) |
1119 |
– |
return cancelWaiter(node, p, true); |
1122 |
|
} |
1123 |
|
group = p; |
1124 |
|
} |
1176 |
|
return cancelWaiter(node, null, false); |
1177 |
|
node.thread = Thread.currentThread(); |
1178 |
|
if (node.prev == p && p.status == WAITING && |
1179 |
< |
(p != whead || (state & ABITS) != WBIT)) { |
1179 |
> |
(p != whead || (state & ABITS) != WBIT)) |
1180 |
|
U.park(false, time); |
1179 |
– |
if (interruptible && Thread.interrupted()) |
1180 |
– |
return cancelWaiter(node, null, true); |
1181 |
– |
} |
1181 |
|
node.thread = null; |
1182 |
+ |
if (interruptible && Thread.interrupted()) |
1183 |
+ |
return cancelWaiter(node, null, true); |
1184 |
|
} |
1185 |
|
} |
1186 |
|
} |
1187 |
|
|
1188 |
|
/** |
1189 |
< |
* If node non-null, forces cancel status and unsplices from queue |
1190 |
< |
* if possible. This is a variant of cancellation methods in |
1191 |
< |
* AbstractQueuedSynchronizer (see its detailed explanation in AQS |
1192 |
< |
* internal documentation) that more conservatively wakes up other |
1193 |
< |
* threads that may have had their links changed, so as to preserve |
1194 |
< |
* liveness in the main signalling methods. |
1189 |
> |
* If node non-null, forces cancel status and unsplices it from |
1190 |
> |
* queue if possible and wakes up any cowaiters. This is a variant |
1191 |
> |
* of cancellation methods in AbstractQueuedSynchronizer (see its |
1192 |
> |
* detailed explanation in AQS internal documentation) that more |
1193 |
> |
* conservatively wakes up other threads that may have had their |
1194 |
> |
* links changed, so as to preserve liveness in the main |
1195 |
> |
* signalling methods. |
1196 |
> |
* |
1197 |
> |
* @param node if nonnull, the waiter |
1198 |
> |
* @param group if nonnull, the group current thread is cowaiting with |
1199 |
> |
* @param interrupted if already interrupted |
1200 |
> |
* @return INTERRUPTED if interrupted or Thread.interrupted, else zero |
1201 |
|
*/ |
1202 |
|
private long cancelWaiter(WNode node, WNode group, boolean interrupted) { |
1203 |
|
if (node != null) { |
1204 |
|
node.thread = null; |
1205 |
|
node.status = CANCELLED; |
1206 |
< |
if (group != null) { |
1207 |
< |
for (WNode p = group, q; p != null; p = q) { |
1208 |
< |
if ((q = p.cowait) != null && q.status == CANCELLED) { |
1209 |
< |
U.compareAndSwapObject(p, WCOWAIT, q, q.cowait); |
1210 |
< |
break; |
1211 |
< |
} |
1206 |
> |
Thread w; // wake up co-waiters; unsplice cancelled ones |
1207 |
> |
for (WNode q, p = (group != null) ? group : node; p != null; ) { |
1208 |
> |
if ((q = p.cowait) == null) |
1209 |
> |
break; |
1210 |
> |
if ((w = q.thread) != null) { |
1211 |
> |
q.thread = null; |
1212 |
> |
U.unpark(w); |
1213 |
|
} |
1214 |
+ |
if (q.status == CANCELLED) |
1215 |
+ |
U.compareAndSwapObject(p, WCOWAIT, q, q.cowait); |
1216 |
+ |
else |
1217 |
+ |
p = q; |
1218 |
|
} |
1219 |
< |
else { |
1219 |
> |
if (group == null) { // unsplice both prev and next links |
1220 |
|
for (WNode pred = node.prev; pred != null; ) { |
1221 |
< |
WNode succ, pp; Thread w; |
1221 |
> |
WNode succ, pp; // first unsplice next |
1222 |
|
while ((succ = node.next) == null || |
1223 |
|
succ.status == CANCELLED) { |
1224 |
< |
WNode q = null; |
1224 |
> |
WNode q = null; // find successor the slow way |
1225 |
|
for (WNode t = wtail; t != null && t != node; t = t.prev) |
1226 |
|
if (t.status != CANCELLED) |
1227 |
< |
q = t; |
1228 |
< |
if (succ == q || |
1227 |
> |
q = t; // don't link if succ cancelled |
1228 |
> |
if (succ == q || // ensure accurate successor |
1229 |
|
U.compareAndSwapObject(node, WNEXT, |
1230 |
|
succ, succ = q)) { |
1231 |
|
if (succ == null && node == wtail) |
1233 |
|
break; |
1234 |
|
} |
1235 |
|
} |
1236 |
< |
if (pred.next == node) |
1236 |
> |
if (pred.next == node) // unsplice pred link |
1237 |
|
U.compareAndSwapObject(pred, WNEXT, node, succ); |
1238 |
< |
if (succ != null && (w = succ.thread) != null) |
1239 |
< |
U.unpark(w); |
1238 |
> |
if (succ != null && (w = succ.thread) != null) { |
1239 |
> |
succ.thread = null; |
1240 |
> |
U.unpark(w); // conservatively wake up new succ |
1241 |
> |
} |
1242 |
|
if (pred.status != CANCELLED || (pp = pred.prev) == null) |
1243 |
|
break; |
1244 |
< |
node.prev = pp; // repeat for new pred |
1244 |
> |
node.prev = pp; // repeat in case new pred wrong/cancelled |
1245 |
|
U.compareAndSwapObject(pp, WNEXT, pred, succ); |
1246 |
|
pred = pp; |
1247 |
|
} |