11 |
|
|
12 |
|
/** |
13 |
|
* A capability-based lock with three modes for controlling read/write |
14 |
< |
* access. The state of a StampedLock consists of a version and |
15 |
< |
* mode. Lock acquisition methods return a stamp that represents and |
14 |
> |
* access. The state of a StampedLock consists of a version and mode. |
15 |
> |
* Lock acquisition methods return a stamp that represents and |
16 |
|
* controls access with respect to a lock state; "try" versions of |
17 |
|
* these methods may instead return the special value zero to |
18 |
|
* represent failure to acquire access. Lock release and conversion |
26 |
|
* in method {@link #unlockWrite} to release the lock. Untimed and |
27 |
|
* timed versions of {@code tryWriteLock} are also provided. When |
28 |
|
* the lock is held in write mode, no read locks may be obtained, |
29 |
< |
* and all observer validations will fail. </li> |
29 |
> |
* and all optimistic read validations will fail. </li> |
30 |
|
* |
31 |
|
* <li><b>Reading.</b> Method {@link #readLock} possibly blocks |
32 |
|
* waiting for non-exclusive access, returning a stamp that can be |
55 |
|
* <p>This class also supports methods that conditionally provide |
56 |
|
* conversions across the three modes. For example, method {@link |
57 |
|
* #tryConvertToWriteLock} attempts to "upgrade" a mode, returning |
58 |
< |
* valid write stamp if (1) already in writing mode (2) in reading |
58 |
> |
* a valid write stamp if (1) already in writing mode (2) in reading |
59 |
|
* mode and there are no other readers or (3) in optimistic mode and |
60 |
|
* the lock is available. The forms of these methods are designed to |
61 |
|
* help reduce some of the code bloat that otherwise occurs in |
76 |
|
* into initial unlocked state, so they are not useful for remote |
77 |
|
* locking. |
78 |
|
* |
79 |
< |
* <p>The scheduling policy of StampedLock does not consistently prefer |
80 |
< |
* readers over writers or vice versa. |
79 |
> |
* <p>The scheduling policy of StampedLock does not consistently |
80 |
> |
* prefer readers over writers or vice versa. A zero return from any |
81 |
> |
* "try" method for acquiring or converting locks does not carry any |
82 |
> |
* information about the state of the lock; a subsequent invocation |
83 |
> |
* may succeed. |
84 |
|
* |
85 |
|
* <p><b>Sample Usage.</b> The following illustrates some usage idioms |
86 |
|
* in a class that maintains simple two-dimensional points. The sample |
90 |
|
* |
91 |
|
* <pre>{@code |
92 |
|
* class Point { |
93 |
< |
* private volatile double x, y; |
93 |
> |
* private double x, y; |
94 |
|
* private final StampedLock sl = new StampedLock(); |
95 |
|
* |
96 |
|
* void move(double deltaX, double deltaY) { // an exclusively locked method |
122 |
|
* } |
123 |
|
* |
124 |
|
* double distanceFromOriginV2() { // combines code paths |
125 |
< |
* for (long stamp = sl.optimisticRead(); ; stamp = sl.readLock()) { |
125 |
> |
* for (long stamp = sl.tryOptimisticRead(); ; stamp = sl.readLock()) { |
126 |
|
* double currentX, currentY; |
127 |
|
* try { |
128 |
|
* currentX = x; |
216 |
|
* in-progress spins/signals, and others do not account for |
217 |
|
* cancellations. |
218 |
|
* |
219 |
+ |
* Controlled, randomized spinning is used in the two await |
220 |
+ |
* methods to reduce (increasingly expensive) context switching |
221 |
+ |
* while also avoiding sustained memory thrashing among many |
222 |
+ |
* threads. Both await methods use a similar spin strategy: If |
223 |
+ |
* the associated queue appears to be empty, then the thread |
224 |
+ |
* spin-waits up to SPINS times (where each iteration decreases |
225 |
+ |
 * spin count with 50% probability) before enqueuing, and then, if
226 |
+ |
* it is the first thread to be enqueued, spins again up to SPINS |
227 |
+ |
* times before blocking. If, upon wakening it fails to obtain |
228 |
+ |
* lock, and is still (or becomes) the first waiting thread (which |
229 |
+ |
* indicates that some other thread barged and obtained lock), it |
230 |
+ |
* escalates spins (up to MAX_HEAD_SPINS) to reduce the likelihood |
231 |
+ |
* of continually losing to barging threads. |
232 |
+ |
* |
233 |
|
* As noted in Boehm's paper (above), sequence validation (mainly |
234 |
|
* method validate()) requires stricter ordering rules than apply |
235 |
|
* to normal volatile reads (of "state"). In the absence of (but |
254 |
|
/** Maximum number of retries before blocking on acquisition */ |
255 |
|
private static final int SPINS = (NCPU > 1) ? 1 << 6 : 1; |
256 |
|
|
257 |
< |
/** Maximum number of retries before re-blocking on write lock */ |
257 |
> |
/** Maximum number of retries before re-blocking */ |
258 |
|
private static final int MAX_HEAD_SPINS = (NCPU > 1) ? 1 << 12 : 1; |
259 |
|
|
260 |
|
/** The period for yielding when waiting for overflow spinlock */ |
908 |
|
/** |
909 |
|
* RNG for local spins. The first call from await{Read,Write} |
910 |
|
* produces a thread-local value. Unless zero, subsequent calls |
911 |
< |
* use an xorShift to further reduce memory traffic. Both await |
895 |
< |
* methods use a similar spin strategy: If associated queue |
896 |
< |
* appears to be empty, then the thread spin-waits up to SPINS |
897 |
< |
* times before enqueing, and then, if the first thread to be |
898 |
< |
* enqueued, spins again up to SPINS times before blocking. If, |
899 |
< |
* upon wakening it fails to obtain lock, and is still (or |
900 |
< |
* becomes) the first waiting thread (which indicates that some |
901 |
< |
* other thread barged and obtained lock), it escalates spins (up |
902 |
< |
* to MAX_HEAD_SPINS) to reduce the likelihood of continually |
903 |
< |
* losing to barging threads. |
911 |
> |
* use an xorShift to further reduce memory traffic. |
912 |
|
*/ |
913 |
|
private static int nextRandom(int r) { |
914 |
|
if (r == 0) |
957 |
|
p.next = node; |
958 |
|
for (int headSpins = SPINS;;) { |
959 |
|
WNode np; int ps; |
960 |
< |
if ((np = node.prev) != p && np != null) |
961 |
< |
(p = np).next = node; // stale |
960 |
> |
if ((np = node.prev) != p && np != null && |
961 |
> |
(p = np).next != node) |
962 |
> |
p.next = node; // stale |
963 |
|
if (p == whead) { |
964 |
|
for (int k = headSpins;;) { |
965 |
|
if (((s = state) & ABITS) == 0L) { |
1016 |
|
WNode predNext = pred.next; |
1017 |
|
node.status = CANCELLED; |
1018 |
|
if (predNext != null) { |
1019 |
< |
Thread w = null; |
1019 |
> |
Thread w; |
1020 |
|
WNode succ = node.next; |
1021 |
< |
while (succ != null && succ.status == CANCELLED) |
1022 |
< |
succ = succ.next; |
1023 |
< |
if (succ != null) |
1024 |
< |
w = succ.thread; |
1025 |
< |
else if (node == wtail) |
1026 |
< |
U.compareAndSwapObject(this, WTAIL, node, pred); |
1021 |
> |
if (succ == null || succ.status == CANCELLED) { |
1022 |
> |
succ = null; |
1023 |
> |
for (WNode t = wtail; t != null && t != node; t = t.prev) |
1024 |
> |
if (t.status <= 0) |
1025 |
> |
succ = t; |
1026 |
> |
if (succ == null && node == wtail) |
1027 |
> |
U.compareAndSwapObject(this, WTAIL, node, pred); |
1028 |
> |
} |
1029 |
|
U.compareAndSwapObject(pred, WNEXT, predNext, succ); |
1030 |
< |
if (w != null) |
1030 |
> |
if (succ != null && (w = succ.thread) != null) |
1031 |
|
U.unpark(w); |
1032 |
|
} |
1033 |
|
} |
1114 |
|
/** |
1115 |
|
* If node non-null, forces cancel status and unsplices from queue |
1116 |
|
* if possible, by traversing entire queue looking for cancelled |
1117 |
< |
* nodes, cleaning out all at head, but stopping upon first |
1107 |
< |
* encounter otherwise. |
1117 |
> |
* nodes. |
1118 |
|
*/ |
1119 |
|
private long cancelReader(RNode node, boolean interrupted) { |
1120 |
|
Thread w; |
1129 |
|
} |
1130 |
|
else { |
1131 |
|
U.compareAndSwapObject(pred, RNEXT, p, q); |
1132 |
< |
break; |
1132 |
> |
p = pred.next; |
1133 |
|
} |
1134 |
|
} |
1135 |
|
else { |