11 |
|
|
12 |
|
/** |
13 |
|
* A capability-based lock with three modes for controlling read/write |
14 |
< |
* access. The state of a StampedLock consists of a version and |
15 |
< |
* mode. Lock acquisition methods return a stamp that represents and |
14 |
> |
* access. The state of a StampedLock consists of a version and mode. |
15 |
> |
* Lock acquisition methods return a stamp that represents and |
16 |
|
* controls access with respect to a lock state; "try" versions of |
17 |
|
* these methods may instead return the special value zero to |
18 |
|
* represent failure to acquire access. Lock release and conversion |
78 |
|
* |
79 |
|
* <p>The scheduling policy of StampedLock does not consistently |
80 |
|
* prefer readers over writers or vice versa. A zero return from any |
81 |
< |
* "try" method for acquiring or converting locks does carry any |
81 |
> |
* "try" method for acquiring or converting locks does not carry any |
82 |
|
* information about the state of the lock; a subsequent invocation |
83 |
|
* may succeed. |
84 |
|
* |
216 |
|
* in-progress spins/signals, and others do not account for |
217 |
|
* cancellations. |
218 |
|
* |
219 |
+ |
* Controlled, randomized spinning is used in the two await |
220 |
+ |
* methods to reduce (increasingly expensive) context switching |
221 |
+ |
* while also avoiding sustained memory thrashing among many |
222 |
+ |
* threads. Both await methods use a similar spin strategy: If |
223 |
+ |
* the associated queue appears to be empty, then the thread |
224 |
+ |
* spin-waits up to SPINS times (where each iteration decreases |
225 |
+ |
* spin count with 50% probability) before enqueuing, and then, if
226 |
+ |
* it is the first thread to be enqueued, spins again up to SPINS |
227 |
+ |
* times before blocking. If, upon wakening, it fails to obtain
228 |
+ |
* lock, and is still (or becomes) the first waiting thread (which |
229 |
+ |
* indicates that some other thread barged and obtained lock), it |
230 |
+ |
* escalates spins (up to MAX_HEAD_SPINS) to reduce the likelihood |
231 |
+ |
* of continually losing to barging threads. |
232 |
+ |
* |
233 |
|
* As noted in Boehm's paper (above), sequence validation (mainly |
234 |
|
* method validate()) requires stricter ordering rules than apply |
235 |
|
* to normal volatile reads (of "state"). In the absence of (but |
908 |
|
/** |
909 |
|
* RNG for local spins. The first call from await{Read,Write} |
910 |
|
* produces a thread-local value. Unless zero, subsequent calls |
911 |
< |
* use an xorShift to further reduce memory traffic. Both await |
898 |
< |
* methods use a similar spin strategy: If associated queue |
899 |
< |
* appears to be empty, then the thread spin-waits up to SPINS |
900 |
< |
* times before enqueing, and then, if the first thread to be |
901 |
< |
* enqueued, spins again up to SPINS times before blocking. If, |
902 |
< |
* upon wakening it fails to obtain lock, and is still (or |
903 |
< |
* becomes) the first waiting thread (which indicates that some |
904 |
< |
* other thread barged and obtained lock), it escalates spins (up |
905 |
< |
* to MAX_HEAD_SPINS) to reduce the likelihood of continually |
906 |
< |
* losing to barging threads. |
911 |
> |
* use an xorShift to further reduce memory traffic. |
912 |
|
*/ |
913 |
|
private static int nextRandom(int r) { |
914 |
|
if (r == 0) |
957 |
|
p.next = node; |
958 |
|
for (int headSpins = SPINS;;) { |
959 |
|
WNode np; int ps; |
960 |
< |
if ((np = node.prev) != p && np != null) |
961 |
< |
(p = np).next = node; // stale |
960 |
> |
if ((np = node.prev) != p && np != null && |
961 |
> |
(p = np).next != node) |
962 |
> |
p.next = node; // stale |
963 |
|
if (p == whead) { |
964 |
|
for (int k = headSpins;;) { |
965 |
|
if (((s = state) & ABITS) == 0L) { |
1016 |
|
WNode predNext = pred.next; |
1017 |
|
node.status = CANCELLED; |
1018 |
|
if (predNext != null) { |
1019 |
< |
Thread w = null; |
1019 |
> |
Thread w; |
1020 |
|
WNode succ = node.next; |
1021 |
< |
while (succ != null && succ.status == CANCELLED) |
1022 |
< |
succ = succ.next; |
1023 |
< |
if (succ != null) |
1024 |
< |
w = succ.thread; |
1025 |
< |
else if (node == wtail) |
1026 |
< |
U.compareAndSwapObject(this, WTAIL, node, pred); |
1021 |
> |
if (succ == null || succ.status == CANCELLED) { |
1022 |
> |
succ = null; |
1023 |
> |
for (WNode t = wtail; t != null && t != node; t = t.prev) |
1024 |
> |
if (t.status <= 0) |
1025 |
> |
succ = t; |
1026 |
> |
if (succ == null && node == wtail) |
1027 |
> |
U.compareAndSwapObject(this, WTAIL, node, pred); |
1028 |
> |
} |
1029 |
|
U.compareAndSwapObject(pred, WNEXT, predNext, succ); |
1030 |
< |
if (w != null) |
1030 |
> |
if (succ != null && (w = succ.thread) != null) |
1031 |
|
U.unpark(w); |
1032 |
|
} |
1033 |
|
} |