1 |
dl |
1.2 |
/* |
2 |
|
|
* Written by Doug Lea with assistance from members of JCP JSR-166 |
3 |
|
|
* Expert Group and released to the public domain. Use, modify, and |
4 |
|
|
* redistribute this code in any way without acknowledgement. |
5 |
|
|
*/ |
6 |
|
|
|
7 |
tim |
1.1 |
package java.util.concurrent; |
8 |
dl |
1.2 |
import java.util.concurrent.atomic.*; |
9 |
|
|
import java.util.Date; |
10 |
tim |
1.1 |
|
11 |
dl |
1.3 |
|
12 |
tim |
1.1 |
/** |
13 |
|
|
* A reentrant mutual exclusion lock. |
14 |
|
|
* <p><tt>ReentrantLock</tt> defines a stand-alone {@link Lock} class with |
15 |
|
|
* the same basic behavior and semantics as the implicit |
16 |
|
|
* monitor lock accessed by the use of <tt>synchronized</tt> methods and |
17 |
|
|
* statements, but without the forced block-structured locking and unlocking |
18 |
|
|
* that occurs with <tt>synchronized</tt> methods and |
19 |
|
|
* statements. |
20 |
|
|
* |
21 |
|
|
* <p>The order in which blocked threads are granted the lock is not |
22 |
dl |
1.2 |
* specified. |
23 |
tim |
1.1 |
* |
24 |
|
|
* <p>If you want a non-reentrant mutual exclusion lock then it is a simple |
25 |
|
|
* matter to use a reentrant lock in a non-reentrant way by ensuring that |
26 |
|
|
* the lock is not held by the current thread prior to locking. |
27 |
|
|
* See {@link #getHoldCount} for a way to check this. |
28 |
|
|
* |
29 |
|
|
* |
30 |
|
|
* <p><code>ReentrantLock</code> instances are intended to be used primarily |
31 |
|
|
* in before/after constructions such as: |
32 |
|
|
* |
33 |
|
|
* <pre> |
34 |
|
|
* class X { |
35 |
|
|
* ReentrantLock lock; |
36 |
|
|
* // ... |
37 |
|
|
* |
38 |
|
|
* public void m() { |
39 |
|
|
* lock.lock(); // block until condition holds |
40 |
|
|
* try { |
41 |
|
|
* // ... method body |
42 |
|
|
* } finally { |
43 |
|
|
* lock.unlock() |
44 |
|
|
* } |
45 |
|
|
* } |
46 |
|
|
* } |
47 |
|
|
* </pre> |
48 |
|
|
* |
49 |
|
|
* <p>This class supports the interruption of lock acquisition and provides a |
50 |
|
|
* {@link #newCondition Condition} implementation that supports the |
51 |
|
|
* interruption of thread suspension. |
52 |
|
|
* |
53 |
|
|
* <p>Except where noted, passing a <tt>null</tt> value for any parameter |
54 |
|
|
* will result in a {@link NullPointerException} being thrown. |
55 |
|
|
* |
56 |
dl |
1.2 |
* <h3>Implementation Notes</h3> |
57 |
tim |
1.1 |
 * <p> To be done. |
58 |
|
|
* |
59 |
|
|
* |
60 |
|
|
* @since 1.5 |
61 |
|
|
* @spec JSR-166 |
62 |
dl |
1.3 |
* @revised $Date: 2003/05/27 18:14:40 $ |
63 |
|
|
* @editor $Author: dl $ |
64 |
dl |
1.2 |
* @author Doug Lea |
65 |
tim |
1.1 |
* |
66 |
|
|
* @fixme (1) We need a non-nested example to motivate this |
67 |
|
|
* @fixme (2) Describe performance aspects of interruptible locking for the RI |
68 |
|
|
**/ |
69 |
dl |
1.3 |
public class ReentrantLock extends ReentrantLockQueueNode implements Lock, java.io.Serializable { |
70 |
dl |
1.2 |
/* |
71 |
|
|
This is a fastpath/slowpath algorithm. |
72 |
|
|
|
73 |
|
|
* The basic algorithm looks like this, ignoring reentrance, |
74 |
|
|
cancellation, timeouts, error checking etc: |
75 |
|
|
Lock: |
76 |
|
|
if (!fair && casOwner(null, currentThread)) // fastpath |
77 |
|
|
return; |
78 |
|
|
node = create and enq a wait node; |
79 |
|
|
for (;;) { |
80 |
|
|
if (node is first on queue) { |
81 |
|
|
if (casOwner(null, currentThread)) { |
82 |
|
|
deq(node); |
83 |
|
|
return; |
84 |
|
|
} |
85 |
|
|
} |
86 |
|
|
park(currentThread); |
87 |
|
|
} |
88 |
|
|
|
89 |
|
|
Unlock: |
90 |
|
|
owner = null; // volatile assignment |
91 |
|
|
h = first node on queue; |
92 |
|
|
if (h != null) unpark(h's successor's thread); |
93 |
|
|
|
94 |
|
|
* The fast path uses one atomic CAS operation, plus one |
95 |
|
|
StoreLoad barrier per lock/unlock pair. The "owner" field is |
96 |
|
|
handled as a simple spinlock. To lock, the owner field is set |
97 |
|
|
to current thread using conditional atomic update. To unlock, |
98 |
|
|
the owner field is set to null, checking if anyone needs |
99 |
|
|
waking up, if so doing so. Recursive locks/unlocks instead |
100 |
|
|
increment/decrement recursion field. |
101 |
|
|
|
102 |
|
|
* By default, contended locks use a kind of "greedy" / |
103 |
|
|
"renouncement" / barging / convoy-avoidance strategy: When a |
104 |
|
|
lock is released, a waiting thread is signalled so that it can |
105 |
|
|
(re)contend for the lock. It might lose and thus need to |
106 |
|
|
rewait. This strategy has much higher throughput than |
107 |
|
|
"directed handoff" because it reduces blocking of running |
108 |
|
|
threads, but poorer fairness. The wait queue is FIFO, but |
109 |
|
|
newly entering threads can barge ahead and grab lock before |
110 |
|
|
woken waiters, so acquires are not strictly FIFO, and transfer |
111 |
|
|
is not deterministically fair. It is probabilistically fair |
112 |
|
|
though. Earlier queued threads are allowed to recontend before |
113 |
|
|
later queued threads, and each recontention has an unbiased |
114 |
|
|
chance to succeed. |
115 |
|
|
|
116 |
|
|
* The base class also sets up support for FairReentrantLock |
117 |
|
|
subclass, that differs only in that barging is disabled when |
118 |
|
|
there is contention, so locks proceed FIFO. There can be |
119 |
|
|
some races in detecting contention, but it is still FIFO from |
120 |
|
|
a definable (although complicated to describe) single point, |
121 |
|
|
so qualifies as a FIFO lock. |
122 |
|
|
|
123 |
|
|
* The wait queue is a variant of a "CLH" (Craig, Landin, and |
124 |
|
|
Hagersten) lock. CLH locks are normally used for spinlocks. |
125 |
|
|
We instead use them for blocking locks, but use the same basic |
126 |
|
|
tactic of holding the information about whether a thread is |
127 |
|
|
released (i.e, eligible to contend for ownership lock) in the |
128 |
|
|
predecessor of each node. A node waits until its predecessor |
129 |
|
|
says it is released. It is signalled when its predecessor |
130 |
|
|
releases the lock. Each node of the queue otherwise serves as |
131 |
|
|
a specific-notification-style monitor holding a single waiting |
132 |
|
|
thread. |
133 |
|
|
|
134 |
|
|
To enqueue into a CLH lock, you atomically splice it in as new |
135 |
|
|
tail. To dequeue, you just set the head field. |
136 |
|
|
|
137 |
|
|
+------+ prev +-----+ +-----+ |
138 |
|
|
head | | <---- | | <---- | | tail |
139 |
|
|
+------+ +-----+ +-----+ |
140 |
|
|
|
141 |
|
|
The great thing about CLH Locks is that insertion into a CLH |
142 |
|
|
queue requires only a single atomic operation on "tail", so |
143 |
|
|
there is a simple atomic point of demarcation from unqueued to |
144 |
|
|
queued. Similarly, dequeing involves only updating the |
145 |
|
|
"head". However, it takes a bit more work for nodes to |
146 |
|
|
determine who their successors are, in part to deal with |
147 |
|
|
possible cancellation due to timeouts and interrupts. |
148 |
|
|
|
149 |
|
|
The "prev" links (not used in original CLH locks), are mainly |
150 |
|
|
needed to handle cancellation. If a node is cancelled, its |
151 |
|
|
successor must be relinked to a non-cancelled predecessor. For |
152 |
|
|
explanation in the case of spin locks, see the papers by Scott |
153 |
|
|
& Scherer at |
154 |
|
|
http://www.cs.rochester.edu/u/scott/synchronization/ |
155 |
|
|
|
156 |
|
|
Being first in the queue does not mean that you have the lock, |
157 |
|
|
only that you may contend for it (by CAS'ing owner field). So |
158 |
|
|
the currently released contender thread may need to rewait. |
159 |
|
|
|
160 |
|
|
We also use "next" links to implement blocking mechanics. The |
161 |
|
|
thread id for each node is kept in its node, so a predecessor |
162 |
|
|
signals the next node to wake up by traversing next link to |
163 |
|
|
determine which thread it is. Determination of successor must |
164 |
|
|
avoid races with newly queued nodes to set the "next" fields |
165 |
|
|
of their predecessors. This is solved by checking backwards |
166 |
|
|
from the atomically updated "tail" when a node's successor |
167 |
|
|
appears to be null. |
168 |
|
|
|
169 |
|
|
Cancellation introduces some conservatism to the basic |
170 |
|
|
algorithm of these. Since we must poll for cancellation of |
171 |
|
|
other nodes, we can miss noticing whether a cancelled node is |
172 |
|
|
ahead or behind us. This is dealt with by always unparking |
173 |
|
|
successors upon cancellation, and not letting them park again |
174 |
|
|
(by saturating release counts) until they stabilize on a new |
175 |
|
|
predecessor. |
176 |
|
|
|
177 |
|
|
* Threads waiting on Conditions use the same kind of nodes, but |
178 |
|
|
only need to link them in simple (non-concurrent) linked |
179 |
|
|
queues because they are only accessed when lock is held. To |
180 |
|
|
wait, a thread makes a node inserted into a condition queue. |
181 |
|
|
Upon signal, the node is transferred to the lock queue. |
182 |
dl |
1.3 |
Special values of releaseStatus fields are used to mark which queue a |
183 |
dl |
1.2 |
node is on. |
184 |
|
|
|
185 |
|
|
* All suspension and resumption of threads uses the JSR166 |
186 |
|
|
native park/unpark API. These are safe versions of |
187 |
|
|
suspend/resume (plus timeout support) that internally avoid |
188 |
|
|
the intrinsic race problems with suspend/resume: Park suspends |
189 |
|
|
if not preceded by an unpark. Unpark resumes if waiting, else |
190 |
|
|
causes next park not to suspend. While safe and efficient, |
191 |
|
|
these are not general-purpose public operations because we do |
192 |
|
|
not want code outside this package to randomly call these |
193 |
|
|
methods -- parks and unparks should be matched up. (It is OK |
194 |
|
|
to have more unparks than parks, but it causes threads to |
195 |
|
|
spuriously wake up. So minimizing excessive unparks is a |
196 |
|
|
performance concern.) |
197 |
dl |
1.3 |
|
198 |
|
|
* The ReentrantLock class extends package-private |
199 |
|
|
ReentrantLockQueueNode class as an expedient and efficient |
200 |
|
|
(although slightly sleazy) solution to serialization and |
201 |
|
|
initialization problems. |
202 |
dl |
1.2 |
*/ |
203 |
|
|
|
204 |
|
|
/* |
205 |
|
|
Note that all fields are transient and defined in a way that |
206 |
|
|
deserialized locks are in initial unlocked state. |
207 |
|
|
*/ |
208 |
|
|
|
209 |
|
|
    /**
     * Creates an instance of <tt>ReentrantLock</tt>.
     * All fields are transient and defined so that a deserialized
     * lock starts in the initial, unlocked state.
     */
    public ReentrantLock() { }
213 |
|
|
|
214 |
|
|
    /**
     * Current owner of lock, or null iff the lock is free.  Acquired
     * only using CAS (see acquireOwner); cleared by a plain volatile
     * write in unlock.
     */
    private transient volatile Thread owner;

    /**
     * Number of recursive acquires by the owner.
     * Note: total holds = recursions + 1.
     * Read/written only while holding the lock, so no atomics needed.
     */
    private transient int recursions;

    /** Head of the wait queue, initialized to point to self as dummy node */
    private transient volatile ReentrantLockQueueNode head = this;

    /** Tail of the wait queue, initialized to point to self as dummy node */
    private transient volatile ReentrantLockQueueNode tail = this;
228 |
dl |
1.2 |
|
229 |
|
|
    // Atomics support.
    // Reflection-based field updaters provide CAS on the volatile
    // fields above without a separate atomic object per lock.
    // (Note: this uses a pre-release JSR-166 updater constructor
    // taking witness arrays.)

    private final static AtomicReferenceFieldUpdater<ReentrantLock, Thread> ownerUpdater = new AtomicReferenceFieldUpdater<ReentrantLock, Thread>(new ReentrantLock[0], new Thread[0], "owner");
    private final static AtomicReferenceFieldUpdater<ReentrantLock, ReentrantLockQueueNode> tailUpdater = new AtomicReferenceFieldUpdater<ReentrantLock, ReentrantLockQueueNode>(new ReentrantLock[0], new ReentrantLockQueueNode[0], "tail");
    private final static AtomicReferenceFieldUpdater<ReentrantLock, ReentrantLockQueueNode> headUpdater = new AtomicReferenceFieldUpdater<ReentrantLock, ReentrantLockQueueNode>(new ReentrantLock[0], new ReentrantLockQueueNode[0], "head");
    private final static AtomicIntegerFieldUpdater<ReentrantLockQueueNode> releaseStatusUpdater =
        new AtomicIntegerFieldUpdater<ReentrantLockQueueNode>(new ReentrantLockQueueNode[0], "releaseStatus");
236 |
dl |
1.2 |
|
237 |
|
|
    /**
     * Tries to atomically set owner from null to the given thread.
     * @return true if the CAS succeeded, i.e. the lock was acquired.
     */
    private boolean acquireOwner(Thread current) {
        return ownerUpdater.compareAndSet(this, null, current);
    }
240 |
|
|
|
241 |
dl |
1.3 |
    /** CAS the tail field; used when enqueuing wait nodes. */
    private boolean casTail(ReentrantLockQueueNode cmp, ReentrantLockQueueNode val) {
        return tailUpdater.compareAndSet(this, cmp, val);
    }
244 |
|
|
|
245 |
dl |
1.3 |
    /** CAS the head field. */
    private boolean casHead(ReentrantLockQueueNode cmp, ReentrantLockQueueNode val) {
        return headUpdater.compareAndSet(this, cmp, val);
    }
248 |
|
|
|
249 |
dl |
1.3 |
    /**
     * CAS a node's releaseStatus field.
     * Non-private because also accessed by Conditions.
     */
    final boolean casReleaseStatus(ReentrantLockQueueNode node, int cmp, int val) {
        return releaseStatusUpdater.compareAndSet(node, cmp, val);
    }
253 |
|
|
|
254 |
|
|
|
255 |
|
|
    /**
     * Special value for releaseStatus indicating that node is cancelled.
     * Must be a large positive number.
     */
    static private final int CANCELLED = Integer.MAX_VALUE;

    /**
     * Special value for node releaseStatus indicating that node is on a
     * condition queue. Must be large negative number.
     */
    static private final int ON_CONDITION_QUEUE = Integer.MIN_VALUE;

    /**
     * Special value for node releaseStatus indicating that node is in
     * process of transfer from a condition queue to the lock queue.
     * Must be negative and greater than ON_CONDITION_QUEUE.
     */
    static private final int TRANSFERRING = ON_CONDITION_QUEUE + 1;
273 |
|
|
|
274 |
|
|
|
275 |
|
|
    /**
     * Utility to throw an exception if lock not held by given thread.
     * @throws IllegalMonitorStateException if current is not the owner
     */
    final void checkOwner(Thread current) {
        if (current != owner)
            throw new IllegalMonitorStateException();
    }
282 |
|
|
|
283 |
|
|
    /**
     * Return whether lock wait queue is empty
     * (i.e. head and tail both still point to the dummy node).
     */
    final boolean queueEmpty() {
        ReentrantLockQueueNode h = head; // force order of the volatile reads
        return h == tail;
    }
290 |
|
|
|
291 |
|
|
    /**
     * Insert node into queue. Return predecessor.
     * Standard CLH enqueue: retry CAS on tail until we splice in.
     */
    private ReentrantLockQueueNode enq(ReentrantLockQueueNode node) {
        for (;;) {
            ReentrantLockQueueNode p = tail;
            node.prev = p; // prev must be valid before/upon CAS
            if (casTail(p, node)) {
                p.next = node; // Note: next field assignment lags CAS
                return p;
            }
        }
    }
304 |
|
|
|
305 |
|
|
    /**
     * Return true if it is OK to take fast path to lock.
     * Overridden in FairReentrantLock (to disable barging under
     * contention); this default policy always permits barging.
     */
    boolean canBarge() {
        return true;
    }
312 |
|
|
|
313 |
|
|
    /*
     * Policy/status values for main lock routine. A bit ugly but
     * worth it to factor the common lock code across the different
     * policies for suppressing or throwing InterruptedException and
     * indicating timeouts. Each value is used both as the "policy"
     * argument to doLock and as its return status.
     */

    /**
     * As param, suppress interrupts.
     * As return, lock was successfully obtained.
     */
    static final int NO_INTERRUPT = 0;

    /**
     * As param, allow interrupts.
     * As return, lock was not obtained because thread was interrupted.
     */
    static final int INTERRUPT = -1;

    /**
     * As param, use timed waits.
     * As return, lock was not obtained because timeout was exceeded.
     */
    static final int TIMEOUT = -2;
338 |
|
|
|
339 |
|
|
    /**
     * Main locking code, parameterized across different policies.
     * @param current current thread
     * @param node its wait-node, if it already exists; else null, in
     * which case one is created and enqueued here
     * @param policy NO_INTERRUPT, INTERRUPT, or TIMEOUT
     * @param nanos time to wait, or zero if untimed
     * @return NO_INTERRUPT on success, else INTERRUPT, or TIMEOUT
     */
    final int doLock(Thread current,
                     ReentrantLockQueueNode node,
                     int policy,
                     long nanos) {

        int status = NO_INTERRUPT;

        /*
         * Bypass queueing if a recursive lock.
         */
        if (owner == current) {
            ++recursions;
            return status;
        }

        long lastTime = 0; // for adjusting timeouts, below

        /*
         * p is our predecessor node, that holds releaseStatus giving
         * permission to try to obtain lock if we are first in queue.
         */
        ReentrantLockQueueNode p;

        /*
         * Create and enqueue node if not already created. Nodes
         * transferred from condition queues will already be created
         * and queued.
         */
        if (node == null) {
            node = new ReentrantLockQueueNode(current);
            p = enq(node);
        }
        else
            p = node.prev;

        /*
         * Repeatedly try to get ownership if first in queue, else
         * block.
         */
        for (;;) {
            /*
             * If we are the first thread in queue, try to get the
             * lock. (We must not try to get lock if we are not
             * first.) Note that if we become first after p == head
             * check, all is well -- we can be sure an unlocking
             * thread will signal us.
             */
            if (p == head && acquireOwner(current)) {
                /*
                 * status can be INTERRUPT here if policy is
                 * NO_INTERRUPT but thread was interrupted while
                 * waiting, in which case we must reset the thread's
                 * interrupt status before returning.
                 */
                if (status == INTERRUPT)
                    current.interrupt();

                p.next = null; // clear for GC and to suppress signals
                node.thread = null;
                node.prev = null;
                head = node;   // node becomes new dummy head
                return NO_INTERRUPT;
            }

            int releaseStatus = p.releaseStatus;

            /*
             * If our predecessor was cancelled, use its predecessor.
             * There will always be a non-cancelled one somewhere
             * because head node is never cancelled, so at worst we
             * will hit it. (Note that because head is never
             * cancelled, we can perform this check after trying to
             * acquire ownership).
             */
            if (releaseStatus == CANCELLED) {
                node.prev = p = p.prev;
            }

            /*
             * Wait if we are not first in queue, or if we are first
             * but have tried to acquire owner and failed since either
             * entry or last release. (Note that releaseStatus can
             * already be less than zero if we spuriously returned
             * from a previous park or got a new predecessor due to
             * cancellation.)
             *
             * We also don't wait if atomic decrement of releaseStatus
             * fails. We just continue the main loop on failure to
             * atomically update releaseStatus because interference
             * causing failure is almost surely due to someone
             * releasing us anyway.
             *
             * Each wait consumes all available releases. Normally
             * there is only one anyway because release doesn't bother
             * incrementing if already positive.
             */
            else if (casReleaseStatus(p, releaseStatus, (releaseStatus > 0)? 0 : -1) && releaseStatus <= 0) {

                // Update and check timeout value
                if (nanos > 0) {
                    long now = TimeUnit.nanoTime(); // pre-release JSR-166 API
                    if (lastTime != 0) {
                        nanos -= now - lastTime;
                        if (nanos <= 0)
                            status = TIMEOUT;
                    }
                    lastTime = now;
                }

                if (status != TIMEOUT)
                    JSR166Support.park(false, nanos);

                if (Thread.interrupted())
                    status = INTERRUPT;

                // On interrupt/timeout under a policy that honors it,
                // cancel this node and wake a successor so it can
                // stabilize on a new predecessor.
                if (status != NO_INTERRUPT && policy != NO_INTERRUPT) {
                    node.thread = null; // disable signals
                    releaseStatusUpdater.set(node, CANCELLED); // don't need CAS here
                    signalSuccessor(node);
                    return status;
                }
            }
        }
    }
476 |
tim |
1.1 |
|
477 |
|
|
    /**
     * Wake up node's successor, if one exists.
     * Called after cancellation and by releaseFirst.
     */
    private void signalSuccessor(ReentrantLockQueueNode node) {
        /*
         * Find successor -- normally just node.next.
         */
        ReentrantLockQueueNode s = node.next;

        /*
         * If s is cancelled, traverse through next's.
         */
        while (s != null && s.releaseStatus == CANCELLED) {
            node = s;
            s = s.next;
        }

        /*
         * If successor appears to be null, check to see if a newly
         * queued node is successor by starting at tail and working
         * backwards. If so, help out the enqueing thread by setting
         * next field. We don't expect this loop to trigger often,
         * and hardly ever to iterate.
         */
        if (s == null) {
            ReentrantLockQueueNode t = tail;
            for (;;) {
                /*
                 * If t == node, there is no successor.
                 */
                if (t == node)
                    return;

                ReentrantLockQueueNode tp = t.prev;

                /*
                 * t's predecessor is null if we are lagging so far
                 * behind the actions of other nodes/threads that an
                 * intervening head.prev was nulled by some
                 * non-cancelled successor of node. In which case,
                 * there's no live successor.
                 */
                if (tp == null)
                    return;

                /*
                 * If we find successor, we can do the assignment to
                 * next (don't even need CAS) on behalf of enqueuing
                 * thread. At worst we will stall now and lag behind
                 * both the setting and the later clearing of next
                 * field. But if so, we will reattach an internal link
                 * in soon-to-be unreachable set of nodes, so no harm
                 * done.
                 */
                if (tp == node) {
                    node.next = s = t;
                    break;
                }

                t = tp;

                /*
                 * Before iterating, check to see if link has
                 * appeared.
                 */
                ReentrantLockQueueNode n = node.next;
                if (n != null) {
                    s = n;
                    break;
                }
            }
        }

        Thread thr = s.thread;
        if (thr != null && thr != owner) // don't bother signalling if has lock
            JSR166Support.unpark(thr);
    }
558 |
|
|
|
559 |
|
|
|
560 |
|
|
    /**
     * Increment releaseStatus and signal next thread in queue if one
     * exists and is waiting. Called only by unlock. This code is split
     * out from unlock to encourage inlining of non-contended cases.
     * @param h (nonnull) current head of lock queue (reread upon CAS
     * failure).
     */
    private void releaseFirst(ReentrantLockQueueNode h) {
        for (;;) {
            int c = h.releaseStatus;
            if (c > 0)          // Don't need signal if already positive
                return;
            if (owner != null)  // Don't bother if some thread got lock
                return;
            if (casReleaseStatus(h, c, (c < 0)? 0 : 1)) { // saturate at 1
                if (c < 0)      // negative means a thread is parked/parking
                    signalSuccessor(h);
                return;
            }
            // CAS failed: reread head and retry unless queue drained.
            h = head;
            if (h == tail)      // No successor
                return;
        }
    }
584 |
tim |
1.1 |
|
585 |
|
|
    /**
     * Attempts to release this lock. <p>If the current thread is the
     * holder of this lock then the hold count is decremented. If the
     * hold count is now zero then the lock is released. <p>If the
     * current thread is not the holder of this lock then {@link
     * IllegalMonitorStateException} is thrown.
     * @throws IllegalMonitorStateException if the current thread does not
     * hold this lock.
     */
    public void unlock() {
        checkOwner(Thread.currentThread());

        // Recursive hold: just decrement; lock stays held.
        if (recursions > 0) {
            --recursions;
            return;
        }

        // Full release: volatile clear of owner, then signal the
        // first queued waiter if the queue is non-empty.
        ownerUpdater.set(this, null);
        ReentrantLockQueueNode h = head;
        if (h != tail)
            releaseFirst(h);
    }
607 |
|
|
|
608 |
|
|
/** |
609 |
|
|
* Acquire the lock. |
610 |
tim |
1.1 |
* <p>Acquires the lock if it is not held be another thread and returns |
611 |
|
|
* immediately, setting the lock hold count to one. |
612 |
|
|
* <p>If the current thread |
613 |
|
|
* already holds the lock then the hold count is incremented by one and |
614 |
|
|
* the method returns immediately. |
615 |
|
|
* <p>If the lock is held by another thread then the |
616 |
|
|
* the current thread thread becomes disabled for thread scheduling |
617 |
|
|
* purposes and lies dormant until the lock has been acquired |
618 |
|
|
* at which time the lock hold count is set to one. |
619 |
|
|
*/ |
620 |
dl |
1.2 |
public void lock() { |
621 |
|
|
Thread current = Thread.currentThread(); |
622 |
|
|
if (canBarge() && acquireOwner(current)) |
623 |
|
|
return; |
624 |
|
|
int stat = doLock(current, null, NO_INTERRUPT, 0); |
625 |
|
|
// assert stat == NO_INTERRUPT; |
626 |
|
|
} |
627 |
tim |
1.1 |
|
628 |
|
|
    /**
     * Acquires the lock unless the current thread is
     * {@link Thread#interrupt interrupted}.
     * <p>Acquires the lock if it is not held by another thread and returns
     * immediately, setting the lock hold count to one.
     * <p>If the current thread already holds this lock then the hold count
     * is incremented by one and the method returns immediately.
     * <p>If the lock is held by another thread then the
     * current thread becomes disabled for thread scheduling
     * purposes and lies dormant until one of two things happens:
     * <ul>
     * <li> The lock is acquired by the current thread; or
     * <li> Some other thread {@link Thread#interrupt interrupts} the current
     * thread.
     * </ul>
     * <p>If the lock is acquired by the current thread then the lock hold
     * count is set to one.
     * <p>If the current thread:
     * <ul>
     * <li>has its interrupted status set on entry to this method; or
     * <li>is {@link Thread#interrupt interrupted} while waiting to acquire
     * the lock,
     * </ul>
     * then {@link InterruptedException} is thrown and the current thread's
     * interrupted status is cleared. As this method is an explicit
     * interruption point, preference is given to responding to the interrupt
     * over reentrant acquisition of the lock.
     *
     * @throws InterruptedException if the current thread is interrupted
     */
    public void lockInterruptibly() throws InterruptedException {
        Thread current = Thread.currentThread();
        // Check pending interrupt first: interrupt response takes
        // preference over acquisition (clears interrupt status).
        if (Thread.interrupted())
            throw new InterruptedException();
        if (canBarge() && acquireOwner(current))
            return;
        if (doLock(current, null, INTERRUPT, 0) == INTERRUPT)
            throw new InterruptedException();
    }
670 |
tim |
1.1 |
|
671 |
|
|
/** |
672 |
|
|
* Acquires the lock only if it is not held by another thread at the time |
673 |
|
|
* of invocation. |
674 |
|
|
* <p>Acquires the lock if it is not held by another thread and returns |
675 |
|
|
* immediately with the value <tt>true</tt>, setting the lock hold count |
676 |
|
|
* to one. |
677 |
|
|
* <p> If the current thread |
678 |
|
|
* already holds this lock then the hold count is incremented by one and |
679 |
|
|
* the method returns <tt>true</code>. |
680 |
|
|
* <p>If the lock is held by another thread then this method will return |
681 |
|
|
* immediately with the value <tt>false</tt>. |
682 |
|
|
* |
683 |
|
|
* @return <tt>true</tt>if the lock was free and was acquired by the |
684 |
|
|
* current thread, or the lock was already held by the current thread; and |
685 |
|
|
* <tt>false</tt> otherwise. |
686 |
|
|
*/ |
687 |
|
|
public boolean tryLock() { |
688 |
dl |
1.2 |
Thread current = Thread.currentThread(); |
689 |
|
|
if (acquireOwner(current)) |
690 |
|
|
return true; |
691 |
|
|
if (owner == current) { |
692 |
|
|
++recursions; |
693 |
|
|
return true; |
694 |
|
|
} |
695 |
tim |
1.1 |
return false; |
696 |
|
|
} |
697 |
|
|
|
698 |
|
|
    /**
     * Acquires the lock if it is not held by another thread within the given
     * waiting time and the current thread has not been interrupted.
     * <p>Acquires the lock if it is not held by another thread and returns
     * immediately with the value <tt>true</tt>, setting the lock hold count
     * to one.
     * <p> If the current thread
     * already holds this lock then the hold count is incremented by one and
     * the method returns <tt>true</tt>.
     * <p>If the lock is held by another thread then the
     * current thread becomes disabled for thread scheduling
     * purposes and lies dormant until one of three things happens:
     * <ul>
     * <li> The lock is acquired by the current thread; or
     * <li> Some other thread {@link Thread#interrupt interrupts} the current
     * thread; or
     * <li> The specified waiting time elapses
     * </ul>
     * <p>If the lock is acquired then the value <tt>true</tt> is returned and
     * the lock hold count is set to one.
     * <p>If the current thread:
     * <ul>
     * <li>has its interrupted status set on entry to this method; or
     * <li>is {@link Thread#interrupt interrupted} before acquiring
     * the lock,
     * </ul>
     * then {@link InterruptedException} is thrown and the current thread's
     * interrupted status is cleared. As this method is an explicit
     * interruption point, preference is given to responding to the interrupt
     * over reentrant acquisition of the lock.
     * <p>If the specified waiting time elapses then the value <tt>false</tt>
     * is returned.
     * <p>The given waiting time is a best-effort lower bound. If the time is
     * less than or equal to zero, the method will not wait at all.
     *
     * @return <tt>true</tt> if the lock was free and was acquired by the
     * current thread, or the lock was already held by the current thread; and
     * <tt>false</tt> if the waiting time elapsed before the lock could be
     * acquired.
     *
     * @throws InterruptedException if the current thread is interrupted
     * @throws NullPointerException if unit is null
     */
    public boolean tryLock(long timeout, TimeUnit unit) throws InterruptedException {
        if (unit == null)
            throw new NullPointerException();
        // Interrupt response takes preference over acquisition.
        if (Thread.interrupted())
            throw new InterruptedException();
        Thread current = Thread.currentThread();
        // Fast path: barge with a single CAS when permitted.
        if (canBarge() && acquireOwner(current))
            return true;
        // Reentrant acquire never waits.
        if (owner == current) {
            ++recursions;
            return true;
        }
        // Non-positive timeout: do not wait at all.
        if (timeout <= 0)
            return false;
        int stat = doLock(current, null, TIMEOUT, unit.toNanos(timeout));
        if (stat == NO_INTERRUPT)
            return true;
        if (stat == INTERRUPT)
            throw new InterruptedException();
        assert stat == TIMEOUT;
        return false;
    }
766 |
|
|
|
767 |
|
|
|
768 |
|
|
/** |
769 |
|
|
* Queries the number of holds on this lock by the current thread. |
770 |
|
|
* <p>A thread has a hold on a lock for each lock action that is not |
771 |
|
|
* matched by an unlock action. |
772 |
|
|
* <p>The hold count information is typically only used for testing and |
773 |
|
|
* debugging purposes. For example, if a certain section of code should |
774 |
|
|
* not be entered with the lock already held then we can assert that |
775 |
|
|
* fact: |
776 |
|
|
* <pre> |
777 |
|
|
* class X { |
778 |
|
|
* ReentrantLock lock = new ReentrantLock(); |
779 |
|
|
* // ... |
780 |
|
|
* |
781 |
|
|
* public void m() { |
782 |
|
|
* assert lock.getHoldCount() == 0; |
783 |
|
|
* lock.lock(); |
784 |
|
|
* try { |
785 |
|
|
* // ... method body |
786 |
|
|
* } finally { |
787 |
|
|
* lock.unlock() |
788 |
|
|
* } |
789 |
|
|
* } |
790 |
|
|
* } |
791 |
|
|
* </pre> |
792 |
|
|
* |
793 |
|
|
* @return the number of holds on this lock by current thread, |
794 |
|
|
* or zero if this lock is not held by current thread. |
795 |
|
|
**/ |
796 |
|
|
public int getHoldCount() { |
797 |
dl |
1.2 |
return (owner == Thread.currentThread()) ? recursions + 1 : 0; |
798 |
tim |
1.1 |
} |
799 |
|
|
|
800 |
|
|
/** |
801 |
|
|
* Queries if this lock is held by the current thread. |
802 |
|
|
* <p>Analogous to the {@link Thread#holdsLock} method for built-in |
803 |
|
|
* monitor locks, this method is typically used for debugging and |
804 |
|
|
* testing. For example, a method that should only be called while |
805 |
|
|
* a lock is held can assert that this is the case: |
806 |
|
|
* <pre> |
807 |
|
|
* class X { |
808 |
|
|
* ReentrantLock lock = new ReentrantLock(); |
809 |
|
|
* // ... |
810 |
|
|
* |
811 |
|
|
* public void m() { |
812 |
|
|
* assert lock.isHeldByCurrentThread(); |
813 |
|
|
* // ... method body |
814 |
|
|
* } |
815 |
|
|
* } |
816 |
|
|
* </pre> |
817 |
|
|
* |
818 |
|
|
* @return <tt>true</tt> if current thread holds this lock and |
819 |
|
|
* <tt>false</tt> otherwise. |
820 |
|
|
**/ |
821 |
|
|
public boolean isHeldByCurrentThread() { |
822 |
dl |
1.2 |
return (owner == Thread.currentThread()); |
823 |
|
|
} |
824 |
|
|
|
825 |
|
|
|
826 |
|
|
/** |
827 |
|
|
* Queries if this lock is held by any thread. THis method is |
828 |
|
|
* designed for use in monitoring, not for synchronization control. |
829 |
|
|
* @return <tt>true</tt> if any thread holds this lock and |
830 |
|
|
* <tt>false</tt> otherwise. |
831 |
|
|
**/ |
832 |
|
|
public boolean isLocked() { |
833 |
|
|
return owner != null; |
834 |
tim |
1.1 |
} |
835 |
|
|
|
836 |
dl |
1.3 |
/** |
837 |
|
|
* Reconstitute by resetting head and tail to point back to the lock. |
838 |
|
|
*/ |
839 |
|
|
private void readObject(java.io.ObjectInputStream s) |
840 |
|
|
throws java.io.IOException, ClassNotFoundException { |
841 |
|
|
s.defaultReadObject(); |
842 |
|
|
head = tail = this; |
843 |
|
|
} |
844 |
dl |
1.2 |
|
845 |
tim |
1.1 |
/** |
846 |
|
|
* Returns a {@link Condition} instance for use with this |
847 |
|
|
* {@link Lock} instance. |
848 |
|
|
* |
849 |
|
|
* <p>The returned {@link Condition} instance has the same behavior and |
850 |
|
|
* usage |
851 |
|
|
* restrictions with this lock as the {@link Object} monitor methods |
852 |
|
|
* ({@link Object#wait() wait}, {@link Object#notify notify}, and |
853 |
|
|
* {@link Object#notifyAll notifyAll}) have with the built-in monitor |
854 |
|
|
* lock: |
855 |
|
|
* <ul> |
856 |
|
|
* <li>If this lock is not held when any of the {@link Condition} |
857 |
|
|
* {@link Condition#await() waiting} or {@link Condition#signal signalling} |
858 |
|
|
* methods are called, then an {@link IllegalMonitorStateException} is |
859 |
|
|
* thrown. |
860 |
|
|
* <li>When the condition {@link Condition#await() waiting} methods are |
861 |
|
|
* called the lock is released and before they return the lock is |
862 |
|
|
* reacquired and the lock hold count restored to what it was when the |
863 |
|
|
* method was called. |
864 |
|
|
* <li>If a thread is {@link Thread#interrupt interrupted} while waiting |
865 |
|
|
* then the wait will terminate, an {@link InterruptedException} will be |
866 |
|
|
* thrown, and the thread's interrupted status will be cleared. |
867 |
|
|
* <li>The order in which waiting threads are signalled is not specified. |
868 |
|
|
* <li>The order in which threads returning from a wait, and threads trying |
869 |
|
|
* to acquire the lock, are granted the lock, is not specified. |
870 |
|
|
* </ul> |
871 |
|
|
*/ |
872 |
|
|
public Condition newCondition() { |
873 |
dl |
1.2 |
return new ReentrantLockConditionObject(); |
874 |
|
|
} |
875 |
|
|
|
876 |
|
|
// Helper methods for Conditions |
877 |
|
|
|
878 |
|
|
/** |
879 |
|
|
* Return true if a node, always one that was initially placed on |
880 |
|
|
* a condition queue, is off the condition queue (and thus, |
881 |
|
|
* normally is now on lock queue.) |
882 |
|
|
*/ |
883 |
dl |
1.3 |
boolean isOffConditionQueue(ReentrantLockQueueNode w) { |
884 |
|
|
return w.releaseStatus > TRANSFERRING; |
885 |
dl |
1.2 |
} |
886 |
|
|
|
887 |
|
|
/** |
888 |
|
|
* Transfer a node from condition queue onto lock queue. |
889 |
|
|
* Return true if successful (i.e., node not cancelled) |
890 |
|
|
*/ |
891 |
dl |
1.3 |
final boolean transferToLockQueue(ReentrantLockQueueNode node) { |
892 |
dl |
1.2 |
/* |
893 |
|
|
* Atomically change status to TRANSFERRING to avoid races |
894 |
|
|
* with cancelling waiters. We use a special value that causes |
895 |
|
|
* any waiters spuriously waking up to re-park until the node |
896 |
|
|
* has been placed on lock queue. |
897 |
|
|
*/ |
898 |
dl |
1.3 |
if (!casReleaseStatus(node, ON_CONDITION_QUEUE, TRANSFERRING)) |
899 |
dl |
1.2 |
return false; |
900 |
|
|
|
901 |
|
|
/* |
902 |
|
|
* Splice onto queue |
903 |
|
|
*/ |
904 |
dl |
1.3 |
ReentrantLockQueueNode p = enq(node); |
905 |
dl |
1.2 |
|
906 |
|
|
/* |
907 |
dl |
1.3 |
* Establish normal lock-queue releaseStatus for node. The |
908 |
dl |
1.2 |
* CAS can fail if node already was involved in a cancellation |
909 |
|
|
* on lock-queue, in which case we signal to be sure. |
910 |
|
|
*/ |
911 |
dl |
1.3 |
if (!casReleaseStatus(node, TRANSFERRING, 0)) |
912 |
dl |
1.2 |
signalSuccessor(node); |
913 |
|
|
|
914 |
|
|
/* |
915 |
dl |
1.3 |
* Ensure releaseStatus of predecessor is negative to indicate |
916 |
|
|
* that thread is (probably) waiting. If attempt to set releaseStatus |
917 |
dl |
1.2 |
* fails or is pred is/becomes cancelled, wake up successor |
918 |
|
|
* (which will ordinarily be "node") to resynch. |
919 |
|
|
*/ |
920 |
|
|
|
921 |
|
|
for (;;) { |
922 |
dl |
1.3 |
int c = p.releaseStatus; |
923 |
|
|
if (c < 0 || (c != CANCELLED && casReleaseStatus(p, c, -1))) |
924 |
dl |
1.2 |
break; |
925 |
|
|
signalSuccessor(p); |
926 |
|
|
if (c == CANCELLED) |
927 |
|
|
break; |
928 |
|
|
} |
929 |
|
|
|
930 |
|
|
return true; |
931 |
tim |
1.1 |
} |
932 |
|
|
|
933 |
dl |
1.2 |
/** |
934 |
|
|
* Hook method used by ReentrantReadWriteLock. Called |
935 |
|
|
* before unlocking lock to enter wait. |
936 |
|
|
*/ |
937 |
|
|
void beforeWait() { } |
938 |
|
|
|
939 |
|
|
|
940 |
|
|
/** |
941 |
|
|
* Hook method used by ReentrantReadWriteLock. Called |
942 |
|
|
* after locking lock after exiting wait. |
943 |
|
|
*/ |
944 |
|
|
void afterWait() { } |
945 |
|
|
|
946 |
|
|
private class ReentrantLockConditionObject implements Condition, java.io.Serializable { |
947 |
|
|
/* |
948 |
|
|
* Because condition queues are accessed only when locks are |
949 |
|
|
* already held, we just need a simple linked queue to hold |
950 |
|
|
* nodes while they are waiting on conditions. They are then |
951 |
|
|
* transferred to the lock queue to re-acquire locks. |
952 |
|
|
*/ |
953 |
|
|
|
954 |
|
|
/** |
955 |
|
|
* First node of condition queue. |
956 |
|
|
*/ |
957 |
dl |
1.3 |
private transient ReentrantLockQueueNode firstWaiter; |
958 |
dl |
1.2 |
|
959 |
|
|
/** |
960 |
|
|
* Last node of condition queue. |
961 |
|
|
*/ |
962 |
dl |
1.3 |
private transient ReentrantLockQueueNode lastWaiter; |
963 |
dl |
1.2 |
|
964 |
|
|
/** |
965 |
|
|
* Basic linked queue insertion. |
966 |
|
|
*/ |
967 |
dl |
1.3 |
private ReentrantLockQueueNode addWaiter(Thread current) { |
968 |
|
|
ReentrantLockQueueNode w = new ReentrantLockQueueNode(current); |
969 |
|
|
w.releaseStatus = ON_CONDITION_QUEUE; |
970 |
dl |
1.2 |
if (lastWaiter == null) |
971 |
|
|
firstWaiter = lastWaiter = w; |
972 |
|
|
else { |
973 |
dl |
1.3 |
ReentrantLockQueueNode t = lastWaiter; |
974 |
dl |
1.2 |
lastWaiter = w; |
975 |
|
|
t.next = w; |
976 |
|
|
} |
977 |
|
|
return w; |
978 |
|
|
} |
979 |
|
|
|
980 |
|
|
/** |
981 |
|
|
* Main code for signal. Dequeue and transfer nodes until hit |
982 |
|
|
* non-cancelled one or null. Split out from signal to |
983 |
|
|
* encourage compilers to inline the case of no waiters. |
984 |
|
|
*/ |
985 |
dl |
1.3 |
private void doSignal(ReentrantLockQueueNode first) { |
986 |
dl |
1.2 |
do { |
987 |
|
|
if ( (firstWaiter = first.next) == null) |
988 |
|
|
lastWaiter = null; |
989 |
|
|
first.next = null; |
990 |
|
|
if (transferToLockQueue(first)) |
991 |
|
|
return; |
992 |
|
|
first = firstWaiter; |
993 |
|
|
} while (first != null); |
994 |
|
|
} |
995 |
|
|
|
996 |
|
|
public void signal() { |
997 |
|
|
checkOwner(Thread.currentThread()); |
998 |
dl |
1.3 |
ReentrantLockQueueNode w = firstWaiter; |
999 |
dl |
1.2 |
if (w != null) |
1000 |
|
|
doSignal(w); |
1001 |
|
|
} |
1002 |
|
|
|
1003 |
|
|
public void signalAll() { |
1004 |
|
|
checkOwner(Thread.currentThread()); |
1005 |
|
|
// Pull off list all at once and traverse. |
1006 |
dl |
1.3 |
ReentrantLockQueueNode w = firstWaiter; |
1007 |
dl |
1.2 |
if (w != null) { |
1008 |
|
|
lastWaiter = firstWaiter = null; |
1009 |
|
|
do { |
1010 |
dl |
1.3 |
ReentrantLockQueueNode n = w.next; |
1011 |
dl |
1.2 |
w.next = null; |
1012 |
|
|
transferToLockQueue(w); |
1013 |
|
|
w = n; |
1014 |
|
|
} while (w != null); |
1015 |
|
|
} |
1016 |
|
|
} |
1017 |
|
|
|
1018 |
|
|
/* |
1019 |
|
|
* Various flavors of wait. Each almost the same, but |
1020 |
|
|
* annoyingly different and no nice way to factor common code. |
1021 |
|
|
*/ |
1022 |
|
|
|
1023 |
|
|
public void await() throws InterruptedException { |
1024 |
|
|
Thread current = Thread.currentThread(); |
1025 |
|
|
checkOwner(current); |
1026 |
|
|
|
1027 |
dl |
1.3 |
ReentrantLockQueueNode w = addWaiter(current); |
1028 |
dl |
1.2 |
beforeWait(); |
1029 |
|
|
int recs = recursions; |
1030 |
|
|
unlock(); |
1031 |
|
|
|
1032 |
|
|
boolean wasInterrupted = false; |
1033 |
|
|
|
1034 |
|
|
while (!isOffConditionQueue(w)) { |
1035 |
|
|
JSR166Support.park(false, 0); |
1036 |
|
|
if (Thread.interrupted()) { |
1037 |
|
|
wasInterrupted = true; |
1038 |
dl |
1.3 |
if (casReleaseStatus(w, ON_CONDITION_QUEUE, CANCELLED)) { |
1039 |
dl |
1.2 |
w.thread = null; |
1040 |
|
|
w = null; |
1041 |
|
|
} |
1042 |
|
|
break; |
1043 |
|
|
} |
1044 |
|
|
} |
1045 |
|
|
|
1046 |
|
|
/* |
1047 |
|
|
* If we exited above loop due to cancellation, then w is |
1048 |
|
|
* null, and doLock will make a new lock node for |
1049 |
|
|
* us. Otherwise, upon exit, our node is already in the |
1050 |
|
|
* lock queue when doLock is called. |
1051 |
|
|
*/ |
1052 |
|
|
doLock(current, w, NO_INTERRUPT, 0); |
1053 |
|
|
|
1054 |
|
|
recursions = recs; |
1055 |
|
|
afterWait(); |
1056 |
|
|
|
1057 |
|
|
if (wasInterrupted) |
1058 |
|
|
throw new InterruptedException(); |
1059 |
|
|
} |
1060 |
|
|
|
1061 |
|
|
public void awaitUninterruptibly() { |
1062 |
|
|
Thread current = Thread.currentThread(); |
1063 |
|
|
checkOwner(current); |
1064 |
|
|
|
1065 |
dl |
1.3 |
ReentrantLockQueueNode w = addWaiter(current); |
1066 |
dl |
1.2 |
beforeWait(); |
1067 |
|
|
int recs = recursions; |
1068 |
|
|
unlock(); |
1069 |
|
|
|
1070 |
|
|
boolean wasInterrupted = false; |
1071 |
|
|
while (!isOffConditionQueue(w)) { |
1072 |
|
|
JSR166Support.park(false, 0); |
1073 |
|
|
if (Thread.interrupted()) |
1074 |
|
|
wasInterrupted = true; |
1075 |
|
|
} |
1076 |
|
|
|
1077 |
|
|
doLock(current, w, NO_INTERRUPT, 0); |
1078 |
|
|
recursions = recs; |
1079 |
|
|
afterWait(); |
1080 |
|
|
// avoid re-interrupts on exit |
1081 |
|
|
if (wasInterrupted && !current.isInterrupted()) |
1082 |
|
|
current.interrupt(); |
1083 |
|
|
} |
1084 |
|
|
|
1085 |
|
|
|
1086 |
|
|
public long awaitNanos(long nanos) throws InterruptedException { |
1087 |
|
|
Thread current = Thread.currentThread(); |
1088 |
|
|
checkOwner(current); |
1089 |
|
|
|
1090 |
dl |
1.3 |
ReentrantLockQueueNode w = addWaiter(current); |
1091 |
dl |
1.2 |
beforeWait(); |
1092 |
|
|
int recs = recursions; |
1093 |
|
|
unlock(); |
1094 |
|
|
|
1095 |
|
|
if (nanos <= 0) nanos = 1; // park arg must be positive |
1096 |
|
|
long timeLeft = nanos; |
1097 |
|
|
long startTime = TimeUnit.nanoTime(); |
1098 |
|
|
boolean wasInterrupted = false; |
1099 |
|
|
boolean cancelled = false; |
1100 |
|
|
|
1101 |
|
|
if (!isOffConditionQueue(w)) { |
1102 |
|
|
for (;;) { |
1103 |
|
|
JSR166Support.park(false, timeLeft); |
1104 |
|
|
if (Thread.interrupted()) |
1105 |
|
|
wasInterrupted = true; |
1106 |
|
|
else if (isOffConditionQueue(w)) |
1107 |
|
|
break; |
1108 |
|
|
else |
1109 |
|
|
timeLeft = nanos - (TimeUnit.nanoTime() - startTime); |
1110 |
|
|
|
1111 |
|
|
if (wasInterrupted || timeLeft <= 0) { |
1112 |
dl |
1.3 |
if (casReleaseStatus(w, ON_CONDITION_QUEUE, CANCELLED)) { |
1113 |
dl |
1.2 |
w.thread = null; |
1114 |
|
|
w = null; |
1115 |
|
|
} |
1116 |
|
|
break; |
1117 |
|
|
} |
1118 |
|
|
} |
1119 |
|
|
} |
1120 |
|
|
|
1121 |
|
|
doLock(current, w, NO_INTERRUPT, 0); |
1122 |
|
|
recursions = recs; |
1123 |
|
|
afterWait(); |
1124 |
|
|
|
1125 |
|
|
if (wasInterrupted) |
1126 |
|
|
throw new InterruptedException(); |
1127 |
|
|
else if (timeLeft <= 0) |
1128 |
|
|
return timeLeft; |
1129 |
|
|
else |
1130 |
|
|
return nanos - (TimeUnit.nanoTime() - startTime); |
1131 |
|
|
} |
1132 |
|
|
|
1133 |
|
|
public boolean awaitUntil(Date deadline) throws InterruptedException { |
1134 |
|
|
Thread current = Thread.currentThread(); |
1135 |
|
|
checkOwner(current); |
1136 |
|
|
|
1137 |
dl |
1.3 |
ReentrantLockQueueNode w = addWaiter(current); |
1138 |
dl |
1.2 |
beforeWait(); |
1139 |
|
|
int recs = recursions; |
1140 |
|
|
unlock(); |
1141 |
|
|
|
1142 |
|
|
boolean wasInterrupted = false; |
1143 |
|
|
boolean cancelled = false; |
1144 |
|
|
long abstime = deadline.getTime(); |
1145 |
|
|
|
1146 |
|
|
if (!isOffConditionQueue(w)) { |
1147 |
|
|
for (;;) { |
1148 |
|
|
JSR166Support.park(true, abstime); |
1149 |
|
|
|
1150 |
|
|
boolean timedOut = false; |
1151 |
|
|
if (Thread.interrupted()) |
1152 |
|
|
wasInterrupted = true; |
1153 |
|
|
else if (isOffConditionQueue(w)) |
1154 |
|
|
break; |
1155 |
|
|
else if (System.currentTimeMillis() <= abstime) |
1156 |
|
|
timedOut = true; |
1157 |
|
|
|
1158 |
|
|
if (wasInterrupted || timedOut) { |
1159 |
dl |
1.3 |
if (casReleaseStatus(w, ON_CONDITION_QUEUE, CANCELLED)) { |
1160 |
dl |
1.2 |
w.thread = null; |
1161 |
|
|
w = null; |
1162 |
|
|
} |
1163 |
|
|
break; |
1164 |
|
|
} |
1165 |
|
|
} |
1166 |
|
|
} |
1167 |
tim |
1.1 |
|
1168 |
dl |
1.2 |
doLock(current, w, NO_INTERRUPT, 0); |
1169 |
|
|
recursions = recs; |
1170 |
|
|
afterWait(); |
1171 |
tim |
1.1 |
|
1172 |
dl |
1.2 |
if (wasInterrupted) |
1173 |
|
|
throw new InterruptedException(); |
1174 |
|
|
return !cancelled; |
1175 |
|
|
} |
1176 |
tim |
1.1 |
|
1177 |
dl |
1.2 |
public boolean await(long time, TimeUnit unit) throws InterruptedException { |
1178 |
|
|
return awaitNanos(unit.toNanos(time)) > 0; |
1179 |
|
|
} |
1180 |
tim |
1.1 |
|
1181 |
dl |
1.2 |
} |
1182 |
|
|
|
1183 |
dl |
1.3 |
} |
1184 |
|
|
|
1185 |
|
|
/**
 * Node class for threads waiting for locks. This cannot be nested
 * inside ReentrantLock because of Java inheritance circularity rules.
 */
class ReentrantLockQueueNode {

    /**
     * Controls whether the successor node is allowed to try to obtain
     * ownership. Acts as a saturating (in both directions) counting
     * semaphore: upon each wait, the releaseStatus is reduced to zero
     * if positive, else reduced to negative, in which case the thread
     * will park. The releaseStatus is incremented on each unlock that
     * would enable the successor thread to obtain the lock (succeeding
     * if there is no contention). The special value CANCELLED means
     * that the releaseStatus can be neither incremented nor
     * decremented. The special value ON_CONDITION_QUEUE is used while
     * a node is on a condition queue instead of the lock queue, and
     * the special value TRANSFERRING is used while a signal is in
     * progress.
     */
    transient volatile int releaseStatus;

    /**
     * Link to the predecessor node that the current node/thread relies
     * on for checking releaseStatus. Assigned once during enqueuing,
     * and nulled out (for the sake of GC) only upon dequeuing. Upon
     * cancellation we do NOT adjust this field, but simply traverse
     * through prev links until we hit a non-cancelled node. A valid
     * predecessor will always exist because the head node is never
     * cancelled.
     */
    transient volatile ReentrantLockQueueNode prev;

    /**
     * Link to the successor node that the current node/thread unparks
     * upon lock release. Assigned once during enqueuing. Upon
     * cancellation we do NOT adjust this field, but simply traverse
     * through next links until we hit a non-cancelled node (or null if
     * at the end of the queue). The enqueue operation does not assign
     * the next field of a predecessor until after attachment, so
     * seeing a null next field does not necessarily mean the node is
     * at the end of the queue. However, if a next field appears to be
     * null, we can scan prev links from the tail to double-check.
     */
    transient volatile ReentrantLockQueueNode next;

    /**
     * The thread that enqueued this node. Initialized on construction
     * and nulled out after use. Note that this need not be declared
     * volatile since it is always accessed after traversing volatile
     * links, and written before writing links.
     */
    transient Thread thread;

    ReentrantLockQueueNode() { }

    ReentrantLockQueueNode(Thread t) { thread = t; }
}