/*
 * Written by Doug Lea with assistance from members of JCP JSR-166
 * Expert Group and released to the public domain, as explained at
 * http://creativecommons.org/licenses/publicdomain
 */
// Adapted from code that was in turn
// Derived from SocketPerformanceTest.java - BugID: 4763450
//
//

import java.io.*;
import java.net.*;
import java.util.concurrent.*;
import java.util.concurrent.locks.*;

public class RLIBar { |
17 |
|
18 |
static int batchLimit ; |
19 |
static int mseq ; |
20 |
static int nReady ; |
21 |
static int ExThreads ; |
22 |
static int ASum ; |
23 |
static final ReentrantLock Gate = new ReentrantLock () ; |
24 |
static final Condition GateCond = Gate.newCondition () ; |
25 |
|
26 |
static final ReentrantLock HoldQ = new ReentrantLock () ; |
27 |
static final Condition HoldQCond = HoldQ.newCondition() ; |
28 |
static boolean Hold = false ; |
29 |
static int HoldPop ; |
30 |
static int HoldLimit ; |
31 |
|
32 |
static private boolean HoldCheck () { |
33 |
try { |
34 |
HoldQ.lock(); |
35 |
try { |
36 |
if (!Hold) return false; |
37 |
else { |
38 |
++HoldPop ; |
39 |
if (HoldPop >= HoldLimit) { |
40 |
System.out.print ("Holding ") ; |
41 |
Thread.sleep (1000) ; |
42 |
System.out.println () ; |
43 |
Hold = false ; |
44 |
HoldQCond.signalAll () ; |
45 |
} |
46 |
else |
47 |
while (Hold) |
48 |
HoldQCond.await() ; |
49 |
|
50 |
if (--HoldPop == 0) HoldQCond.signalAll () ; |
51 |
return true; |
52 |
} |
53 |
} |
54 |
finally { |
55 |
HoldQ.unlock(); |
56 |
} |
57 |
} catch (Exception Ex) { |
58 |
System.out.println ("Unexpected exception in Hold: " + Ex) ; |
59 |
return false; |
60 |
} |
61 |
} |
62 |
|
63 |
private static class Server { |
64 |
private int nClients; |
65 |
final ReentrantLock thisLock = new ReentrantLock(); |
66 |
final Condition thisCond = thisLock.newCondition(); |
67 |
|
68 |
Server (int nClients) { |
69 |
this.nClients = nClients; |
70 |
try { |
71 |
for (int i = 0; i < nClients; ++i) { |
72 |
final int fix = i ; |
73 |
new Thread() { public void run () { runServer(fix); }}.start(); |
74 |
} |
75 |
} catch (Exception e) { |
76 |
System.err.println(e) ; |
77 |
} |
78 |
} |
79 |
|
80 |
// the total number of messages received by all server threads |
81 |
// on this server |
82 |
int msgsReceived = 0; |
83 |
|
84 |
// incremented each time we get a complete batch of requests |
85 |
private int currentBatch = 0; |
86 |
|
87 |
// the number of requests received since the last time currentBatch |
88 |
// was incremented |
89 |
private int currentBatchSize = 0; |
90 |
|
91 |
private void runServer (int id) { |
92 |
int msg ; |
93 |
boolean held = false; |
94 |
final ReentrantLock thisLock = this.thisLock; |
95 |
final Condition thisCond = this.thisCond; |
96 |
|
97 |
try { |
98 |
|
99 |
// Startup barrier - rendezvous - wait for all threads. |
100 |
// Forces all threads to park on their LWPs, ensuring |
101 |
// proper provisioning on T1. |
102 |
// Alternately, use THR_BOUND threads |
103 |
Gate.lock(); try { |
104 |
++nReady ; |
105 |
if (nReady == ExThreads ) { |
106 |
GateCond.signalAll () ; |
107 |
} |
108 |
while (nReady != ExThreads ) |
109 |
GateCond.await() ; |
110 |
} finally { Gate.unlock(); } |
111 |
|
112 |
for (;;) { |
113 |
// if (!held && currentBatchSize == 0) held = HoldCheck () ; |
114 |
msg = (++ mseq) ^ id ; |
115 |
thisLock.lock(); |
116 |
try { |
117 |
ASum += msg ; |
118 |
++msgsReceived; |
119 |
int myBatch = currentBatch; |
120 |
if (++currentBatchSize >= batchLimit) { |
121 |
// this batch is full, start a new one ... |
122 |
++currentBatch; |
123 |
currentBatchSize = 0; |
124 |
// and wake up everyone in this one |
125 |
thisCond.signalAll () ; |
126 |
} |
127 |
// Wait until our batch is complete |
128 |
while (myBatch == currentBatch) |
129 |
thisCond.await(); |
130 |
} |
131 |
finally { |
132 |
thisLock.unlock(); |
133 |
} |
134 |
} |
135 |
} catch (Exception e) { |
136 |
System.err.println("Server thread: exception " + e) ; |
137 |
e.printStackTrace(); |
138 |
} |
139 |
} |
140 |
|
141 |
|
142 |
} |
143 |
|
144 |
public static void main (String[] args) throws Exception { |
145 |
int nServers = 10 ; |
146 |
int nClients = 10 ; |
147 |
int samplePeriod = 10000; |
148 |
int nSamples = 5; |
149 |
|
150 |
int nextArg = 0; |
151 |
while (nextArg < args.length) { |
152 |
String arg = args[nextArg++]; |
153 |
if (arg.equals("-nc")) |
154 |
nClients = Integer.parseInt(args[nextArg++]); |
155 |
else if (arg.equals("-ns")) |
156 |
nServers = Integer.parseInt(args[nextArg++]); |
157 |
else if (arg.equals("-batch")) |
158 |
batchLimit = Integer.parseInt(args[nextArg++]); |
159 |
else if (arg.equals("-sample")) |
160 |
samplePeriod = Integer.parseInt(args[nextArg++]); |
161 |
else if (arg.equals("-np")) |
162 |
nSamples = Integer.parseInt(args[nextArg++]); |
163 |
else { |
164 |
System.err.println ("Argument error:" + arg) ; |
165 |
System.exit (1) ; |
166 |
} |
167 |
} |
168 |
if (nClients <= 0 || nServers <= 0 || samplePeriod <= 0 || batchLimit > nClients) { |
169 |
System.err.println ("Argument error") ; |
170 |
System.exit (1) ; |
171 |
} |
172 |
|
173 |
// default batch size is 2/3 the number of clients |
174 |
// (for no particular reason) |
175 |
if (false && batchLimit <= 0) |
176 |
batchLimit = (2 * nClients + 1) / 3; |
177 |
|
178 |
ExThreads = nServers * nClients ; // expected # of threads |
179 |
HoldLimit = ExThreads ; |
180 |
|
181 |
// start up all threads |
182 |
Server[] servers = new Server[nServers]; |
183 |
for (int i = 0; i < nServers; ++i) { |
184 |
servers[i] = new Server(nClients); |
185 |
} |
186 |
|
187 |
// Wait for consensus |
188 |
try { |
189 |
Gate.lock(); try { |
190 |
while (nReady != ExThreads ) GateCond.await() ; |
191 |
} finally { Gate.unlock(); } |
192 |
} catch (Exception ex) { |
193 |
System.out.println (ex); |
194 |
} |
195 |
System.out.println ( |
196 |
nReady + " Ready: nc=" + nClients + " ns=" + nServers + " batch=" + batchLimit) ; |
197 |
|
198 |
// Start sampling ... |
199 |
// Methodological problem: all the mutator threads |
200 |
// can starve the compiler threads, resulting in skewed scores. |
201 |
// In theory, over time, the scores will improve as the compiler |
202 |
// threads are granted CPU cycles, but in practice a "warm up" phase |
203 |
// might be good idea to help C2. For this reason I've implemented |
204 |
// the "Hold" facility. |
205 |
|
206 |
long lastNumMsgs = 0; |
207 |
long sampleStart = System.currentTimeMillis(); |
208 |
for (int j = 0; j < nSamples; ++j) { |
209 |
// when this sample period is supposed to end |
210 |
long sampleEnd = sampleStart + samplePeriod; |
211 |
for (;;) { |
212 |
long now = System.currentTimeMillis(); |
213 |
if (now >= sampleEnd) { |
214 |
// when it really did end |
215 |
sampleEnd = now; |
216 |
break; |
217 |
} |
218 |
Thread.sleep(sampleEnd - now); |
219 |
} |
220 |
|
221 |
if (false && j == 2) { |
222 |
System.out.print ("Hold activated ...") ; |
223 |
HoldQ.lock(); |
224 |
try { |
225 |
Hold = true ; |
226 |
while (Hold) HoldQCond.await() ; |
227 |
} |
228 |
finally { |
229 |
HoldQ.unlock(); |
230 |
} |
231 |
} |
232 |
|
233 |
|
234 |
|
235 |
// there's no synchronization here, so the total i get is |
236 |
// approximate, but that's OK since any i miss for this |
237 |
// sample will get credited to the next sample, and on average |
238 |
// we'll be right |
239 |
long numMsgs = 0; |
240 |
for (int i = 0; i < nServers; ++i) |
241 |
numMsgs += servers[i].msgsReceived; |
242 |
long deltaMsgs = numMsgs - lastNumMsgs; |
243 |
long deltaT = sampleEnd - sampleStart; |
244 |
if (true || j != 2) { // Don't report results if we issued a hold ... |
245 |
System.out.print( |
246 |
"Sample period = " + deltaT + " ms; " |
247 |
+ "New msgs rcvd = " + deltaMsgs + "; " |
248 |
+ "Throughput = " + (deltaMsgs*1000 / deltaT) + " msg/sec\n"); |
249 |
// for (int i = 0; i < nServers; ++i) |
250 |
// servers[i].thisLock.dump(); |
251 |
} |
252 |
sampleStart = sampleEnd; |
253 |
lastNumMsgs = numMsgs; |
254 |
} |
255 |
System.exit(0); |
256 |
} |
257 |
} |
258 |
|
259 |
|