// Adapted from code that was in turn
// Derived from SocketPerformanceTest.java - BugID: 4763450
//
//

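// RLIBar: a ReentrantLock/Condition throughput microbenchmark. Each of
// nServers servers spawns nClients worker threads; every thread repeatedly
// posts a message into its server's current batch and then waits on a
// Condition until the batch is complete. main() periodically samples and
// reports the aggregate message rate.
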
import java.io.*;
import java.net.*;
import java.util.concurrent.*;
import java.util.concurrent.locks.*;

public class RLIBar {

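    // Shared benchmark state: batchLimit is the number of messages that
    // completes a batch, mseq is a global message sequence counter (updated
    // without synchronization), nReady counts threads that have reached the
    // startup barrier, ExThreads is the expected total number of server
    // threads, and ASum accumulates the generated message values.
    // Gate/GateCond implement the startup rendezvous.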
    static int batchLimit;
    static int mseq;
    static int nReady;
    static int ExThreads;
    static int ASum;
    static final ReentrantLock Gate = new ReentrantLock();
    static final Condition GateCond = Gate.newCondition();

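    // "Hold" facility: when Hold is set, worker threads gather on HoldQ until
    // HoldPop reaches HoldLimit; the group then pauses briefly and is released
    // together (see HoldCheck below and the comments in main).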
    static final ReentrantLock HoldQ = new ReentrantLock();
    static final Condition HoldQCond = HoldQ.newCondition();
    static boolean Hold = false;
    static int HoldPop;
    static int HoldLimit;

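    // If a hold is in effect, block with the other worker threads until all
    // HoldLimit of them have arrived; the last arrival sleeps briefly, clears
    // Hold, and wakes the rest. Returns true if the caller was held.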
    private static boolean HoldCheck() {
        try {
            HoldQ.lock();
            try {
                if (!Hold) return false;
                else {
                    ++HoldPop;
                    if (HoldPop >= HoldLimit) {
                        System.out.print("Holding ");
                        Thread.sleep(1000);
                        System.out.println();
                        Hold = false;
                        HoldQCond.signalAll();
                    }
                    else
                        while (Hold)
                            HoldQCond.await();

                    if (--HoldPop == 0) HoldQCond.signalAll();
                    return true;
                }
            }
            finally {
                HoldQ.unlock();
            }
        } catch (Exception ex) {
            System.out.println("Unexpected exception in Hold: " + ex);
            return false;
        }
    }

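    // A Server owns one lock/condition pair and a group of worker threads
    // that batch their messages against it.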
    private static class Server {
        private int nClients;
        final ReentrantLock thisLock = new ReentrantLock();
        final Condition thisCond = thisLock.newCondition();

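        // Spawn nClients worker threads, each running runServer with its own id.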
        Server(int nClients) {
            this.nClients = nClients;
            try {
                for (int i = 0; i < nClients; ++i) {
                    final int fix = i;
                    new Thread() { public void run() { runServer(fix); } }.start();
                }
            } catch (Exception e) {
                System.err.println(e);
            }
        }

        // the total number of messages received by all server threads
        // on this server
        int msgsReceived = 0;

        // incremented each time we get a complete batch of requests
        private int currentBatch = 0;

        // the number of requests received since the last time currentBatch
        // was incremented
        private int currentBatchSize = 0;

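        // Per-thread work loop: after the startup rendezvous, repeatedly
        // generate a message, add it to the current batch under thisLock,
        // and wait on thisCond until the batch is complete.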
        private void runServer(int id) {
            int msg;
            boolean held = false;
            final ReentrantLock thisLock = this.thisLock;
            final Condition thisCond = this.thisCond;

            try {

                // Startup barrier - rendezvous - wait for all threads.
                // Forces all threads to park on their LWPs, ensuring
                // proper provisioning on T1.
                // Alternatively, use THR_BOUND threads.
                Gate.lock();
                try {
                    ++nReady;
                    if (nReady == ExThreads) {
                        GateCond.signalAll();
                    }
                    while (nReady != ExThreads)
                        GateCond.await();
                } finally { Gate.unlock(); }

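                // Message loop: each iteration contributes one message to the
                // server's current batch and then waits until the batch is complete.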
                for (;;) {
                    // if (!held && currentBatchSize == 0) held = HoldCheck();
                    msg = (++mseq) ^ id;
                    thisLock.lock();
                    try {
                        ASum += msg;
                        ++msgsReceived;
                        int myBatch = currentBatch;
                        if (++currentBatchSize >= batchLimit) {
                            // this batch is full, start a new one ...
                            ++currentBatch;
                            currentBatchSize = 0;
                            // and wake up everyone in this one
                            thisCond.signalAll();
                        }
                        // Wait until our batch is complete
                        while (myBatch == currentBatch)
                            thisCond.await();
                    }
                    finally {
                        thisLock.unlock();
                    }
                }
            } catch (Exception e) {
                System.err.println("Server thread: exception " + e);
                e.printStackTrace();
            }
        }

    }

    public static void main(String[] args) throws Exception {
        int nServers = 10;
        int nClients = 10;
        int samplePeriod = 10000;
        int nSamples = 5;

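        // Parse command-line options:
        //   -nc <n>      client (worker) threads per server
        //   -ns <n>      number of servers
        //   -batch <n>   messages per batch
        //   -sample <ms> sample period in milliseconds
        //   -np <n>      number of sample periods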
        int nextArg = 0;
        while (nextArg < args.length) {
            String arg = args[nextArg++];
            if (arg.equals("-nc"))
                nClients = Integer.parseInt(args[nextArg++]);
            else if (arg.equals("-ns"))
                nServers = Integer.parseInt(args[nextArg++]);
            else if (arg.equals("-batch"))
                batchLimit = Integer.parseInt(args[nextArg++]);
            else if (arg.equals("-sample"))
                samplePeriod = Integer.parseInt(args[nextArg++]);
            else if (arg.equals("-np"))
                nSamples = Integer.parseInt(args[nextArg++]);
            else {
                System.err.println("Argument error: " + arg);
                System.exit(1);
            }
        }
        if (nClients <= 0 || nServers <= 0 || samplePeriod <= 0 || batchLimit > nClients) {
            System.err.println("Argument error");
            System.exit(1);
        }

        // default batch size would be 2/3 the number of clients
        // (for no particular reason); currently disabled
        if (false && batchLimit <= 0)
            batchLimit = (2 * nClients + 1) / 3;

        ExThreads = nServers * nClients;   // expected # of threads
        HoldLimit = ExThreads;

        // start up all threads
        Server[] servers = new Server[nServers];
        for (int i = 0; i < nServers; ++i) {
            servers[i] = new Server(nClients);
        }

        // Wait for consensus - all server threads must reach the startup barrier
        try {
            Gate.lock();
            try {
                while (nReady != ExThreads) GateCond.await();
            } finally { Gate.unlock(); }
        } catch (Exception ex) {
            System.out.println(ex);
        }
        System.out.println(
            nReady + " Ready: nc=" + nClients + " ns=" + nServers + " batch=" + batchLimit);

        // Start sampling ...
        // Methodological problem: all the mutator threads
        // can starve the compiler threads, resulting in skewed scores.
        // In theory, over time, the scores will improve as the compiler
        // threads are granted CPU cycles, but in practice a "warm up" phase
        // might be a good idea to help C2. For this reason I've implemented
        // the "Hold" facility.

        long lastNumMsgs = 0;
        long sampleStart = System.currentTimeMillis();
        for (int j = 0; j < nSamples; ++j) {
            // when this sample period is supposed to end
            long sampleEnd = sampleStart + samplePeriod;
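            // Sleep until the end of the sample period, re-checking the clock
            // in case sleep returns early.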
            for (;;) {
                long now = System.currentTimeMillis();
                if (now >= sampleEnd) {
                    // when it really did end
                    sampleEnd = now;
                    break;
                }
                Thread.sleep(sampleEnd - now);
            }

            if (false && j == 2) {
                System.out.print("Hold activated ...");
                HoldQ.lock();
                try {
                    Hold = true;
                    while (Hold) HoldQCond.await();
                }
                finally {
                    HoldQ.unlock();
                }
            }

            // there's no synchronization here, so the total we get is
            // approximate, but that's OK since any messages we miss for this
            // sample will get credited to the next sample, and on average
            // we'll be right
            long numMsgs = 0;
            for (int i = 0; i < nServers; ++i)
                numMsgs += servers[i].msgsReceived;
            long deltaMsgs = numMsgs - lastNumMsgs;
            long deltaT = sampleEnd - sampleStart;
            if (true || j != 2) {    // Don't report results if we issued a hold ...
                System.out.print(
                    "Sample period = " + deltaT + " ms; "
                    + "New msgs rcvd = " + deltaMsgs + "; "
                    + "Throughput = " + (deltaMsgs * 1000 / deltaT) + " msg/sec\n");
                // for (int i = 0; i < nServers; ++i)
                //     servers[i].thisLock.dump();
            }
            sampleStart = sampleEnd;
            lastNumMsgs = numMsgs;
        }
        System.exit(0);
    }
}