projects
/
bluesky.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
Report average latency in synbench
[bluesky.git]
/
microbench
/
workload.py
diff --git a/microbench/workload.py b/microbench/workload.py
index 3ec8942..0ce8ea0 100755 (executable)
--- a/microbench/workload.py
+++ b/microbench/workload.py
@@ -29,12 +29,12 @@ class WorkerThread:
def __init__(self):
self.stats = []
def __init__(self):
self.stats = []
- self.duration = 7200.0 # Seconds for which to run
+ self.duration = 1800.0 # Seconds for which to run
self.write_fraction = 0.5 # Fraction of operations which are writes
self.wss_count = 2048 # Files in the working set
self.tot_count = 2048 # Total number of files created
self.write_fraction = 0.5 # Fraction of operations which are writes
self.wss_count = 2048 # Files in the working set
self.tot_count = 2048 # Total number of files created
- self.filesize = 256 * 1024 # Size of files to work with
- self.target_ops = 10 # Target operations/second/thread
+ self.filesize = 32 * 1024 # Size of files to work with
+ self.target_ops = 40 # Target operations/second/thread
def get_params(self):
params = {}
def get_params(self):
params = {}
@@ -58,7 +58,7 @@ class WorkerThread:
info = self._operation()
time2 = time.time()
self.stats.append((time1, time2 - time1, info))
info = self._operation()
time2 = time.time()
self.stats.append((time1, time2 - time1, info))
- print self.stats[-1]
+ # print self.stats[-1]
delay = time1 + (1.0 / self.target_ops) - time2
if delay > 0: time.sleep(delay)
delay = time1 + (1.0 / self.target_ops) - time2
if delay > 0: time.sleep(delay)
@@ -97,12 +97,17 @@ def run_stats(stats):
print "WRITES:"
print_distribution_stats([x[1] for x in stats if x[2][0] == 'write'])
print "WRITES:"
print_distribution_stats([x[1] for x in stats if x[2][0] == 'write'])
-if __name__ == '__main__':
+fp = open('/tmp/results.json', 'a')
+
+def run(filecount, writefrac, filesize):
workers = []
threads = []
for i in range(THREADS):
w = WorkerThread()
workers = []
threads = []
for i in range(THREADS):
w = WorkerThread()
- #if i == 0: w.setup()
+ w.write_fraction = writefrac
+ w.wss_count = w.tot_count = filecount
+ w.filesize = filesize
+ if i == 0: w.setup()
t = threading.Thread(target=w.run)
threads.append(t)
workers.append(w)
t = threading.Thread(target=w.run)
threads.append(t)
workers.append(w)
@@ -118,8 +123,15 @@ if __name__ == '__main__':
results += w.stats
results.sort()
results += w.stats
results.sort()
- fp = open('/tmp/results.json', 'w')
fp.write(json.dumps(workers[0].get_params(), indent=2) + "\n\n")
fp.write(json.dumps(results, indent=2))
fp.write(json.dumps(workers[0].get_params(), indent=2) + "\n\n")
fp.write(json.dumps(results, indent=2))
- fp.close()
+ fp.write("\n\n")
run_stats(results)
run_stats(results)
+
+if __name__ == '__main__':
+ for filesize in [32, 256, 2048]: # KiB
+ for totsize in [256, 512, 1024]: # MiB
+ filecount = totsize * 1024 / filesize
+ for writefrac in [0.0, 0.5]:
+ run(filecount, writefrac, filesize * 1024)
+ fp.close()