--- /dev/null
+#!/usr/bin/python
+#
+# A simple benchmark for Blue Sky that reads and/or writes a collection of
+# files with a specified working-set size and measures the response time of
+# each operation.
+
+import json, os, random, sys, threading, time
+
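+# Number of concurrent worker threads to run.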
+THREADS = 1
+
+class WorkerThread:
+ """Performs a mix of file system operations and records the performance."""
+
+ def __init__(self):
+ self.stats = []
+ self.duration = 10.0 # Seconds for which to run
+ self.write_fraction = 0.3 # Fraction of operations which are writes
+ self.wss_count = 16 # Files in the working set
+        self.tot_count = 32 # Total files created; only the first wss_count are accessed
+ self.filesize = 32 * 1024 # Size of files to work with
+ self.target_ops = 2 # Target operations/second/thread
+
+ def setup(self):
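+        """Create the initial set of benchmark files, each filled with zero bytes."""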
+ for i in range(self.tot_count):
+ filename = "file-%d" % (i,)
+ fp = open(filename, 'w')
+ fp.write('\0' * self.filesize)
+ fp.close()
+
+ def run(self):
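+        """Issue operations until the configured duration elapses, recording each result."""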
+ stop_time = time.time() + self.duration
+
+ while True:
+ time1 = time.time()
+ if time1 >= stop_time: break
+ info = self._operation()
+ time2 = time.time()
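+            # Record (start time, latency, operation info) for later analysis.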
+ self.stats.append((time1, time2 - time1, info))
+ print self.stats[-1]
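+            # Pace the loop so we issue at most target_ops operations per second.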
+ delay = time1 + (1.0 / self.target_ops) - time2
+ if delay > 0: time.sleep(delay)
+
+ def _operation(self):
+ """Run a single file system test (i.e., read or write a file)."""
+
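+        # Pick a file uniformly at random from the working set.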
+ filename = "file-%d" % (random.randrange(self.wss_count),)
+
+ if random.uniform(0.0, 1.0) < self.write_fraction:
+ fp = open(filename, 'w')
+ fp.write('\0' * self.filesize)
+ fp.close()
+ return ('write', filename)
+ else:
+ fp = open(filename, 'r')
+ fp.read()
+ fp.close()
+ return ('read', filename)
+
+def run_stats(stats):
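+    """Print summary statistics for a list of (start_time, latency, info) tuples."""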
+    # Duration spans from the first operation's start to the last completion.
+    duration = max(x[0] + x[1] for x in stats) - min(x[0] for x in stats)
+ latencies = [x[1] for x in stats]
+ latencies.sort()
+ print "Experiment duration:", duration
+ print "Operation count:", len(stats)
+ print "Latencies:", latencies
+ print "Average latency:", sum(latencies) / len(latencies)
+
+if __name__ == '__main__':
+ workers = []
+ threads = []
+ for i in range(THREADS):
+ w = WorkerThread()
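+        # Only the first worker creates the shared files; all threads use them.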
+ if i == 0: w.setup()
+ t = threading.Thread(target=w.run)
+ threads.append(t)
+ workers.append(w)
+ t.start()
+ for t in threads:
+ t.join()
+
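+    # Merge the per-thread statistics and sort by operation start time.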
+ results = []
+ for w in workers:
+ results += w.stats
+ results.sort()
+
+ print json.dumps(results, indent=2)
+ run_stats(results)