# Copyright (C) 2010 The Regents of the University of California
# Written by Michael Vrable <mvrable@cs.ucsd.edu>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
# 3. Neither the name of the University nor the names of its contributors
#    may be used to endorse or promote products derived from this software
#    without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
# Run a series of simple test requests against S3 to gather some basic
# performance numbers.
import sys, threading, time, Queue

import boto
from boto.s3.connection import SubdomainCallingFormat
from boto.s3.key import Key
# S3 bucket all benchmark requests go against, and the object sizes that are
# exercised: powers of two from 4 KiB (2**12) through 4 MiB (2**22).
BUCKET_NAME = 'mvrable-benchmark'
SIZES = [1 << s for s in range(12, 23)]
class S3TestConnection:
    """A single S3 connection plus helpers for timed PUT/GET requests.

    One instance wraps one boto connection to BUCKET_NAME; the benchmark
    drivers below open many instances so requests can run in parallel.
    """

    def __init__(self):
        # Plain HTTP: these are throughput measurements, TLS would only
        # add overhead we are not trying to measure.
        self.conn = boto.connect_s3(is_secure=False,
                                    calling_format=SubdomainCallingFormat())
        self.bucket = self.conn.get_bucket(BUCKET_NAME)

    def put_object(self, name, size):
        """Upload a synthetic object of <size> bytes under key <name>."""
        buf = 'A' * size
        k = Key(self.bucket, name)
        start_time = time.time()
        k.set_contents_from_string(buf)
        #print "%s: %f" % (name, time.time() - start_time)

    def get_object(self, name, byterange=None):
        """Download key <name>, optionally restricted to a byte range.

        byterange: optional (first, last) pair, inclusive on both ends,
            sent as an HTTP Range header.
        Returns (duration_in_seconds, bytes_received).
        """
        k = Key(self.bucket, name)
        headers = {}
        if byterange is not None:
            headers['Range'] = 'bytes=%s-%s' % byterange
        start_time = time.time()
        buf = k.get_contents_as_string(headers=headers)
        duration = time.time() - start_time
        return (duration, len(buf))
def parallel_rangeget(name, size, connections, pieces, repeat=1):
    """Fetch object <name> (of <size> bytes) as <pieces> parallel range GETs.

    The object is split into <pieces> equal byte ranges; one worker thread
    per entry in <connections> pulls range requests off a shared queue until
    it is empty.  Each piece is queued <repeat> times, but is only actually
    fetched once (later duplicates see the completion timestamp and skip).

    Returns the elapsed time from just before the threads are started until
    the last piece finished downloading.
    """
    requests = Queue.Queue()
    # Per-piece [lock, completion_timestamp] slots; timestamp is None until
    # that piece has been fetched.
    results = [[threading.Lock(), None] for n in range(pieces)]
    for _ in range(repeat):
        for i in range(pieces):
            requests.put((i, results[i]))
    blocksize = size // pieces

    def launcher(c, requests):
        # Drain the queue; Queue.Empty signals there is no work left.
        while True:
            try:
                (i, r) = requests.get(block=False)
            except Queue.Empty:
                break
            # Possible data race here but it should be harmless
            if r[1] is None:
                res = c.get_object(name, byterange=(blocksize * i,
                                                    blocksize * (i+1) - 1))
                # Record the completion time once, under the piece's lock.
                r[0].acquire()
                if r[1] is None: r[1] = time.time()
                r[0].release()
            requests.task_done()

    threads = []
    for i in range(len(connections)):
        c = connections[i]
        threads.append(threading.Thread(target=launcher, args=(c, requests)))
    start_time = time.time()
    for i in range(len(threads)):
        threads[i].start()
    for t in threads:
        t.join()

    return max(x[1] for x in results) - start_time
98 connections = [S3TestConnection() for _ in range(128)]
100 for i in [(1 << x) for x in range(8)]:
102 print i, parallel_rangeget('file-%d-0' % (s,), s, connections, i)
106 logfile = open('multifetch-simulation.data', 'a')
107 for s in [(1 << s) for s in range(16, 27)]:
108 print "Priming objects: %d-byte objects" % (s,)
109 run_test(s, 1, 100, open('/dev/null', 'w'), 0.0)
111 for blocksize in [x << 20 for x in (4, 8, 16, 32, 64, 128)]:
112 if s > blocksize: continue
114 for rep in range(10):
115 count = blocksize // s
116 print "Running tests: %d-byte blocks, %d-byte objects, %d parallel fetches" % (blocksize, s, t)
117 print "Object count:", count
118 if count * t > len(connections):
121 conns = connections[0 : count * t]
123 objects = ['file-%d-%d' % (s, i % 100) for i in range(count)]
124 r = parallel_multiget(objects, conns, t)
126 logfile.write('%s\t%s\t%s\t%s\t%s\n' % (s, blocksize >> 20, t, len(conns), r))