# Copyright (C) 2010 The Regents of the University of California
# Written by Michael Vrable <mvrable@cs.ucsd.edu>
-import base64, os, re, struct, sys
+import base64, os, re, struct, sys, time
import boto
+from boto.s3.key import Key
# The BlueSky 'struct cloudlog_header' data type.
-HEADER_FORMAT = '<4sb16sQIII'
-HEADER_MAGIC = 'AgI-'
+HEADER_FORMAT = '<4s48sb16sQIII'
+HEADER_CRYPTBYTES = 48
+HEADER_MAGIC1 = 'AgI-' # Unencrypted data
+HEADER_MAGIC2 = 'AgI=' # Encrypted data
HEADER_SIZE = struct.calcsize(HEADER_FORMAT)
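+# Field layout of HEADER_FORMAT (little-endian), in unpack order:
+#   4s  magic         HEADER_MAGIC1 (unencrypted) or HEADER_MAGIC2 (encrypted)
+#   48s cryptkeys     encryption metadata; all zero bytes for unencrypted items
+#   b   type          ITEM_TYPE code of the item
+#   16s id            unique 16-byte item identifier
+#   Q   inum          owning inode number (0 if none)
+#   I   data_len      length of the item payload
+#   I   link_ids_len  length of the packed link-id region
+#   I   link_locs_len length of the packed link-location region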
class ITEM_TYPE:
return [(f, os.stat(os.path.join(self.path, f)).st_size)
for f in files]
- def read(self, filename):
+ def read(self, filename, offset=0, length=None):
fp = open(os.path.join(self.path, filename), 'rb')
- return fp.read()
+ if offset > 0:
+ fp.seek(offset)
+ if length is None:
+ return fp.read()
+ else:
+ return fp.read(length)
def write(self, filename, data):
fp = open(os.path.join(self.path, filename), 'wb')
fp.write(data)
fp.close()
+ def delete(self, filename):
+ os.unlink(os.path.join(self.path, filename))
+
+ def loc_to_name(self, location):
+ return "log-%08d-%08d" % (location)
+
+ def name_to_loc(self, name):
+ m = re.match(r"^log-(\d+)-(\d+)$", name)
+ if m: return (int(m.group(1)), int(m.group(2)))
+
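+# Decorator for S3Backend methods: on failure, reconnect and retry up to
+# three times, then make one final attempt whose exception, if any,
+# propagates to the caller.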
+def retry_wrap(method):
+ def wrapped(self, *args, **kwargs):
+ for retries in range(3):
+ try:
+ return method(self, *args, **kwargs)
+ except Exception:
+ print >>sys.stderr, "S3 operation failed, retrying..."
+ self.connect()
+ time.sleep(1.0)
+ return method(self, *args, **kwargs)
+ return wrapped
+
+class S3Backend:
+ """An interface to BlueSky where the log segments are on in Amazon S3."""
+
+ def __init__(self, bucket, path='', cachedir="."):
+ self.bucket_name = bucket
+ self.path = path
+ self.cachedir = cachedir
+ self.cache = {}
+ self.connect()
+
+ def connect(self):
+ self.conn = boto.connect_s3(is_secure=False)
+ self.bucket = self.conn.get_bucket(self.bucket_name)
+
+ def list(self):
+ files = []
+ for k in self.bucket.list(self.path + 'log-'):
+ files.append((k.key, k.size))
+ return files
+
+ @retry_wrap
+ def read(self, filename, offset=0, length=None):
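+ # Read-through cache: serve from the local cache directory when the
+ # object is already present; otherwise fetch the whole object from S3,
+ # cache it, and return the requested slice.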
+ if filename in self.cache:
+ fp = open(os.path.join(self.cachedir, filename), 'rb')
+ if offset > 0:
+ fp.seek(offset)
+ if length is None:
+ return fp.read()
+ else:
+ return fp.read(length)
+ else:
+ k = Key(self.bucket)
+ k.key = self.path + filename
+ data = k.get_contents_as_string()
+ fp = open(os.path.join(self.cachedir, filename), 'wb')
+ fp.write(data)
+ fp.close()
+ self.cache[filename] = True
+ if offset > 0:
+ data = data[offset:]
+ if length is not None:
+ data = data[0:length]
+ return data
+
+ @retry_wrap
+ def write(self, filename, data):
+ k = Key(self.bucket)
+ k.key = self.path + filename
+ k.set_contents_from_string(data)
+ if filename in self.cache:
+ del self.cache[filename]
+
+ @retry_wrap
+ def delete(self, filename):
+ k = Key(self.bucket)
+ k.key = self.path + filename
+ k.delete()
+ if filename in self.cache:
+ del self.cache[filename]
+
+ def loc_to_name(self, location):
+ return "log-%08d-%08d" % (location)
+
+ def name_to_loc(self, name):
+ m = re.match(r"^log-(\d+)-(\d+)$", name)
+ if m: return (int(m.group(1)), int(m.group(2)))
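+
+# Example usage (a sketch; the bucket name and cache directory are
+# illustrative):
+#   backend = S3Backend("my-bluesky-bucket", cachedir="/tmp/bluesky-cache")
+#   print backend.list()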
+
class LogItem:
"""In-memory representation of a single item stored in a log file."""
+ def __init__(self):
+ self.cryptkeys = '\0' * HEADER_CRYPTBYTES
+ self.encrypted = False
+
def __str__(self):
- return "<Item ty=%s location=%s size=%d id=%s...>" % (self.type, self.location, self.size, base64.b16encode(self.id).lower()[0:8])
+ return "<Item%s ty=%s location=%s size=%d id=%s...>" % (self.encrypted and '$' or '', self.type, self.location, self.size, base64.b16encode(self.id).lower()[0:8])
@staticmethod
def random_id():
link_ids = ''.join(link_ids)
link_locs = ''.join(link_locs)
+ if self.encrypted:
+ magic = HEADER_MAGIC2
+ else:
+ magic = HEADER_MAGIC1
header = struct.pack(HEADER_FORMAT,
- HEADER_MAGIC, ord(self.type), self.id, self.inum,
+ magic, self.cryptkeys,
+ ord(self.type), self.id, self.inum,
len(self.data), len(link_ids), len(link_locs))
return header + self.data + link_ids + link_locs
def close(self):
data = ''.join(self.data)
- filename = "log-%08d-%08d" % (self.location)
+ filename = self.backend.loc_to_name(self.location)
print "Would write %d bytes of data to %s" % (len(data), filename)
self.backend.write(filename, data)
class LogDirectory:
TARGET_SIZE = 4 << 20
- def __init__(self, backend, dir, seq):
+ def __init__(self, backend, dir):
self.backend = backend
self.dir_num = dir
- self.seq_num = seq
+ self.seq_num = 0
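+ # Resume numbering after the highest sequence number already present
+ # in this log directory.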
+ for logname in backend.list():
+ loc = backend.name_to_loc(logname[0])
+ if loc is not None and loc[0] == dir:
+ self.seq_num = max(self.seq_num, loc[1] + 1)
self.groups = {}
+ print "Starting sequence number is", self.seq_num
def open_segment(self):
seg = LogSegment(self.backend, (self.dir_num, self.seq_num))
def parse_item(data):
if len(data) < HEADER_SIZE: return
header = struct.unpack_from(HEADER_FORMAT, data, 0)
- size = HEADER_SIZE + sum(header[4:7])
+ size = HEADER_SIZE + sum(header[5:8])
- if header[0] != HEADER_MAGIC:
+ if header[0] not in (HEADER_MAGIC1, HEADER_MAGIC2):
print "Bad header magic!"
return
if len(data) != size:
- print "Item size does not match!"
+ print "Item size does not match: %d != %d" % (size, len(data))
return
item = LogItem()
- item.id = header[2]
- item.inum = header[3]
+ if header[0] == HEADER_MAGIC2: item.encrypted = True
+ item.cryptkeys = header[1]
+ item.id = header[3]
+ item.inum = header[4]
item.location = None
- item.type = chr(header[1])
+ item.type = chr(header[2])
item.size = size
- item.data = data[HEADER_SIZE : HEADER_SIZE + header[4]]
+ item.data = data[HEADER_SIZE : HEADER_SIZE + header[5]]
links = []
- link_ids = data[HEADER_SIZE + header[4]
- : HEADER_SIZE + header[4] + header[5]]
- link_locs = data[HEADER_SIZE + header[4] + header[5]
- : HEADER_SIZE + sum(header[4:7])]
+ link_ids = data[HEADER_SIZE + header[5]
+ : HEADER_SIZE + header[5] + header[6]]
+ link_locs = data[HEADER_SIZE + header[5] + header[6]
+ : HEADER_SIZE + sum(header[5:8])]
for i in range(len(link_ids) // 16):
id = link_ids[16*i : 16*i + 16]
if id == '\0' * 16:
The elements of the tuple are (directory, sequence, offset, size)."""
- filename = "log-%08d-%08d" % (location[0], location[1])
- data = backend.read(filename)[location[2] : location[2] + location[3]]
+ filename = backend.loc_to_name((location[0], location[1]))
+ data = backend.read(filename, location[2], location[3])
item = parse_item(data)
item.location = location
return item
offset = 0
while len(data) - offset >= HEADER_SIZE:
header = struct.unpack_from(HEADER_FORMAT, data, offset)
- size = HEADER_SIZE + sum(header[4:7])
- if header[0] != HEADER_MAGIC:
+ size = HEADER_SIZE + sum(header[5:8])
+ if header[0] not in (HEADER_MAGIC1, HEADER_MAGIC2):
print "Bad header magic!"
break
if size + offset > len(data):
def load_checkpoint_record(backend):
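+ # Scan segments newest-first and return the most recent checkpoint item.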
for (log, size) in reversed(backend.list()):
for item in reversed(list(parse_log(backend.read(log), log))):
+ print item
if item.type == ITEM_TYPE.CHECKPOINT:
return item
print
print "Segment utilizations:"
for (s, u) in sorted(util.segments.items()):
- #if u[1] > 0:
print "%s: %s %s" % (s, u, float(u[1]) / u[0])
+ if u[1] == 0:
+ print "Deleting..."
+ backend.delete(s)
self.inodes = inodes
self.util = util
log.write(new_checkpoint, 2)
self.checkpoint_record = new_checkpoint
-def rewrite_inode(backend, inode_map, inum, log):
+def rewrite_inode(backend, inode_map, inum, log, copy_data=True):
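+ # When copy_data is False, only the inode record itself is rewritten;
+ # its data blocks are left where they are.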
inode = inode_map.inodes[inum]
- blocks = []
- for l in inode.links:
- data = load_item(backend, l[1])
- blocks.append(data)
- log.write(data, 0)
- inode.links = [(b.id, b.location) for b in blocks]
+ if copy_data:
+ blocks = []
+ for l in inode.links:
+ data = load_item(backend, l[1])
+ blocks.append(data)
+ log.write(data, 0)
+ inode.links = [(b.id, b.location) for b in blocks]
log.write(inode, 1)
inode_map.mark_updated(inum)
-def run_cleaner(backend, inode_map, log):
+def run_cleaner(backend, inode_map, log, repack_inodes=False):
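+ # With repack_inodes=True, every inode record is rewritten; data blocks
+ # are still copied only for inodes with data in segments being cleaned.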
# Determine which segments are poorly utilized and should be cleaned. We
# need better heuristics here.
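+ # Current policy: clean any segment holding live data that is either
+ # less than 60% utilized or has under 32 KiB of live bytes.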
for (s, u) in sorted(inode_map.util.segments.items()):
- if float(u[1]) / u[0] < 0.99 and u[1] > 0:
+ if (float(u[1]) / u[0] < 0.6 or u[1] < 32768) and u[1] > 0:
print "Should clean segment", s
- m = re.match(r"^log-(\d+)-(\d+)$", s)
- if m: inode_map.obsolete_segments.add((int(m.group(1)), int(m.group(2))))
+ loc = backend.name_to_loc(s)
+ if loc is not None: inode_map.obsolete_segments.add(loc)
+
+ # TODO: We probably also want heuristics that will find inodes with
+ # badly-fragmented data and rewrite that to achieve better locality.
# Given that list of segments to clean, scan through those segments to find
# data which is still live and mark relevant inodes as needing to be
# rewritten.
- dirty_inodes = set()
+ if repack_inodes:
+ dirty_inodes = set(inode_map.inodes)
+ else:
+ dirty_inodes = set()
+ dirty_inode_data = set()
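+ # dirty_inodes: inodes whose inode record lives in a segment being
+ # cleaned; dirty_inode_data: inodes with a data block in one of them.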
for s in inode_map.obsolete_segments:
- filename = "log-%08d-%08d" % s
+ filename = backend.loc_to_name(s)
print "Scanning", filename, "for live data"
for item in parse_log(backend.read(filename), filename):
if item.type in (ITEM_TYPE.DATA, ITEM_TYPE.INODE):
if item.inum != 0:
- dirty_inodes.add(item.inum)
+ inode = inode_map.inodes[item.inum]
+ if s == inode.location[0:2]:
+ dirty_inodes.add(item.inum)
+ if item.inum not in dirty_inode_data:
+ for b in inode.links:
+ if s == b[1][0:2]:
+ dirty_inode_data.add(item.inum)
+ break
print "Inodes to rewrite:", dirty_inodes
- for i in sorted(dirty_inodes):
- rewrite_inode(backend, inode_map, i, log)
+ print "Inodes with data to rewrite:", dirty_inode_data
+ for i in sorted(dirty_inodes.union(dirty_inode_data)):
+ rewrite_inode(backend, inode_map, i, log, i in dirty_inode_data)
if __name__ == '__main__':
- backend = FileBackend(".")
+ backend = S3Backend("mvrable-bluesky", cachedir=".")
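+ # (Swap FileBackend(".") back in here to run against a local directory.)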
chkpt = load_checkpoint_record(backend)
+ print backend.list()
imap = InodeMap()
imap.build(backend, chkpt)
print chkpt
- log_dir = LogDirectory(backend, 1, 0)
+ log_dir = LogDirectory(backend, 0)
run_cleaner(backend, imap, log_dir)
imap.write(backend, log_dir)
log_dir.close_all()