X-Git-Url: http://git.vrable.net/?a=blobdiff_plain;f=cleaner%2Fcleaner;h=60c91f501d229e93510b37e3629e8471b9a3ba02;hb=5551523de35c1abaf24d77654928e2c748fd95aa;hp=fb3ce0559ae99f751b4ecbf623510e126eea119c;hpb=c665095a0fb48ac1304f906eaca563ba8d01e6e8;p=bluesky.git

diff --git a/cleaner/cleaner b/cleaner/cleaner
index fb3ce05..60c91f5 100755
--- a/cleaner/cleaner
+++ b/cleaner/cleaner
@@ -8,21 +8,39 @@
 # Copyright (C) 2010  The Regents of the University of California
 # Written by Michael Vrable <mvrable@cs.ucsd.edu>
 
-import base64, os, re, struct, sys
+import base64, os, re, socket, struct, sys, time
 import boto
+from boto.s3.key import Key
 
 # The BlueSky 'struct cloudlog_header' data type.
-HEADER_FORMAT = '<4sb16sQIII'
-HEADER_MAGIC = 'AgI-'
+HEADER_FORMAT = '<4s48sb16sQIII'
+HEADER_CRYPTBYTES = 48
+HEADER_MAGIC1 = 'AgI-'          # Unencrypted data
+HEADER_MAGIC2 = 'AgI='          # Encrypted data
 HEADER_SIZE = struct.calcsize(HEADER_FORMAT)
 
+CHECKPOINT_MAGIC = struct.pack('<Q', 0x7ad7dafb42a498b4)
+
 class ITEM_TYPE:
     DATA = '1'
     INODE = '2'
     INODE_MAP = '3'
     CHECKPOINT = '4'
 
-class FileBackend:
+class Backend:
+    """Base class for BlueSky storage backends."""
+
+    def loc_to_name(self, location):
+        return "log-%08d-%08d" % (location)
+
+    def name_to_loc(self, name):
+        m = re.match(r"^log-(\d+)-(\d+)$", name)
+        if m: return (int(m.group(1)), int(m.group(2)))
+
+    def dump_stats(self):
+        pass
+
+class FileBackend(Backend):
     """An interface to BlueSky where the log segments are on local disk.
 
     This is mainly intended for testing purposes, as the real cleaner would
@@ -31,36 +49,187 @@ class FileBackend:
     def __init__(self, path):
         self.path = path
 
-    def list(self):
+    def list(self, directory=0):
         """Return a listing of all log segments and their sizes."""
 
-        files = [f for f in os.listdir(self.path) if f.startswith('log-')]
+        prefix = "log-%08d-" % (directory,)
+        files = [f for f in os.listdir(self.path) if f.startswith(prefix)]
         files.sort()
 
         return [(f, os.stat(os.path.join(self.path, f)).st_size)
                 for f in files]
 
-    def read(self, filename):
+    def read(self, filename, offset=0, length=None):
         fp = open(os.path.join(self.path, filename), 'rb')
-        return fp.read()
+        if offset > 0:
+            fp.seek(offset)
+        if length is None:
+            return fp.read()
+        else:
+            return fp.read(length)
 
     def write(self, filename, data):
         fp = open(os.path.join(self.path, filename), 'wb')
         fp.write(data)
         fp.close()
 
-    def loc_to_name(self, location):
-        return "log-%08d-%08d" % (location)
+    def delete(self, filename):
+        os.unlink(os.path.join(self.path, filename))
+
+def retry_wrap(method):
+    def wrapped(self, *args, **kwargs):
+        for retries in range(3):
+            try:
+                return method(self, *args, **kwargs)
+            except:
+                print >>sys.stderr, "S3 operation failed, retrying..."
+                self.connect()
+                time.sleep(1.0)
+        return method(self, *args, **kwargs)
+    return wrapped
+
+class S3Backend(Backend):
+    """An interface to BlueSky where the log segments are stored in Amazon S3."""
+
+    def __init__(self, bucket, path='', cachedir="."):
+        self.bucket_name = bucket
+        self.path = path
+        self.cachedir = cachedir
+        self.cache = {}
+        for f in os.listdir(cachedir):
+            self.cache[f] = True
+        print "Initial cache contents:", list(self.cache.keys())
+        self.connect()
+        self.stats_get = [0, 0]
+        self.stats_put = [0, 0]
+
+    def connect(self):
+        self.conn = boto.connect_s3(is_secure=False)
+        self.bucket = self.conn.get_bucket(self.bucket_name)
+
+    def list(self, directory=0):
+        files = []
+        prefix = "log-%08d-" % (directory,)
+        for k in self.bucket.list(self.path + prefix):
+            files.append((k.key, k.size))
+        return files
+
+    @retry_wrap
+    def read(self, filename, offset=0, length=None):
+        if filename in self.cache:
+            fp = open(os.path.join(self.cachedir, filename), 'rb')
+            if offset > 0:
+                fp.seek(offset)
+            if length is None:
+                return fp.read()
+            else:
+                return fp.read(length)
+        else:
+            k = Key(self.bucket)
+            k.key = self.path + filename
+            data = k.get_contents_as_string()
+            fp = open(os.path.join(self.cachedir, filename), 'wb')
+            fp.write(data)
+            fp.close()
+            self.cache[filename] = True
+            self.stats_get[0] += 1
+            self.stats_get[1] += len(data)
+            if offset > 0:
+                data = data[offset:]
+            if length is not None:
+                data = data[0:length]
+            return data
+
+    @retry_wrap
+    def write(self, filename, data):
+        k = Key(self.bucket)
+        k.key = self.path + filename
+        k.set_contents_from_string(data)
+        self.stats_put[0] += 1
+        self.stats_put[1] += len(data)
+        if filename in self.cache:
+            del self.cache[filename]
+
+    @retry_wrap
+    def delete(self, filename):
+        k = Key(self.bucket)
+        k.key = self.path + filename
+        k.delete()
+        if filename in self.cache:
+            del self.cache[filename]
+
+    def dump_stats(self):
+        print "S3 statistics:"
+        print "GET: %d ops / %d bytes" % tuple(self.stats_get)
+        print "PUT: %d ops / %d bytes" % tuple(self.stats_put)
+
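The retry_wrap decorator introduced above is a standard retry pattern: up to three attempts inside the loop, each failure followed by a reconnect and a short pause, with one final attempt outside the loop so that a persistent error still propagates to the caller. The sketch below exercises the same pattern in isolation; the FlakyStore class and its failure schedule are hypothetical, invented only to demonstrate the decorator:

    import sys, time

    def retry_wrap(method):
        # Retry a bound method up to three times, reconnecting between
        # attempts; the final call is made outside the loop so that its
        # exception, if any, reaches the caller.
        def wrapped(self, *args, **kwargs):
            for retries in range(3):
                try:
                    return method(self, *args, **kwargs)
                except:
                    print >>sys.stderr, "Operation failed, retrying..."
                    self.connect()
                    time.sleep(1.0)
            return method(self, *args, **kwargs)
        return wrapped

    class FlakyStore:                   # hypothetical stand-in for S3Backend
        def __init__(self):
            self.attempts = 0

        def connect(self):
            pass                        # a real backend would reconnect here

        @retry_wrap
        def read(self, key):
            self.attempts += 1
            if self.attempts < 3:       # fail twice, then succeed
                raise IOError("transient failure")
            return "data for " + key

    print FlakyStore().read("log-00000000-00000001")
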
+class SimpleBackend(Backend):
+    """An interface to the simple BlueSky test network server."""
+
+    def __init__(self, server=('localhost', 12345), cachedir="."):
+        self.server_address = server
+        self.cachedir = cachedir
+        self.cache = {}
+
+    def _get_socket(self):
+        return socket.create_connection(self.server_address).makefile()
+
+    def list(self, directory=0):
+        # The simple test server has no listing operation.
+        raise NotImplementedError
+
+    def read(self, filename, offset=0, length=None):
+        if filename in self.cache:
+            fp = open(os.path.join(self.cachedir, filename), 'rb')
+            if offset > 0:
+                fp.seek(offset)
+            if length is None:
+                return fp.read()
+            else:
+                return fp.read(length)
+        else:
+            f = self._get_socket()
+            f.write("GET %s %d %d\n" % (filename, 0, 0))
+            f.flush()
+            datalen = int(f.readline())
+            if datalen < 0:
+                raise RuntimeError
+            data = f.read(datalen)
+            fp = open(os.path.join(self.cachedir, filename), 'wb')
+            fp.write(data)
+            fp.close()
+            self.cache[filename] = True
+            if offset > 0:
+                data = data[offset:]
+            if length is not None:
+                data = data[0:length]
+            return data
 
-    def name_to_loc(self, name):
-        m = re.match(r"^log-(\d+)-(\d+)$", name)
-        if m: return (int(m.group(1)), int(m.group(2)))
+    def write(self, filename, data):
+        f = self._get_socket()
+        f.write("PUT %s %d\n" % (filename, len(data)))
+        f.write(data)
+        f.flush()
+        result = int(f.readline())
+        if filename in self.cache:
+            del self.cache[filename]
+
+    def delete(self, filename):
+        pass
 
 class LogItem:
     """In-memory representation of a single item stored in a log file."""
 
+    def __init__(self):
+        self.cryptkeys = '\0' * HEADER_CRYPTBYTES
+        self.encrypted = False
+
     def __str__(self):
-        return "<Item ty=%s location=%s size=%d id=%s...>" % (self.type, self.location, self.size, base64.b16encode(self.id).lower()[0:8])
+        return "<Item%s ty=%s location=%s size=%d id=%s...>" % (self.encrypted and '$' or '', self.type, self.location, self.size, base64.b16encode(self.id).lower()[0:8])
 
     @staticmethod
     def random_id():
@@ -76,8 +245,13 @@ class LogItem:
         link_ids = ''.join(link_ids)
         link_locs = ''.join(link_locs)
 
+        if self.encrypted:
+            magic = HEADER_MAGIC2
+        else:
+            magic = HEADER_MAGIC1
         header = struct.pack(HEADER_FORMAT,
-                             HEADER_MAGIC, ord(self.type), self.id, self.inum,
+                             magic, self.cryptkeys,
+                             ord(self.type), self.id, self.inum,
                              len(self.data), len(link_ids), len(link_locs))
         return header + self.data + link_ids + link_locs
 
@@ -109,7 +283,8 @@ class LogDirectory:
         self.backend = backend
         self.dir_num = dir
         self.seq_num = 0
-        for logname in backend.list():
+        for logname in backend.list(dir):
+            print "Old log file:", logname
             loc = backend.name_to_loc(logname[0])
             if loc is not None and loc[0] == dir:
                 self.seq_num = max(self.seq_num, loc[1] + 1)
@@ -142,7 +317,7 @@ class UtilizationTracker:
 
     def __init__(self, backend):
         self.segments = {}
-        for (segment, size) in backend.list():
+        for (segment, size) in backend.list(0) + backend.list(1):
            self.segments[segment] = [size, 0]
 
     def add_item(self, item):
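A note on the header change running through the hunks above and below: 'struct cloudlog_header' gains a 48-byte block of encryption-key material (cryptkeys) between the magic and the type byte, so every index into the unpacked header tuple shifts by one; that is why header[4:7] becomes header[5:8] in the parsing code. The arithmetic can be checked with nothing but the struct module:

    import struct

    OLD_HEADER_FORMAT = '<4sb16sQIII'       # magic, type, id, inum, 3 lengths
    NEW_HEADER_FORMAT = '<4s48sb16sQIII'    # inserts 48 bytes of cryptkeys

    print struct.calcsize(OLD_HEADER_FORMAT)    # 41
    print struct.calcsize(NEW_HEADER_FORMAT)    # 89 = 41 + 48

    # In the new layout the three trailing lengths (data, link ids,
    # link locations) sit at indices 5..7 of the unpacked tuple.
    header = struct.unpack(NEW_HEADER_FORMAT,
                           struct.pack(NEW_HEADER_FORMAT,
                                       'AgI-', '\0' * 48, ord('1'),
                                       '\0' * 16, 0, 10, 32, 16))
    print sum(header[5:8])                      # 58, the payload size
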
+ print "Item size does not match: %d != %d" % (size, len(data)) return item = LogItem() - item.id = header[2] - item.inum = header[3] + if header[0] == HEADER_MAGIC2: item.encrypted = True + item.cryptkeys = header[1] + item.id = header[3] + item.inum = header[4] item.location = None - item.type = chr(header[1]) + item.type = chr(header[2]) item.size = size - item.data = data[HEADER_SIZE : HEADER_SIZE + header[4]] + item.data = data[HEADER_SIZE : HEADER_SIZE + header[5]] links = [] - link_ids = data[HEADER_SIZE + header[4] - : HEADER_SIZE + header[4] + header[5]] - link_locs = data[HEADER_SIZE + header[4] + header[5] - : HEADER_SIZE + sum(header[4:7])] + link_ids = data[HEADER_SIZE + header[5] + : HEADER_SIZE + header[5] + header[6]] + link_locs = data[HEADER_SIZE + header[5] + header[6] + : HEADER_SIZE + sum(header[5:8])] for i in range(len(link_ids) // 16): id = link_ids[16*i : 16*i + 16] if id == '\0' * 16: @@ -195,7 +372,7 @@ def load_item(backend, location): The elements of the tuple are (directory, sequence, offset, size).""" filename = backend.loc_to_name((location[0], location[1])) - data = backend.read(filename)[location[2] : location[2] + location[3]] + data = backend.read(filename, location[2], location[3]) item = parse_item(data) item.location = location return item @@ -213,8 +390,8 @@ def parse_log(data, location=None): offset = 0 while len(data) - offset >= HEADER_SIZE: header = struct.unpack_from(HEADER_FORMAT, data, offset) - size = HEADER_SIZE + sum(header[4:7]) - if header[0] != HEADER_MAGIC: + size = HEADER_SIZE + sum(header[5:8]) + if header[0] not in (HEADER_MAGIC1, HEADER_MAGIC2): print "Bad header magic!" break if size + offset > len(data): @@ -226,9 +403,10 @@ def parse_log(data, location=None): if item is not None: yield item offset += size -def load_checkpoint_record(backend): - for (log, size) in reversed(backend.list()): +def load_checkpoint_record(backend, directory=0): + for (log, size) in reversed(backend.list(directory)): for item in reversed(list(parse_log(backend.read(log), log))): + print item if item.type == ITEM_TYPE.CHECKPOINT: return item @@ -241,6 +419,7 @@ class InodeMap: This will also build up information about segment utilization.""" + self.version_vector = {} self.checkpoint_record = checkpoint_record util = UtilizationTracker(backend) @@ -248,9 +427,25 @@ class InodeMap: inodes = {} self.obsolete_segments = set() + data = checkpoint_record.data + if not data.startswith(CHECKPOINT_MAGIC): + raise ValueError, "Invalid checkpoint record!" + data = data[len(CHECKPOINT_MAGIC):] + (vvlen,) = struct.unpack_from(" 0: + for i in range(2): total_data[i] += u[i] print "%s: %s %s" % (s, u, float(u[1]) / u[0]) + if u[1] == 0: + print "Would delete..." 
@@ -285,13 +490,18 @@ class InodeMap:
         new_checkpoint.id = LogItem.random_id()
         new_checkpoint.inum = 0
         new_checkpoint.type = ITEM_TYPE.CHECKPOINT
-        new_checkpoint.data = ""
+        new_checkpoint.data = CHECKPOINT_MAGIC
         new_checkpoint.links = []
 
-        for i in range(len(self.checkpoint_record.data) // 16):
-            (start, end) = struct.unpack_from("<QQ", self.checkpoint_record.data, 16*i)
+        data = self.checkpoint_record.data[len(CHECKPOINT_MAGIC):]
+        (vvlen,) = struct.unpack_from("<I", data, 0)
+        new_checkpoint.data += data[0 : 4 + 8*vvlen]
+        data = data[4 + 8*vvlen:]
+
+        for i in range(len(data) // 16):
+            (start, end) = struct.unpack_from("<QQ", data, 16*i)
@@ -302,47 +512,71 @@ def rewrite_inode(backend, inode_map, inum, log):
-def rewrite_inode(backend, inode_map, inum, log):
+def rewrite_inode(backend, inode_map, inum, log, copy_data=True):
     inode = inode_map.inodes[inum]
-    blocks = []
-    for l in inode.links:
-        data = load_item(backend, l[1])
-        blocks.append(data)
-        log.write(data, 0)
-    inode.links = [(b.id, b.location) for b in blocks]
+    if copy_data:
+        blocks = []
+        for l in inode.links:
+            data = load_item(backend, l[1])
+            blocks.append(data)
+            log.write(data, 0)
+        inode.links = [(b.id, b.location) for b in blocks]
     log.write(inode, 1)
     inode_map.mark_updated(inum)
 
-def run_cleaner(backend, inode_map, log):
+def run_cleaner(backend, inode_map, log, repack_inodes=False):
     # Determine which segments are poorly utilized and should be cleaned.  We
     # need better heuristics here.
     for (s, u) in sorted(inode_map.util.segments.items()):
-        if float(u[1]) / u[0] < 0.6:
+        if (float(u[1]) / u[0] < 0.6) and u[1] > 0:
             print "Should clean segment", s
             loc = backend.name_to_loc(s)
             if s: inode_map.obsolete_segments.add(loc)
 
+    # TODO: We probably also want heuristics that will find inodes with
+    # badly-fragmented data and rewrite that to achieve better locality.
+
     # Given that list of segments to clean, scan through those segments to find
     # data which is still live and mark relevant inodes as needing to be
     # rewritten.
-    dirty_inodes = set()
+    if repack_inodes:
+        dirty_inodes = set(inode_map.inodes)
+    else:
+        dirty_inodes = set()
+    dirty_inode_data = set()
     for s in inode_map.obsolete_segments:
         filename = backend.loc_to_name(s)
         print "Scanning", filename, "for live data"
         for item in parse_log(backend.read(filename), filename):
             if item.type in (ITEM_TYPE.DATA, ITEM_TYPE.INODE):
                 if item.inum != 0:
-                    dirty_inodes.add(item.inum)
+                    inode = inode_map.inodes[item.inum]
+                    if s == inode.location[0:2]:
+                        dirty_inodes.add(item.inum)
+                    if item.inum not in dirty_inode_data:
+                        for b in inode.links:
+                            if s == b[1][0:2]:
+                                dirty_inode_data.add(item.inum)
+                                break
 
     print "Inodes to rewrite:", dirty_inodes
-    for i in sorted(dirty_inodes):
-        rewrite_inode(backend, inode_map, i, log)
+    print "Inodes with data to rewrite:", dirty_inode_data
+    for i in sorted(dirty_inodes.union(dirty_inode_data)):
+        rewrite_inode(backend, inode_map, i, log, i in dirty_inode_data)
 
 if __name__ == '__main__':
-    backend = FileBackend(".")
+    start_time = time.time()
+    backend = S3Backend("mvrable-bluesky-west", cachedir="/tmp/bluesky-cache")
+    #backend = FileBackend(".")
     chkpt = load_checkpoint_record(backend)
+    print backend.list()
     imap = InodeMap()
     imap.build(backend, chkpt)
     print chkpt
 
-    log_dir = LogDirectory(backend, 0)
+    log_dir = LogDirectory(backend, 1)
     run_cleaner(backend, imap, log_dir)
+    print "Version vector:", imap.version_vector
     imap.write(backend, log_dir)
     log_dir.close_all()
+    end_time = time.time()
+    print "Cleaner running time:", end_time - start_time
+    backend.dump_stats()
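The cleaning policy that emerges from run_cleaner and InodeMap.build above reduces to one predicate per segment: segments with no live data are deleted outright during build, and segments whose utilization (live bytes over total bytes) falls below 0.6 are queued for cleaning, after which any inode with items or data blocks in those segments is rewritten into the new log directory. A standalone restatement of the segment policy (the thresholds come from the diff; the function name and labels are invented for this sketch):

    def classify_segment(total_bytes, live_bytes, threshold=0.6):
        # Mirrors the policy above: dead segments are deleted,
        # poorly utilized ones are cleaned, the rest are left alone.
        if total_bytes == 0:
            return 'empty'
        if live_bytes == 0:
            return 'delete'         # InodeMap.build deletes these
        if float(live_bytes) / total_bytes < threshold:
            return 'clean'          # run_cleaner rewrites the live items
        return 'keep'

    for seg in [(4096, 0), (4096, 1024), (4096, 4000)]:
        print seg, classify_segment(*seg)
    # (4096, 0) delete
    # (4096, 1024) clean            (utilization 0.25)
    # (4096, 4000) keep             (utilization ~0.98)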