Add retries to the S3 backend in the cleaner.
diff --git a/cleaner/cleaner b/cleaner/cleaner
index e249ed2..9103769 100755
--- a/cleaner/cleaner
+++ b/cleaner/cleaner
 
 import base64, os, re, struct, sys
 import boto
+from boto.s3.key import Key
 
 # The BlueSky 'struct cloudlog_header' data type.
-HEADER_FORMAT = '<4sb16sQIII'
-HEADER_MAGIC = 'AgI-'
+HEADER_FORMAT = '<4s48sb16sQIII'
+HEADER_CRYPTBYTES = 48
+HEADER_MAGIC1 = 'AgI-'          # Unencrypted data
+HEADER_MAGIC2 = 'AgI='          # Encrypted data
 HEADER_SIZE = struct.calcsize(HEADER_FORMAT)
 
 class ITEM_TYPE:
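
The header grows from the old '<4sb16sQIII' layout by a 48-byte cryptkeys field between the magic and the type byte, for 89 bytes total. A minimal round-trip sketch of the new layout (an illustration, not part of the patch):

    import struct
    FMT = '<4s48sb16sQIII'   # magic, cryptkeys, type, id, inum, three lengths
    assert struct.calcsize(FMT) == 4 + 48 + 1 + 16 + 8 + 3 * 4   # 89 bytes
    hdr = struct.pack(FMT, 'AgI-', '\0' * 48, ord('C'),
                      '\x11' * 16, 5, 100, 0, 0)
    magic, cryptkeys, ty, ident, inum, n_data, n_ids, n_locs = \
        struct.unpack(FMT, hdr)
    assert magic == 'AgI-' and chr(ty) == 'C' and n_data == 100
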
@@ -40,20 +43,117 @@ class FileBackend:
         return [(f, os.stat(os.path.join(self.path, f)).st_size)
                 for f in files]
 
-    def read(self, filename):
+    def read(self, filename, offset=0, length=None):
         fp = open(os.path.join(self.path, filename), 'rb')
-        return fp.read()
+        if offset > 0:
+            fp.seek(offset)
+        if length is None:
+            return fp.read()
+        else:
+            return fp.read(length)
 
     def write(self, filename, data):
         fp = open(os.path.join(self.path, filename), 'wb')
         fp.write(data)
         fp.close()
 
+    def delete(self, filename):
+        os.unlink(os.path.join(self.path, filename))
+
+    def loc_to_name(self, location):
+        return "log-%08d-%08d" % (location)
+
+    def name_to_loc(self, name):
+        m = re.match(r"^log-(\d+)-(\d+)$", name)
+        if m: return (int(m.group(1)), int(m.group(2)))
+
+def retry_wrap(method):
+    """Retry a failed S3 operation up to three times, reconnecting between
+    attempts; a final fourth attempt is made outside the loop so that a
+    persistent failure propagates to the caller."""
+    def wrapped(self, *args, **kwargs):
+        for retries in range(3):
+            try:
+                return method(self, *args, **kwargs)
+            except Exception, e:
+                print >>sys.stderr, "S3 operation failed (%s), retrying..." % e
+                self.connect()
+        return method(self, *args, **kwargs)
+    return wrapped
+
+class S3Backend:
+    """An interface to BlueSky where the log segments are on in Amazon S3."""
+
+    def __init__(self, bucket, path='', cachedir="."):
+        self.bucket_name = bucket
+        self.path = path
+        self.cachedir = cachedir
+        self.cache = {}
+        self.connect()
+
+    def connect(self):
+        self.conn = boto.connect_s3(is_secure=False)
+        self.bucket = self.conn.get_bucket(self.bucket_name)
+
+    def list(self):
+        files = []
+        for k in self.bucket.list(self.path + 'log-'):
+            files.append((k.key, k.size))
+        return files
+
+    @retry_wrap
+    def read(self, filename, offset=0, length=None):
+        if filename in self.cache:
+            fp = open(os.path.join(self.cachedir, filename), 'rb')
+            if offset > 0:
+                fp.seek(offset)
+            if length is None:
+                return fp.read()
+            else:
+                return fp.read(length)
+        else:
+            k = Key(self.bucket)
+            k.key = self.path + filename
+            data = k.get_contents_as_string()
+            fp = open(os.path.join(self.cachedir, filename), 'wb')
+            fp.write(data)
+            fp.close()
+            self.cache[filename] = True
+            if offset > 0:
+                data = data[offset:]
+            if length is not None:
+                data = data[0:length]
+            return data
+
+    @retry_wrap
+    def write(self, filename, data):
+        k = Key(self.bucket)
+        k.key = self.path + filename
+        k.set_contents_from_string(data)
+        if filename in self.cache:
+            del self.cache[filename]
+
+    @retry_wrap
+    def delete(self, filename):
+        k = Key(self.bucket)
+        k.key = self.path + filename
+        k.delete()
+        if filename in self.cache:
+            del self.cache[filename]
+
+    def loc_to_name(self, location):
+        return "log-%08d-%08d" % (location)
+
+    def name_to_loc(self, name):
+        m = re.match(r"^log-(\d+)-(\d+)$", name)
+        if m: return (int(m.group(1)), int(m.group(2)))
+
 class LogItem:
     """In-memory representation of a single item stored in a log file."""
 
+    def __init__(self):
+        self.cryptkeys = '\0' * HEADER_CRYPTBYTES
+        self.encrypted = False
+
     def __str__(self):
-        return "<Item ty=%s location=%s size=%d id=%s...>" % (self.type, self.location, self.size, base64.b16encode(self.id).lower()[0:8])
+        return "<Item%s ty=%s location=%s size=%d id=%s...>" % (self.encrypted and '$' or '', self.type, self.location, self.size, base64.b16encode(self.id).lower()[0:8])
 
     @staticmethod
     def random_id():
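
The decorator gives each S3 call three guarded attempts, reconnecting after each failure, plus a final unguarded attempt so a persistent error still propagates. A sketch of that behavior with a hypothetical flaky method (illustration only, not part of the patch):

    class Flaky:
        failures = 2                    # hypothetical: fail twice, then succeed
        def connect(self):
            pass                        # stands in for re-opening the connection
        @retry_wrap
        def fetch(self):
            if self.failures > 0:
                self.failures -= 1
                raise IOError("transient error")
            return "ok"

    print Flaky().fetch()               # two retry messages on stderr, then 'ok'
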
@@ -69,8 +169,13 @@ class LogItem:
         link_ids = ''.join(link_ids)
         link_locs = ''.join(link_locs)
 
+        if self.encrypted:
+            magic = HEADER_MAGIC2
+        else:
+            magic = HEADER_MAGIC1
         header = struct.pack(HEADER_FORMAT,
-                             HEADER_MAGIC, ord(self.type), self.id, self.inum,
+                             magic, self.cryptkeys,
+                             ord(self.type), self.id, self.inum,
                              len(self.data), len(link_ids), len(link_locs))
         return header + self.data + link_ids + link_locs
 
@@ -91,18 +196,23 @@ class LogSegment:
 
     def close(self):
         data = ''.join(self.data)
-        filename = "log-%08d-%08d" % (self.location)
+        filename = self.backend.loc_to_name(self.location)
         print "Would write %d bytes of data to %s" % (len(data), filename)
         self.backend.write(filename, data)
 
 class LogDirectory:
     TARGET_SIZE = 4 << 20
 
-    def __init__(self, backend, dir, seq):
+    def __init__(self, backend, dir):
         self.backend = backend
         self.dir_num = dir
-        self.seq_num = seq
+        self.seq_num = 0
+        for logname in backend.list():
+            loc = backend.name_to_loc(logname[0])
+            if loc is not None and loc[0] == dir:
+                self.seq_num = max(self.seq_num, loc[1] + 1)
         self.groups = {}
+        print "Starting sequence number is", self.seq_num
 
     def open_segment(self):
         seg = LogSegment(self.backend, (self.dir_num, self.seq_num))
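
The constructor now scans the backend listing to pick the next free sequence number instead of taking it as a parameter. The name helpers it relies on round-trip as follows (a sketch, not part of the patch):

    backend = FileBackend(".")
    assert backend.loc_to_name((0, 3)) == "log-00000000-00000003"
    assert backend.name_to_loc("log-00000000-00000003") == (0, 3)
    # If directory 0 already contains log-00000000-00000003, then
    # LogDirectory(backend, 0) starts writing at sequence number 4.
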
@@ -144,28 +254,30 @@ class UtilizationTracker:
 def parse_item(data):
     if len(data) < HEADER_SIZE: return
     header = struct.unpack_from(HEADER_FORMAT, data, 0)
-    size = HEADER_SIZE + sum(header[4:7])
+    size = HEADER_SIZE + sum(header[5:8])
 
-    if header[0] != HEADER_MAGIC:
+    if header[0] not in (HEADER_MAGIC1, HEADER_MAGIC2):
         print "Bad header magic!"
         return
 
     if len(data) != size:
-        print "Item size does not match!"
+        print "Item size does not match: %d != %d" % (size, len(data))
         return
 
     item = LogItem()
-    item.id = header[2]
-    item.inum = header[3]
+    if header[0] == HEADER_MAGIC2: item.encrypted = True
+    item.cryptkeys = header[1]
+    item.id = header[3]
+    item.inum = header[4]
     item.location = None
-    item.type = chr(header[1])
+    item.type = chr(header[2])
     item.size = size
-    item.data = data[HEADER_SIZE : HEADER_SIZE + header[4]]
+    item.data = data[HEADER_SIZE : HEADER_SIZE + header[5]]
     links = []
-    link_ids = data[HEADER_SIZE + header[4]
-                    : HEADER_SIZE + header[4] + header[5]]
-    link_locs = data[HEADER_SIZE + header[4] + header[5]
-                     : HEADER_SIZE + sum(header[4:7])]
+    link_ids = data[HEADER_SIZE + header[5]
+                    : HEADER_SIZE + header[5] + header[6]]
+    link_locs = data[HEADER_SIZE + header[5] + header[6]
+                     : HEADER_SIZE + sum(header[5:8])]
     for i in range(len(link_ids) // 16):
         id = link_ids[16*i : 16*i + 16]
         if id == '\0' * 16:
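
Every index past the magic shifts by one to make room for the cryptkeys field; for reference, the unpacked header tuple is now laid out as follows (annotation, not part of the patch):

    header = struct.unpack_from(HEADER_FORMAT, data, 0)
    # header[0]   magic: 'AgI-' unencrypted, 'AgI=' encrypted
    # header[1]   48 bytes of cryptkeys
    # header[2]   item type byte
    # header[3]   16-byte item id
    # header[4]   inode number
    # header[5:8] lengths of data, link ids, link locations
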
@@ -182,8 +294,8 @@ def load_item(backend, location):
 
     The elements of the tuple are (directory, sequence, offset, size)."""
 
-    filename = "log-%08d-%08d" % (location[0], location[1])
-    data = backend.read(filename)[location[2] : location[2] + location[3]]
+    filename = backend.loc_to_name((location[0], location[1]))
+    data = backend.read(filename, location[2], location[3])
     item = parse_item(data)
     item.location = location
     return item
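
load_item now pushes the byte range down into the backend rather than slicing a whole-file read, which lets S3Backend serve repeat reads from its local cache. The two forms are equivalent (a sketch, not part of the patch):

    loc = (0, 3, 1024, 89)    # hypothetical (directory, sequence, offset, size)
    name = backend.loc_to_name(loc[0:2])
    assert (backend.read(name)[loc[2] : loc[2] + loc[3]]
            == backend.read(name, loc[2], loc[3]))
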
@@ -201,8 +313,8 @@ def parse_log(data, location=None):
     offset = 0
     while len(data) - offset >= HEADER_SIZE:
         header = struct.unpack_from(HEADER_FORMAT, data, offset)
-        size = HEADER_SIZE + sum(header[4:7])
-        if header[0] != HEADER_MAGIC:
+        size = HEADER_SIZE + sum(header[5:8])
+        if header[0] not in (HEADER_MAGIC1, HEADER_MAGIC2):
             print "Bad header magic!"
             break
         if size + offset > len(data):
@@ -217,6 +329,7 @@ def parse_log(data, location=None):
 def load_checkpoint_record(backend):
     for (log, size) in reversed(backend.list()):
         for item in reversed(list(parse_log(backend.read(log), log))):
+            print item
             if item.type == ITEM_TYPE.CHECKPOINT:
                 return item
 
@@ -234,6 +347,7 @@ class InodeMap:
         util = UtilizationTracker(backend)
         util.add_item(checkpoint_record)
         inodes = {}
+        self.obsolete_segments = set()
 
         print "Inode map:"
         for i in range(len(checkpoint_record.data) // 16):
@@ -255,8 +369,10 @@ class InodeMap:
         print
         print "Segment utilizations:"
         for (s, u) in sorted(util.segments.items()):
-            #if u[1] > 0:
             print "%s: %s %s" % (s, u, float(u[1]) / u[0])
+            if u[1] == 0:
+                print "Deleting..."
+                backend.delete(s)
 
         self.inodes = inodes
         self.util = util
@@ -282,11 +398,11 @@ class InodeMap:
 
             # Case 1: No inodes in this range of the old inode map have
             # changed.  Simply emit a new pointer to the same inode map block.
-            # TODO: Add the ability to rewrite the inode map block if we choose
-            # to do so for cleaning, even if no inodes have changed.
             if len(updated_inodes) == 0 or updated_inodes[-1] > end:
-                new_checkpoint.links.append(self.checkpoint_record.links[i])
-                continue
+                old_location = self.checkpoint_record.links[i][1][0:2]
+                if old_location not in self.obsolete_segments:
+                    new_checkpoint.links.append(self.checkpoint_record.links[i])
+                    continue
 
             # Case 2: Some inodes have been updated.  Create a new inode map
             # block, write it out, and point the new checkpoint at it.
@@ -312,27 +428,67 @@ class InodeMap:
         log.write(new_checkpoint, 2)
         self.checkpoint_record = new_checkpoint
 
-def rewrite_inode(backend, inode_map, inum, log):
+def rewrite_inode(backend, inode_map, inum, log, copy_data=True):
     inode = inode_map.inodes[inum]
-    blocks = []
-    for l in inode.links:
-        data = load_item(backend, l[1])
-        blocks.append(data)
-        log.write(data, 0)
-    inode.links = [(b.id, b.location) for b in blocks]
+    if copy_data:
+        blocks = []
+        for l in inode.links:
+            data = load_item(backend, l[1])
+            blocks.append(data)
+            log.write(data, 0)
+        inode.links = [(b.id, b.location) for b in blocks]
     log.write(inode, 1)
     inode_map.mark_updated(inum)
 
+def run_cleaner(backend, inode_map, log, repack_inodes=False):
+    # Determine which segments are poorly utilized and should be cleaned.  We
+    # need better heuristics here.
+    for (s, u) in sorted(inode_map.util.segments.items()):
+        if (float(u[1]) / u[0] < 0.6 or u[1] < 32768) and u[1] > 0:
+            print "Should clean segment", s
+            loc = backend.name_to_loc(s)
+            if loc: inode_map.obsolete_segments.add(loc)
+
+    # TODO: We probably also want heuristics that will find inodes with
+    # badly-fragmented data and rewrite that to achieve better locality.
+
+    # Given that list of segments to clean, scan through those segments to find
+    # data which is still live and mark relevant inodes as needing to be
+    # rewritten.
+    if repack_inodes:
+        dirty_inodes = set(inode_map.inodes)
+    else:
+        dirty_inodes = set()
+    dirty_inode_data = set()
+    for s in inode_map.obsolete_segments:
+        filename = backend.loc_to_name(s)
+        print "Scanning", filename, "for live data"
+        for item in parse_log(backend.read(filename), filename):
+            if item.type in (ITEM_TYPE.DATA, ITEM_TYPE.INODE):
+                if item.inum != 0:
+                    inode = inode_map.inodes[item.inum]
+                    if s == inode.location[0:2]:
+                        dirty_inodes.add(item.inum)
+                    if item.inum not in dirty_inode_data:
+                        for b in inode.links:
+                            if s == b[1][0:2]:
+                                dirty_inode_data.add(item.inum)
+                                break
+
+    print "Inodes to rewrite:", dirty_inodes
+    print "Inodes with data to rewrite:", dirty_inode_data
+    for i in sorted(dirty_inodes.union(dirty_inode_data)):
+        rewrite_inode(backend, inode_map, i, log, i in dirty_inode_data)
+
 if __name__ == '__main__':
-    backend = FileBackend(".")
+    backend = S3Backend("mvrable-bluesky", cachedir=".")
     chkpt = load_checkpoint_record(backend)
+    print backend.list()
     imap = InodeMap()
     imap.build(backend, chkpt)
     print chkpt
 
-    print repr(chkpt.serialize())
-
-    log_dir = LogDirectory(backend, 1, 0)
-    rewrite_inode(backend, imap, 147, log_dir)
+    log_dir = LogDirectory(backend, 0)
+    run_cleaner(backend, imap, log_dir)
     imap.write(backend, log_dir)
     log_dir.close_all()
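
Since FileBackend now implements the same interface as S3Backend (ranged read, delete, and the log-name helpers), the same pipeline can be exercised against a local directory of log segments; a sketch, not part of the patch:

    backend = FileBackend(".")
    chkpt = load_checkpoint_record(backend)
    imap = InodeMap()
    imap.build(backend, chkpt)
    log_dir = LogDirectory(backend, 0)
    run_cleaner(backend, imap, log_dir)
    imap.write(backend, log_dir)
    log_dir.close_all()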