Add an S3 backend (with retries) to the cleaner, and parse the new encrypted log-item header format.
diff --git a/cleaner/cleaner b/cleaner/cleaner
index b018d0d..9103769 100755
--- a/cleaner/cleaner
+++ b/cleaner/cleaner
 
 import base64, os, re, struct, sys
 import boto
+from boto.s3.key import Key
 
 # The BlueSky 'struct cloudlog_header' data type.
-HEADER_FORMAT = '<4sb16sQIII'
-HEADER_MAGIC = 'AgI-'
+HEADER_FORMAT = '<4s48sb16sQIII'
+HEADER_CRYPTBYTES = 48
+HEADER_MAGIC1 = 'AgI-'          # Unencrypted data
+HEADER_MAGIC2 = 'AgI='          # Encrypted data
 HEADER_SIZE = struct.calcsize(HEADER_FORMAT)
 
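For reference, the widened header unpacks into the fields below; the numeric indices match the header[...] accesses in parse_item and parse_log further down (an annotation, not part of the patch):

    #   header[0]  4s   magic: 'AgI-' (plaintext) or 'AgI=' (encrypted)
    #   header[1]  48s  cryptkeys (HEADER_CRYPTBYTES of key material)
    #   header[2]  b    item type byte
    #   header[3]  16s  item id
    #   header[4]  Q    inode number
    #   header[5]  I    len(data)
    #   header[6]  I    len(link_ids)
    #   header[7]  I    len(link_locs)
    # struct.calcsize(HEADER_FORMAT) == 89 (was 41 with the old format)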
 class ITEM_TYPE:
@@ -40,9 +43,14 @@ class FileBackend:
         return [(f, os.stat(os.path.join(self.path, f)).st_size)
                 for f in files]
 
-    def read(self, filename):
+    def read(self, filename, offset=0, length=None):
         fp = open(os.path.join(self.path, filename), 'rb')
-        return fp.read()
+        if offset > 0:
+            fp.seek(offset)
+        if length is None:
+            return fp.read()
+        else:
+            return fp.read(length)
 
     def write(self, filename, data):
         fp = open(os.path.join(self.path, filename), 'wb')
@@ -59,11 +67,93 @@ class FileBackend:
         m = re.match(r"^log-(\d+)-(\d+)$", name)
         if m: return (int(m.group(1)), int(m.group(2)))
 
+def retry_wrap(method):
+    """Decorator: retry a failed S3 operation, reconnecting between tries.
+
+    Three attempts happen inside the loop; a final attempt outside the
+    loop lets a persistent failure propagate to the caller."""
+    def wrapped(self, *args, **kwargs):
+        for retries in range(3):
+            try:
+                return method(self, *args, **kwargs)
+            except Exception:
+                print >>sys.stderr, "S3 operation failed, retrying..."
+                self.connect()
+        return method(self, *args, **kwargs)
+    return wrapped
+
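A minimal sketch of the retry behavior on a stand-in class (Flaky is hypothetical, for illustration only; the real decorator reconnects to S3 between attempts):

    class Flaky:
        def __init__(self):
            self.calls = 0
        def connect(self):
            pass                    # stand-in for S3Backend.connect
        @retry_wrap
        def op(self):
            self.calls += 1
            if self.calls < 3:
                raise IOError("transient failure")
            return "ok"

    print Flaky().op()              # succeeds on the third attempt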
+class S3Backend:
+    """An interface to BlueSky where the log segments are on in Amazon S3."""
+
+    def __init__(self, bucket, path='', cachedir="."):
+        self.bucket_name = bucket
+        self.path = path
+        self.cachedir = cachedir
+        self.cache = {}
+        self.connect()
+
+    def connect(self):
+        self.conn = boto.connect_s3(is_secure=False)
+        self.bucket = self.conn.get_bucket(self.bucket_name)
+
+    def list(self):
+        files = []
+        for k in self.bucket.list(self.path + 'log-'):
+            files.append((k.key, k.size))
+        return files
+
+    @retry_wrap
+    def read(self, filename, offset=0, length=None):
+        if filename in self.cache:
+            fp = open(os.path.join(self.cachedir, filename), 'rb')
+            if offset > 0:
+                fp.seek(offset)
+            if length is None:
+                return fp.read()
+            else:
+                return fp.read(length)
+        else:
+            k = Key(self.bucket)
+            k.key = self.path + filename
+            data = k.get_contents_as_string()
+            fp = open(os.path.join(self.cachedir, filename), 'wb')
+            fp.write(data)
+            fp.close()
+            self.cache[filename] = True
+            if offset > 0:
+                data = data[offset:]
+            if length is not None:
+                data = data[0:length]
+            return data
+
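Note that a cache miss always downloads the whole segment before slicing out the requested range. If that ever matters, boto can fetch just the needed bytes; a sketch assuming boto forwards custom headers on the GET (read_range is a hypothetical helper and bypasses the local cache):

    def read_range(bucket, key_name, offset=0, length=None):
        # Ask S3 for only the requested bytes via an HTTP Range header.
        k = Key(bucket)
        k.key = key_name
        end = '' if length is None else str(offset + length - 1)
        return k.get_contents_as_string(
            headers={'Range': 'bytes=%d-%s' % (offset, end)})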
+    @retry_wrap
+    def write(self, filename, data):
+        k = Key(self.bucket)
+        k.key = self.path + filename
+        k.set_contents_from_string(data)
+        if filename in self.cache:
+            del self.cache[filename]
+
+    @retry_wrap
+    def delete(self, filename):
+        k = Key(self.bucket)
+        k.key = self.path + filename
+        k.delete()
+        if filename in self.cache:
+            del self.cache[filename]
+
+    def loc_to_name(self, location):
+        return "log-%08d-%08d" % (location)
+
+    def name_to_loc(self, name):
+        m = re.match(r"^log-(\d+)-(\d+)$", name)
+        if m: return (int(m.group(1)), int(m.group(2)))
+
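Example use of the new backend (bucket name hypothetical; boto reads AWS credentials from the environment, and cachedir must already exist):

    backend = S3Backend("my-bluesky-bucket", cachedir="/tmp/bluesky-cache")
    for (name, size) in backend.list():
        print name, size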
 class LogItem:
     """In-memory representation of a single item stored in a log file."""
 
+    def __init__(self):
+        self.cryptkeys = '\0' * HEADER_CRYPTBYTES
+        self.encrypted = False
+
     def __str__(self):
-        return "<Item ty=%s location=%s size=%d id=%s...>" % (self.type, self.location, self.size, base64.b16encode(self.id).lower()[0:8])
+        return "<Item%s ty=%s location=%s size=%d id=%s...>" % (self.encrypted and '$' or '', self.type, self.location, self.size, base64.b16encode(self.id).lower()[0:8])
 
     @staticmethod
     def random_id():
@@ -79,8 +169,13 @@ class LogItem:
         link_ids = ''.join(link_ids)
         link_locs = ''.join(link_locs)
 
+        if self.encrypted:
+            magic = HEADER_MAGIC2
+        else:
+            magic = HEADER_MAGIC1
         header = struct.pack(HEADER_FORMAT,
-                             HEADER_MAGIC, ord(self.type), self.id, self.inum,
+                             magic, self.cryptkeys,
+                             ord(self.type), self.id, self.inum,
                              len(self.data), len(link_ids), len(link_locs))
         return header + self.data + link_ids + link_locs
 
@@ -159,28 +254,30 @@ class UtilizationTracker:
 def parse_item(data):
     if len(data) < HEADER_SIZE: return
     header = struct.unpack_from(HEADER_FORMAT, data, 0)
-    size = HEADER_SIZE + sum(header[4:7])
+    size = HEADER_SIZE + sum(header[5:8])
 
-    if header[0] != HEADER_MAGIC:
+    if header[0] not in (HEADER_MAGIC1, HEADER_MAGIC2):
         print "Bad header magic!"
         return
 
     if len(data) != size:
-        print "Item size does not match!"
+        print "Item size does not match: %d != %d" % (size, len(data))
         return
 
     item = LogItem()
-    item.id = header[2]
-    item.inum = header[3]
+    if header[0] == HEADER_MAGIC2: item.encrypted = True
+    item.cryptkeys = header[1]
+    item.id = header[3]
+    item.inum = header[4]
     item.location = None
-    item.type = chr(header[1])
+    item.type = chr(header[2])
     item.size = size
-    item.data = data[HEADER_SIZE : HEADER_SIZE + header[4]]
+    item.data = data[HEADER_SIZE : HEADER_SIZE + header[5]]
     links = []
-    link_ids = data[HEADER_SIZE + header[4]
-                    : HEADER_SIZE + header[4] + header[5]]
-    link_locs = data[HEADER_SIZE + header[4] + header[5]
-                     : HEADER_SIZE + sum(header[4:7])]
+    link_ids = data[HEADER_SIZE + header[5]
+                    : HEADER_SIZE + header[5] + header[6]]
+    link_locs = data[HEADER_SIZE + header[5] + header[6]
+                     : HEADER_SIZE + sum(header[5:8])]
     for i in range(len(link_ids) // 16):
         id = link_ids[16*i : 16*i + 16]
         if id == '\0' * 16:
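A self-contained sanity check of the new layout against parse_item (values hypothetical):

    raw = struct.pack(HEADER_FORMAT, HEADER_MAGIC1, '\0' * HEADER_CRYPTBYTES,
                      ord('D'), '\x01' * 16, 7, 5, 0, 0) + 'hello'
    print parse_item(raw)   # <Item ty=D location=None size=94 id=01010101...>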
@@ -198,7 +295,7 @@ def load_item(backend, location):
     The elements of the tuple are (directory, sequence, offset, size)."""
 
     filename = backend.loc_to_name((location[0], location[1]))
-    data = backend.read(filename)[location[2] : location[2] + location[3]]
+    data = backend.read(filename, location[2], location[3])
     item = parse_item(data)
     item.location = location
     return item
@@ -216,8 +313,8 @@ def parse_log(data, location=None):
     offset = 0
     while len(data) - offset >= HEADER_SIZE:
         header = struct.unpack_from(HEADER_FORMAT, data, offset)
-        size = HEADER_SIZE + sum(header[4:7])
-        if header[0] != HEADER_MAGIC:
+        size = HEADER_SIZE + sum(header[5:8])
+        if header[0] not in (HEADER_MAGIC1, HEADER_MAGIC2):
             print "Bad header magic!"
             break
         if size + offset > len(data):
@@ -232,6 +329,7 @@ def parse_log(data, location=None):
 def load_checkpoint_record(backend):
     for (log, size) in reversed(backend.list()):
         for item in reversed(list(parse_log(backend.read(log), log))):
+            print item
             if item.type == ITEM_TYPE.CHECKPOINT:
                 return item
 
@@ -342,11 +440,11 @@ def rewrite_inode(backend, inode_map, inum, log, copy_data=True):
     log.write(inode, 1)
     inode_map.mark_updated(inum)
 
-def run_cleaner(backend, inode_map, log):
+def run_cleaner(backend, inode_map, log, repack_inodes=False):
     # Determine which segments are poorly utilized and should be cleaned.  We
     # need better heuristics here.
     for (s, u) in sorted(inode_map.util.segments.items()):
-        if float(u[1]) / u[0] < 0.6 and u[1] > 0:
+        if (float(u[1]) / u[0] < 0.6 or u[1] < 32768) and u[1] > 0:
             print "Should clean segment", s
             loc = backend.name_to_loc(s)
             if s: inode_map.obsolete_segments.add(loc)
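For concreteness, assuming the utilization pair is u = (total_bytes, live_bytes), the widened test also catches segments with little live data, not just poorly utilized ones (arithmetic sketch, not program output):

    # clean when (live/total < 0.6 or live < 32768) and live > 0
    u = (1048576, 524288)    # 50% utilized             -> cleaned (ratio)
    u = (1048576, 917504)    # 87.5% utilized, 896 KiB  -> kept
    u = (40960, 24576)       # 60% utilized, 24 KiB     -> cleaned (small)
    u = (1048576, 0)         # fully dead               -> skipped (u[1] > 0)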
@@ -357,7 +455,10 @@ def run_cleaner(backend, inode_map, log):
     # Given that list of segments to clean, scan through those segments to find
     # data which is still live and mark relevant inodes as needing to be
     # rewritten.
-    dirty_inodes = set()
+    if repack_inodes:
+        dirty_inodes = set(inode_map.inodes)
+    else:
+        dirty_inodes = set()
     dirty_inode_data = set()
     for s in inode_map.obsolete_segments:
         filename = backend.loc_to_name(s)
@@ -380,8 +481,9 @@ def run_cleaner(backend, inode_map, log):
         rewrite_inode(backend, inode_map, i, log, i in dirty_inode_data)
 
 if __name__ == '__main__':
-    backend = FileBackend(".")
+    backend = S3Backend("mvrable-bluesky", cachedir=".")
     chkpt = load_checkpoint_record(backend)
+    print backend.list()
     imap = InodeMap()
     imap.build(backend, chkpt)
     print chkpt