Add S3 backend for the cleaner.
[bluesky.git] / cleaner / cleaner
index 42aef01..4267b6e 100755 (executable)
@@ -10,6 +10,7 @@
 
 import base64, os, re, struct, sys
 import boto
+from boto.s3.key import Key
 
 # The BlueSky 'struct cloudlog_header' data type.
 HEADER_FORMAT = '<4sb16sQIII'
@@ -49,6 +50,52 @@ class FileBackend:
         fp.write(data)
         fp.close()
 
+    def delete(self, filename):
+        os.unlink(os.path.join(self.path, filename))
+
+    def loc_to_name(self, location):
+        return "log-%08d-%08d" % (location)
+
+    def name_to_loc(self, name):
+        m = re.match(r"^log-(\d+)-(\d+)$", name)
+        if m: return (int(m.group(1)), int(m.group(2)))
+
+class S3Backend:
+    """An interface to BlueSky where the log segments are in Amazon S3."""
+
+    def __init__(self, bucket, path='', cachedir=None):  # NOTE(review): cachedir is accepted but never used — implement local caching or drop the parameter
+        self.conn = boto.connect_s3(is_secure=False)  # NOTE(review): plain-HTTP S3 connection; use is_secure=True unless this is deliberate for benchmarking
+        self.bucket = self.conn.get_bucket(bucket)
+        self.path = path
+
+    def list(self):
+        files = []
+        for k in self.bucket.list(self.path + 'log-'):  # NOTE(review): keys returned here include self.path, but name_to_loc/read expect bare names — verify behavior when path != ''
+            files.append((k.key, k.size))
+        return files
+
+    def read(self, filename):
+        k = Key(self.bucket)
+        k.key = self.path + filename
+        return k.get_contents_as_string()
+
+    def write(self, filename, data):
+        k = Key(self.bucket)
+        k.key = self.path + filename
+        k.set_contents_from_string(data)
+
+    def delete(self, filename):
+        k = Key(self.bucket)
+        k.key = self.path + filename
+        k.delete()
+
+    def loc_to_name(self, location):
+        return "log-%08d-%08d" % (location)
+
+    def name_to_loc(self, name):
+        m = re.match(r"^log-(\d+)-(\d+)$", name)
+        if m: return (int(m.group(1)), int(m.group(2)))
+
 class LogItem:
     """In-memory representation of a single item stored in a log file."""
 
@@ -91,18 +138,23 @@ class LogSegment:
 
     def close(self):
         data = ''.join(self.data)
-        filename = "log-%08d-%08d" % (self.location)
+        filename = self.backend.loc_to_name(self.location)
         print "Would write %d bytes of data to %s" % (len(data), filename)
         self.backend.write(filename, data)
 
 class LogDirectory:
     TARGET_SIZE = 4 << 20
 
-    def __init__(self, backend, dir, seq):
+    def __init__(self, backend, dir):
         self.backend = backend
         self.dir_num = dir
-        self.seq_num = seq
+        self.seq_num = 0
+        for logname in backend.list():
+            loc = backend.name_to_loc(logname[0])
+            if loc is not None and loc[0] == dir:
+                self.seq_num = max(self.seq_num, loc[1] + 1)
         self.groups = {}
+        print "Starting sequence number is", self.seq_num
 
     def open_segment(self):
         seg = LogSegment(self.backend, (self.dir_num, self.seq_num))
@@ -182,7 +234,7 @@ def load_item(backend, location):
 
     The elements of the tuple are (directory, sequence, offset, size)."""
 
-    filename = "log-%08d-%08d" % (location[0], location[1])
+    filename = backend.loc_to_name((location[0], location[1]))
     data = backend.read(filename)[location[2] : location[2] + location[3]]
     item = parse_item(data)
     item.location = location
@@ -217,6 +269,7 @@ def parse_log(data, location=None):
 def load_checkpoint_record(backend):
     for (log, size) in reversed(backend.list()):
         for item in reversed(list(parse_log(backend.read(log), log))):
+            print item
             if item.type == ITEM_TYPE.CHECKPOINT:
                 return item
 
@@ -256,8 +309,10 @@ class InodeMap:
         print
         print "Segment utilizations:"
         for (s, u) in sorted(util.segments.items()):
-            #if u[1] > 0:
             print "%s: %s %s" % (s, u, float(u[1]) / u[0])
+            if u[1] == 0:
+                print "Deleting..."
+                backend.delete(s)
 
         self.inodes = inodes
         self.util = util
@@ -313,14 +368,15 @@ class InodeMap:
         log.write(new_checkpoint, 2)
         self.checkpoint_record = new_checkpoint
 
-def rewrite_inode(backend, inode_map, inum, log):
+def rewrite_inode(backend, inode_map, inum, log, copy_data=True):
     inode = inode_map.inodes[inum]
-    blocks = []
-    for l in inode.links:
-        data = load_item(backend, l[1])
-        blocks.append(data)
-        log.write(data, 0)
-    inode.links = [(b.id, b.location) for b in blocks]
+    if copy_data:
+        blocks = []
+        for l in inode.links:
+            data = load_item(backend, l[1])
+            blocks.append(data)
+            log.write(data, 0)
+        inode.links = [(b.id, b.location) for b in blocks]
     log.write(inode, 1)
     inode_map.mark_updated(inum)
 
@@ -328,35 +384,48 @@ def run_cleaner(backend, inode_map, log):
     # Determine which segments are poorly utilized and should be cleaned.  We
     # need better heuristics here.
     for (s, u) in sorted(inode_map.util.segments.items()):
-        if float(u[1]) / u[0] < 0.99 and u[1] > 0:
+        if (float(u[1]) / u[0] < 0.6 or u[1] < 32768) and u[1] > 0:
             print "Should clean segment", s
-            m = re.match(r"^log-(\d+)-(\d+)$", s)
-            if m: inode_map.obsolete_segments.add((int(m.group(1)), int(m.group(2))))
+            loc = backend.name_to_loc(s)
+            if loc is not None: inode_map.obsolete_segments.add(loc)
+
+    # TODO: We probably also want heuristics that will find inodes with
+    # badly-fragmented data and rewrite that to achieve better locality.
 
     # Given that list of segments to clean, scan through those segments to find
     # data which is still live and mark relevant inodes as needing to be
     # rewritten.
     dirty_inodes = set()
+    dirty_inode_data = set()
     for s in inode_map.obsolete_segments:
-        filename = "log-%08d-%08d" % s
+        filename = backend.loc_to_name(s)
         print "Scanning", filename, "for live data"
         for item in parse_log(backend.read(filename), filename):
             if item.type in (ITEM_TYPE.DATA, ITEM_TYPE.INODE):
                 if item.inum != 0:
-                    dirty_inodes.add(item.inum)
+                    inode = inode_map.inodes[item.inum]
+                    if s == inode.location[0:2]:
+                        dirty_inodes.add(item.inum)
+                    if item.inum not in dirty_inode_data:
+                        for b in inode.links:
+                            if s == b[1][0:2]:
+                                dirty_inode_data.add(item.inum)
+                                break
 
     print "Inodes to rewrite:", dirty_inodes
-    for i in sorted(dirty_inodes):
-        rewrite_inode(backend, inode_map, i, log)
+    print "Inodes with data to rewrite:", dirty_inode_data
+    for i in sorted(dirty_inodes.union(dirty_inode_data)):
+        rewrite_inode(backend, inode_map, i, log, i in dirty_inode_data)
 
 if __name__ == '__main__':
-    backend = FileBackend(".")
+    backend = S3Backend("mvrable-bluesky", cachedir=".")  # TODO(review): bucket name is hardcoded to a personal bucket — take from argv or environment
     chkpt = load_checkpoint_record(backend)
+    print backend.list()
     imap = InodeMap()
     imap.build(backend, chkpt)
     print chkpt
 
-    log_dir = LogDirectory(backend, 1, 0)
+    log_dir = LogDirectory(backend, 0)
     run_cleaner(backend, imap, log_dir)
     imap.write(backend, log_dir)
     log_dir.close_all()