X-Git-Url: http://git.vrable.net/?a=blobdiff_plain;f=cleaner%2Fcleaner;h=58292e4ede17693a045151ce680310c22ba223c9;hb=bdb05ff1ad95ab25e13934f66c83452ef00119fe;hp=336537da3b50c7bacc5539c65a06893de6e02387;hpb=1d2357b567318ccc6172f4b9edbbadbeba713086;p=bluesky.git

diff --git a/cleaner/cleaner b/cleaner/cleaner
index 336537d..58292e4 100755
--- a/cleaner/cleaner
+++ b/cleaner/cleaner
@@ -13,7 +13,8 @@ import boto
 from boto.s3.key import Key
 
 # The BlueSky 'struct cloudlog_header' data type.
-HEADER_FORMAT = '<4sb16sQIII'
+HEADER_FORMAT = '<4s48sb16sQIII'
+HEADER_CRYPTBYTES = 48
 HEADER_MAGIC = 'AgI-'
 HEADER_SIZE = struct.calcsize(HEADER_FORMAT)
 
@@ -114,6 +115,9 @@ class S3Backend:
 class LogItem:
     """In-memory representation of a single item stored in a log file."""
 
+    def __init__(self):
+        self.cryptkeys = '\0' * HEADER_CRYPTBYTES
+
     def __str__(self):
         return "<Item ty=%s location=%s size=%d id=%s>" % (self.type, self.location, self.size, base64.b16encode(self.id).lower()[0:8])
 
@@ -132,7 +136,8 @@ class LogItem:
         link_locs = ''.join(link_locs)
 
         header = struct.pack(HEADER_FORMAT,
-                             HEADER_MAGIC, ord(self.type), self.id, self.inum,
+                             HEADER_MAGIC, self.cryptkeys,
+                             ord(self.type), self.id, self.inum,
                              len(self.data), len(link_ids), len(link_locs))
         return header + self.data + link_ids + link_locs
 
@@ -211,28 +216,29 @@ class UtilizationTracker:
 def parse_item(data):
     if len(data) < HEADER_SIZE: return
     header = struct.unpack_from(HEADER_FORMAT, data, 0)
-    size = HEADER_SIZE + sum(header[4:7])
+    size = HEADER_SIZE + sum(header[5:8])
 
     if header[0] != HEADER_MAGIC:
         print "Bad header magic!"
         return
 
     if len(data) != size:
-        print "Item size does not match!"
+        print "Item size does not match: %d != %d" % (size, len(data))
         return
 
     item = LogItem()
-    item.id = header[2]
-    item.inum = header[3]
+    item.cryptkeys = header[1]
+    item.id = header[3]
+    item.inum = header[4]
     item.location = None
-    item.type = chr(header[1])
+    item.type = chr(header[2])
     item.size = size
-    item.data = data[HEADER_SIZE : HEADER_SIZE + header[4]]
+    item.data = data[HEADER_SIZE : HEADER_SIZE + header[5]]
     links = []
-    link_ids = data[HEADER_SIZE + header[4]
-                    : HEADER_SIZE + header[4] + header[5]]
-    link_locs = data[HEADER_SIZE + header[4] + header[5]
-                     : HEADER_SIZE + sum(header[4:7])]
+    link_ids = data[HEADER_SIZE + header[5]
+                    : HEADER_SIZE + header[5] + header[6]]
+    link_locs = data[HEADER_SIZE + header[5] + header[6]
+                     : HEADER_SIZE + sum(header[5:8])]
     for i in range(len(link_ids) // 16):
         id = link_ids[16*i : 16*i + 16]
         if id == '\0' * 16:
@@ -268,7 +274,7 @@ def parse_log(data, location=None):
     offset = 0
     while len(data) - offset >= HEADER_SIZE:
         header = struct.unpack_from(HEADER_FORMAT, data, offset)
-        size = HEADER_SIZE + sum(header[4:7])
+        size = HEADER_SIZE + sum(header[5:8])
         if header[0] != HEADER_MAGIC:
             print "Bad header magic!"
             break
@@ -395,7 +401,7 @@ def rewrite_inode(backend, inode_map, inum, log, copy_data=True):
     log.write(inode, 1)
     inode_map.mark_updated(inum)
 
-def run_cleaner(backend, inode_map, log):
+def run_cleaner(backend, inode_map, log, repack_inodes=False):
     # Determine which segments are poorly utilized and should be cleaned.  We
     # need better heuristics here.
     for (s, u) in sorted(inode_map.util.segments.items()):
@@ -410,7 +416,10 @@
     # Given that list of segments to clean, scan through those segments to find
     # data which is still live and mark relevant inodes as needing to be
     # rewritten.
-    dirty_inodes = set()
+    if repack_inodes:
+        dirty_inodes = set(inode_map.inodes)
+    else:
+        dirty_inodes = set()
     dirty_inode_data = set()
     for s in inode_map.obsolete_segments:
         filename = backend.loc_to_name(s)
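
A minimal standalone sketch (not part of the patch) of what the header change means for
readers of the format: the 48-byte cryptkeys field now sits between the magic and the
type byte, so every field after index 0 shifts up by one in the unpacked tuple. Written
in Python 2 to match the script; all field values here are made up.

    # Round-trip one header with the new format; values are arbitrary.
    import struct

    HEADER_FORMAT = '<4s48sb16sQIII'   # magic, cryptkeys, type, id, inum, 3 lengths
    HEADER_CRYPTBYTES = 48

    hdr = struct.pack(HEADER_FORMAT,
                      'AgI-', '\0' * HEADER_CRYPTBYTES, ord('I'),
                      '\x01' * 16, 42, 100, 32, 16)
    fields = struct.unpack(HEADER_FORMAT, hdr)
    assert fields[1] == '\0' * HEADER_CRYPTBYTES    # cryptkeys is the new index 1
    assert chr(fields[2]) == 'I'                    # type byte moved from 1 to 2
    assert sum(fields[5:8]) == 100 + 32 + 16        # payload size, as in parse_item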
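
And a toy illustration of the behavior the new repack_inodes flag gives run_cleaner:
when set, every inode in the map starts out dirty, forcing a full rewrite instead of
rewriting only the inodes touched by cleaned segments. FakeInodeMap is a hypothetical
stand-in, not the script's real InodeMap.

    # Hypothetical inode map; only the 'inodes' mapping matters here.
    class FakeInodeMap:
        inodes = {1: None, 2: None, 3: None}

    def initial_dirty_set(inode_map, repack_inodes=False):
        if repack_inodes:
            return set(inode_map.inodes)   # repack: rewrite every known inode
        else:
            return set()                   # default: filled in while scanning segments

    assert initial_dirty_set(FakeInodeMap()) == set()
    assert initial_dirty_set(FakeInodeMap(), True) == set([1, 2, 3])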