From 145bdaa2f6c76953f9abe4ec49aa04fed3aaef20 Mon Sep 17 00:00:00 2001
From: Michael Vrable <mvrable@cs.ucsd.edu>
Date: Wed, 8 Sep 2010 21:03:31 -0700
Subject: [PATCH 1/1] Updates to the Python cleaner prototype.

This can now read in the old inode maps, rewrite inode data, and write
out an updated inode map/checkpoint.
---
 cleaner/cleaner | 228 ++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 193 insertions(+), 35 deletions(-)

diff --git a/cleaner/cleaner b/cleaner/cleaner
index 7ebcc2b..e249ed2 100755
--- a/cleaner/cleaner
+++ b/cleaner/cleaner
@@ -8,7 +8,7 @@
 # Copyright (C) 2010  The Regents of the University of California
 # Written by Michael Vrable <mvrable@cs.ucsd.edu>
 
-import base64, os, struct, sys
+import base64, os, re, struct, sys
 import boto
 
 # The BlueSky 'struct cloudlog_header' data type.
@@ -44,12 +44,85 @@
         fp = open(os.path.join(self.path, filename), 'rb')
         return fp.read()
 
+    def write(self, filename, data):
+        fp = open(os.path.join(self.path, filename), 'wb')
+        fp.write(data)
+        fp.close()
+
 class LogItem:
     """In-memory representation of a single item stored in a log file."""
 
     def __str__(self):
         return "<Item ty=%s location=%s size=%d id=%s...>" % (self.type, self.location, self.size, base64.b16encode(self.id).lower()[0:8])
 
+    @staticmethod
+    def random_id():
+        return open('/dev/urandom').read(16)
+
+    def serialize(self):
+        link_ids = []
+        link_locs = []
+        for (i, l) in self.links:
+            link_ids.append(i)
+            if i != '\0' * 16:
+                link_locs.append(struct.pack('<IIII', *l))
+        link_ids = ''.join(link_ids)
+        link_locs = ''.join(link_locs)
+
+        header = struct.pack(HEADER_FORMAT,
+                             HEADER_MAGIC, ord(self.type), self.id, self.inum,
+                             len(self.data), len(link_ids), len(link_locs))
+        return header + self.data + link_ids + link_locs
+
+class LogSegment:
+    def __init__(self, backend, location):
+        self.backend = backend
+        self.location = location
+        self.data = []
+
+    def __len__(self):
+        return sum(len(s) for s in self.data)
+
+    def write(self, item):
+        data = item.serialize()
+        offset = len(self)
+        self.data.append(data)
+        item.location = self.location + (offset, len(data))
+
+    def close(self):
+        data = ''.join(self.data)
+        filename = "log-%04d-%04d" % self.location
+        print "Would write %d bytes of data to %s" % (len(data), filename)
+        self.backend.write(filename, data)
+
+class LogDirectory:
+    TARGET_SIZE = 4 << 20
+
+    def __init__(self, backend, dir, seq):
+        self.backend = backend
+        self.dir_num = dir
+        self.seq_num = seq
+        self.groups = {}
+
+    def open_segment(self):
+        seg = LogSegment(self.backend, (self.dir_num, self.seq_num))
+        self.seq_num += 1
+        return seg
+
+    def write(self, item, segment_group=0):
+        if segment_group not in self.groups:
+            self.groups[segment_group] = self.open_segment()
+        seg = self.groups[segment_group]
+        seg.write(item)
+        if len(seg) >= LogDirectory.TARGET_SIZE:
+            seg.close()
+            del self.groups[segment_group]
+
+    def close_all(self):
+        for k in list(self.groups.keys()):
+            self.groups[k].close()
+            del self.groups[k]
+
 class UtilizationTracker:
     """A simple object that tracks what fraction of each segment is used.
 
@@ -83,6 +156,7 @@ def parse_item(data):
 
     item = LogItem()
     item.id = header[2]
+    item.inum = header[3]
     item.location = None
     item.type = chr(header[1])
     item.size = size
@@ -114,14 +188,15 @@ def load_item(backend, location):
     item.location = location
     return item
 
-def parse_log(data, logname=None):
+def parse_log(data, location=None):
     """Parse contents of a log file, yielding a sequence of log items."""
 
-    location = None
-    if logname is not None:
-        m = re.match(r"^log-(\d+)-(\d+)$", logname)
+    if isinstance(location, str):
+        m = re.match(r"^log-(\d+)-(\d+)$", location)
         if m:
             location = (int(m.group(1)), int(m.group(2)))
+        else:
+            location = None
 
     offset = 0
     while len(data) - offset >= HEADER_SIZE:
@@ -141,40 +216,123 @@
 def load_checkpoint_record(backend):
     for (log, size) in reversed(backend.list()):
-        for item in reversed(list(parse_log(backend.read(log)))):
+        for item in reversed(list(parse_log(backend.read(log), log))):
             if item.type == ITEM_TYPE.CHECKPOINT:
                 return item
 
-def build_inode_map(backend, checkpoint_record):
-    """Reconstruct the inode map, starting from the checkpoint record given.
-
-    This will also build up information about segment utilization."""
-
-    util = UtilizationTracker(backend)
-    util.add_item(checkpoint_record)
-
-    print "Inode map:"
-    for i in range(len(checkpoint_record.data) // 16):
-        (start, end) = struct.unpack_from("<QQ", checkpoint_record.data, 16*i)
-        imap = load_item(backend, checkpoint_record.links[i][1])
-        util.add_item(imap)
-        print "  %d-%d: %s" % (start, end, imap)
-        for j in range(len(imap.data) // 16):
-            (inum,) = struct.unpack_from("<Q", imap.data, 16*j)
-            inode = load_item(backend, imap.links[j][1])
-            util.add_item(inode)
-            print "    %d: %s" % (inum, inode)
-
-    print
-    print "Segment utilizations:"
-    for (s, u) in sorted(util.segments.items()):
-        print "    %s: %s" % (s, u)
+class InodeMap:
+    def __init__(self):
+        pass
+
+    def build(self, backend, checkpoint_record):
+        """Reconstruct the inode map, starting from the checkpoint record given.
+
+        This will also build up information about segment utilization."""
+
+        self.checkpoint_record = checkpoint_record
+
+        util = UtilizationTracker(backend)
+        util.add_item(checkpoint_record)
+        inodes = {}
+
+        print "Inode map:"
+        for i in range(len(checkpoint_record.data) // 16):
+            (start, end) = struct.unpack_from("<QQ", checkpoint_record.data, 16*i)
+            imap = load_item(backend, checkpoint_record.links[i][1])
+            util.add_item(imap)
+            print "  %d-%d: %s" % (start, end, imap)
+            for j in range(len(imap.data) // 16):
+                (inum,) = struct.unpack_from("<Q", imap.data, 16*j)
+                inode = load_item(backend, imap.links[j][1])
+                inodes[inum] = inode
+                util.add_item(inode)
+                print "    %d: %s" % (inum, inode)
+
+        print
+        print "Segment utilizations:"
+        for (s, u) in sorted(util.segments.items()):
+            if u[1] > 0:
+                print "%s: %s %s" % (s, u, float(u[1]) / u[0])
+
+        self.inodes = inodes
+        self.util = util
+        self.updated_inodes = set()
+
+    def mark_updated(self, inum):
+        self.updated_inodes.add(inum)
+
+    def write(self, backend, log):
+        updated_inodes = sorted(self.updated_inodes, reverse=True)
+
+        new_checkpoint = LogItem()
+        new_checkpoint.id = LogItem.random_id()
+        new_checkpoint.inum = 0
+        new_checkpoint.type = ITEM_TYPE.CHECKPOINT
+        new_checkpoint.data = ""
+        new_checkpoint.links = []
+
+        for i in range(len(self.checkpoint_record.data) // 16):
+            (start, end) = struct.unpack_from("<QQ", self.checkpoint_record.data, 16*i)
+
+            new_checkpoint.data += self.checkpoint_record.data[16*i : 16*i + 16]
+
+            # Case 1: No inodes in this range of the old inode map have
+            # changed.  Simply link to the old inode map block.
+            if len(updated_inodes) == 0 or updated_inodes[-1] > end:
+                new_checkpoint.links.append(self.checkpoint_record.links[i])
+                continue
+
+            # Case 2: Some inodes have been updated.  Create a new inode map
+            # block, write it out, and point the new checkpoint at it.
+            inodes = [k for k in self.inodes if k >= start and k <= end]
+            inodes.sort()
+
+            block = LogItem()
+            block.id = LogItem.random_id()
+            block.inum = 0
+            block.type = ITEM_TYPE.INODE_MAP
+            block.links = []
+            block.data = ""
+            for j in inodes:
+                block.data += struct.pack("<Q", j)
+                block.links.append((self.inodes[j].id, self.inodes[j].location))
+            log.write(block, 2)
+
+            new_checkpoint.links.append((block.id, block.location))
+
+            while len(updated_inodes) > 0 and updated_inodes[-1] <= end:
+                updated_inodes.pop()
+
+        log.write(new_checkpoint, 2)
+        self.checkpoint_record = new_checkpoint
+
+def rewrite_inode(backend, inode_map, inum, log):
+    inode = inode_map.inodes[inum]
+    blocks = []
+    for l in inode.links:
+        data = load_item(backend, l[1])
+        blocks.append(data)
+        log.write(data, 0)
+    inode.links = [(b.id, b.location) for b in blocks]
+    log.write(inode, 1)
+    inode_map.mark_updated(inum)
 
 if __name__ == '__main__':
     backend = FileBackend(".")
     chkpt = load_checkpoint_record(backend)
-    build_inode_map(backend, chkpt)
+    imap = InodeMap()
+    imap.build(backend, chkpt)
+    print chkpt
+
+    print repr(chkpt.serialize())
+
+    log_dir = LogDirectory(backend, 1, 0)
+    rewrite_inode(backend, imap, 147, log_dir)
+    imap.write(backend, log_dir)
+    log_dir.close_all()
-- 
2.20.1
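
The serialize() method added above and the existing parse_item() agree on one
wire format: a fixed cloudlog header, then the item payload, then the packed
16-byte link IDs, then the packed link locations. Below is a minimal Python 2
round-trip sketch of that layout. HEADER_FORMAT and HEADER_MAGIC do not appear
in the hunks above, so the values here are assumptions inferred from the
"struct cloudlog_header" comment and the fields parse_item() unpacks (magic,
type byte, 16-byte ID, inode number, and three section sizes):

    import struct

    # Assumed header layout: magic, type byte, 16-byte ID, 64-bit inum,
    # then the sizes of the data, link-ID, and link-location sections.
    HEADER_FORMAT = '<4sb16sQIII'
    HEADER_MAGIC = 'AgI-'          # assumed magic value
    HEADER_SIZE = struct.calcsize(HEADER_FORMAT)

    item_id = '\x12' * 16          # hypothetical 16-byte object ID
    payload = 'example data'       # hypothetical item payload
    # Pack an ITEM_TYPE.DATA ('1') item with inum 42 and no links.
    header = struct.pack(HEADER_FORMAT, HEADER_MAGIC, ord('1'), item_id,
                         42, len(payload), 0, 0)
    serialized = header + payload

    # parse_item() unpacks the same fields: header[1] is the type,
    # header[2] the ID, and header[3] the inum this patch starts storing.
    fields = struct.unpack_from(HEADER_FORMAT, serialized, 0)
    assert fields[0] == HEADER_MAGIC
    assert chr(fields[1]) == '1'
    assert fields[3] == 42
    assert serialized[HEADER_SIZE:HEADER_SIZE + fields[4]] == payload

Note also the segment-group arguments in the new code: rewrite_inode() sends
data blocks to group 0 and inodes to group 1, while InodeMap.write() sends
inode-map and checkpoint blocks to group 2. Keeping items with different
expected lifetimes in separate log segments should reduce how much live data
the cleaner has to copy forward on later passes.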