def build_inode_map(backend, checkpoint_record):
    """Reconstruct the inode map, starting from the checkpoint record given.

    This will also build up information about segment utilization."""

    util = UtilizationTracker(backend)
    util.add_item(checkpoint_record)

    # Checkpoint data is a packed array of 16-byte little-endian
    # (start, end) inode-number ranges; links[i] locates the inode map
    # block covering the i-th range.
    print "Inode map:"
    for i in range(len(checkpoint_record.data) // 16):
        (start, end) = struct.unpack_from("<QQ", checkpoint_record.data, 16*i)
        imap = load_item(backend, checkpoint_record.links[i][1])
        util.add_item(imap)
        print "[%d, %d]: %s" % (start, end, imap)
        # Each inode map block holds packed 8-byte inode numbers; the
        # parallel links list gives each inode's location.
        for j in range(len(imap.data) // 8):
            (inum,) = struct.unpack_from("<Q", imap.data, 8*j)
            inode = load_item(backend, imap.links[j][1])
            data_segments = set()
            util.add_item(inode)
            # NOTE: reuses the outer loop variable i; harmless here since
            # i is reassigned at the top of the outer loop.
            for i in inode.links:
                util.add_item(i[1])
                # The first two elements of a location identify its segment.
                data_segments.add(i[1][0:2])
            print " %d: %s (%d data segments)" % (inum, inode, len(data_segments))

    print
    print "Segment utilizations:"
    for (s, u) in sorted(util.segments.items()):
        print "%s: %s" % (s, u)
class InodeMap:
    """In-memory reconstruction of the filesystem's inode map.

    build() loads the map from a checkpoint record, tracking segment
    utilization along the way; mark_updated() records dirty inodes; and
    write() emits new inode map blocks and a checkpoint reflecting the
    updates.
    """

    def __init__(self):
        # All state (checkpoint_record, inodes, util, obsolete_segments,
        # updated_inodes) is populated by build().
        pass

    def build(self, backend, checkpoint_record):
        """Reconstruct the inode map from the checkpoint record given.

        This will also build up information about segment utilization."""

        self.checkpoint_record = checkpoint_record

        util = UtilizationTracker(backend)
        util.add_item(checkpoint_record)
        inodes = {}
        self.obsolete_segments = set()

        # Checkpoint data is a packed array of 16-byte little-endian
        # (start, end) inode-number ranges; links[i] locates the inode
        # map block covering the i-th range.
        print "Inode map:"
        for i in range(len(checkpoint_record.data) // 16):
            (start, end) = struct.unpack_from("<QQ", checkpoint_record.data, 16*i)
            imap = load_item(backend, checkpoint_record.links[i][1])
            util.add_item(imap)
            print "[%d, %d]: %s" % (start, end, imap)
            # Each inode map block holds packed 8-byte inode numbers; the
            # parallel links list gives each inode's location.
            for j in range(len(imap.data) // 8):
                (inum,) = struct.unpack_from("<Q", imap.data, 8*j)
                inode = load_item(backend, imap.links[j][1])
                inodes[inum] = inode
                data_segments = set()
                util.add_item(inode)
                # NOTE: reuses the outer loop variable i; harmless here
                # since i is reassigned at the top of the outer loop.
                for i in inode.links:
                    util.add_item(i[1])
                    # A location's first two elements identify its segment.
                    data_segments.add(i[1][0:2])
                print " %d: %s (%d data segments)" % (inum, inode, len(data_segments))

        print
        print "Segment utilizations:"
        for (s, u) in sorted(util.segments.items()):
            # u appears to be (total bytes, live bytes); assumes total is
            # nonzero for any tracked segment -- TODO confirm.
            print "%s: %s %s" % (s, u, float(u[1]) / u[0])
            if u[1] == 0:
                # Nothing live in this segment at all: reclaim it now.
                print "Deleting..."
                backend.delete(s)

        self.inodes = inodes
        self.util = util
        self.updated_inodes = set()

    def mark_updated(self, inum):
        """Record that inode inum has changed and must be rewritten."""
        self.updated_inodes.add(inum)

    def write(self, backend, log):
        """Write new inode map blocks covering all updated inodes, then a
        new checkpoint record pointing at the resulting map."""

        # Sorted descending so the smallest pending inode number sits at
        # the end of the list, allowing O(1) pops as each ascending range
        # of the map is consumed.
        updated_inodes = sorted(self.updated_inodes, reverse=True)

        new_checkpoint = LogItem()
        new_checkpoint.id = LogItem.random_id()
        new_checkpoint.inum = 0
        new_checkpoint.type = ITEM_TYPE.CHECKPOINT
        new_checkpoint.data = ""
        new_checkpoint.links = []

        for i in range(len(self.checkpoint_record.data) // 16):
            (start, end) = struct.unpack_from("<QQ", self.checkpoint_record.data, 16*i)

            # The range layout itself never changes; copy it through.
            new_checkpoint.data += self.checkpoint_record.data[16*i : 16*i + 16]

            # Case 1: No inodes in this range of the old inode map have
            # changed.  Simply emit a new pointer to the same inode map
            # block -- unless that block lives in a segment being cleaned,
            # in which case fall through and rewrite it.
            if len(updated_inodes) == 0 or updated_inodes[-1] > end:
                old_location = self.checkpoint_record.links[i][1][0:2]
                if old_location not in self.obsolete_segments:
                    new_checkpoint.links.append(self.checkpoint_record.links[i])
                    continue

            # Case 2: Some inodes have been updated.  Create a new inode map
            # block, write it out, and point the new checkpoint at it.
            inodes = [k for k in self.inodes if k >= start and k <= end]
            inodes.sort()

            block = LogItem()
            block.id = LogItem.random_id()
            block.inum = 0
            block.type = ITEM_TYPE.INODE_MAP
            block.links = []
            block.data = ""
            for j in inodes:
                block.data += struct.pack("<Q", j)
                block.links.append((self.inodes[j].id, self.inodes[j].location))
            log.write(block, 2)

            new_checkpoint.links.append((block.id, block.location))

            # Discard the updated inodes this range just absorbed.
            while len(updated_inodes) > 0 and updated_inodes[-1] <= end:
                updated_inodes.pop()

        log.write(new_checkpoint, 2)
        self.checkpoint_record = new_checkpoint
+
def rewrite_inode(backend, inode_map, inum, log, copy_data=True):
    """Append a fresh copy of inode inum to the log and mark it updated.

    When copy_data is true, every data block the inode references is
    relocated to the log first and the inode's links are repointed at
    the new copies."""
    inode = inode_map.inodes[inum]
    if copy_data:
        relocated = []
        for link in inode.links:
            item = load_item(backend, link[1])
            log.write(item, 0)
            relocated.append(item)
        inode.links = [(item.id, item.location) for item in relocated]
    log.write(inode, 1)
    inode_map.mark_updated(inum)
+
+def run_cleaner(backend, inode_map, log, repack_inodes=False):
+ # Determine which segments are poorly utilized and should be cleaned. We
+ # need better heuristics here.
+ for (s, u) in sorted(inode_map.util.segments.items()):
+ if (float(u[1]) / u[0] < 0.6 or u[1] < 32768) and u[1] > 0:
+ print "Should clean segment", s
+ loc = backend.name_to_loc(s)
+ if s: inode_map.obsolete_segments.add(loc)
+
+ # TODO: We probably also want heuristics that will find inodes with
+ # badly-fragmented data and rewrite that to achieve better locality.
+
+ # Given that list of segments to clean, scan through those segments to find
+ # data which is still live and mark relevant inodes as needing to be
+ # rewritten.
+ if repack_inodes:
+ dirty_inodes = set(inode_map.inodes)
+ else:
+ dirty_inodes = set()
+ dirty_inode_data = set()
+ for s in inode_map.obsolete_segments:
+ filename = backend.loc_to_name(s)
+ print "Scanning", filename, "for live data"
+ for item in parse_log(backend.read(filename), filename):
+ if item.type in (ITEM_TYPE.DATA, ITEM_TYPE.INODE):
+ if item.inum != 0:
+ inode = inode_map.inodes[item.inum]
+ if s == inode.location[0:2]:
+ dirty_inodes.add(item.inum)
+ if item.inum not in dirty_inode_data:
+ for b in inode.links:
+ if s == b[1][0:2]:
+ dirty_inode_data.add(item.inum)
+ break
+
+ print "Inodes to rewrite:", dirty_inodes
+ print "Inodes with data to rewrite:", dirty_inode_data
+ for i in sorted(dirty_inodes.union(dirty_inode_data)):
+ rewrite_inode(backend, inode_map, i, log, i in dirty_inode_data)