fp.write(data)
fp.close()
+ def delete(self, filename):
+ # Remove a log file from the backing directory.
+ os.unlink(os.path.join(self.path, filename))
+
+ def loc_to_name(self, location):
+ # Map a (directory, sequence) location tuple to its on-disk file name.
+ return "log-%08d-%08d" % (location)
+
+ def name_to_loc(self, name):
+ # Inverse of loc_to_name. Returns (directory, sequence) as ints, or
+ # None (implicitly) when the name does not match the log-file pattern.
+ m = re.match(r"^log-(\d+)-(\d+)$", name)
+ if m: return (int(m.group(1)), int(m.group(2)))
+
class LogItem:
"""In-memory representation of a single item stored in a log file."""
def close(self):
data = ''.join(self.data)
- filename = "log-%08d-%08d" % (self.location)
+ # Name formatting now lives in one place (FileBackend.loc_to_name)
+ # instead of being duplicated at every call site.
+ filename = self.backend.loc_to_name(self.location)
print "Would write %d bytes of data to %s" % (len(data), filename)
self.backend.write(filename, data)
class LogDirectory:
TARGET_SIZE = 4 << 20
- def __init__(self, backend, dir, seq):
+ def __init__(self, backend, dir):
self.backend = backend
self.dir_num = dir
- self.seq_num = seq
+ # Discover the next free sequence number by scanning existing log
+ # files, instead of requiring the caller to pass one in.
+ self.seq_num = 0
+ for logname in backend.list():
+ # assumes backend.list() yields tuples whose first element is the
+ # file name -- TODO confirm against FileBackend.list
+ loc = backend.name_to_loc(logname[0])
+ if loc is not None and loc[0] == dir:
+ self.seq_num = max(self.seq_num, loc[1] + 1)
self.groups = {}
+ print "Starting sequence number is", self.seq_num
def open_segment(self):
seg = LogSegment(self.backend, (self.dir_num, self.seq_num))
The elements of the tuple are (directory, sequence, offset, size)."""
- filename = "log-%08d-%08d" % (location[0], location[1])
+ # Use the shared naming helper rather than duplicating the format string.
+ filename = backend.loc_to_name((location[0], location[1]))
data = backend.read(filename)[location[2] : location[2] + location[3]]
item = parse_item(data)
item.location = location
util = UtilizationTracker(backend)
util.add_item(checkpoint_record)
inodes = {}
+ # Segments the cleaner has decided to reclaim; consumed by run_cleaner
+ # and by the checkpoint-rewrite path below.
+ self.obsolete_segments = set()
print "Inode map:"
for i in range(len(checkpoint_record.data) // 16):
print
print "Segment utilizations:"
for (s, u) in sorted(util.segments.items()):
- #if u[1] > 0:
print "%s: %s %s" % (s, u, float(u[1]) / u[0])
+ if u[1] == 0:
+ # Segment holds no live data at all, so reclaim it immediately.
+ print "Deleting..."
+ backend.delete(s)
self.inodes = inodes
self.util = util
# Case 1: No inodes in this range of the old inode map have
# changed. Simply emit a new pointer to the same inode map block.
- # TODO: Add the ability to rewrite the inode map block if we choose
- # to do so for cleaning, even if no inodes have changed.
if len(updated_inodes) == 0 or updated_inodes[-1] > end:
- new_checkpoint.links.append(self.checkpoint_record.links[i])
- continue
+ # Reuse the old inode map block only if it does not live in a
+ # segment the cleaner is reclaiming; otherwise fall through to
+ # Case 2 so the block gets rewritten to a live segment.
+ old_location = self.checkpoint_record.links[i][1][0:2]
+ if old_location not in self.obsolete_segments:
+ new_checkpoint.links.append(self.checkpoint_record.links[i])
+ continue
# Case 2: Some inodes have been updated. Create a new inode map
# block, write it out, and point the new checkpoint at it.
log.write(new_checkpoint, 2)
self.checkpoint_record = new_checkpoint
-def rewrite_inode(backend, inode_map, inum, log):
+def rewrite_inode(backend, inode_map, inum, log, copy_data=True):
+ # Rewrite inode `inum` at the head of the log. When copy_data is True
+ # the inode's data blocks are also copied forward and the inode's links
+ # are repointed at the new block locations; when False only the inode
+ # record itself is rewritten (data still lives in healthy segments).
inode = inode_map.inodes[inum]
- blocks = []
- for l in inode.links:
- data = load_item(backend, l[1])
- blocks.append(data)
- log.write(data, 0)
- inode.links = [(b.id, b.location) for b in blocks]
+ if copy_data:
+ blocks = []
+ for l in inode.links:
+ data = load_item(backend, l[1])
+ blocks.append(data)
+ log.write(data, 0)
+ # log.write presumably assigns .location on each block -- TODO confirm
+ inode.links = [(b.id, b.location) for b in blocks]
log.write(inode, 1)
inode_map.mark_updated(inum)
+def run_cleaner(backend, inode_map, log):
+ # Select poorly-utilized segments, then rewrite any inodes (and, where
+ # needed, their data blocks) still referencing them so those segments
+ # can be reclaimed.
+
+ # Determine which segments are poorly utilized and should be cleaned. We
+ # need better heuristics here. Guard u[0] so a zero-size segment cannot
+ # raise ZeroDivisionError.
+ for (s, u) in sorted(inode_map.util.segments.items()):
+ if u[0] > 0 and u[1] > 0 and float(u[1]) / u[0] < 0.6:
+ print "Should clean segment", s
+ loc = backend.name_to_loc(s)
+ # BUG FIX: this previously tested `if s:`, which is always true for
+ # a non-empty segment name. name_to_loc returns None for names that
+ # do not parse, so the parsed location must be checked instead;
+ # otherwise None could be added to obsolete_segments and crash
+ # loc_to_name below.
+ if loc is not None: inode_map.obsolete_segments.add(loc)
+
+ # TODO: We probably also want heuristics that will find inodes with
+ # badly-fragmented data and rewrite that to achieve better locality.
+
+ # Given that list of segments to clean, scan through those segments to find
+ # data which is still live and mark relevant inodes as needing to be
+ # rewritten. An inode goes in dirty_inodes when the inode record itself
+ # lives in an obsolete segment, and in dirty_inode_data when any of its
+ # data blocks do.
+ dirty_inodes = set()
+ dirty_inode_data = set()
+ for s in inode_map.obsolete_segments:
+ filename = backend.loc_to_name(s)
+ print "Scanning", filename, "for live data"
+ for item in parse_log(backend.read(filename), filename):
+ if item.type in (ITEM_TYPE.DATA, ITEM_TYPE.INODE):
+ if item.inum != 0:
+ inode = inode_map.inodes[item.inum]
+ if s == inode.location[0:2]:
+ dirty_inodes.add(item.inum)
+ if item.inum not in dirty_inode_data:
+ for b in inode.links:
+ if s == b[1][0:2]:
+ dirty_inode_data.add(item.inum)
+ break
+
+ print "Inodes to rewrite:", dirty_inodes
+ print "Inodes with data to rewrite:", dirty_inode_data
+ # Copy data forward only for inodes whose data actually lives in an
+ # obsolete segment; otherwise just relocate the inode record.
+ for i in sorted(dirty_inodes.union(dirty_inode_data)):
+ rewrite_inode(backend, inode_map, i, log, i in dirty_inode_data)
+
+
if __name__ == '__main__':
backend = FileBackend(".")
chkpt = load_checkpoint_record(backend)
imap.build(backend, chkpt)
print chkpt
- print repr(chkpt.serialize())
-
- log_dir = LogDirectory(backend, 1, 0)
- rewrite_inode(backend, imap, 147, log_dir)
+ # Run the full cleaner pass against log directory 0; LogDirectory now
+ # discovers its own starting sequence number from the existing files.
+ log_dir = LogDirectory(backend, 0)
+ run_cleaner(backend, imap, log_dir)
imap.write(backend, log_dir)
log_dir.close_all()