from optparse import OptionParser
import os, sys
import lbs
+# We support formats up to "LBS Snapshot v0.6", but are additionally limited
+# by whatever format version the lbs module itself supports.
+FORMAT_VERSION = min(lbs.FORMAT_VERSION, (0, 6))
+
+def check_version(format):
+    ver = lbs.parse_metadata_version(format)
+    if ver > FORMAT_VERSION:
+        raise RuntimeError("Unsupported LBS format: " + format)
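+# For example, assuming lbs.parse_metadata_version("LBS Snapshot v0.6")
+# returns the version tuple (0, 6):
+#   check_version("LBS Snapshot v0.6")   # accepted
+#   check_version("LBS Snapshot v0.8")   # raises RuntimeError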
+
parser = OptionParser(usage="%prog [option]... command [arg]...")
parser.add_option("-v", action="store_true", dest="verbose", default=False,
help="increase verbosity")
help="specify path to backup data store")
parser.add_option("--localdb", dest="localdb",
help="specify path to local database")
+parser.add_option("--intent", dest="intent", default=1.0,
+ help="give expected next snapshot type when cleaning")
(options, args) = parser.parse_args(sys.argv[1:])
# Read a passphrase from the user and store it in the LBS_GPG_PASSPHRASE
# environment variable.
    db = lbs.LocalDatabase(options.localdb)
    # Delete old snapshots from the local database.
-    db.garbage_collect()
-    db.commit()
+    #db.garbage_collect()
+    #db.commit()
# Run the segment cleaner.
# Syntax: $0 --localdb=LOCALDB clean
    db = lbs.LocalDatabase(options.localdb)
    # Delete old snapshots from the local database.
-    db.garbage_collect()
+    intent = float(options.intent)
+    for s in db.list_schemes():
+        db.garbage_collect(s, intent)
    # Expire segments which are poorly-utilized.
    for s in db.get_segment_cleaning_list():

    previous = set()
    for s in sorted(lowlevel.list_snapshots()):
        d = lbs.parse_full(store.load_snapshot(s))
+        check_version(d['Format'])
        segments = d['Segments'].split()
        (size, added, removed) = (0, 0, 0)
        for seg in segments:

    store = lbs.ObjectStore(lowlevel)
    for s in snapshots:
        d = lbs.parse_full(store.load_snapshot(s))
+        check_version(d['Format'])
        print d
        print d['Segments'].split()
    store.cleanup()
+# Produce a flattened metadata dump from a snapshot
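+# Syntax: $0 --store=STORE read-metadata SNAPSHOT-NAME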
+def cmd_read_metadata(snapshot):
+    get_passphrase()
+    lowlevel = lbs.LowlevelDataStore(options.store)
+    store = lbs.ObjectStore(lowlevel)
+    d = lbs.parse_full(store.load_snapshot(snapshot))
+    check_version(d['Format'])
+    metadata = lbs.read_metadata(store, d['Root'])
+    blank = True
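+    # Collapse runs of blank lines: emit at most one blank line between
+    # metadata entries, and none at the very start of the dump.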
+    for l in metadata:
+        if l == '\n':
+            if blank: continue
+            blank = True
+        else:
+            blank = False
+        sys.stdout.write(l)
+    store.cleanup()
+
# Verify snapshot integrity
def cmd_verify_snapshots(snapshots):
    get_passphrase()
    lowlevel = lbs.LowlevelDataStore(options.store)
    store = lbs.ObjectStore(lowlevel)
    for s in snapshots:
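+        # lbs.accessed_segments is assumed to be a module-level set that the
+        # object store fills in as segments are read; clear it so only this
+        # snapshot's reads are recorded below.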
+        lbs.accessed_segments.clear()
print "#### Snapshot", s
d = lbs.parse_full(store.load_snapshot(s))
+ check_version(d['Format'])
print "## Root:", d['Root']
metadata = lbs.iterate_metadata(store, d['Root'])
for m in metadata:
-            if m.fields['type'] != '-': continue
+            if m.fields['type'] not in ('-', 'f'): continue
            print "%s [%d bytes]" % (m.fields['name'], int(m.fields['size']))
            verifier = lbs.ChecksumVerifier(m.fields['checksum'])
            size = 0
            for block in m.data():
                data = store.get(block)
                verifier.update(data)
                size += len(data)
            if int(m.fields['size']) != size:
                raise ValueError("File size does not match!")
            if not verifier.valid():
                raise ValueError("Bad checksum found")
+
+        # Verify that the list of segments included with the snapshot
+        # descriptor was accurate: it must cover every segment that was
+        # actually read, and should not list any segments that were never
+        # accessed.
+        listed_segments = set(d['Segments'].split())
+        if lbs.accessed_segments - listed_segments:
+            print "Error: Some segments not listed in descriptor!"
+            print sorted(list(lbs.accessed_segments - listed_segments))
+        if listed_segments - lbs.accessed_segments:
+            print "Warning: Extra unused segments listed in descriptor!"
+            print sorted(list(listed_segments - lbs.accessed_segments))
    store.cleanup()
# Restore a snapshot, or some subset of files from it
    lowlevel = lbs.LowlevelDataStore(options.store)
    store = lbs.ObjectStore(lowlevel)
    snapshot = lbs.parse_full(store.load_snapshot(args[0]))
+    check_version(snapshot['Format'])
    destdir = args[1]
    paths = args[2:]
+    def matchpath(path):
+        "Return true if the specified path should be included in the restore."
+
+        # No specification of what to restore => restore everything
+        if len(paths) == 0: return True
+
+        for p in paths:
+            if path == p: return True
+            if path.startswith(p + "/"): return True
+        return False
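+    # e.g. with paths = ["home/user"], both "home/user" and "home/user/file"
+    # match, but "home/user2" does not.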
+
    def warn(m, msg):
        print "Warning: %s: %s" % (m.items.name, msg)
+    # Phase 1: Read the complete metadata log and create directory structure.
+    metadata_items = []
+    metadata_paths = {}
+    metadata_segments = {}
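+    # metadata_items:    every matched log entry, in log order (replayed in
+    #                    reverse for the permissions pass below)
+    # metadata_paths:    regular files whose data still needs to be restored
+    # metadata_segments: maps each segment to the set of files with at least
+    #                    one data block stored in that segment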
    for m in lbs.iterate_metadata(store, snapshot['Root']):
        pathname = os.path.normpath(m.items.name)
        while os.path.isabs(pathname):
            pathname = pathname[1:]
-        print pathname
+        if not matchpath(pathname): continue
+
+        destpath = os.path.join(destdir, pathname)
+        if m.items.type == 'd':
+            path = destpath
+        else:
+            (path, filename) = os.path.split(destpath)
+
+        metadata_items.append((pathname, m))
+        if m.items.type in ('-', 'f'):
+            metadata_paths[pathname] = m
+            for block in m.data():
+                (segment, object, checksum, slice) \
+                    = lbs.ObjectStore.parse_ref(block)
+                if segment not in metadata_segments:
+                    metadata_segments[segment] = set()
+                metadata_segments[segment].add(pathname)
+
+        try:
+            if not os.path.isdir(path):
+                print "mkdir:", path
+                os.makedirs(path)
+        except Exception, e:
+            warn(m, "Error creating directory structure: %s" % (e,))
+            continue
+
+    # Phase 2: Restore files, ordered by how data is stored in segments.
+    def restore_file(pathname, m):
+        assert m.items.type in ('-', 'f')
+        print "extract:", pathname
+        destpath = os.path.join(destdir, pathname)
+
+        file = open(destpath, 'wb')
+        verifier = lbs.ChecksumVerifier(m.items.checksum)
+        size = 0
+        for block in m.data():
+            data = store.get(block)
+            verifier.update(data)
+            size += len(data)
+            file.write(data)
+        file.close()
+        if int(m.fields['size']) != size:
+            raise ValueError("File size does not match!")
+        if not verifier.valid():
+            raise ValueError("Bad checksum found")
+
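+    # Pop segments in arbitrary order, restoring every file that draws data
+    # from the popped segment together, so that each segment ideally has to
+    # be fetched and decoded only once.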
+    while metadata_segments:
+        (segment, items) = metadata_segments.popitem()
+        print "+ Segment", segment
+        for pathname in sorted(items):
+            if pathname in metadata_paths:
+                restore_file(pathname, metadata_paths[pathname])
+                del metadata_paths[pathname]
+
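+    # Anything still left in metadata_paths references no segments at all
+    # (e.g. zero-length files), so restore it directly.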
+ print "+ Remaining files"
+ while metadata_paths:
+ (pathname, m) = metadata_paths.popitem()
+ restore_file(pathname, m)
+
+    # Phase 3: Restore special files (symlinks, devices).
+    # Phase 4: Restore directory permissions and modification times.
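+    # Walk the metadata log in reverse so entries inside a directory are
+    # handled before the directory itself; otherwise touching a child would
+    # clobber the parent directory's freshly restored modification time.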
+    for (pathname, m) in reversed(metadata_items):
+        print "permissions:", pathname
        destpath = os.path.join(destdir, pathname)
        (path, filename) = os.path.split(destpath)
        # TODO: Check for paths that might escape the destination directory,
        # and perhaps for symlinks pointing outside it?
        try:
-            if not os.path.isdir(path):
-                os.makedirs(path)
-
-            if m.items.type == '-':
-                file = open(destpath, 'wb')
-                verifier = lbs.ChecksumVerifier(m.items.checksum)
-                size = 0
-                for block in m.data():
-                    data = store.get(block)
-                    verifier.update(data)
-                    size += len(data)
-                    file.write(data)
-                file.close()
-                if int(m.fields['size']) != size:
-                    raise ValueError("File size does not match!")
-                if not verifier.valid():
-                    raise ValueError("Bad checksum found")
-            elif m.items.type == 'd':
-                if filename != '.':
-                    os.mkdir(destpath)
+            if m.items.type in ('-', 'f', 'd'):
+                pass
            elif m.items.type == 'l':
-                os.symlink(m.items.contents, destpath)
+                try:
+                    target = m.items.target
+                except:
+                    # Old (v0.2 format) name for 'target'
+                    target = m.items.contents
+                os.symlink(target, destpath)
            elif m.items.type == 'p':
                os.mkfifo(destpath)
            elif m.items.type in ('c', 'b'):
    cmd_object_checksums(args)
elif cmd == 'read-snapshots':
    cmd_read_snapshots(args)
+elif cmd == 'read-metadata':
+    cmd_read_metadata(args[0])
elif cmd == 'list-snapshot-sizes':
    cmd_list_snapshot_sizes()
elif cmd == 'verify-snapshots':