3 # Utility for managing LBS archives.
5 import getpass, os, stat, sys, time
6 from optparse import OptionParser
# Command-line option handling, shared by every subcommand in this script.
# The parsed results are exposed module-wide: `options` carries the flag
# values and `args` holds the command name plus its arguments.
parser = OptionParser(usage="%prog [option]... command [arg]...")

# Table-driven option registration: (flag strings, keyword arguments).
_OPTION_TABLE = [
    (("-v",),
     dict(action="store_true", dest="verbose", default=False,
          help="increase verbosity")),
    (("--store",),
     dict(dest="store",
          help="specify path to backup data store")),
    (("--localdb",),
     dict(dest="localdb",
          help="specify path to local database")),
]
for _flags, _kwargs in _OPTION_TABLE:
    parser.add_option(*_flags, **_kwargs)

(options, args) = parser.parse_args(sys.argv[1:])
# Read a passphrase from the user and store it in the LBS_GPG_PASSPHRASE
# environment variable, so that child GPG processes can pick it up without
# prompting again.  If the variable is already set, the existing value is
# left untouched and no prompt is shown.
ENV_KEY = 'LBS_GPG_PASSPHRASE'
# `key not in d` replaces the deprecated dict.has_key() (removed in
# Python 3); both forms behave identically on Python 2.
if ENV_KEY not in os.environ:
    os.environ[ENV_KEY] = getpass.getpass()
# Delete old snapshots from the local database, though do not actually schedule
# any segment cleaning.
# Syntax: $0 --localdb=LOCALDB prune-db
# NOTE(review): the "def cmd_prune_db():" header appears to be missing from
# this copy of the file; the statement below reads as that command's body.
# Confirm against the complete source.
db = lbs.LocalDatabase(options.localdb)

# Delete old snapshots from the local database.
# NOTE(review): the call that actually performs the pruning (and any commit)
# is not visible in this copy -- confirm against the complete source.
# Run the segment cleaner.
# Syntax: $0 --localdb=LOCALDB clean
#
# Walks the segments recorded in the local database and marks as expired any
# segment whose reported cleaning benefit exceeds clean_threshold; the marked
# segments are then rebalanced via balance_expired_objects().
#
# NOTE(review): indentation and several interior lines (including the
# snapshot-pruning step announced by the comment below, and the continuation
# of the print expression's argument tuple) are missing from this copy --
# confirm against the complete source before editing.
def cmd_clean(clean_threshold=7.0):
# clean_threshold: minimum cleaning benefit (a float computed by the local
# database) required before a segment is scheduled for expiration.
db = lbs.LocalDatabase(options.localdb)
# Delete old snapshots from the local database.
# Expire segments which are poorly-utilized.
for s in db.get_segment_cleaning_list():
if s.cleaning_benefit > clean_threshold:
# Python 2 print statement; the argument tuple continues on a line that is
# not visible in this copy (presumably s.cleaning_benefit).
print "Cleaning segment %d (benefit %.2f)" % (s.id,
db.mark_segment_expired(s)
# Rewrite/rebalance objects that live in the segments just expired.
db.balance_expired_objects()
# List snapshots stored.
# Syntax: $0 --store=STORE list-snapshots
# (The option actually read below is --store; an earlier revision of this
# comment called it --data.)
def cmd_list_snapshots():
store = lbs.LowlevelDataStore(options.store)
# Emit each snapshot name in sorted order.
# NOTE(review): the loop body is not visible in this copy (presumably it
# prints each snapshot name) -- confirm against the complete source.
for s in sorted(store.list_snapshots()):
# List size of data needed for each snapshot.
# Syntax: $0 --store=STORE list-snapshot-sizes
# (The option actually read below is --store; an earlier revision of this
# comment called it --data.)
#
# For each snapshot (in sorted order) prints its total segment size plus the
# size added/removed relative to the previous snapshot, all converted to MiB
# via the 1024.0**2 divisors in the final print.
#
# NOTE(review): several interior lines are missing from this copy: the
# initialization of `previous` before the first iteration, the
# "for seg in segments:" header above the segsize lookup, the accumulation
# of `size`, and the loop that scans `previous` for removed segments.
# Confirm against the complete source before editing.
def cmd_list_snapshot_sizes():
lowlevel = lbs.LowlevelDataStore(options.store)
store = lbs.ObjectStore(lowlevel)
for s in sorted(lowlevel.list_snapshots()):
d = lbs.parse_full(store.load_snapshot(s))
# The snapshot descriptor lists its segments as a whitespace-separated
# string under the 'Segments' key.
segments = d['Segments'].split()
(size, added, removed) = (0, 0, 0)
# Segment data files are stored as "<name>.tar.gpg"; stat gives their size.
segsize = lowlevel.lowlevel_stat(seg + ".tar.gpg")['size']
# Segments not present in the previous snapshot count as newly added.
if seg not in previous: added += segsize
# Segments from the previous snapshot no longer referenced count as removed.
if seg not in segments:
removed += lowlevel.lowlevel_stat(seg + ".tar.gpg")['size']
previous = set(segments)
print "%s: %.3f +%.3f -%.3f" % (s, size / 1024.0**2, added / 1024.0**2, removed / 1024.0**2)
# Build checksum list for objects in the given segments, or all segments if
# none are specified.
#
# Output format (one line per object): "<segment>/<object>:<length>:<checksum>".
#
# NOTE(review): the "for s in segments:" header above the load_segment loop
# is missing from this copy -- confirm against the complete source.
def cmd_object_checksums(segments):
# segments: list of segment names from the command line; empty means "all".
lowlevel = lbs.LowlevelDataStore(options.store)
store = lbs.ObjectStore(lowlevel)
if len(segments) == 0:
segments = sorted(lowlevel.list_segments())
for (o, data) in store.load_segment(s):
csum = lbs.ChecksumCreator().update(data).compute()
print "%s/%s:%d:%s" % (s, o, len(data), csum)
# Read a snapshot file and dump its parsed contents (for debugging).
#
# NOTE(review): the "for s in snapshots:" header and any further print
# statements are missing from this copy -- confirm against the complete
# source.
def cmd_read_snapshots(snapshots):
# snapshots: list of snapshot names from the command line.
lowlevel = lbs.LowlevelDataStore(options.store)
store = lbs.ObjectStore(lowlevel)
d = lbs.parse_full(store.load_snapshot(s))
# Print the list of segments referenced by this snapshot.
print d['Segments'].split()
# Verify snapshot integrity: for every regular file recorded in each named
# snapshot, re-read all of its data blocks from the store, recompute the
# checksum, and compare both checksum and byte count against the stored
# metadata.  Raises ValueError on the first mismatch.
#
# NOTE(review): several interior lines are missing from this copy: the
# "for s in snapshots:" header, the "for m in ...:" header iterating the
# metadata, and the initialization/accumulation of the `size` counter used
# in the final comparison.  Confirm against the complete source.
def cmd_verify_snapshots(snapshots):
# snapshots: list of snapshot names from the command line.
lowlevel = lbs.LowlevelDataStore(options.store)
store = lbs.ObjectStore(lowlevel)
print "#### Snapshot", s
d = lbs.parse_full(store.load_snapshot(s))
print "## Root:", d['Root']
metadata = lbs.iterate_metadata(store, d['Root'])
# Only regular files ('-') carry data blocks to verify; skip the rest.
if m.fields['type'] != '-': continue
print "%s [%d bytes]" % (m.fields['name'], int(m.fields['size']))
verifier = lbs.ChecksumVerifier(m.fields['checksum'])
for block in m.data():
data = store.get(block)
verifier.update(data)
if int(m.fields['size']) != size:
raise ValueError("File size does not match!")
if not verifier.valid():
raise ValueError("Bad checksum found")
# Restore a snapshot, or some subset of files from it.
#
# Walks the snapshot's metadata, recreates each entry under a destination
# directory (regular files with checksum/size verification, directories,
# symlinks, fifos, character/block devices), then restores ownership,
# permissions (except on symlinks), and modification times on a best-effort
# basis, emitting warnings rather than aborting on per-file errors.
#
# NOTE(review): many interior lines are missing from this copy, including:
# the assignment of `destdir` (presumably from args[1]), the "def warn(m,
# msg):" header above the warning print, the try/except scaffolding that
# routes errors to warn(), the directory-creation call under the isdir
# check, the handling bodies for 'd' and 'p' entries, and the else branch
# before the S_IFBLK assignment.  Confirm against the complete source
# before editing.
def cmd_restore_snapshot(args):
# args[0]: name of the snapshot to restore.
lowlevel = lbs.LowlevelDataStore(options.store)
store = lbs.ObjectStore(lowlevel)
snapshot = lbs.parse_full(store.load_snapshot(args[0]))
# Body of the local warn() helper (header not visible in this copy).
print "Warning: %s: %s" % (m.items.name, msg)
for m in lbs.iterate_metadata(store, snapshot['Root']):
pathname = os.path.normpath(m.items.name)
# Strip any leading slashes so absolute paths restore under destdir.
while os.path.isabs(pathname):
pathname = pathname[1:]
destpath = os.path.join(destdir, pathname)
(path, filename) = os.path.split(destpath)
# TODO: Check for ../../../paths that might attempt to write outside
# the destination directory. Maybe also check attempts to follow
# symlinks pointing outside?
# Ensure the parent directory exists (creation call not visible here).
if not os.path.isdir(path):
# '-' entries are regular files: rewrite the data blocks and verify
# the recorded checksum and size.
if m.items.type == '-':
file = open(destpath, 'wb')
verifier = lbs.ChecksumVerifier(m.items.checksum)
for block in m.data():
data = store.get(block)
verifier.update(data)
if int(m.fields['size']) != size:
raise ValueError("File size does not match!")
if not verifier.valid():
raise ValueError("Bad checksum found")
elif m.items.type == 'd':
elif m.items.type == 'l':
os.symlink(m.items.contents, destpath)
elif m.items.type == 'p':
# 'c'/'b' entries are character/block device nodes.
elif m.items.type in ('c', 'b'):
if m.items.type == 'c':
# 0600 is a Python 2 octal literal (would be 0o600 in Python 3).
mode = 0600 | stat.S_IFCHR
mode = 0600 | stat.S_IFBLK
os.mknod(destpath, mode, os.makedev(*m.items.device))
elif m.items.type == 's':
pass # TODO: Implement
warn(m, "Unknown type code: " + m.items.type)
warn(m, "Error restoring: %s" % (e,))
# Restore ownership; user/group metadata is a sequence whose first
# element is the numeric id.
uid = m.items.user[0]
gid = m.items.group[0]
os.lchown(destpath, uid, gid)
warn(m, "Error restoring file ownership: %s" % (e,))
# Symlinks get no chmod (mode applies to the target, not the link).
if m.items.type == 'l':
os.chmod(destpath, m.items.mode)
warn(m, "Error restoring file permissions: %s" % (e,))
# atime is set to "now"; mtime comes from the snapshot metadata.
os.utime(destpath, (time.time(), m.items.mtime))
warn(m, "Error restoring file timestamps: %s" % (e,))
# Dispatch on the subcommand name parsed from the command line.
# NOTE(review): this chain is incomplete in this copy: the leading "if"
# branch (and the assignment of `cmd`), the handler calls for 'prune-db'
# and 'list-snapshots', and the "else:" before the final print are not
# visible.  Confirm against the complete source.
elif cmd == 'prune-db':
elif cmd == 'list-snapshots':
elif cmd == 'object-sums':
cmd_object_checksums(args)
elif cmd == 'read-snapshots':
cmd_read_snapshots(args)
elif cmd == 'list-snapshot-sizes':
cmd_list_snapshot_sizes()
elif cmd == 'verify-snapshots':
cmd_verify_snapshots(args)
elif cmd == 'restore-snapshot':
cmd_restore_snapshot(args)
# Fallback for unrecognized commands (presumably under an else: branch).
print "Unknown command:", cmd