# Utility for managing LBS archives.

import getpass, os, stat, sys, time
from optparse import OptionParser

# NOTE(review): the `lbs` module used throughout this script (including on
# the FORMAT_VERSION line below) is never imported in this chunk -- an
# `import lbs` statement appears to be missing.  TODO confirm against the
# full file.

# We support up to "LBS Snapshot v0.6" formats, but are also limited by the lbs
# module's own format support -- TODO confirm; the original comment appears
# truncated here.
FORMAT_VERSION = min(lbs.FORMAT_VERSION, (0, 6))
def check_version(format):
    """Reject snapshot metadata that is newer than this tool understands.

    format: the value of the snapshot's 'Format' field.
    Raises RuntimeError when the parsed version exceeds FORMAT_VERSION.
    """
    parsed = lbs.parse_metadata_version(format)
    if parsed <= FORMAT_VERSION:
        return
    raise RuntimeError("Unsupported LBS format: " + format)
# Command-line interface: global options come before the command name.
parser = OptionParser(usage="%prog [option]... command [arg]...")
parser.add_option("-v", action="store_true", dest="verbose", default=False,
                  help="increase verbosity")
parser.add_option("--store", dest="store",
                  help="specify path to backup data store")
parser.add_option("--localdb", dest="localdb",
                  help="specify path to local database")
# `options` is read by the cmd_* functions below; `args` holds the command
# name followed by its arguments.
(options, args) = parser.parse_args(sys.argv[1:])
# Read a passphrase from the user and store it in the LBS_GPG_PASSPHRASE
# environment variable.  Only prompt when the variable is not already set, so
# a caller may pre-seed it to run non-interactively.  (Presumably consumed by
# the lbs module's encryption handling -- TODO confirm.)
ENV_KEY = 'LBS_GPG_PASSPHRASE'
# Fixed: dict.has_key() is deprecated; the `in` operator works in both
# Python 2 and Python 3 and is the idiomatic membership test.
if ENV_KEY not in os.environ:
    os.environ[ENV_KEY] = getpass.getpass()
# Delete old snapshots from the local database, though do not actually schedule
# any segment cleaning.
# Syntax: $0 --localdb=LOCALDB prune-db
# NOTE(review): the enclosing function header (presumably
# `def cmd_prune_db():`) is not visible in this chunk; the lines below read
# like its body.  TODO confirm against the full file.
db = lbs.LocalDatabase(options.localdb)

# Delete old snapshots from the local database.
# NOTE(review): the statement that actually performs the deletion appears to
# be missing after this comment.
44 # Run the segment cleaner.
45 # Syntax: $0 --localdb=LOCALDB clean
46 def cmd_clean(clean_threshold=7.0):
47 db = lbs.LocalDatabase(options.localdb)
49 # Delete old snapshots from the local database.
52 # Expire segments which are poorly-utilized.
53 for s in db.get_segment_cleaning_list():
54 if s.cleaning_benefit > clean_threshold:
55 print "Cleaning segment %d (benefit %.2f)" % (s.id,
57 db.mark_segment_expired(s)
60 db.balance_expired_objects()
63 # List snapshots stored.
64 # Syntax: $0 --data=DATADIR list-snapshots
65 def cmd_list_snapshots():
66 store = lbs.LowlevelDataStore(options.store)
67 for s in sorted(store.list_snapshots()):
70 # List size of data needed for each snapshot.
71 # Syntax: $0 --data=DATADIR list-snapshot-sizes
72 def cmd_list_snapshot_sizes():
73 lowlevel = lbs.LowlevelDataStore(options.store)
74 store = lbs.ObjectStore(lowlevel)
76 for s in sorted(lowlevel.list_snapshots()):
77 d = lbs.parse_full(store.load_snapshot(s))
78 check_version(d['Format'])
79 segments = d['Segments'].split()
80 (size, added, removed) = (0, 0, 0)
82 segsize = lowlevel.lowlevel_stat(seg + ".tar.gpg")['size']
84 if seg not in previous: added += segsize
86 if seg not in segments:
87 removed += lowlevel.lowlevel_stat(seg + ".tar.gpg")['size']
88 previous = set(segments)
89 print "%s: %.3f +%.3f -%.3f" % (s, size / 1024.0**2, added / 1024.0**2, removed / 1024.0**2)
91 # Build checksum list for objects in the given segments, or all segments if
93 def cmd_object_checksums(segments):
95 lowlevel = lbs.LowlevelDataStore(options.store)
96 store = lbs.ObjectStore(lowlevel)
97 if len(segments) == 0:
98 segments = sorted(lowlevel.list_segments())
100 for (o, data) in store.load_segment(s):
101 csum = lbs.ChecksumCreator().update(data).compute()
102 print "%s/%s:%d:%s" % (s, o, len(data), csum)
105 # Read a snapshot file
106 def cmd_read_snapshots(snapshots):
108 lowlevel = lbs.LowlevelDataStore(options.store)
109 store = lbs.ObjectStore(lowlevel)
111 d = lbs.parse_full(store.load_snapshot(s))
112 check_version(d['Format'])
114 print d['Segments'].split()
# Produce a flattened metadata dump from a snapshot
def cmd_read_metadata(snapshot):
    # snapshot: name of the snapshot whose metadata log should be dumped.
    lowlevel = lbs.LowlevelDataStore(options.store)
    store = lbs.ObjectStore(lowlevel)
    d = lbs.parse_full(store.load_snapshot(snapshot))
    check_version(d['Format'])
    metadata = lbs.read_metadata(store, d['Root'])
    # NOTE(review): `metadata` is computed but never output -- the code that
    # writes the flattened dump appears to be missing from this chunk.
129 # Verify snapshot integrity
130 def cmd_verify_snapshots(snapshots):
132 lowlevel = lbs.LowlevelDataStore(options.store)
133 store = lbs.ObjectStore(lowlevel)
135 lbs.accessed_segments.clear()
136 print "#### Snapshot", s
137 d = lbs.parse_full(store.load_snapshot(s))
138 check_version(d['Format'])
139 print "## Root:", d['Root']
140 metadata = lbs.iterate_metadata(store, d['Root'])
142 if m.fields['type'] not in ('-', 'f'): continue
143 print "%s [%d bytes]" % (m.fields['name'], int(m.fields['size']))
144 verifier = lbs.ChecksumVerifier(m.fields['checksum'])
146 for block in m.data():
147 data = store.get(block)
148 verifier.update(data)
150 if int(m.fields['size']) != size:
151 raise ValueError("File size does not match!")
152 if not verifier.valid():
153 raise ValueError("Bad checksum found")
155 # Verify that the list of segments included with the snapshot was
156 # actually accurate: covered all segments that were really read, and
157 # doesn't contain duplicates.
158 listed_segments = set(d['Segments'].split())
159 if lbs.accessed_segments - listed_segments:
160 print "Error: Some segments not listed in descriptor!"
161 print sorted(list(lbs.accessed_segments - listed_segments))
162 if listed_segments - lbs.accessed_segments :
163 print "Warning: Extra unused segments listed in descriptor!"
164 print sorted(list(listed_segments - lbs.accessed_segments))
# Restore a snapshot, or some subset of files from it
def cmd_restore_snapshot(args):
    # args[0]: snapshot name.  Presumably the destination directory and any
    # path filters also come from args -- TODO confirm; several lines are
    # missing from this chunk.
    lowlevel = lbs.LowlevelDataStore(options.store)
    store = lbs.ObjectStore(lowlevel)
    snapshot = lbs.parse_full(store.load_snapshot(args[0]))
    check_version(snapshot['Format'])

    # NOTE(review): code is missing here -- `destdir` is used below but never
    # assigned, and the following print reads like the body of a helper
    # (presumably `def warn(m, msg):`) whose header is absent.
    print "Warning: %s: %s" % (m.items.name, msg)

    for m in lbs.iterate_metadata(store, snapshot['Root']):
        pathname = os.path.normpath(m.items.name)
        # Strip leading slashes so absolute names restore under destdir.
        while os.path.isabs(pathname):
            pathname = pathname[1:]
        destpath = os.path.join(destdir, pathname)
        (path, filename) = os.path.split(destpath)

        # TODO: Check for ../../../paths that might attempt to write outside
        # the destination directory. Maybe also check attempts to follow
        # symlinks pointing outside?

        if not os.path.isdir(path):
            # NOTE(review): the directory-creation call (os.makedirs?) is
            # missing here.

        if m.items.type in ('-', 'f'):
            # Regular file: rebuild contents block-by-block and verify.
            file = open(destpath, 'wb')
            verifier = lbs.ChecksumVerifier(m.items.checksum)
            for block in m.data():
                data = store.get(block)
                verifier.update(data)
                # NOTE(review): the write of `data` to `file`, the `size`
                # accumulation, and the file close are missing here.
            if int(m.fields['size']) != size:
                raise ValueError("File size does not match!")
            if not verifier.valid():
                raise ValueError("Bad checksum found")
        elif m.items.type == 'd':
            # NOTE(review): directory-creation code is missing here.
        elif m.items.type == 'l':
            target = m.items.target
            # Old (v0.2 format) name for 'target'
            # NOTE(review): the condition or try/except guarding this
            # fallback is missing; as written it would always overwrite
            # `target`.
            target = m.items.contents
            os.symlink(target, destpath)
        elif m.items.type == 'p':
            # NOTE(review): FIFO-creation code (os.mkfifo?) is missing here.
        elif m.items.type in ('c', 'b'):
            # Character or block device node.
            if m.items.type == 'c':
                mode = 0600 | stat.S_IFCHR
            # NOTE(review): the `else:` line for the block-device case is
            # missing here.
                mode = 0600 | stat.S_IFBLK
            os.mknod(destpath, mode, os.makedev(*m.items.device))
        elif m.items.type == 's':
            pass # TODO: Implement
        # NOTE(review): the `else:` header for the unknown-type case, and the
        # try/except scaffolding that routes errors into the warn() calls
        # below, are missing from this chunk.
            warn(m, "Unknown type code: " + m.items.type)

        warn(m, "Error restoring: %s" % (e,))

        # Restore ownership of the created entry.
        uid = m.items.user[0]
        gid = m.items.group[0]
        os.lchown(destpath, uid, gid)
        warn(m, "Error restoring file ownership: %s" % (e,))

        # NOTE(review): symlinks appear to skip the chmod/utime fixups below,
        # but the skip statement itself is missing here.
        if m.items.type == 'l':
        os.chmod(destpath, m.items.mode)
        warn(m, "Error restoring file permissions: %s" % (e,))
        os.utime(destpath, (time.time(), m.items.mtime))
        warn(m, "Error restoring file timestamps: %s" % (e,))
# Dispatch on the command name parsed from the command line.
# NOTE(review): the head of this chain is missing from this chunk -- the code
# that sets `cmd` (presumably `cmd = args[0]` plus a no-command check) and the
# first `if` branch are not visible; the bodies of the 'prune-db' and
# 'list-snapshots' branches and the trailing `else:` before the final print
# also appear to be missing.
elif cmd == 'prune-db':
elif cmd == 'list-snapshots':
elif cmd == 'object-sums':
    cmd_object_checksums(args)
elif cmd == 'read-snapshots':
    cmd_read_snapshots(args)
elif cmd == 'read-metadata':
    cmd_read_metadata(args[0])
elif cmd == 'list-snapshot-sizes':
    cmd_list_snapshot_sizes()
elif cmd == 'verify-snapshots':
    cmd_verify_snapshots(args)
elif cmd == 'restore-snapshot':
    cmd_restore_snapshot(args)
print "Unknown command:", cmd