#!/usr/bin/python
#
# Utility for managing Cumulus archives.

import getpass, os, stat, sys, time
from optparse import OptionParser

# Automatically set Python path, based on script directory. This should be
# removed if the tools are properly installed somewhere.
script_directory = os.path.dirname(sys.argv[0])
sys.path.append(os.path.join(script_directory, 'python'))

import cumulus.store
import cumulus as lbs    # The Cumulus module was formerly named "lbs".

# We support up to "LBS Snapshot v0.8" formats, but are also limited by the lbs
# module.
FORMAT_VERSION = min(lbs.FORMAT_VERSION, (0, 8))

def check_version(format):
    ver = lbs.parse_metadata_version(format)
    if ver > FORMAT_VERSION:
        raise RuntimeError("Unsupported LBS format: " + format)
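
# For example, a descriptor whose Format field reads "LBS Snapshot v0.8"
# parses to (0, 8) and passes; a newer "LBS Snapshot v0.9" would be rejected
# with the RuntimeError above.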

parser = OptionParser(usage="%prog [option]... command [arg]...")
parser.add_option("-v", action="store_true", dest="verbose", default=False,
                  help="increase verbosity")
parser.add_option("-n", action="store_true", dest="dry_run", default=False,
                  help="dry run: do not actually delete any data")
parser.add_option("--store", dest="store",
                  help="specify path to backup data store")
parser.add_option("--localdb", dest="localdb",
                  help="specify path to local database")
parser.add_option("--intent", dest="intent", default=1.0,
                  help="give expected next snapshot type when cleaning")
(options, args) = parser.parse_args(sys.argv[1:])
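
# Typical invocation shape (paths are illustrative):
#   $0 --store=/backups/cumulus --localdb=/var/lib/cumulus clean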

# Read a passphrase from the user and store it in the LBS_GPG_PASSPHRASE
# environment variable.
def get_passphrase():
    ENV_KEY = 'LBS_GPG_PASSPHRASE'
    if ENV_KEY not in os.environ:
        os.environ[ENV_KEY] = getpass.getpass()
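
# The prompt can be skipped by exporting the variable beforehand (the value
# and path shown are placeholders):
#   LBS_GPG_PASSPHRASE='example' $0 --store=/backups/cumulus read-metadata SNAPSHOT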

# Delete old snapshots from the local database, though do not actually schedule
# any segment cleaning.
# Syntax: $0 --localdb=LOCALDB prune-db
def cmd_prune_db():
    db = lbs.LocalDatabase(options.localdb)
    # Delete old snapshots from the local database.
    for s in db.list_schemes():
        db.garbage_collect(s, float(options.intent))
    db.commit()

# Run the segment cleaner.
# Syntax: $0 --localdb=LOCALDB clean
def cmd_clean(clean_threshold=7.0):
    db = lbs.LocalDatabase(options.localdb)

    # Delete old snapshots from the local database.
    intent = float(options.intent)
    for s in db.list_schemes():
        db.garbage_collect(s, intent)

    # Expire segments which are poorly-utilized.
    for s in db.get_segment_cleaning_list():
        if s.cleaning_benefit > clean_threshold:
            print "Cleaning segment %d (benefit %.2f)" % (s.id,
                                                          s.cleaning_benefit)
            db.mark_segment_expired(s)

    db.balance_expired_objects()
    db.commit()
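
# Example: with the default clean_threshold of 7.0, a segment whose reported
# cleaning benefit is 9.2 would be marked expired, while one at 3.5 would be
# left alone. The --intent option (value below is illustrative) gives the
# expected type of the next snapshot:
#   $0 --localdb=/var/lib/cumulus --intent=7 clean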

# List snapshots stored.
# Syntax: $0 --store=DATADIR list-snapshots
def cmd_list_snapshots():
    store = lbs.LowlevelDataStore(options.store)
    for s in sorted(store.list_snapshots()):
        print s

# List size of data needed for each snapshot.
# Syntax: $0 --store=DATADIR list-snapshot-sizes
def cmd_list_snapshot_sizes():
    lowlevel = lbs.LowlevelDataStore(options.store)
    get_passphrase()
    store = lbs.ObjectStore(lowlevel)
    previous = set()
    for s in sorted(lowlevel.list_snapshots()):
        d = lbs.parse_full(store.load_snapshot(s))
        check_version(d['Format'])

        try:
            intent = float(d['Backup-Intent'])
        except (KeyError, ValueError):
            # Older snapshots may not record a Backup-Intent field.
            intent = 1.0

        segments = d['Segments'].split()
        (size, added, removed, addcount, remcount) = (0, 0, 0, 0, 0)
        for seg in segments:
            segsize = lowlevel.lowlevel_stat(seg + ".tar.gpg")['size']
            size += segsize
            if seg not in previous:
                added += segsize
                addcount += 1
        for seg in previous:
            if seg not in segments:
                removed += lowlevel.lowlevel_stat(seg + ".tar.gpg")['size']
                remcount += 1
        previous = set(segments)
        print "%s [%s]: %.3f +%.3f -%.3f (+%d/-%d segments)" \
            % (s, intent, size / 1024.0**2, added / 1024.0**2,
               removed / 1024.0**2, addcount, remcount)
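
# Sizes above are printed in MiB. An output line looks like (all values are
# illustrative):
#   host-20080101T0300 [1.0]: 1024.337 +12.421 -3.107 (+2/-1 segments)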

# Search for any files which are not needed by any current snapshots and offer
# to delete them.
# Syntax: $0 --store=DATADIR gc
def cmd_garbage_collect():
    lowlevel = lbs.LowlevelDataStore(options.store)
    get_passphrase()
    store = lbs.ObjectStore(lowlevel)
    snapshots = set(lowlevel.list_snapshots())
    segments = set()
    for s in snapshots:
        d = lbs.parse_full(store.load_snapshot(s))
        check_version(d['Format'])
        segments.update(d['Segments'].split())

    referenced = snapshots.union(segments)

    reclaimed = 0
    for (t, r) in cumulus.store.type_patterns.items():
        for f in lowlevel.store.list(t):
            m = r.match(f)
            if m is None or m.group(1) not in referenced:
                print "Garbage:", (t, f)
                reclaimed += lowlevel.store.stat(t, f)['size']
                if not options.dry_run:
                    lowlevel.store.delete(t, f)
    print "Reclaimed space:", reclaimed
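
# Example: preview the candidates with -n (dry run), then rerun without it to
# actually delete (path is illustrative):
#   $0 -n --store=/backups/cumulus gc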

# Build checksum list for objects in the given segments, or all segments if
# none are specified.
# Syntax: $0 --store=DATADIR object-sums [SEGMENT]...
def cmd_object_checksums(segments):
    get_passphrase()
    lowlevel = lbs.LowlevelDataStore(options.store)
    store = lbs.ObjectStore(lowlevel)
    if len(segments) == 0:
        segments = sorted(lowlevel.list_segments())
    for s in segments:
        for (o, data) in store.load_segment(s):
            csum = lbs.ChecksumCreator().update(data).compute()
            print "%s/%s:%d:%s" % (s, o, len(data), csum)
    store.cleanup()
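
# Each line above has the form SEGMENT/OBJECT:SIZE:CHECKSUM, where SIZE is the
# object length in bytes and CHECKSUM is the algorithm-prefixed digest string
# produced by lbs.ChecksumCreator (e.g. "sha1=..." in typical configurations).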

# Read a snapshot file and dump its contents.
# Syntax: $0 --store=DATADIR read-snapshots SNAPSHOT...
def cmd_read_snapshots(snapshots):
    get_passphrase()
    lowlevel = lbs.LowlevelDataStore(options.store)
    store = lbs.ObjectStore(lowlevel)
    for s in snapshots:
        d = lbs.parse_full(store.load_snapshot(s))
        check_version(d['Format'])
        print d
        print d['Segments'].split()
    store.cleanup()

# Produce a flattened metadata dump from a snapshot.
# Syntax: $0 --store=DATADIR read-metadata SNAPSHOT
def cmd_read_metadata(snapshot):
    get_passphrase()
    lowlevel = lbs.LowlevelDataStore(options.store)
    store = lbs.ObjectStore(lowlevel)
    d = lbs.parse_full(store.load_snapshot(snapshot))
    check_version(d['Format'])
    metadata = lbs.read_metadata(store, d['Root'])
    for l in metadata:
        sys.stdout.write(l)
    store.cleanup()

# Verify snapshot integrity.
# Syntax: $0 --store=DATADIR verify-snapshots SNAPSHOT...
def cmd_verify_snapshots(snapshots):
    get_passphrase()
    lowlevel = lbs.LowlevelDataStore(options.store)
    store = lbs.ObjectStore(lowlevel)
    for s in snapshots:
        lbs.accessed_segments.clear()
        print "#### Snapshot", s
        d = lbs.parse_full(store.load_snapshot(s))
        check_version(d['Format'])
        print "## Root:", d['Root']
        metadata = lbs.iterate_metadata(store, d['Root'])
        for m in metadata:
            if m.fields['type'] not in ('-', 'f'): continue
            print "%s [%d bytes]" % (m.fields['name'], int(m.fields['size']))
            verifier = lbs.ChecksumVerifier(m.fields['checksum'])
            size = 0
            for block in m.data():
                data = store.get(block)
                verifier.update(data)
                size += len(data)
            if int(m.fields['size']) != size:
                raise ValueError("File size does not match!")
            if not verifier.valid():
                raise ValueError("Bad checksum found")

        # Verify that the list of segments included with the snapshot was
        # actually accurate: it should cover all segments that were really
        # read, and should not list extra, unused segments.
        listed_segments = set(d['Segments'].split())
        if lbs.accessed_segments - listed_segments:
            print "Error: Some segments not listed in descriptor!"
            print sorted(list(lbs.accessed_segments - listed_segments))
        if listed_segments - lbs.accessed_segments:
            print "Warning: Extra unused segments listed in descriptor!"
            print sorted(list(listed_segments - lbs.accessed_segments))
    store.cleanup()
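
# Example: verify a single snapshot end-to-end (name is illustrative); a size
# or checksum mismatch raises ValueError, and segment-list problems are
# printed as shown above:
#   $0 --store=/backups/cumulus verify-snapshots host-20080101T0300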

# Restore a snapshot, or some subset of files from it.
# Syntax: $0 --store=DATADIR restore-snapshot SNAPSHOT DESTDIR [PATH]...
def cmd_restore_snapshot(args):
    get_passphrase()
    lowlevel = lbs.LowlevelDataStore(options.store)
    store = lbs.ObjectStore(lowlevel)
    snapshot = lbs.parse_full(store.load_snapshot(args[0]))
    check_version(snapshot['Format'])
    destdir = args[1]
    paths = args[2:]

    def matchpath(path):
        "Return true if the specified path should be included in the restore."

        # No specification of what to restore => restore everything
        if len(paths) == 0: return True

        for p in paths:
            if path == p: return True
            if path.startswith(p + "/"): return True
        return False
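
    # For example, with paths = ["home/jdoe"], both "home/jdoe" itself and
    # "home/jdoe/notes.txt" are restored, but "home/jdoe2" is not.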

    def warn(m, msg):
        print "Warning: %s: %s" % (m.items.name, msg)

    # Phase 1: Read the complete metadata log and create directory structure.
    metadata_items = []
    metadata_paths = {}
    metadata_segments = {}
    for m in lbs.iterate_metadata(store, snapshot['Root']):
        pathname = os.path.normpath(m.items.name)
        while os.path.isabs(pathname):
            pathname = pathname[1:]
        if not matchpath(pathname): continue

        destpath = os.path.join(destdir, pathname)
        if m.items.type == 'd':
            path = destpath
        else:
            (path, filename) = os.path.split(destpath)

        metadata_items.append((pathname, m))
        if m.items.type in ('-', 'f'):
            metadata_paths[pathname] = m
            for block in m.data():
                (segment, object, checksum, slice) \
                    = lbs.ObjectStore.parse_ref(block)
                if segment not in metadata_segments:
                    metadata_segments[segment] = set()
                metadata_segments[segment].add(pathname)

        try:
            if not os.path.isdir(path):
                print "mkdir:", path
                os.makedirs(path)
        except Exception as e:
            warn(m, "Error creating directory structure: %s" % (e,))
            continue

    # Phase 2: Restore files, ordered by how data is stored in segments.
    def restore_file(pathname, m):
        assert m.items.type in ('-', 'f')
        print "extract:", pathname
        destpath = os.path.join(destdir, pathname)

        file = open(destpath, 'wb')
        verifier = lbs.ChecksumVerifier(m.items.checksum)
        size = 0
        for block in m.data():
            data = store.get(block)
            verifier.update(data)
            size += len(data)
            file.write(data)
        file.close()
        if int(m.fields['size']) != size:
            raise ValueError("File size does not match!")
        if not verifier.valid():
            raise ValueError("Bad checksum found")

    while metadata_segments:
        (segment, items) = metadata_segments.popitem()
        print "+ Segment", segment
        for pathname in sorted(items):
            if pathname in metadata_paths:
                restore_file(pathname, metadata_paths[pathname])
                del metadata_paths[pathname]

    print "+ Remaining files"
    while metadata_paths:
        (pathname, m) = metadata_paths.popitem()
        restore_file(pathname, m)
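
    # Grouping restores by segment means each segment archive generally has to
    # be fetched and decrypted only once; any files not reached through the
    # segment grouping (for example, files with no data blocks) are picked up
    # in the final pass above.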

    # Phase 3: Restore special files (symlinks, devices).
    # Phase 4: Restore directory permissions and modification times.
    for (pathname, m) in reversed(metadata_items):
        print "permissions:", pathname
        destpath = os.path.join(destdir, pathname)
        (path, filename) = os.path.split(destpath)

        # TODO: Check for ../../../paths that might attempt to write outside
        # the destination directory. Maybe also check attempts to follow
        # symlinks pointing outside?

        try:
            if m.items.type in ('-', 'f', 'd'):
                pass
            elif m.items.type == 'l':
                try:
                    target = m.items.target
                except AttributeError:
                    # Old (v0.2 format) name for 'target'
                    target = m.items.contents
                os.symlink(target, destpath)
            elif m.items.type == 'p':
                os.mkfifo(destpath)
            elif m.items.type in ('c', 'b'):
                if m.items.type == 'c':
                    mode = 0600 | stat.S_IFCHR
                else:
                    mode = 0600 | stat.S_IFBLK
                os.mknod(destpath, mode, os.makedev(*m.items.device))
            elif m.items.type == 's':
                pass        # TODO: Implement restore of sockets
            else:
                warn(m, "Unknown type code: " + m.items.type)
                continue
        except Exception as e:
            warn(m, "Error restoring: %s" % (e,))
            continue

        try:
            uid = m.items.user[0]
            gid = m.items.group[0]
            os.lchown(destpath, uid, gid)
        except Exception as e:
            warn(m, "Error restoring file ownership: %s" % (e,))

        if m.items.type == 'l':
            # Permissions and timestamps cannot be set portably on symlinks.
            continue

        try:
            os.chmod(destpath, m.items.mode)
        except Exception as e:
            warn(m, "Error restoring file permissions: %s" % (e,))

        try:
            os.utime(destpath, (time.time(), m.items.mtime))
        except Exception as e:
            warn(m, "Error restoring file timestamps: %s" % (e,))

    store.cleanup()

if len(args) == 0:
    parser.print_usage()
    sys.exit(1)

cmd = args[0]
args = args[1:]
if cmd == 'clean':
    cmd_clean()
elif cmd == 'prune-db':
    cmd_prune_db()
elif cmd == 'list-snapshots':
    cmd_list_snapshots()
elif cmd == 'object-sums':
    cmd_object_checksums(args)
elif cmd == 'read-snapshots':
    cmd_read_snapshots(args)
elif cmd == 'read-metadata':
    cmd_read_metadata(args[0])
elif cmd == 'list-snapshot-sizes':
    cmd_list_snapshot_sizes()
elif cmd == 'gc':
    cmd_garbage_collect()
elif cmd == 'verify-snapshots':
    cmd_verify_snapshots(args)
elif cmd == 'restore-snapshot':
    cmd_restore_snapshot(args)
else:
    print "Unknown command:", cmd
    parser.print_usage()
    sys.exit(1)
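
# Example session (paths and snapshot name are illustrative):
#   $0 --store=/backups/cumulus list-snapshots
#   $0 --store=/backups/cumulus restore-snapshot host-20080101T0300 /tmp/restore home/jdoe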