#!/usr/bin/python
#
# Utility for managing Cumulus archives.

import getpass, os, stat, sys, time
from optparse import OptionParser

import lbs

# We support up to "LBS Snapshot v0.6" formats, but are also limited by the
# lbs module.
FORMAT_VERSION = min(lbs.FORMAT_VERSION, (0, 6))

def check_version(format):
    ver = lbs.parse_metadata_version(format)
    if ver > FORMAT_VERSION:
        raise RuntimeError("Unsupported LBS format: " + format)
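
# For example, a descriptor whose Format field reads "LBS Snapshot v0.6"
# should parse as the tuple (0, 6) and be accepted; a hypothetical newer
# "LBS Snapshot v0.7" would compare greater than FORMAT_VERSION and raise.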

parser = OptionParser(usage="%prog [option]... command [arg]...")
parser.add_option("-v", action="store_true", dest="verbose", default=False,
                  help="increase verbosity")
parser.add_option("--store", dest="store",
                  help="specify path to backup data store")
parser.add_option("--localdb", dest="localdb",
                  help="specify path to local database")
parser.add_option("--intent", dest="intent", default=1.0,
                  help="give expected next snapshot type when cleaning")
(options, args) = parser.parse_args(sys.argv[1:])
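
# Example invocations (script name and paths here are illustrative):
#   lbs-util --store=/backup/store list-snapshots
#   lbs-util --localdb=/backup/localdb --intent=2.0 clean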

# Read a passphrase from the user and store it in the LBS_GPG_PASSPHRASE
# environment variable.
def get_passphrase():
    ENV_KEY = 'LBS_GPG_PASSPHRASE'
    if ENV_KEY not in os.environ:
        os.environ[ENV_KEY] = getpass.getpass()
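
# Exporting the passphrase through the environment lets the decryption
# machinery in the lbs module (and any gpg subprocesses it spawns) pick it
# up when segments are unpacked; commands below that read segment data call
# get_passphrase() first.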

# Delete old snapshots from the local database, though do not actually
# schedule any segment cleaning.
# Syntax: $0 --localdb=LOCALDB prune-db
def cmd_prune_db():
    db = lbs.LocalDatabase(options.localdb)

    # Delete old snapshots from the local database.
    intent = float(options.intent)
    for s in db.list_schemes():
        db.garbage_collect(s, intent)
    db.commit()

# Run the segment cleaner.
# Syntax: $0 --localdb=LOCALDB clean
def cmd_clean(clean_threshold=7.0):
    db = lbs.LocalDatabase(options.localdb)

    # Delete old snapshots from the local database.
    intent = float(options.intent)
    for s in db.list_schemes():
        db.garbage_collect(s, intent)

    # Expire segments which are poorly-utilized.
    for s in db.get_segment_cleaning_list():
        if s.cleaning_benefit > clean_threshold:
            print "Cleaning segment %d (benefit %.2f)" % (s.id,
                                                          s.cleaning_benefit)
            db.mark_segment_expired(s)

    db.balance_expired_objects()
    db.commit()
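
# The cleaning benefit for each segment comes from the local database's
# utilization statistics; judging from mark_segment_expired and
# balance_expired_objects, expired segments are not deleted here but have
# their live objects rewritten into new segments by a later backup run,
# whose expected type is given by --intent.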

# List snapshots stored.
# Syntax: $0 --store=STORE list-snapshots
def cmd_list_snapshots():
    store = lbs.LowlevelDataStore(options.store)
    for s in sorted(store.list_snapshots()):
        print s

# List size of data needed for each snapshot.
# Syntax: $0 --store=STORE list-snapshot-sizes
def cmd_list_snapshot_sizes():
    lowlevel = lbs.LowlevelDataStore(options.store)
    store = lbs.ObjectStore(lowlevel)
    previous = set()
    for s in sorted(lowlevel.list_snapshots()):
        d = lbs.parse_full(store.load_snapshot(s))
        check_version(d['Format'])
        segments = d['Segments'].split()
        (size, added, removed) = (0, 0, 0)
        for seg in segments:
            segsize = lowlevel.lowlevel_stat(seg + ".tar.gpg")['size']
            size += segsize
            if seg not in previous: added += segsize
        for seg in previous:
            if seg not in segments:
                removed += lowlevel.lowlevel_stat(seg + ".tar.gpg")['size']
        previous = set(segments)
        print "%s: %.3f +%.3f -%.3f" % (s, size / 1024.0**2,
                                        added / 1024.0**2,
                                        removed / 1024.0**2)

# Build checksum list for objects in the given segments, or all segments if
# none are specified.
def cmd_object_checksums(segments):
    get_passphrase()
    lowlevel = lbs.LowlevelDataStore(options.store)
    store = lbs.ObjectStore(lowlevel)
    if len(segments) == 0:
        segments = sorted(lowlevel.list_segments())
    for s in segments:
        for (o, data) in store.load_segment(s):
            csum = lbs.ChecksumCreator().update(data).compute()
            print "%s/%s:%d:%s" % (s, o, len(data), csum)

# Read a snapshot file
def cmd_read_snapshots(snapshots):
    get_passphrase()
    lowlevel = lbs.LowlevelDataStore(options.store)
    store = lbs.ObjectStore(lowlevel)
    for s in snapshots:
        d = lbs.parse_full(store.load_snapshot(s))
        check_version(d['Format'])
        print d
        print d['Segments'].split()

# Produce a flattened metadata dump from a snapshot
def cmd_read_metadata(snapshot):
    get_passphrase()
    lowlevel = lbs.LowlevelDataStore(options.store)
    store = lbs.ObjectStore(lowlevel)
    d = lbs.parse_full(store.load_snapshot(snapshot))
    check_version(d['Format'])
    metadata = lbs.read_metadata(store, d['Root'])
    blank = True                # Whether the last line printed was blank
    for l in metadata:
        if l == '\n':
            if blank: continue
            blank = True
        else:
            blank = False
        sys.stdout.write(l)

# Verify snapshot integrity
def cmd_verify_snapshots(snapshots):
    get_passphrase()
    lowlevel = lbs.LowlevelDataStore(options.store)
    store = lbs.ObjectStore(lowlevel)
    for s in snapshots:
        lbs.accessed_segments.clear()
        print "#### Snapshot", s
        d = lbs.parse_full(store.load_snapshot(s))
        check_version(d['Format'])
        print "## Root:", d['Root']
        metadata = lbs.iterate_metadata(store, d['Root'])
        for m in metadata:
            if m.fields['type'] not in ('-', 'f'): continue
            print "%s [%d bytes]" % (m.fields['name'], int(m.fields['size']))

            verifier = lbs.ChecksumVerifier(m.fields['checksum'])
            size = 0
            for block in m.data():
                data = store.get(block)
                verifier.update(data)
                size += len(data)
            if int(m.fields['size']) != size:
                raise ValueError("File size does not match!")
            if not verifier.valid():
                raise ValueError("Bad checksum found")

        # Verify that the list of segments included with the snapshot was
        # actually accurate: covered all segments that were really read, and
        # doesn't contain duplicates.
        listed_segments = set(d['Segments'].split())
        if lbs.accessed_segments - listed_segments:
            print "Error: Some segments not listed in descriptor!"
            print sorted(list(lbs.accessed_segments - listed_segments))
        if listed_segments - lbs.accessed_segments:
            print "Warning: Extra unused segments listed in descriptor!"
            print sorted(list(listed_segments - lbs.accessed_segments))
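
# Verification is thorough: every data block is fetched and decoded, so each
# referenced segment is downloaded and decrypted again; lbs.accessed_segments
# (maintained by the lbs module) records which segments were actually read.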

# Restore a snapshot, or some subset of files from it
def cmd_restore_snapshot(args):
    get_passphrase()
    lowlevel = lbs.LowlevelDataStore(options.store)
    store = lbs.ObjectStore(lowlevel)
    snapshot = lbs.parse_full(store.load_snapshot(args[0]))
    check_version(snapshot['Format'])
    destdir = args[1]
    paths = args[2:]

    def matchpath(path):
        "Return true if the specified path should be included in the restore."

        # No specification of what to restore => restore everything
        if len(paths) == 0: return True

        for p in paths:
            if path == p: return True
            if path.startswith(p + "/"): return True
        return False
199 print "Warning: %s: %s" % (m.items.name, msg)

    # Phase 1: Read the complete metadata log and create directory structure.
    metadata_items = []
    metadata_paths = {}
    metadata_segments = {}
    for m in lbs.iterate_metadata(store, snapshot['Root']):
        pathname = os.path.normpath(m.items.name)
        while os.path.isabs(pathname):
            pathname = pathname[1:]
        if not matchpath(pathname): continue

        destpath = os.path.join(destdir, pathname)
        if m.items.type == 'd':
            path = destpath
        else:
            (path, filename) = os.path.split(destpath)

        metadata_items.append((pathname, m))
        if m.items.type in ('-', 'f'):
            metadata_paths[pathname] = m
            for block in m.data():
                (segment, object, checksum, slice) \
                    = lbs.ObjectStore.parse_ref(block)
                if segment not in metadata_segments:
                    metadata_segments[segment] = set()
                metadata_segments[segment].add(pathname)

        try:
            if not os.path.isdir(path):
                print "mkdir:", path
                os.makedirs(path)
        except Exception, e:
            warn(m, "Error creating directory structure: %s" % (e,))
            continue

    # Phase 2: Restore files, ordered by how data is stored in segments.
    def restore_file(pathname, m):
        assert m.items.type in ('-', 'f')
        print "extract:", pathname
        destpath = os.path.join(destdir, pathname)

        file = open(destpath, 'wb')
        verifier = lbs.ChecksumVerifier(m.items.checksum)
        size = 0
        for block in m.data():
            data = store.get(block)
            verifier.update(data)
            size += len(data)
            file.write(data)
        file.close()
        if int(m.fields['size']) != size:
            raise ValueError("File size does not match!")
        if not verifier.valid():
            raise ValueError("Bad checksum found")

    while metadata_segments:
        (segment, items) = metadata_segments.popitem()
        print "+ Segment", segment
        for pathname in sorted(items):
            if pathname in metadata_paths:
                restore_file(pathname, metadata_paths[pathname])
                del metadata_paths[pathname]

    print "+ Remaining files"
    while metadata_paths:
        (pathname, m) = metadata_paths.popitem()
        restore_file(pathname, m)
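
    # Restoring in segment order means each segment should only need to be
    # fetched and decrypted once; files never recorded in metadata_segments
    # (for instance, empty files with no data blocks) are caught by the
    # "remaining files" pass above.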

    # Phase 3: Restore special files (symlinks, devices).
    # Phase 4: Restore directory permissions and modification times.
    for (pathname, m) in reversed(metadata_items):
        print "permissions:", pathname
        destpath = os.path.join(destdir, pathname)
        (path, filename) = os.path.split(destpath)

        # TODO: Check for ../../../paths that might attempt to write outside
        # the destination directory.  Maybe also check attempts to follow
        # symlinks pointing outside?

        try:
            if m.items.type in ('-', 'f', 'd'):
                pass
            elif m.items.type == 'l':
                try:
                    target = m.items.target
                except:
                    # Old (v0.2 format) name for 'target'
                    target = m.items.contents
                os.symlink(target, destpath)
            elif m.items.type == 'p':
                os.mkfifo(destpath)
            elif m.items.type in ('c', 'b'):
                if m.items.type == 'c':
                    mode = 0600 | stat.S_IFCHR
                else:
                    mode = 0600 | stat.S_IFBLK
                os.mknod(destpath, mode, os.makedev(*m.items.device))
            elif m.items.type == 's':
                pass        # TODO: Implement
            else:
                warn(m, "Unknown type code: " + m.items.type)
                continue
        except Exception, e:
            warn(m, "Error restoring: %s" % (e,))
            continue

        try:
            uid = m.items.user[0]
            gid = m.items.group[0]
            os.lchown(destpath, uid, gid)
        except Exception, e:
            warn(m, "Error restoring file ownership: %s" % (e,))

        if m.items.type == 'l':
            continue

        try:
            os.chmod(destpath, m.items.mode)
        except Exception, e:
            warn(m, "Error restoring file permissions: %s" % (e,))

        try:
            os.utime(destpath, (time.time(), m.items.mtime))
        except Exception, e:
            warn(m, "Error restoring file timestamps: %s" % (e,))

if len(args) == 0:
    parser.print_usage()
    sys.exit(1)

cmd = args[0]
args = args[1:]
if cmd == 'clean':
    cmd_clean()
elif cmd == 'prune-db':
    cmd_prune_db()
elif cmd == 'list-snapshots':
    cmd_list_snapshots()
elif cmd == 'object-sums':
    cmd_object_checksums(args)
elif cmd == 'read-snapshots':
    cmd_read_snapshots(args)
elif cmd == 'read-metadata':
    cmd_read_metadata(args[0])
elif cmd == 'list-snapshot-sizes':
    cmd_list_snapshot_sizes()
elif cmd == 'verify-snapshots':
    cmd_verify_snapshots(args)
elif cmd == 'restore-snapshot':
    cmd_restore_snapshot(args)
else:
    print "Unknown command:", cmd
    parser.print_usage()
    sys.exit(1)