3 # Utility for managing Cumulus archives.
5 import getpass, os, stat, sys, time
6 from optparse import OptionParser
# We support up to "LBS Snapshot v0.8" formats, but are also limited by the lbs
# module's own cap, so take whichever of the two is lower.
FORMAT_VERSION = min(lbs.FORMAT_VERSION, (0, 8))
def check_version(format):
    """Raise RuntimeError if a snapshot's format string is newer than we support."""
    parsed = lbs.parse_metadata_version(format)
    if parsed <= FORMAT_VERSION:
        return
    raise RuntimeError("Unsupported LBS format: " + format)
# Command-line interface: global options first, then a command name and its
# arguments (dispatched at the bottom of the file).
parser = OptionParser(usage="%prog [option]... command [arg]...")
parser.add_option("-v", action="store_true", dest="verbose", default=False,
                  help="increase verbosity")
parser.add_option("--store", dest="store",
                  help="specify path to backup data store")
parser.add_option("--localdb", dest="localdb",
                  help="specify path to local database")
# Fix: parse --intent as a float up front so options.intent has a consistent
# type (the default was a float but CLI values arrived as strings), and a bad
# value fails immediately with a usage error rather than a later ValueError.
parser.add_option("--intent", dest="intent", type="float", default=1.0,
                  help="give expected next snapshot type when cleaning")
(options, args) = parser.parse_args(sys.argv[1:])
# Read a passphrase from the user and store it in the LBS_GPG_PASSPHRASE
# environment variable (skipped when the caller has already set it).
ENV_KEY = 'LBS_GPG_PASSPHRASE'
# Fix: dict.has_key() is deprecated (and gone in Python 3); the "in" operator
# is the supported spelling and behaves identically here.
if ENV_KEY not in os.environ:
    os.environ[ENV_KEY] = getpass.getpass()
39 # Delete old snapshots from the local database, though do not actually schedule
40 # any segment cleaning.
41 # Syntax: $0 --localdb=LOCALDB prune-db
43 db = lbs.LocalDatabase(options.localdb)
45 # Delete old snapshots from the local database.
49 # Run the segment cleaner.
50 # Syntax: $0 --localdb=LOCALDB clean
51 def cmd_clean(clean_threshold=7.0):
52 db = lbs.LocalDatabase(options.localdb)
54 # Delete old snapshots from the local database.
55 intent = float(options.intent)
56 for s in db.list_schemes():
57 db.garbage_collect(s, intent)
59 # Expire segments which are poorly-utilized.
60 for s in db.get_segment_cleaning_list():
61 if s.cleaning_benefit > clean_threshold:
62 print "Cleaning segment %d (benefit %.2f)" % (s.id,
64 db.mark_segment_expired(s)
67 db.balance_expired_objects()
70 # List snapshots stored.
71 # Syntax: $0 --data=DATADIR list-snapshots
72 def cmd_list_snapshots():
73 store = lbs.LowlevelDataStore(options.store)
74 for s in sorted(store.list_snapshots()):
77 # List size of data needed for each snapshot.
78 # Syntax: $0 --data=DATADIR list-snapshot-sizes
79 def cmd_list_snapshot_sizes():
80 lowlevel = lbs.LowlevelDataStore(options.store)
81 store = lbs.ObjectStore(lowlevel)
83 for s in sorted(lowlevel.list_snapshots()):
84 d = lbs.parse_full(store.load_snapshot(s))
85 check_version(d['Format'])
88 intent = float(d['Backup-Intent'])
92 segments = d['Segments'].split()
93 (size, added, removed) = (0, 0, 0)
95 segsize = lowlevel.lowlevel_stat(seg + ".tar.gpg")['size']
97 if seg not in previous: added += segsize
99 if seg not in segments:
100 removed += lowlevel.lowlevel_stat(seg + ".tar.gpg")['size']
101 previous = set(segments)
102 print "%s [%s]: %.3f +%.3f -%.3f" % (s, intent, size / 1024.0**2, added / 1024.0**2, removed / 1024.0**2)
104 # Build checksum list for objects in the given segments, or all segments if
105 # none are specified.
106 def cmd_object_checksums(segments):
108 lowlevel = lbs.LowlevelDataStore(options.store)
109 store = lbs.ObjectStore(lowlevel)
110 if len(segments) == 0:
111 segments = sorted(lowlevel.list_segments())
113 for (o, data) in store.load_segment(s):
114 csum = lbs.ChecksumCreator().update(data).compute()
115 print "%s/%s:%d:%s" % (s, o, len(data), csum)
118 # Read a snapshot file
119 def cmd_read_snapshots(snapshots):
121 lowlevel = lbs.LowlevelDataStore(options.store)
122 store = lbs.ObjectStore(lowlevel)
124 d = lbs.parse_full(store.load_snapshot(s))
125 check_version(d['Format'])
127 print d['Segments'].split()
# Produce a flattened metadata dump from a snapshot
def cmd_read_metadata(snapshot):
    """Write the flattened metadata log for one snapshot to stdout."""
    lowlevel = lbs.LowlevelDataStore(options.store)
    store = lbs.ObjectStore(lowlevel)
    d = lbs.parse_full(store.load_snapshot(snapshot))
    check_version(d['Format'])
    metadata = lbs.read_metadata(store, d['Root'])
    # NOTE(review): the remainder of this function was missing from the
    # listing; reconstructed as the usual dump loop that collapses runs of
    # consecutive blank lines -- verify against history.
    blank = True
    for l in metadata:
        if l == '\n':
            if blank: continue
            blank = True
        else:
            blank = False
        sys.stdout.write(l)
148 # Verify snapshot integrity
149 def cmd_verify_snapshots(snapshots):
151 lowlevel = lbs.LowlevelDataStore(options.store)
152 store = lbs.ObjectStore(lowlevel)
154 lbs.accessed_segments.clear()
155 print "#### Snapshot", s
156 d = lbs.parse_full(store.load_snapshot(s))
157 check_version(d['Format'])
158 print "## Root:", d['Root']
159 metadata = lbs.iterate_metadata(store, d['Root'])
161 if m.fields['type'] not in ('-', 'f'): continue
162 print "%s [%d bytes]" % (m.fields['name'], int(m.fields['size']))
163 verifier = lbs.ChecksumVerifier(m.fields['checksum'])
165 for block in m.data():
166 data = store.get(block)
167 verifier.update(data)
169 if int(m.fields['size']) != size:
170 raise ValueError("File size does not match!")
171 if not verifier.valid():
172 raise ValueError("Bad checksum found")
174 # Verify that the list of segments included with the snapshot was
175 # actually accurate: covered all segments that were really read, and
176 # doesn't contain duplicates.
177 listed_segments = set(d['Segments'].split())
178 if lbs.accessed_segments - listed_segments:
179 print "Error: Some segments not listed in descriptor!"
180 print sorted(list(lbs.accessed_segments - listed_segments))
181 if listed_segments - lbs.accessed_segments :
182 print "Warning: Extra unused segments listed in descriptor!"
183 print sorted(list(listed_segments - lbs.accessed_segments))
186 # Restore a snapshot, or some subset of files from it
187 def cmd_restore_snapshot(args):
189 lowlevel = lbs.LowlevelDataStore(options.store)
190 store = lbs.ObjectStore(lowlevel)
191 snapshot = lbs.parse_full(store.load_snapshot(args[0]))
192 check_version(snapshot['Format'])
197 "Return true if the specified path should be included in the restore."
199 # No specification of what to restore => restore everything
200 if len(paths) == 0: return True
203 if path == p: return True
204 if path.startswith(p + "/"): return True
208 print "Warning: %s: %s" % (m.items.name, msg)
210 # Phase 1: Read the complete metadata log and create directory structure.
213 metadata_segments = {}
214 for m in lbs.iterate_metadata(store, snapshot['Root']):
215 pathname = os.path.normpath(m.items.name)
216 while os.path.isabs(pathname):
217 pathname = pathname[1:]
218 if not matchpath(pathname): continue
220 destpath = os.path.join(destdir, pathname)
221 if m.items.type == 'd':
224 (path, filename) = os.path.split(destpath)
226 metadata_items.append((pathname, m))
227 if m.items.type in ('-', 'f'):
228 metadata_paths[pathname] = m
229 for block in m.data():
230 (segment, object, checksum, slice) \
231 = lbs.ObjectStore.parse_ref(block)
232 if segment not in metadata_segments:
233 metadata_segments[segment] = set()
234 metadata_segments[segment].add(pathname)
237 if not os.path.isdir(path):
241 warn(m, "Error creating directory structure: %s" % (e,))
244 # Phase 2: Restore files, ordered by how data is stored in segments.
245 def restore_file(pathname, m):
246 assert m.items.type in ('-', 'f')
247 print "extract:", pathname
248 destpath = os.path.join(destdir, pathname)
250 file = open(destpath, 'wb')
251 verifier = lbs.ChecksumVerifier(m.items.checksum)
253 for block in m.data():
254 data = store.get(block)
255 verifier.update(data)
259 if int(m.fields['size']) != size:
260 raise ValueError("File size does not match!")
261 if not verifier.valid():
262 raise ValueError("Bad checksum found")
264 while metadata_segments:
265 (segment, items) = metadata_segments.popitem()
266 print "+ Segment", segment
267 for pathname in sorted(items):
268 if pathname in metadata_paths:
269 restore_file(pathname, metadata_paths[pathname])
270 del metadata_paths[pathname]
272 print "+ Remaining files"
273 while metadata_paths:
274 (pathname, m) = metadata_paths.popitem()
275 restore_file(pathname, m)
277 # Phase 3: Restore special files (symlinks, devices).
278 # Phase 4: Restore directory permissions and modification times.
279 for (pathname, m) in reversed(metadata_items):
280 print "permissions:", pathname
281 destpath = os.path.join(destdir, pathname)
282 (path, filename) = os.path.split(destpath)
284 # TODO: Check for ../../../paths that might attempt to write outside
285 # the destination directory. Maybe also check attempts to follow
286 # symlinks pointing outside?
289 if m.items.type in ('-', 'f', 'd'):
291 elif m.items.type == 'l':
293 target = m.items.target
295 # Old (v0.2 format) name for 'target'
296 target = m.items.contents
297 os.symlink(target, destpath)
298 elif m.items.type == 'p':
300 elif m.items.type in ('c', 'b'):
301 if m.items.type == 'c':
302 mode = 0600 | stat.S_IFCHR
304 mode = 0600 | stat.S_IFBLK
305 os.mknod(destpath, mode, os.makedev(*m.items.device))
306 elif m.items.type == 's':
307 pass # TODO: Implement
309 warn(m, "Unknown type code: " + m.items.type)
313 warn(m, "Error restoring: %s" % (e,))
317 uid = m.items.user[0]
318 gid = m.items.group[0]
319 os.lchown(destpath, uid, gid)
321 warn(m, "Error restoring file ownership: %s" % (e,))
323 if m.items.type == 'l':
327 os.chmod(destpath, m.items.mode)
329 warn(m, "Error restoring file permissions: %s" % (e,))
332 os.utime(destpath, (time.time(), m.items.mtime))
334 warn(m, "Error restoring file timestamps: %s" % (e,))
345 elif cmd == 'prune-db':
347 elif cmd == 'list-snapshots':
349 elif cmd == 'object-sums':
350 cmd_object_checksums(args)
351 elif cmd == 'read-snapshots':
352 cmd_read_snapshots(args)
353 elif cmd == 'read-metadata':
354 cmd_read_metadata(args[0])
355 elif cmd == 'list-snapshot-sizes':
356 cmd_list_snapshot_sizes()
357 elif cmd == 'verify-snapshots':
358 cmd_verify_snapshots(args)
359 elif cmd == 'restore-snapshot':
360 cmd_restore_snapshot(args)
362 print "Unknown command:", cmd