1 """High-level interface for working with LBS archives.
3 This module provides an easy interface for reading from and manipulating
4 various parts of an LBS archive:
5 - listing the snapshots and segments present
6 - reading segment contents
7 - parsing snapshot descriptors and snapshot metadata logs
8 - reading and maintaining the local object database
from __future__ import division

import os, re, sha, shutil, tarfile, tempfile, thread
from pysqlite2 import dbapi2 as sqlite3

import cumulus.store, cumulus.store.file

# The largest snapshot format version that this module understands.
FORMAT_VERSION = (0, 8)         # LBS Snapshot v0.8

# Maximum number of nested indirect references allowed in a snapshot.
MAX_RECURSION_DEPTH = 3

# All segments which have been accessed this session.
accessed_segments = set()

# Table of methods used to filter segments before storage, and corresponding
# filename extensions.  These are listed in priority order (methods earlier in
# the list are tried first).
SEGMENT_FILTERS = [
    (".gpg", "cumulus-filter-gpg --decrypt"),
    (".bz2", "bzip2 -dc"),
]

36 """Decode a URI-encoded (%xx escapes) string."""
37 def hex_decode(m): return chr(int(m.group(1), 16))
38 return re.sub(r"%([0-9a-f]{2})", hex_decode, s)
40 """Encode a string to URI-encoded (%xx escapes) form."""
42 if c > '+' and c < '\x7f' and c != '@':
45 return "%%%02x" % (ord(c),)
46 return ''.join(hex_encode(c) for c in s)
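# Example (illustrative): characters outside the safe printable range, and
# '@', are escaped; everything round-trips through uri_decode.
#
#   >>> uri_encode("file name@1")
#   'file%20name%401'
#   >>> uri_decode(uri_encode("file name@1"))
#   'file name@1'
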
49 """A class which merely acts as a data container.
51 Instances of this class (or its subclasses) are merely used to store data
52 in various attributes. No methods are provided.
56 return "<%s %s>" % (self.__class__, self.__dict__)
CHECKSUM_ALGORITHMS = {
    'sha1': sha.new,
}

class ChecksumCreator:
    """Compute an LBS checksum for provided data.

    The algorithm used is selectable, but currently defaults to sha1.
    """

    def __init__(self, algorithm='sha1'):
        self.algorithm = algorithm
        self.hash = CHECKSUM_ALGORITHMS[algorithm]()

    def update(self, data):
        self.hash.update(data)
        return self

    def compute(self):
        return "%s=%s" % (self.algorithm, self.hash.hexdigest())

class ChecksumVerifier:
    """Verify whether a checksum from a snapshot matches the supplied data."""

    def __init__(self, checksumstr):
        """Create an object to check the supplied checksum."""

        (algo, checksum) = checksumstr.split("=", 1)
        self.checksum = checksum
        self.hash = CHECKSUM_ALGORITHMS[algo]()

    def update(self, data):
        self.hash.update(data)

    def valid(self):
        """Return a boolean indicating whether the checksum matches."""

        result = self.hash.hexdigest()
        return result == self.checksum

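# Example (illustrative): computing a checksum string and then verifying it.
#
#   >>> csum = ChecksumCreator('sha1')
#   >>> csum.update("hello")
#   >>> s = csum.compute()
#   >>> s
#   'sha1=aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'
#   >>> v = ChecksumVerifier(s)
#   >>> v.update("hello")
#   >>> v.valid()
#   True
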
class LowlevelDataStore:
    """Access to the backup store containing segments and snapshot descriptors.

    Instances of this class are used to get direct filesystem-level access to
    the backup data.  To read a backup, a caller will ordinarily not care
    about direct access to backup segments, but will instead merely need to
    access objects from those segments.  The ObjectStore class provides a
    suitable wrapper around a LowlevelDataStore to give this high-level
    access.
    """

    def __init__(self, path):
        if isinstance(path, cumulus.store.Store):
            self.store = path
        elif path.find(":") >= 0:
            self.store = cumulus.store.open(path)
        else:
            self.store = cumulus.store.file.FileStore(path)

    def _classify(self, filename):
        for (t, r) in cumulus.store.type_patterns.items():
            if r.match(filename):
                return (t, filename)
        return (None, filename)

    def lowlevel_open(self, filename):
        """Return a file-like object for reading data from the given file."""

        (type, filename) = self._classify(filename)
        return self.store.get(type, filename)

    def lowlevel_stat(self, filename):
        """Return a dictionary of information about the given file.

        Currently, the only defined field is 'size', giving the size of the
        file in bytes.
        """

        (type, filename) = self._classify(filename)
        return self.store.stat(type, filename)

    # Slightly higher-level list methods.
    def list_snapshots(self):
        for f in self.store.list('snapshots'):
            m = cumulus.store.type_patterns['snapshots'].match(f)
            if m: yield m.group(1)

    def list_segments(self):
        for f in self.store.list('segments'):
            m = cumulus.store.type_patterns['segments'].match(f)
            if m: yield m.group(1)

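# Example (illustrative; the path and names are made up):
#
#   >>> store = LowlevelDataStore('/backup')
#   >>> list(store.list_snapshots())
#   ['home-20080101T000000']
#   >>> store.lowlevel_stat('snapshot-home-20080101T000000.lbs')['size']
#   1024
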
class ObjectStore:
    def __init__(self, data_store):
        self.store = data_store

    def get_cachedir(self):
        if self.cachedir is None:
            self.cachedir = tempfile.mkdtemp(".lbs")
        return self.cachedir

    def cleanup(self):
        if self.cachedir is not None:
            # Delete the cache directory tree; shutil.rmtree avoids shelling
            # out to "rm -rf", which was unsafe for unusual path names.
            shutil.rmtree(self.cachedir)

    @staticmethod
    def parse_ref(refstr):
        m = re.match(r"^zero\[(\d+)\]$", refstr)
        if m:
            return ("zero", None, None, (0, int(m.group(1))))

        m = re.match(r"^([-0-9a-f]+)\/([0-9a-f]+)(\(\S+\))?(\[(((\d+)\+)?(\d+)|=(\d+))\])?$", refstr)
        if not m: return

        segment = m.group(1)
        object = m.group(2)
        checksum = m.group(3)
        slice = m.group(4)

        if checksum is not None:
            checksum = checksum.lstrip("(").rstrip(")")

        if slice is not None:
            if m.group(9) is not None:
                # Size-assertion slice
                slice = (0, int(m.group(9)), True)
            elif m.group(6) is None:
                # Abbreviated slice: no offset given
                slice = (0, int(m.group(8)), False)
            else:
                slice = (int(m.group(7)), int(m.group(8)), False)

        return (segment, object, checksum, slice)

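    # Examples (illustrative) of reference strings and their parsed form:
    #
    #   >>> ObjectStore.parse_ref("zero[100]")
    #   ('zero', None, None, (0, 100))
    #   >>> ObjectStore.parse_ref("cafe0123/0000ab(sha1=0123abcd)")
    #   ('cafe0123', '0000ab', 'sha1=0123abcd', None)
    #   >>> ObjectStore.parse_ref("cafe0123/0000ab[16+32]")
    #   ('cafe0123', '0000ab', None, (16, 32, False))
    #   >>> ObjectStore.parse_ref("cafe0123/0000ab[=48]")    # size assertion
    #   ('cafe0123', '0000ab', None, (0, 48, True))
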
    def get_segment(self, segment):
        accessed_segments.add(segment)
        for (extension, filter) in SEGMENT_FILTERS:
            try:
                raw = self.store.lowlevel_open(segment + ".tar" + extension)

                (input, output) = os.popen2(filter)
                def copy_thread(src, dst):
                    BLOCK_SIZE = 4096
                    while True:
                        block = src.read(BLOCK_SIZE)
                        if len(block) == 0: break
                        dst.write(block)
                    dst.close()
                thread.start_new_thread(copy_thread, (raw, input))
                return output
            except:
                pass

        raise cumulus.store.NotFoundError

    def load_segment(self, segment):
        seg = tarfile.open(segment, 'r|', self.get_segment(segment))
        for item in seg:
            data_obj = seg.extractfile(item)
            path = item.name.split('/')
            if len(path) == 2 and path[0] == segment:
                yield (path[1], data_obj.read())

    def load_snapshot(self, snapshot):
        file = self.store.lowlevel_open("snapshot-" + snapshot + ".lbs")
        return file.read().splitlines(True)

    def extract_segment(self, segment):
        segdir = os.path.join(self.get_cachedir(), segment)
        os.mkdir(segdir)
        for (object, data) in self.load_segment(segment):
            f = open(os.path.join(segdir, object), 'wb')
            f.write(data)
            f.close()

    def load_object(self, segment, object):
        accessed_segments.add(segment)
        path = os.path.join(self.get_cachedir(), segment, object)
        if not os.access(path, os.R_OK):
            self.extract_segment(segment)
        if segment in self.lru_list: self.lru_list.remove(segment)
        self.lru_list.append(segment)
        while len(self.lru_list) > self.CACHE_SIZE:
            # Evict the least-recently-used segment; use shutil.rmtree rather
            # than shelling out to "rm -rf".
            shutil.rmtree(os.path.join(self.cachedir, self.lru_list[0]))
            self.lru_list = self.lru_list[1:]
        return open(path, 'rb').read()

    def get(self, refstr):
        """Fetch the given object and return it.

        The input should be an object reference, in string form.
        """

        (segment, object, checksum, slice) = self.parse_ref(refstr)

        if segment == "zero":
            return "\0" * slice[1]

        data = self.load_object(segment, object)

        if checksum is not None:
            verifier = ChecksumVerifier(checksum)
            verifier.update(data)
            if not verifier.valid():
                raise ValueError

        if slice is not None:
            (start, length, exact) = slice
            if exact and len(data) != length: raise ValueError
            data = data[start:start+length]
            if len(data) != length: raise IndexError

        return data

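    # Example (illustrative; assumes the referenced segment exists in the
    # store):
    #
    #   >>> object_store = ObjectStore(LowlevelDataStore('/backup'))
    #   >>> data = object_store.get("cafe0123/0000ab[16+32]")
    #   >>> len(data)
    #   32
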
def parse(lines, terminate=None):
    """Generic parser for RFC822-style "Key: Value" data streams.

    This parser can be used to read metadata logs and snapshot root
    descriptor files.

    lines must be an iterable object which yields a sequence of lines of
    input.

    If terminate is specified, it is used as a predicate to determine when to
    stop reading input lines.
    """

    dict = {}
    last_key = None

    for l in lines:
        # Strip off a trailing newline, if present
        if len(l) > 0 and l[-1] == "\n":
            l = l[:-1]

        if terminate is not None and terminate(l):
            if len(dict) > 0: yield dict
            dict = {}
            last_key = None
            continue

        m = re.match(r"^([-\w]+):\s*(.*)$", l)
        if m:
            dict[m.group(1)] = m.group(2)
            last_key = m.group(1)
        elif len(l) > 0 and l[0].isspace() and last_key is not None:
            dict[last_key] += l

    if len(dict) > 0: yield dict

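# Example (illustrative): parsing two records separated by a blank line.
#
#   >>> lines = ["Format: LBS Snapshot v0.8\n", "Segments: a b\n",
#   ...          "\n", "Name: foo\n"]
#   >>> records = list(parse(lines, lambda l: len(l) == 0))
#   >>> len(records)
#   2
#   >>> records[0]['Format']
#   'LBS Snapshot v0.8'
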
def parse_full(lines):
    try:
        return parse(lines).next()
    except StopIteration:
        return {}

def parse_metadata_version(s):
    """Convert a string with the snapshot version format to a tuple."""

    m = re.match(r"^LBS Snapshot v(\d+(\.\d+)*)$", s)
    if m is None:
        return ()
    else:
        return tuple([int(d) for d in m.group(1).split(".")])

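# Example (illustrative): checking a snapshot's format against this module.
#
#   >>> parse_metadata_version("LBS Snapshot v0.8")
#   (0, 8)
#   >>> parse_metadata_version("LBS Snapshot v0.8") <= FORMAT_VERSION
#   True
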
def read_metadata(object_store, root):
    """Iterate through all lines in the metadata log, following references."""

    # Stack for keeping track of recursion when following references to
    # portions of the log.  The last entry in the stack corresponds to the
    # object currently being parsed.  Each entry is a list of lines which have
    # been reversed, so that popping successive lines from the end of each
    # list will return lines of the metadata log in order.
    stack = []

    def follow_ref(refstr):
        if len(stack) >= MAX_RECURSION_DEPTH: raise OverflowError
        lines = object_store.get(refstr).splitlines(True)
        lines.reverse()
        stack.append(lines)

    follow_ref(root)

    while len(stack) > 0:
        top = stack[-1]
        if len(top) == 0:
            stack.pop()
            continue
        line = top.pop()

        # An indirect reference which we must follow?
        if len(line) > 0 and line[0] == '@':
            follow_ref(line[1:].strip())
        else:
            yield line

367 """Metadata for a single file (or directory or...) from a snapshot."""
    # Functions for parsing various datatypes that can appear in a metadata
    # log.
    @staticmethod
    def decode_int(s):
        """Decode an integer, expressed in decimal, octal, or hexadecimal."""
        if s.startswith("0x"):
            return int(s, 16)
        elif s.startswith("0"):
            return int(s, 8)
        else:
            return int(s, 10)

383 """Decode a URI-encoded (%xx escapes) string."""
388 """An unecoded string."""
393 """Decode a user/group to a tuple of uid/gid followed by name."""
395 uid = MetadataItem.decode_int(items[0])
398 if items[1].startswith("(") and items[1].endswith(")"):
399 name = MetadataItem.decode_str(items[1][1:-1])
    @staticmethod
    def decode_device(s):
        """Decode a device major/minor number."""
        (major, minor) = map(MetadataItem.decode_int, s.split("/"))
        return (major, minor)

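    # Examples (illustrative) of the decoders above:
    #
    #   >>> MetadataItem.decode_int("0x1ff")
    #   511
    #   >>> MetadataItem.decode_user("1000 (alice)")
    #   (1000, 'alice')
    #   >>> MetadataItem.decode_device("8/1")
    #   (8, 1)
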
    class Items: pass

    def __init__(self, fields, object_store):
        """Initialize from a dictionary of key/value pairs from a metadata
        log."""

        self.fields = fields
        self.object_store = object_store
        self.items = self.Items()
        for (k, v) in fields.items():
            if k in self.field_types:
                decoder = self.field_types[k]
                setattr(self.items, k, decoder(v))

424 """Return an iterator for the data blocks that make up a file."""
426 # This traverses the list of blocks that make up a file, following
427 # indirect references. It is implemented in much the same way as
428 # read_metadata, so see that function for details of the technique.
430 objects = self.fields['data'].split()
434 def follow_ref(refstr):
435 if len(stack) >= MAX_RECURSION_DEPTH: raise OverflowError
436 objects = self.object_store.get(refstr).split()
438 stack.append(objects)
440 while len(stack) > 0:
447 # An indirect reference which we must follow?
448 if len(ref) > 0 and ref[0] == '@':
# Description of fields that might appear, and how they should be parsed.
MetadataItem.field_types = {
    'name': MetadataItem.decode_str,
    'type': MetadataItem.raw_str,
    'mode': MetadataItem.decode_int,
    'device': MetadataItem.decode_device,
    'user': MetadataItem.decode_user,
    'group': MetadataItem.decode_user,
    'ctime': MetadataItem.decode_int,
    'mtime': MetadataItem.decode_int,
    'links': MetadataItem.decode_int,
    'inode': MetadataItem.raw_str,
    'checksum': MetadataItem.decode_str,
    'size': MetadataItem.decode_int,
    'contents': MetadataItem.decode_str,
    'target': MetadataItem.decode_str,
}

def iterate_metadata(object_store, root):
    for d in parse(read_metadata(object_store, root), lambda l: len(l) == 0):
        yield MetadataItem(d, object_store)

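# Example (illustrative): printing the name and size of every item in a
# snapshot, given the root reference from the snapshot descriptor.
#
#   >>> for item in iterate_metadata(object_store, root):
#   ...     print item.items.name, getattr(item.items, 'size', None)
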
476 """Access to the local database of snapshot contents and object checksums.
478 The local database is consulted when creating a snapshot to determine what
479 data can be re-used from old snapshots. Segment cleaning is performed by
480 manipulating the data in the local database; the local database also
481 includes enough data to guide the segment cleaning process.
484 def __init__(self, path, dbname="localdb.sqlite"):
485 self.db_connection = sqlite3.connect(path + "/" + dbname)
    # Low-level database access.  Use these methods when there isn't a
    # higher-level interface available.  Even when going through the
    # higher-level interfaces, remember to call the commit() method after
    # making changes so that they are actually saved.
    def commit(self):
        "Commit any pending changes to the local database."
        self.db_connection.commit()

    def rollback(self):
        "Roll back any pending changes to the local database."
        self.db_connection.rollback()

    def cursor(self):
        "Return a DB-API cursor for directly accessing the local database."
        return self.db_connection.cursor()

    def list_schemes(self):
        """Return the list of snapshot scheme names in the local database.

        The returned value is a sorted list of scheme name strings.
        """

        cur = self.cursor()
        cur.execute("select distinct scheme from snapshots")
        schemes = [row[0] for row in cur.fetchall()]
        schemes.sort()
        return schemes

    def garbage_collect(self, scheme, intent=1.0):
        """Delete entries from old snapshots from the database.

        Only snapshots with the specified scheme name will be deleted.  If
        intent is given, it gives the intended next snapshot type, to
        determine how aggressively to clean (for example, intent=7 could be
        used if the next snapshot will be a weekly snapshot).
        """

        cur = self.cursor()

        # Find the id of the last snapshot to be created.  This is used for
        # measuring time in a way: we record this value in each segment we
        # expire on this run, and then on a future run can tell if there have
        # been intervening backups made.
        cur.execute("select max(snapshotid) from snapshots")
        last_snapshotid = cur.fetchone()[0]

        # Get the list of old snapshots for this scheme.  Delete all the old
        # ones.  Rules for what to keep:
        #   - Always keep the most recent snapshot.
        #   - If snapshot X is younger than Y, and X has higher intent, then
        #     Y can be deleted.
        cur.execute("""select snapshotid, name, intent,
                              julianday('now') - timestamp as age
                       from snapshots where scheme = ?
                       order by age""", (scheme,))

        first = True
        max_intent = -1
        for (id, name, snap_intent, snap_age) in cur.fetchall():
            can_delete = False
            if snap_intent < max_intent:
                # Delete small-intent snapshots if there is a more recent
                # large-intent snapshot.
                can_delete = True
            elif snap_intent == intent:
                # Delete previous snapshots with the specified intent level.
                can_delete = True

            if can_delete and not first:
                print "Delete snapshot %d (%s)" % (id, name)
                cur.execute("delete from snapshots where snapshotid = ?",
                            (id,))
            first = False
            max_intent = max(max_intent, snap_intent)

        # Delete entries in the segments_used table which are for
        # non-existent snapshots.
        cur.execute("""delete from segments_used
                       where snapshotid not in
                           (select snapshotid from snapshots)""")

        # Find segments which contain no objects used by any current
        # snapshots, and delete them from the segment table.
        cur.execute("""delete from segments where segmentid not in
                           (select segmentid from segments_used)""")

        # Delete unused objects in the block_index table.  By "unused", we
        # mean any object which was stored in a segment which has been
        # deleted, and any object in a segment which was marked for cleaning
        # and has had cleaning performed already (the expired time is less
        # than the current largest snapshot id).
        cur.execute("""delete from block_index
                       where segmentid not in (select segmentid from segments)
                          or segmentid in (select segmentid from segments
                                           where expire_time < ?)""",
                    (last_snapshotid,))

        # Remove sub-block signatures for deleted objects.
        cur.execute("""delete from subblock_signatures
                       where blockid not in
                           (select blockid from block_index)""")

    class SegmentInfo(Struct): pass

    def get_segment_cleaning_list(self, age_boost=0.0):
        """Return a list of all current segments with information for cleaning.

        Return all segments which are currently known in the local database
        (there might be other, older segments in the archive itself), and
        return usage statistics for each to help decide which segments to
        clean.

        The returned list will be sorted by estimated cleaning benefit, with
        segments that are best to clean at the start of the list.

        If specified, the age_boost parameter (measured in days) will be
        added to the age of each segment, as a way of adjusting the benefit
        computation before a long-lived snapshot is taken (for example,
        age_boost might be set to 7 when cleaning prior to taking a weekly
        snapshot).
        """

        cur = self.cursor()
        segments = []
        cur.execute("""select segmentid, used, size, mtime,
                       julianday('now') - mtime as age from segment_info
                       where expire_time is null""")
        for row in cur.fetchall():
            info = self.SegmentInfo()
            info.id = row[0]
            info.used_bytes = row[1]
            info.size_bytes = row[2]
            info.mtime = row[3]
            info.age_days = row[4]

            # If data is not available for whatever reason, treat it as 0.0.
            if info.age_days is None:
                info.age_days = 0.0
            if info.used_bytes is None:
                info.used_bytes = 0.0

            # Benefit calculation: u is the estimated fraction of each segment
            # which is utilized (bytes belonging to objects still in use
            # divided by total size; this doesn't take compression or storage
            # overhead into account, but should give a reasonable estimate).
            #
            # The total benefit is a heuristic that combines several factors:
            # the amount of space that can be reclaimed (1 - u), an ageing
            # factor (info.age_days) that favors cleaning old segments over
            # young ones and also is more likely to clean segments that will
            # be rewritten for long-lived snapshots (age_boost), and finally a
            # penalty factor for the cost of re-uploading data (u + 0.1).
            u = info.used_bytes / info.size_bytes
            info.cleaning_benefit \
                = (1 - u) * (info.age_days + age_boost) / (u + 0.1)
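            # Worked example (illustrative): a segment with u = 0.25 and an
            # effective age of 10 days scores (1 - 0.25) * 10 / 0.35 ~= 21.4;
            # a fully-utilized segment (u = 1.0) of any age scores 0.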
            segments.append(info)

        segments.sort(key=lambda s: s.cleaning_benefit, reverse=True)
        return segments

    def mark_segment_expired(self, segment):
        """Mark a segment for cleaning in the local database.

        The segment parameter should be either a SegmentInfo object or an
        integer segment id.  Objects in the given segment will be marked as
        expired, which means that any future snapshots that would re-use
        those objects will instead write out a new copy of the object, and
        thus no future snapshots will depend upon the given segment.
        """

        if isinstance(segment, int):
            id = segment
        elif isinstance(segment, self.SegmentInfo):
            id = segment.id
        else:
            raise TypeError("Invalid segment: %s, must be of type int or"
                            " SegmentInfo, not %s" % (segment, type(segment)))

        cur = self.cursor()
        cur.execute("select max(snapshotid) from snapshots")
        last_snapshotid = cur.fetchone()[0]
        cur.execute("update segments set expire_time = ? where segmentid = ?",
                    (last_snapshotid, id))
        cur.execute("update block_index set expired = 0 where segmentid = ?",
                    (id,))

    def balance_expired_objects(self):
        """Analyze expired objects in segments to be cleaned and group by age.

        Update the block_index table of the local database to group expired
        objects by age.  The exact number of buckets and the cutoffs for each
        are dynamically determined.  Calling this function after marking
        segments expired will help in the segment cleaning process, by
        ensuring that when active objects from cleaned segments are
        rewritten, they will be placed into new segments roughly grouped by
        age.
        """

        # The expired column of the block_index table is used when generating
        # a new LBS snapshot.  A null value indicates that an object may be
        # re-used.  Otherwise, an object must be written into a new segment
        # if needed.  Objects with distinct expired values will be written
        # into distinct segments, to allow for some grouping by age.  The
        # value 0 is somewhat special in that it indicates any rewritten
        # objects can be placed in the same segment as completely new
        # objects; this can be used for very young objects which have been
        # expired, or objects not expected to be encountered.
        #
        # In the balancing process, all objects which are not used in any
        # current snapshots will have expired set to 0.  Objects which have
        # been seen will be sorted by age and will have expired values set to
        # 0, 1, 2, and so on based on age (with younger objects being
        # assigned lower values).  The number of buckets and the age cutoffs
        # are determined by looking at the distribution of block ages.

        cur = self.cursor()

        # Mark all expired objects with expired = 0; these objects will later
        # have values set to indicate groupings of objects when repacking.
        cur.execute("""update block_index set expired = 0
                       where expired is not null""")

        # We will want to aim for at least one full segment for each bucket
        # that we eventually create, but don't know how many bytes that
        # should be due to compression.  So compute the average number of
        # bytes in each expired segment as a rough estimate for the minimum
        # size of each bucket.  (This estimate could be thrown off by many
        # not-fully-packed segments, but for now don't worry too much about
        # that.)  If we can't compute an average, it's probably because there
        # are no expired segments, so we have no more work to do.
        cur.execute("""select avg(size) from segments
                       where segmentid in
                           (select distinct segmentid from block_index
                            where expired is not null)""")
        segment_size_estimate = cur.fetchone()[0]
        if not segment_size_estimate:
            return

        # Next, extract distribution of expired objects (number and size) by
        # age.  Save the timestamp for "now" so that the classification of
        # blocks into age buckets will not change later in the function,
        # after time has passed.  Set any timestamps in the future to now, so
        # we are guaranteed that for the rest of this function, age is always
        # non-negative.
        cur.execute("select julianday('now')")
        now = cur.fetchone()[0]

        cur.execute("""update block_index set timestamp = ?
                       where timestamp > ? and expired is not null""",
                    (now, now))

        cur.execute("""select round(? - timestamp) as age, count(*),
                              sum(size)
                       from block_index where expired = 0
                       group by age order by age""", (now,))
        distribution = cur.fetchall()

        # Start to determine the buckets for expired objects.  Heuristics
        # used:
        #   - An upper bound on the number of buckets is given by the number
        #     of segments we estimate it will take to store all data.  In
        #     fact, aim for a couple of segments per bucket.
        #   - Place very young objects in bucket 0 (place with new objects)
        #     unless there are enough of them to warrant a separate bucket.
        #   - Try not to create unnecessarily many buckets, since fewer
        #     buckets will allow repacked data to be grouped based on spatial
        #     locality (while more buckets will group by temporal locality).
        #     We want a balance.
        # Age (in days) below which objects count as "very young" for the
        # bucket-0 heuristic above.
        MIN_AGE = 4
        total_bytes = sum([i[2] for i in distribution])
        target_buckets = 2 * (total_bytes / segment_size_estimate) ** 0.4
        min_size = 1.5 * segment_size_estimate
        target_size = max(2 * segment_size_estimate,
                          total_bytes / target_buckets)
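        # Worked example (illustrative): with segment_size_estimate = 4 MiB
        # and total_bytes = 1 GiB, target_buckets = 2 * 256**0.4 ~= 18.4,
        # min_size = 6 MiB, and target_size = max(8 MiB, 1 GiB / 18.4)
        # ~= 55.7 MiB.
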
759 print "segment_size:", segment_size_estimate
760 print "distribution:", distribution
761 print "total_bytes:", total_bytes
762 print "target_buckets:", target_buckets
763 print "min, target size:", min_size, target_size
        # Chosen cutoffs.  Each bucket consists of objects with age greater
        # than one cutoff value, but not greater than the next largest
        # cutoff.
        cutoffs = []

        # Starting with the oldest objects, begin grouping together into
        # buckets of size at least target_size bytes.
        distribution.reverse()
        bucket_size = 0
        min_age_bucket = False
        for (age, items, size) in distribution:
            if bucket_size >= target_size \
                or (age < MIN_AGE and not min_age_bucket):
                if bucket_size < target_size and len(cutoffs) > 0:
                    cutoffs.pop()
                cutoffs.append(age)
                bucket_size = 0

            bucket_size += size
            if age < MIN_AGE:
                min_age_bucket = True

        # The last (youngest) bucket will be group 0, unless it has enough
        # data to be of size min_size by itself, or there happen to be no
        # objects less than MIN_AGE at all.
        if bucket_size >= min_size or not min_age_bucket:
            cutoffs.append(-1)
        cutoffs.append(-1)

        print "cutoffs:", cutoffs

        # Update the database to assign each object to the appropriate
        # bucket.
        cutoffs.reverse()
        for i in range(len(cutoffs)):
            cur.execute("""update block_index set expired = ?
                           where round(? - timestamp) > ?
                             and expired is not null""",
                        (i, now, cutoffs[i]))