- reading and maintaining the local object database
"""
-from __future__ import division
+from __future__ import division, print_function, unicode_literals
+
import hashlib
import itertools
import os
import re
import sqlite3
+import sys
import tarfile
import tempfile
-import thread
+try:
+ import _thread
+except ImportError:
+ import thread as _thread
import cumulus.store
import cumulus.store.file
+if sys.version_info < (3,):
+ StringTypes = (str, unicode)
+else:
+ StringTypes = (str,)
+
# The largest supported snapshot format that can be understood.
FORMAT_VERSION = (0, 11) # Cumulus Snapshot v0.11
except cumulus.store.NotFoundError:
pass
if not success:
- raise cumulus.store.NotFoundError(basename)
+ raise cumulus.store.NotFoundError(backend)
def _build_segments_searchpath(prefix):
for (extension, filter) in SEGMENT_FILTERS:
[SearchPathEntry("meta", ".sha1sums"),
SearchPathEntry("checksums", ".sha1sums"),
SearchPathEntry("", ".sha1sums")]),
+ "meta": SearchPath(
+ r"^snapshot-(.*)\.meta(\.\S+)?$",
+ _build_segments_searchpath("meta")),
"segments": SearchPath(
(r"^([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})"
r"\.tar(\.\S+)?$"),
store may either be a Store object or URL.
"""
- if type(backend) in (str, unicode):
+ if type(backend) in StringTypes:
if backend.find(":") >= 0:
self._backend = cumulus.store.open(backend)
else:
return ((x[1].group(1), x[0])
for x in SEARCH_PATHS[filetype].list(self._backend))
+ def prefetch_generic(self):
+ """Calls scan on directories to prefetch file metadata."""
+ directories = set()
+ for typeinfo in SEARCH_PATHS.values():
+ directories.update(typeinfo.directories())
+ for d in directories:
+ print("Prefetch", d)
+ self._backend.scan(d)
+
class CumulusStore:
def __init__(self, backend):
if isinstance(backend, BackendWrapper):
dst.write(block)
src.close()
dst.close()
- thread.start_new_thread(copy_thread, (filehandle, input))
+ _thread.start_new_thread(copy_thread, (filehandle, input))
return output
def get_segment(self, segment):
return data
+ def prefetch(self):
+ self.backend.prefetch_generic()
+
def parse(lines, terminate=None):
"""Generic parser for RFC822-style "Key: Value" data streams.
def parse_full(lines):
try:
- return parse(lines).next()
+ return next(parse(lines))
except StopIteration:
return {}
can_delete = True
if can_delete and not first:
- print "Delete snapshot %d (%s)" % (id, name)
+ print("Delete snapshot %d (%s)" % (id, name))
cur.execute("delete from snapshots where snapshotid = ?",
(id,))
first = False
target_size = max(2 * segment_size_estimate,
total_bytes / target_buckets)
- print "segment_size:", segment_size_estimate
- print "distribution:", distribution
- print "total_bytes:", total_bytes
- print "target_buckets:", target_buckets
- print "min, target size:", min_size, target_size
+ print("segment_size:", segment_size_estimate)
+ print("distribution:", distribution)
+ print("total_bytes:", total_bytes)
+ print("target_buckets:", target_buckets)
+ print("min, target size:", min_size, target_size)
# Chosen cutoffs. Each bucket consists of objects with age greater
# than one cutoff value, but not greater than the next largest cutoff.
cutoffs.append(-1)
cutoffs.append(-1)
- print "cutoffs:", cutoffs
+ print("cutoffs:", cutoffs)
# Update the database to assign each object to the appropriate bucket.
cutoffs.reverse()