- reading and maintaining the local object database
"""
-from __future__ import division
+
import hashlib
import itertools
import os
import sqlite3
import tarfile
import tempfile
-import thread
+import _thread
import cumulus.store
import cumulus.store.file
except cumulus.store.NotFoundError:
pass
if not success:
- raise cumulus.store.NotFoundError(basename)
+ raise cumulus.store.NotFoundError(backend)
def _build_segments_searchpath(prefix):
for (extension, filter) in SEGMENT_FILTERS:
[SearchPathEntry("meta", ".sha1sums"),
SearchPathEntry("checksums", ".sha1sums"),
SearchPathEntry("", ".sha1sums")]),
+ "meta": SearchPath(
+ r"^snapshot-(.*)\.meta(\.\S+)?$",
+ _build_segments_searchpath("meta")),
"segments": SearchPath(
(r"^([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})"
r"\.tar(\.\S+)?$"),
store may either be a Store object or URL.
"""
- if type(backend) in (str, unicode):
+ if isinstance(backend, str):
if backend.find(":") >= 0:
self._backend = cumulus.store.open(backend)
else:
return ((x[1].group(1), x[0])
for x in SEARCH_PATHS[filetype].list(self._backend))
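# Aside: how a BackendWrapper is meant to be constructed, per the
# docstring above.  A string containing ":" is treated as a store URL
# and handed to cumulus.store.open(); other strings are assumed to be
# local paths.  The paths below are made up for illustration.
#
#   BackendWrapper("file:///var/backups/cumulus")  # URL -> store.open()
#   BackendWrapper("/var/backups/cumulus")         # plain local path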
+ def prefetch_generic(self):
+ """Calls scan on directories to prefetch file metadata."""
+ directories = set()
+ for typeinfo in SEARCH_PATHS.values():
+ directories.update(typeinfo.directories())
+ for d in directories:
+ print("Prefetch", d)
+ self._backend.scan(d)
+
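# Aside: prefetch_generic() exists so that one scan() call per
# directory can warm the backend's file-metadata cache before many
# small lookups.  A minimal usage sketch (the store URL is
# hypothetical):
#
#   store = CumulusStore("file:///var/backups/cumulus")
#   store.prefetch()   # forwards to BackendWrapper.prefetch_generic()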
class CumulusStore:
def __init__(self, backend):
if isinstance(backend, BackendWrapper):
dst.write(block)
src.close()
dst.close()
- thread.start_new_thread(copy_thread, (filehandle, input))
+ _thread.start_new_thread(copy_thread, (filehandle, input))
return output
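# Aside: the fragment above streams a filter's output through an OS
# pipe, with a background thread pumping bytes so the caller can read
# incrementally.  A self-contained sketch of the same pattern (names
# are illustrative; threading.Thread would be the more idiomatic
# Python 3 choice than _thread):

import _thread
import os

def stream_copy(src):
    """Return a file object that yields src's bytes as they arrive."""
    read_fd, write_fd = os.pipe()
    output = os.fdopen(read_fd, "rb")
    dst = os.fdopen(write_fd, "wb")
    def copy_thread(src, dst):
        # Pump fixed-size blocks until src is exhausted, then close
        # both ends so the reader sees EOF.
        while True:
            block = src.read(4096)
            if not block:
                break
            dst.write(block)
        src.close()
        dst.close()
    _thread.start_new_thread(copy_thread, (src, dst))
    return output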
def get_segment(self, segment):
return data
+ def prefetch(self):
+ self.backend.prefetch_generic()
+
def parse(lines, terminate=None):
"""Generic parser for RFC822-style "Key: Value" data streams.
def parse_full(lines):
try:
- return parse(lines).next()
+ return next(parse(lines))
except StopIteration:
return {}
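# Aside: the expected input to parse()/parse_full() is a stream of
# "Key: Value" lines in the style of RFC822 headers, with stanzas
# separated by blank lines.  A tiny illustrative example (keys made up
# for demonstration):
#
#   lines = ["Name: example\n", "Scheme: daily\n"]
#   parse_full(lines)   # -> {"Name": "example", "Scheme": "daily"}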
@staticmethod
def decode_device(s):
"""Decode a device major/minor number."""
- (major, minor) = map(MetadataItem.decode_int, s.split("/"))
+ (major, minor) = map(MetadataItem.decode_int, s.split("/"))
return (major, minor)
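# Aside: an illustrative round trip for decode_device().  Each half of
# the "major/minor" string passes through decode_int, so for an
# ordinary decimal encoding:
#
#   MetadataItem.decode_device("8/1")   # -> (8, 1), e.g. /dev/sda1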
class Items: pass
self.object_store = object_store
self.keys = []
self.items = self.Items()
- for (k, v) in fields.items():
+ for (k, v) in fields.items():
if k in self.field_types:
decoder = self.field_types[k]
setattr(self.items, k, decoder(v))
can_delete = True
if can_delete and not first:
- print "Delete snapshot %d (%s)" % (id, name)
+ print("Delete snapshot %d (%s)" % (id, name))
cur.execute("delete from snapshots where snapshotid = ?",
(id,))
first = False
target_size = max(2 * segment_size_estimate,
total_bytes / target_buckets)
- print "segment_size:", segment_size_estimate
- print "distribution:", distribution
- print "total_bytes:", total_bytes
- print "target_buckets:", target_buckets
- print "min, target size:", min_size, target_size
+ print("segment_size:", segment_size_estimate)
+ print("distribution:", distribution)
+ print("total_bytes:", total_bytes)
+ print("target_buckets:", target_buckets)
+ print("min, target size:", min_size, target_size)
# Chosen cutoffs. Each bucket consists of objects with age greater
# than one cutoff value, but not greater than the next largest cutoff.
cutoffs.append(-1)
cutoffs.append(-1)
- print "cutoffs:", cutoffs
+ print("cutoffs:", cutoffs)
# Update the database to assign each object to the appropriate bucket.
cutoffs.reverse()
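# Aside: a sketch of the bucket-selection rule described in the comment
# above ("age greater than one cutoff value, but not greater than the
# next largest cutoff"), assuming the cutoffs ascend after reverse()
# and the trailing -1 sentinels guarantee every non-negative age falls
# into some bucket.  The helper below is illustrative, not part of the
# diff:

def choose_bucket(cutoffs, age):
    """Return the index of the last cutoff that age exceeds."""
    bucket = 0
    for (i, cutoff) in enumerate(cutoffs):
        if age > cutoff:
            bucket = i
    return bucket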