X-Git-Url: http://git.vrable.net/?p=cumulus.git;a=blobdiff_plain;f=main.cc;h=e10a04aaae0e4cc4cc5439c343ca4a8931e1cb7d;hp=6b9f457eae60f3a29f268071981e134f683c7545;hb=3d780590edec4583eb3ef0ca16120afd0f7451f9;hpb=36e15463221ffb8f2e1dc9903705d7be81d1d1f8

diff --git a/main.cc b/main.cc
index 6b9f457..e10a04a 100644
--- a/main.cc
+++ b/main.cc
@@ -1,8 +1,6 @@
-/* Cumulus: Smart Filesystem Backup to Dumb Servers
- *
- * Copyright (C) 2006-2009 The Regents of the University of California
- * Copyright (C) 2012 Google Inc.
- * Written by Michael Vrable <mvrable@cs.ucsd.edu>
+/* Cumulus: Efficient Filesystem Backup to the Cloud
+ * Copyright (C) 2006-2009, 2012 The Cumulus Developers
+ * See the AUTHORS file for a list of contributors.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -49,13 +47,14 @@
 #include <vector>
 
 #include "exclude.h"
+#include "hash.h"
 #include "localdb.h"
 #include "metadata.h"
 #include "remote.h"
 #include "store.h"
-#include "sha1.h"
 #include "subfile.h"
 #include "util.h"
+#include "third_party/sha1.h"
 
 using std::list;
 using std::map;
@@ -83,9 +82,6 @@ static char *block_buf;
  * invocations to help in creating incremental snapshots. */
 LocalDb *db;
 
-/* Keep track of all segments which are needed to reconstruct the snapshot. */
-std::set<string> segment_list;
-
 /* Snapshot intent: 1=daily, 7=weekly, etc. This is not used directly, but is
  * stored in the local database and can help guide segment cleaning and
  * snapshot expiration policies. */
@@ -99,13 +95,6 @@ bool flag_rebuild_statcache = false;
 /* Whether verbose output is enabled. */
 bool verbose = false;
 
-/* Ensure that the given segment is listed as a dependency of the current
- * snapshot. */
-void add_segment(const string& segment)
-{
-    segment_list.insert(segment);
-}
-
 /* Attempts to open a regular file read-only, but with safety checks for files
  * that might not be fully trusted. */
 int safe_open(const string& path, struct stat *stat_buf)
@@ -233,8 +222,6 @@ int64_t dumpfile(int fd, dictionary &file_info, const string &path,
              i != old_blocks.end(); ++i) {
             const ObjectReference &ref = *i;
             object_list.push_back(ref.to_string());
-            if (ref.is_normal())
-                add_segment(ref.get_segment());
             db->UseObject(ref);
         }
         size = stat_buf.st_size;
@@ -244,7 +231,7 @@ int64_t dumpfile(int fd, dictionary &file_info, const string &path,
     /* If the file is new or changed, we must read in the contents a block at a
      * time. */
     if (!cached) {
-        SHA1Checksum hash;
+        Hash *hash = Hash::New();
         Subfile subfile(db);
         subfile.load_old_blocks(old_blocks);
 
@@ -258,7 +245,7 @@ int64_t dumpfile(int fd, dictionary &file_info, const string &path,
                 break;
             }
 
-            hash.process(block_buf, bytes);
+            hash->update(block_buf, bytes);
 
             // Sparse file processing: if we read a block of all zeroes, encode
             // that explicitly.
@@ -275,9 +262,10 @@ int64_t dumpfile(int fd, dictionary &file_info, const string &path,
             double block_age = 0.0;
             ObjectReference ref;
 
-            SHA1Checksum block_hash;
-            block_hash.process(block_buf, bytes);
-            string block_csum = block_hash.checksum_str();
+            Hash *hash = Hash::New();
+            hash->update(block_buf, bytes);
+            string block_csum = hash->digest_str();
+            delete hash;
 
             if (all_zero) {
                 ref = ObjectReference(ObjectReference::REF_ZERO);
@@ -333,8 +321,6 @@ int64_t dumpfile(int fd, dictionary &file_info, const string &path,
             while (!refs.empty()) {
                 ref = refs.front(); refs.pop_front();
                 object_list.push_back(ref.to_string());
-                if (ref.is_normal())
-                    add_segment(ref.get_segment());
                 db->UseObject(ref);
             }
             size += bytes;
@@ -343,7 +329,8 @@ int64_t dumpfile(int fd, dictionary &file_info, const string &path,
                 status = "old";
         }
 
-        file_info["checksum"] = hash.checksum_str();
+        file_info["checksum"] = hash->digest_str();
+        delete hash;
     }
 
     // Sanity check: if we are rebuilding the statcache, but the file looks
@@ -541,6 +528,7 @@ void try_merge_filter(const string& path, const string& basedir)
      * one block (1 MB) worth of data. If the file doesn't seems like it might
      * be larger than that, don't parse the rules in it. */
     ssize_t bytes = file_read(fd, block_buf, LBS_BLOCK_SIZE);
+    close(fd);
     if (bytes < 0 || bytes >= static_cast<ssize_t>(LBS_BLOCK_SIZE - 1)) {
         /* TODO: Add more strict resource limits on merge files? */
         fprintf(stderr,
@@ -585,7 +573,8 @@ void scanfile(const string& path)
         DIR *dir = opendir(path.c_str());
 
         if (dir == NULL) {
-            fprintf(stderr, "Error: %m\n");
+            fprintf(stderr, "Error reading directory %s: %m\n",
+                    path.c_str());
             return;
         }
 
@@ -681,6 +670,8 @@ void usage(const char *program)
 
 int main(int argc, char *argv[])
 {
+    hash_init();
+
     string backup_dest = "", backup_script = "";
     string localdb_dir = "";
     string backup_scheme = "";
@@ -855,7 +846,6 @@ int main(int argc, char *argv[])
     }
 
     ObjectReference root_ref = metawriter->close();
-    add_segment(root_ref.get_segment());
     string backup_root = root_ref.to_string();
 
     delete metawriter;
@@ -876,6 +866,7 @@ int main(int argc, char *argv[])
                                                "checksums");
     FILE *checksums = fdopen(checksum_file->get_fd(), "w");
 
+    std::set<string> segment_list = db->GetUsedSegments();
     for (std::set<string>::iterator i = segment_list.begin();
          i != segment_list.end(); ++i) {
         string seg_path, seg_csum;
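
The main refactoring in this diff replaces the concrete SHA1Checksum class with
an abstract Hash interface declared in the new hash.h header, registered once
at startup via hash_init(). hash.h itself is not part of this diff, so the
following is only a minimal sketch of the interface implied by the call sites
above (Hash::New(), update(), digest_str()); the names come from the diff, but
the exact signatures are inferred, not authoritative:

    // Sketch of the Hash interface inferred from the call sites in main.cc;
    // not the actual declarations from hash.h.
    #include <cstddef>
    #include <string>

    class Hash {
    public:
        virtual ~Hash() {}

        // Feed len bytes into the running checksum; called repeatedly as
        // dumpfile() reads each block.
        virtual void update(const void *data, std::size_t len) = 0;

        // Finalize and return a printable digest, stored in the snapshot
        // metadata as file_info["checksum"].
        virtual std::string digest_str() = 0;

        // Factory returning a fresh instance of the configured algorithm.
        static Hash *New();
    };

    // One-time registration of available hash implementations; main() now
    // calls this before any Hash::New().
    void hash_init();

Each instance is single-use: the caller constructs one with Hash::New(),
streams data through update(), reads the digest once, and deletes it, exactly
as the rewritten dumpfile() does for both per-block and whole-file checksums.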
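
The other theme of the diff is that segment dependency tracking moves out of
main.cc: the global segment_list set and the add_segment() helper are deleted,
and the set of segments needed to reconstruct the snapshot is instead obtained
from the local database via the new db->GetUsedSegments() call. Since every
object reference already passes through db->UseObject(), the database can
record the owning segment at that point. A hypothetical sketch of that
bookkeeping follows (ObjectReference is stubbed because ref.h is not shown, and
the real LocalDb presumably persists this in its database rather than in
memory):

    #include <set>
    #include <string>

    // Stand-in for the real ObjectReference from ref.h; only the two
    // accessors used below are stubbed.
    struct ObjectRef {
        std::string segment;
        bool normal;    // false for special references such as REF_ZERO
        bool is_normal() const { return normal; }
        std::string get_segment() const { return segment; }
    };

    class LocalDbSketch {
    public:
        // Called once per object referenced by the snapshot (see dumpfile()).
        void UseObject(const ObjectRef &ref) {
            // ...existing statcache/database bookkeeping elided...
            if (ref.is_normal())
                used_segments.insert(ref.get_segment());
        }

        // Every segment needed to reconstruct the snapshot; replaces the
        // old global segment_list that add_segment() populated.
        std::set<std::string> GetUsedSegments() const {
            return used_segments;
        }

    private:
        std::set<std::string> used_segments;
    };

This would also explain why the explicit add_segment(root_ref.get_segment())
call in main() can simply be dropped: if the metadata writer records its
objects through the database as well, the root reference's segment is already
in the returned set.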