-/* Cumulus: Smart Filesystem Backup to Dumb Servers
- *
- * Copyright (C) 2006-2009 The Regents of the University of California
- * Copyright (C) 2012 Google Inc.
- * Written by Michael Vrable <mvrable@cs.ucsd.edu>
+/* Cumulus: Efficient Filesystem Backup to the Cloud
+ * Copyright (C) 2006-2009, 2012 The Cumulus Developers
+ * See the AUTHORS file for a list of contributors.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#include <vector>
#include "exclude.h"
+#include "hash.h"
#include "localdb.h"
#include "metadata.h"
#include "remote.h"
#include "store.h"
-#include "sha1.h"
#include "subfile.h"
#include "util.h"
+#include "third_party/sha1.h"
using std::list;
using std::map;
* invocations to help in creating incremental snapshots. */
LocalDb *db;
-/* Keep track of all segments which are needed to reconstruct the snapshot. */
-std::set<string> segment_list;
-
/* Snapshot intent: 1=daily, 7=weekly, etc. This is not used directly, but is
* stored in the local database and can help guide segment cleaning and
* snapshot expiration policies. */
/* Whether verbose output is enabled. */
bool verbose = false;
-/* Ensure that the given segment is listed as a dependency of the current
- * snapshot. */
-void add_segment(const string& segment)
-{
- segment_list.insert(segment);
-}
-
/* Attempts to open a regular file read-only, but with safety checks for files
* that might not be fully trusted. */
int safe_open(const string& path, struct stat *stat_buf)
i != old_blocks.end(); ++i) {
const ObjectReference &ref = *i;
object_list.push_back(ref.to_string());
- if (ref.is_normal())
- add_segment(ref.get_segment());
db->UseObject(ref);
}
size = stat_buf.st_size;
/* If the file is new or changed, we must read in the contents a block at a
* time. */
if (!cached) {
- SHA1Checksum hash;
+ Hash *hash = Hash::New();
Subfile subfile(db);
subfile.load_old_blocks(old_blocks);
break;
}
- hash.process(block_buf, bytes);
+ hash->update(block_buf, bytes);
// Sparse file processing: if we read a block of all zeroes, encode
// that explicitly.
double block_age = 0.0;
ObjectReference ref;
- SHA1Checksum block_hash;
- block_hash.process(block_buf, bytes);
- string block_csum = block_hash.checksum_str();
+ Hash *hash = Hash::New();
+ hash->update(block_buf, bytes);
+ string block_csum = hash->digest_str();
+ delete hash;
if (all_zero) {
ref = ObjectReference(ObjectReference::REF_ZERO);
while (!refs.empty()) {
ref = refs.front(); refs.pop_front();
object_list.push_back(ref.to_string());
- if (ref.is_normal())
- add_segment(ref.get_segment());
db->UseObject(ref);
}
size += bytes;
status = "old";
}
- file_info["checksum"] = hash.checksum_str();
+ file_info["checksum"] = hash->digest_str();
+ delete hash;
}
// Sanity check: if we are rebuilding the statcache, but the file looks
* one block (1 MB) worth of data. If the file doesn't seem like it might
* be larger than that, don't parse the rules in it. */
ssize_t bytes = file_read(fd, block_buf, LBS_BLOCK_SIZE);
+ close(fd);
if (bytes < 0 || bytes >= static_cast<ssize_t>(LBS_BLOCK_SIZE - 1)) {
/* TODO: Add more strict resource limits on merge files? */
fprintf(stderr,
DIR *dir = opendir(path.c_str());
if (dir == NULL) {
- fprintf(stderr, "Error: %m\n");
+ fprintf(stderr, "Error reading directory %s: %m\n",
+ path.c_str());
return;
}
int main(int argc, char *argv[])
{
+ hash_init();
+
string backup_dest = "", backup_script = "";
string localdb_dir = "";
string backup_scheme = "";
}
ObjectReference root_ref = metawriter->close();
- add_segment(root_ref.get_segment());
string backup_root = root_ref.to_string();
delete metawriter;
"checksums");
FILE *checksums = fdopen(checksum_file->get_fd(), "w");
+ std::set<string> segment_list = db->GetUsedSegments();
for (std::set<string>::iterator i = segment_list.begin();
i != segment_list.end(); ++i) {
string seg_path, seg_csum;