X-Git-Url: http://git.vrable.net/?a=blobdiff_plain;f=main.cc;h=c3f8c59ce57eba033fcef386a2d1bd45e3c0117f;hb=64bff41cb3ccdd60e767a5bb9ed8525d2dda1966;hp=15ddfc609a42684d111dde5ba34edd5aaedb565e;hpb=fc223ec17dc5e6691291a7b31f6daa8679b484cd;p=cumulus.git

diff --git a/main.cc b/main.cc
index 15ddfc6..c3f8c59 100644
--- a/main.cc
+++ b/main.cc
@@ -1,8 +1,6 @@
-/* Cumulus: Smart Filesystem Backup to Dumb Servers
- *
- * Copyright (C) 2006-2009 The Regents of the University of California
- * Copyright (C) 2012 Google Inc.
- * Written by Michael Vrable
+/* Cumulus: Efficient Filesystem Backup to the Cloud
+ * Copyright (C) 2006-2009, 2012 The Cumulus Developers
+ * See the AUTHORS file for a list of contributors.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -49,13 +47,14 @@
 #include <vector>
 
 #include "exclude.h"
+#include "hash.h"
 #include "localdb.h"
 #include "metadata.h"
 #include "remote.h"
 #include "store.h"
-#include "sha1.h"
 #include "subfile.h"
 #include "util.h"
+#include "third_party/sha1.h"
 
 using std::list;
 using std::map;
@@ -244,7 +243,7 @@ int64_t dumpfile(int fd, dictionary &file_info, const string &path,
     /* If the file is new or changed, we must read in the contents a block at a
      * time. */
     if (!cached) {
-        SHA1Checksum hash;
+        Hash *hash = Hash::New();
         Subfile subfile(db);
         subfile.load_old_blocks(old_blocks);
 
@@ -258,7 +257,7 @@ int64_t dumpfile(int fd, dictionary &file_info, const string &path,
                 break;
             }
 
-            hash.process(block_buf, bytes);
+            hash->update(block_buf, bytes);
 
             // Sparse file processing: if we read a block of all zeroes, encode
             // that explicitly.
@@ -343,7 +342,8 @@ int64_t dumpfile(int fd, dictionary &file_info, const string &path,
             status = "old";
         }
 
-        file_info["checksum"] = hash.checksum_str();
+        file_info["checksum"] = hash->digest_str();
+        delete hash;
     }
 
     // Sanity check: if we are rebuilding the statcache, but the file looks
@@ -541,6 +541,7 @@ void try_merge_filter(const string& path, const string& basedir)
      * one block (1 MB) worth of data.  If the file doesn't seems like it might
      * be larger than that, don't parse the rules in it. */
     ssize_t bytes = file_read(fd, block_buf, LBS_BLOCK_SIZE);
+    close(fd);
     if (bytes < 0 || bytes >= static_cast<ssize_t>(LBS_BLOCK_SIZE - 1)) {
         /* TODO: Add more strict resource limits on merge files? */
         fprintf(stderr,
@@ -585,7 +586,8 @@ void scanfile(const string& path)
 
     DIR *dir = opendir(path.c_str());
     if (dir == NULL) {
-        fprintf(stderr, "Error: %m\n");
+        fprintf(stderr, "Error reading directory %s: %m\n",
+                path.c_str());
         return;
     }
 
@@ -653,8 +655,10 @@ void usage(const char *program)
         "  --dest=PATH          path where backup is to be written\n"
         "  --upload-script=COMMAND\n"
         "                       program to invoke for each backup file generated\n"
-        "  --exclude=PATH       exclude files in PATH from snapshot\n"
-        "  --exclude-name=NAME  exclude files called NAME from snapshot\n"
+        "  --exclude=PATTERN    exclude files matching PATTERN from snapshot\n"
+        "  --include=PATTERN    include files matching PATTERN in snapshot\n"
+        "  --dir-merge=PATTERN  parse files matching PATTERN to read additional\n"
+        "                       subtree-specific include/exclude rules during backup\n"
         "  --localdb=PATH       local backup metadata is stored in PATH\n"
         "  --tmpdir=PATH        path for temporarily storing backup files\n"
         "                       (defaults to TMPDIR environment variable or /tmp)\n"
@@ -679,6 +683,8 @@ void usage(const char *program)
 
 int main(int argc, char *argv[])
 {
+    hash_init();
+
     string backup_dest = "", backup_script = "";
     string localdb_dir = "";
     string backup_scheme = "";
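
The main substantive change in this diff replaces the concrete SHA1Checksum class with an abstract Hash object obtained from a factory (Hash::New()), with one-time setup via hash_init() at the top of main(). The header hash.h itself is not part of this diff, so the sketch below is only an inference: the interface declaration and the checksum_of() helper are hypothetical, reconstructed from the calls visible above (Hash::New(), update(), digest_str()), and may not match the real header.

// Sketch only: this Hash interface is inferred from the call sites in the
// diff; the real hash.h may differ. checksum_of() is a hypothetical helper
// illustrating the caller-side pattern that dumpfile() now follows.
#include <cstddef>
#include <string>

class Hash {
public:
    virtual ~Hash() {}
    // Feed the next chunk of file data into the running digest.
    virtual void update(const void *data, size_t len) = 0;
    // Return the finished digest as a printable string.
    virtual std::string digest_str() = 0;
    // Factory: allocate a hash object for the default algorithm.
    static Hash *New();
};

// One-time algorithm registration, called at the start of main() in the diff.
void hash_init();

std::string checksum_of(const char *buf, size_t len)
{
    Hash *hash = Hash::New();          // allocate, as dumpfile() does
    hash->update(buf, len);            // feed each block of file data
    std::string digest = hash->digest_str();
    delete hash;                       // Hash::New() returns an owned pointer;
                                       // dumpfile() also deletes it explicitly
    return digest;
}

Because the diff frees the hash with an explicit delete, any early return between Hash::New() and the delete would leak it; a std::unique_ptr<Hash> would avoid that, though the explicit delete matches the style of the surrounding code.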