X-Git-Url: http://git.vrable.net/?p=cumulus.git;a=blobdiff_plain;f=store.cc;h=f87b671e3a1e943c02376f3145c8afdf61d63bec;hp=00d08780007dde3a8b2617ceb7e89d5d75afd5cd;hb=15f090d3b2f0bf3994aacd00ab07da3b1f65ea00;hpb=52df48ca169e07caa5c726d51ed4ed83aed748a5

diff --git a/store.cc b/store.cc
index 00d0878..f87b671 100644
--- a/store.cc
+++ b/store.cc
@@ -1,13 +1,31 @@
-/* LBS: An LFS-inspired filesystem backup system
- * Copyright (C) 2008 Michael Vrable
+/* Cumulus: Smart Filesystem Backup to Dumb Servers
  *
- * Backup data is stored in a collection of objects, which are grouped together
+ * Copyright (C) 2008 The Regents of the University of California
+ * Written by Michael Vrable
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/* Backup data is stored in a collection of objects, which are grouped together
  * into segments for storage purposes. This implementation of the object store
  * represents segments as TAR files and objects as files within them. */
 
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -23,8 +41,10 @@
 #include
 #include
 
+#include "hash.h"
 #include "store.h"
 #include "ref.h"
+#include "util.h"
 
 using std::max;
 using std::list;
@@ -37,16 +57,6 @@ using std::string;
 const char *filter_program = "bzip2 -c";
 const char *filter_extension = ".bz2";
 
-static void cloexec(int fd)
-{
-    long flags = fcntl(fd, F_GETFD);
-
-    if (flags < 0)
-        return;
-
-    fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
-}
-
 Tarfile::Tarfile(RemoteFile *file, const string &segment)
     : size(0), segment_name(segment)
@@ -68,14 +78,14 @@ Tarfile::~Tarfile()
     tar_write(buf, TAR_BLOCK_SIZE);
 
     if (close(filter_fd) != 0)
-        throw IOException("Error closing Tarfile");
+        fatal("Error closing Tarfile");
 
     /* ...and wait for filter process to finish. */
     int status;
     waitpid(filter_pid, &status, 0);
 
     if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
-        throw IOException("Filter process error");
+        fatal("Filter process error");
     }
 
     close(real_fd);
@@ -92,13 +102,13 @@ int spawn_filter(int fd_out, const char *program, pid_t *filter_pid)
 
     /* Create a pipe for communicating with the filter process. */
     if (pipe(fds) < 0) {
-        throw IOException("Unable to create pipe for filter");
+        fatal("Unable to create pipe for filter");
     }
 
     /* Create a child process which can exec() the filter program. */
     pid = fork();
     if (pid < 0)
-        throw IOException("Unable to fork filter process");
+        fatal("Unable to fork filter process");
 
     if (pid > 0) {
         /* Parent process */
@@ -141,7 +151,7 @@ void Tarfile::tar_write(const char *data, size_t len)
             if (errno == EINTR)
                 continue;
             fprintf(stderr, "Write error: %m\n");
-            throw IOException("Write error");
+            fatal("Write error");
         }
 
         len -= res;
@@ -163,7 +173,7 @@ void Tarfile::write_object(int id, const char *data, size_t len)
     sprintf(header.mode, "%07o", 0600);
     sprintf(header.uid, "%07o", 0);
     sprintf(header.gid, "%07o", 0);
-    sprintf(header.size, "%011o", len);
+    sprintf(header.size, "%011o", (int)len);
     sprintf(header.mtime, "%011o", (int)time(NULL));
     header.typeflag = '0';
     strcpy(header.magic, "ustar ");
@@ -272,7 +282,7 @@ void TarSegmentStore::dump_stats()
     for (map<string, pair<int64_t, int64_t> >::iterator i = group_sizes.begin();
          i != group_sizes.end(); ++i) {
         printf("    %s: %lld (%lld compressed)\n", i->first.c_str(),
-               i->second.first, i->second.second);
+               (long long)i->second.first, (long long)i->second.second);
     }
 }
@@ -329,7 +339,8 @@ void LbsObject::checksum()
 {
     assert(written);
 
-    SHA1Checksum hash;
-    hash.process(data, data_len);
-    ref.set_checksum(hash.checksum_str());
+    Hash *hash = Hash::New();
+    hash->update(data, data_len);
+    ref.set_checksum(hash->digest_str());
+    delete hash;
 }
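
The hunks above funnel data through spawn_filter(), which pipes each segment's TAR stream into an external compression program (filter_program, "bzip2 -c" by default) whose output lands on the already-open file descriptor. The following standalone program is a minimal sketch of that pipe/fork/exec pattern, not the cumulus implementation; the helper name start_filter and the use of /bin/sh -c to run the filter command are assumptions made for the example.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/types.h>
    #include <sys/wait.h>

    /* Return a descriptor the caller can write raw data to; the filter's
     * compressed output is sent to fd_out.  The child's pid is stored in
     * *pid_out so the caller can waitpid() on it after closing the pipe. */
    static int start_filter(int fd_out, const char *program, pid_t *pid_out)
    {
        int fds[2];

        if (pipe(fds) < 0) {
            perror("pipe");
            exit(1);
        }

        pid_t pid = fork();
        if (pid < 0) {
            perror("fork");
            exit(1);
        }

        if (pid == 0) {
            /* Child: read the pipe on stdin, write to fd_out on stdout,
             * then exec the filter command. */
            close(fds[1]);
            dup2(fds[0], STDIN_FILENO);
            close(fds[0]);
            dup2(fd_out, STDOUT_FILENO);
            execlp("/bin/sh", "sh", "-c", program, (char *)NULL);
            perror("exec");
            _exit(127);
        }

        /* Parent: keep only the write end of the pipe. */
        close(fds[0]);
        *pid_out = pid;
        return fds[1];
    }

    int main()
    {
        pid_t pid;
        int fd = start_filter(STDOUT_FILENO, "bzip2 -c", &pid);

        const char *msg = "hello, filter\n";
        if (write(fd, msg, strlen(msg)) < 0)
            perror("write");
        close(fd);                  /* EOF on the pipe lets the filter finish */

        int status;
        waitpid(pid, &status, 0);
        return WIFEXITED(status) ? WEXITSTATUS(status) : 1;
    }

Closing the write end is what delivers EOF to the filter, which is why ~Tarfile() in the diff closes filter_fd before calling waitpid() on filter_pid and only then closes real_fd.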
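
On the small printf-format fixes: "%011o" expects an unsigned int, but len in write_object() is a size_t, and "%lld" in dump_stats() expects a long long, so the added (int) and (long long) casts keep those sprintf/printf calls well defined on 64-bit platforms. A hypothetical variant of the size-field conversion, shown only to illustrate the mismatch and not taken from the commit, would widen the cast instead of narrowing it:

    #include <stdio.h>

    int main()
    {
        char size_field[12];        /* 11 octal digits plus the terminator */
        size_t len = 123456;

        /* The tar header stores the object size as a zero-padded octal
         * string; casting to unsigned long and using %lo matches the
         * argument to the conversion without truncating large values. */
        snprintf(size_field, sizeof(size_field), "%011lo", (unsigned long)len);
        printf("size field: %s\n", size_field);
        return 0;
    }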