-/* LBS: An LFS-inspired filesystem backup system
- * Copyright (C) 2007 Michael Vrable
+/* Cumulus: Efficient Filesystem Backup to the Cloud
+ * Copyright (C) 2008-2009 The Cumulus Developers
+ * See the AUTHORS file for a list of contributors.
*
- * Backup data is stored in a collection of objects, which are grouped together
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/* Backup data is stored in a collection of objects, which are grouped together
* into segments for storage purposes. This implementation of the object store
- * is built on top of libtar, and represents segments as TAR files and objects
- * as files within them. */
+ * represents segments as TAR files and objects as files within them. */
#include <assert.h>
+#include <errno.h>
#include <stdio.h>
+#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <string>
#include <iostream>
+#include "hash.h"
+#include "localdb.h"
#include "store.h"
#include "ref.h"
+#include "util.h"
using std::max;
using std::list;
using std::map;
+using std::pair;
using std::set;
using std::string;
/* External filter command through which all segment data is piped before it
 * reaches disk, and the filename suffix appended to segment files to match
 * (see how basename is built in TarSegmentStore::write_object). */
const char *filter_program = "bzip2 -c";
const char *filter_extension = ".bz2";
-static void cloexec(int fd)
-{
- long flags = fcntl(fd, F_GETFD);
-
- if (flags < 0)
- return;
-
- fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
-}
-
/* Open a tar-format writer for one segment.  All output is piped through the
 * external compression filter before landing in the file descriptor of the
 * (already allocated) RemoteFile. */
Tarfile::Tarfile(RemoteFile *file, const string &segment)
    : size(0),
      segment_name(segment)
{
    /* The header-writing code assumes struct tar_header is exactly one TAR
     * block; catch a layout mismatch immediately. */
    assert(sizeof(struct tar_header) == TAR_BLOCK_SIZE);

    this->file = file;
    real_fd = file->get_fd();
    /* Bytes written via tar_write() go into filter_fd; the filter process
     * writes its compressed output to real_fd. */
    filter_fd = spawn_filter(real_fd, filter_program, &filter_pid);
}
/* Finish the archive: emit the TAR end-of-file marker, shut down the
 * compression filter, and close both file descriptors. */
Tarfile::~Tarfile()
{
    char zeros[TAR_BLOCK_SIZE];

    /* Append the EOF marker: two blocks filled with nulls. */
    memset(zeros, 0, sizeof(zeros));
    for (int i = 0; i < 2; i++)
        tar_write(zeros, TAR_BLOCK_SIZE);

    /* Closing our end of the pipe signals end-of-input to the filter... */
    if (close(filter_fd) != 0)
        fatal("Error closing Tarfile");

    /* ...and wait for filter process to finish. */
    int status;
    waitpid(filter_pid, &status, 0);
    if (!WIFEXITED(status) || WEXITSTATUS(status) != 0)
        fatal("Filter process error");

    close(real_fd);
}
 * on the TAR output.  The file descriptor to which output should be written
 * must be specified; the return value is the file descriptor which will be
 * attached to the standard input of the filter program.
 *
 * NOTE(review): the opening lines of this comment fall before the visible
 * diff hunk.  `program` is the shell command to exec; when filter_pid is
 * non-NULL it receives the child's pid so the caller can reap it. */
int spawn_filter(int fd_out, const char *program, pid_t *filter_pid)
{
    int fds[2];
    pid_t pid;

    /* Create a pipe for communicating with the filter process. */
    if (pipe(fds) < 0) {
        fatal("Unable to create pipe for filter");
    }

    /* Create a child process which can exec() the filter program. */
    pid = fork();
    if (pid < 0)
        fatal("Unable to fork filter process");

    if (pid > 0) {
        /* Parent process */
        close(fds[0]);
        /* Mark the write end close-on-exec so it does not leak into any
         * later-spawned children. */
        cloexec(fds[1]);
        if (filter_pid != NULL)
            *filter_pid = pid;
    } else {
        /* Child process.  Rearrange file descriptors.  stdin is fds[0], stdout
         * is fd_out, stderr is unchanged. */
        /* NOTE(review): unchanged context lines are omitted from this hunk
         * here -- presumably the dup2() calls that perform the rearrangement
         * described above; confirm against the full file. */
        close(fd_out);

        /* Exec the filter program. */
        execlp("/bin/sh", "/bin/sh", "-c", program, NULL);

        /* Should not reach here except for error cases. */
        fprintf(stderr, "Could not exec filter: %m\n");
        /* NOTE(review): omitted context here -- the error exit and the brace
         * closing the child branch are not visible in this hunk. */

    return fds[1];
}
-void Tarfile::write_object(int id, const char *data, size_t len)
+void Tarfile::tar_write(const char *data, size_t len)
{
- char buf[64];
- sprintf(buf, "%08x", id);
- string path = segment_name + "/" + buf;
+ size += len;
+
+ while (len > 0) {
+ int res = write(filter_fd, data, len);
- internal_write_object(path, data, len);
+ if (res < 0) {
+ if (errno == EINTR)
+ continue;
+ fprintf(stderr, "Write error: %m\n");
+ fatal("Write error");
+ }
+
+ len -= res;
+ data += res;
+ }
}
/* Append one object to the segment as a TAR member named
 * "<segment>/<id in hex>": a hand-built ustar header block, then the data,
 * zero-padded out to a whole TAR block. */
void Tarfile::write_object(int id, const char *data, size_t len)
{
    struct tar_header header;
    memset(&header, 0, sizeof(header));

    /* Member name is the segment name plus the object id rendered as eight
     * zero-padded hex digits. */
    char buf[64];
    sprintf(buf, "%08x", id);
    string path = segment_name + "/" + buf;

    /* The classic TAR name field holds at most 100 bytes; this writer emits
     * no long-name extension, so longer paths are a hard error. */
    assert(path.size() < 100);
    memcpy(header.name, path.data(), path.size());
    /* Numeric header fields are zero-padded octal strings. */
    sprintf(header.mode, "%07o", 0600);
    sprintf(header.uid, "%07o", 0);
    sprintf(header.gid, "%07o", 0);
    /* NOTE(review): len is truncated to int for the size field -- objects of
     * 2 GiB or more would be mis-recorded; confirm an upper bound on object
     * size is enforced by callers. */
    sprintf(header.size, "%011o", (int)len);
    sprintf(header.mtime, "%011o", (int)time(NULL));
    header.typeflag = '0';              /* '0' = regular file */
    strcpy(header.magic, "ustar ");
    strcpy(header.uname, "root");
    strcpy(header.gname, "root");

    /* Header checksum: the byte sum of the header with the checksum field
     * itself counted as spaces, stored back as octal. */
    memset(header.chksum, ' ', sizeof(header.chksum));
    int checksum = 0;
    for (int i = 0; i < TAR_BLOCK_SIZE; i++) {
        checksum += ((uint8_t *)&header)[i];
    }
    sprintf(header.chksum, "%06o", checksum);

    tar_write((const char *)&header, TAR_BLOCK_SIZE);

    if (len == 0)
        return;

    tar_write(data, len);

    /* Pad the data out to a TAR_BLOCK_SIZE boundary with zero bytes. */
    char padbuf[TAR_BLOCK_SIZE];
    size_t blocks = (len + TAR_BLOCK_SIZE - 1) / TAR_BLOCK_SIZE;
    size_t padding = blocks * TAR_BLOCK_SIZE - len;
    memset(padbuf, 0, padding);
    tar_write(padbuf, padding);
}
/* Estimate the size based on the size of the actual output file on disk.
/* Size target for segment files; per the comment in write_object(), a segment
 * reaching this size is closed so future objects start a new one.
 * (NOTE(review): the comparison itself happens in code outside this hunk.) */
static const size_t SEGMENT_SIZE = 4 * 1024 * 1024;

/* Backup size summary: segment type -> (uncompressed size, compressed size) */
static map<string, pair<int64_t, int64_t> > group_sizes;
/* Store one object into the currently-open segment for `group`, creating a
 * fresh segment when needed.  Returns a reference carrying segment name,
 * object id, byte range, and (optionally) the caller-supplied checksum; the
 * object is also recorded in the local database when one is attached.
 *
 * NOTE(review): this diff hunk omits several runs of unchanged context
 * lines, marked below -- the function is not fully visible here. */
ObjectReference TarSegmentStore::write_object(const char *data, size_t len,
                                              const std::string &group,
                                              const std::string &checksum,
                                              double age)
{
    struct segment_info *segment;

    /* [omitted context: lookup of an already-open segment for this group;
     * the branch below runs only when a new segment must be started] */
        segment = new segment_info;
        segment->name = generate_uuid();
        segment->group = group;
        /* Segment files are named <uuid>.tar plus the filter's extension. */
        segment->basename = segment->name + ".tar";
        segment->basename += filter_extension;
        segment->count = 0;
        segment->data_size = 0;
        /* Metadata segments are steered to a different remote directory than
         * data segments. */
        segment->rf = remote->alloc_file(segment->basename,
                                         group == "metadata" ? "segments0"
                                                             : "segments1");
        segment->file = new Tarfile(segment->rf, segment->name);

        segments[group] = segment;
    } else {
        /* [omitted context: reuse of the already-open segment] */

    /* [omitted context: allocation of the object id / id_buf within the
     * segment] */

    segment->file->write_object(id, data, len);
    segment->count++;
    segment->data_size += len;

    group_sizes[group].first += len;

    ObjectReference ref(segment->name, id_buf);
    ref.set_range(0, len, true);
    if (checksum.size() > 0)
        ref.set_checksum(checksum);
    if (db != NULL)
        db->StoreObject(ref, age);

    // If this segment meets or exceeds the size target, close it so that
    // future objects will go into a new segment.
    /* [omitted context: the size-target check, segment close, and return] */
/* Print a per-group summary of bytes written: raw object data (first) and
 * compressed segment-file bytes on disk (second), from group_sizes. */
void TarSegmentStore::dump_stats()
{
    printf("Data written:\n");
    for (map<string, pair<int64_t, int64_t> >::iterator i = group_sizes.begin();
         i != group_sizes.end(); ++i) {
        printf("    %s: %lld (%lld compressed)\n", i->first.c_str(),
               (long long)i->second.first, (long long)i->second.second);
    }
}
    /* NOTE(review): this hunk begins mid-function -- the enclosing signature
     * (the TarSegmentStore routine that closes the open segment for `group`)
     * lies outside the visible diff context. */
    struct segment_info *segment = segments[group];

    /* Deleting the Tarfile flushes the archive (EOF blocks) and shuts down
     * the compression filter. */
    delete segment->file;

    if (db != NULL) {
        /* Record the compressed on-disk size of the finished segment file
         * into the per-group statistics. */
        struct stat stat_buf;
        int disk_size = 0;
        if (stat(segment->rf->get_local_path().c_str(), &stat_buf) == 0) {
            disk_size = stat_buf.st_size;
            group_sizes[segment->group].second += disk_size;
        }

        /* Checksum the whole segment file so it can be verified later. */
        SHA1Checksum segment_checksum;
        string checksum;
        if (segment_checksum.process_file(segment->rf->get_local_path().c_str())) {
            checksum = segment_checksum.checksum_str();
        }

        db->SetSegmentMetadata(segment->name, segment->basename, checksum,
                               group, segment->data_size, disk_size);
    }

    /* Hand the finished file off for transfer to remote storage. */
    segment->rf->send();

    segments.erase(segments.find(group));
    delete segment;
}
/* NOTE(review): this second closing brace pairs with a scope opened outside
 * the visible hunk. */
}
/* A new object starts out empty: no group, default age 0.0, no data
 * attached, and not yet written to any segment store. */
LbsObject::LbsObject()
    : group(""), age(0.0), data(NULL), data_len(0), written(false)
{
}
{
}
-void LbsObject::write(TarSegmentStore *store)
+void LbsObject::set_data(const char *d, size_t len, const char *checksum)
{
- assert(data != NULL);
- assert(!written);
+ data = d;
+ data_len = len;
- ref = store->write_object(data, data_len, group);
- written = true;
+ if (checksum != NULL) {
+ this->checksum = checksum;
+ } else {
+ Hash *hash = Hash::New();
+ hash->update(data, data_len);
+ this->checksum = hash->digest_str();
+ delete hash;
+ }
}
/* Write the attached data into the given segment store and remember the
 * resulting reference.  Requires set_data() to have been called first, and
 * may only run once per object. */
void LbsObject::write(TarSegmentStore *store)
{
    assert(data != NULL);
    assert(!written);

    ref = store->write_object(data, data_len, group, checksum, age);
    written = true;
}