X-Git-Url: http://git.vrable.net/?a=blobdiff_plain;f=store.cc;h=930794d50d12beccaa4caa5d223d82bea2b3b0e8;hb=a4cf5f4d8df46fa00992a210d587cd824cedcb08;hp=26797b319ed635341d1deeedf6bd161110644590;hpb=ac33ae99de4a6aa9cfaca2f7fd6746758264758e;p=cumulus.git

diff --git a/store.cc b/store.cc
index 26797b3..930794d 100644
--- a/store.cc
+++ b/store.cc
@@ -3,10 +3,10 @@
  *
  * Backup data is stored in a collection of objects, which are grouped together
  * into segments for storage purposes. This implementation of the object store
- * is built on top of libtar, and represents segments as TAR files and objects
- * as files within them. */
+ * represents segments as TAR files and objects as files within them. */
 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -16,7 +16,9 @@
 #include 
 #include 
 
+#include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -24,11 +26,15 @@
 #include "store.h"
 #include "ref.h"
 
+using std::max;
 using std::list;
+using std::map;
 using std::set;
 using std::string;
 
-static char *const filter_program[] = {"bzip2", "-c", NULL};
+/* Default filter program is bzip2 */
+const char *filter_program = "bzip2 -c";
+const char *filter_extension = ".bz2";
 
 static void cloexec(int fd)
 {
@@ -44,23 +50,25 @@ Tarfile::Tarfile(const string &path, const string &segment)
     : size(0),
       segment_name(segment)
 {
-    real_fd = open(path.c_str(), O_WRONLY | O_CREAT, 0600);
+    assert(sizeof(struct tar_header) == TAR_BLOCK_SIZE);
+
+    real_fd = open(path.c_str(), O_WRONLY | O_CREAT, 0666);
     if (real_fd < 0)
         throw IOException("Error opening output file");
 
     filter_fd = spawn_filter(real_fd);
-
-    if (tar_fdopen(&t, filter_fd, (char *)path.c_str(), NULL,
-                   O_WRONLY | O_CREAT, 0600, TAR_VERBOSE | TAR_GNU) == -1)
-        throw IOException("Error opening Tarfile");
 }
 
 Tarfile::~Tarfile()
 {
-    /* Close the tar file... */
-    tar_append_eof(t);
+    char buf[TAR_BLOCK_SIZE];
 
-    if (tar_close(t) != 0)
+    /* Append the EOF marker: two blocks filled with nulls. */
+    memset(buf, 0, sizeof(buf));
+    tar_write(buf, TAR_BLOCK_SIZE);
+    tar_write(buf, TAR_BLOCK_SIZE);
+
+    if (close(filter_fd) != 0)
         throw IOException("Error closing Tarfile");
 
     /* ...and wait for filter process to finish. */
@@ -110,7 +118,7 @@ int Tarfile::spawn_filter(int fd_out)
         close(fd_out);
 
         /* Exec the filter program. */
-        execvp(filter_program[0], filter_program);
+        execlp("/bin/sh", "/bin/sh", "-c", filter_program, NULL);
 
         /* Should not reach here except for error cases. */
         fprintf(stderr, "Could not exec filter: %m\n");
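
The hunk above drops the argv-style filter_program array and runs the filter through /bin/sh -c instead, so a whole command string such as "bzip2 -c" can be configured in one piece. Below is a minimal, self-contained sketch of that fork/pipe/exec-through-the-shell pattern; the helper name run_filter() and the main() driver are illustrative stand-ins, not cumulus code (the real logic lives in Tarfile::spawn_filter(), most of which is outside this diff).

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* Illustrative sketch: run `command` under /bin/sh with its stdin connected
 * to the returned file descriptor and its stdout connected to fd_out. */
static int run_filter(const char *command, int fd_out, pid_t *filter_pid)
{
    int fds[2];

    if (pipe(fds) < 0)
        return -1;

    pid_t pid = fork();
    if (pid < 0) {
        return -1;
    } else if (pid == 0) {
        /* Child: read the pipe on stdin, write filtered output to fd_out. */
        close(fds[1]);
        dup2(fds[0], 0);
        close(fds[0]);
        if (fd_out != 1) {
            dup2(fd_out, 1);
            close(fd_out);
        }

        /* Let the shell split the command, so "bzip2 -c" works as one string. */
        execlp("/bin/sh", "/bin/sh", "-c", command, (char *)NULL);
        fprintf(stderr, "Could not exec filter: %m\n");
        exit(1);
    }

    /* Parent: keep only the write end; the caller waits for the child later. */
    close(fds[0]);
    *filter_pid = pid;
    return fds[1];
}

int main()
{
    pid_t pid;
    int fd = run_filter("bzip2 -c", 1, &pid);
    if (fd < 0)
        return 1;

    const char msg[] = "hello, filtered world\n";
    write(fd, msg, sizeof(msg) - 1);

    /* Closing our end delivers EOF to the filter, which then flushes and exits. */
    close(fd);
    waitpid(pid, NULL, 0);
    return 0;
}
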
@@ -120,6 +128,25 @@ int Tarfile::spawn_filter(int fd_out)
     return fds[1];
 }
 
+void Tarfile::tar_write(const char *data, size_t len)
+{
+    size += len;
+
+    while (len > 0) {
+        int res = write(filter_fd, data, len);
+
+        if (res < 0) {
+            if (errno == EINTR)
+                continue;
+            fprintf(stderr, "Write error: %m\n");
+            throw IOException("Write error");
+        }
+
+        len -= res;
+        data += res;
+    }
+}
+
 void Tarfile::write_object(int id, const char *data, size_t len)
 {
     char buf[64];
@@ -132,61 +159,66 @@ void Tarfile::write_object(int id, const char *data, size_t len)
 void Tarfile::internal_write_object(const string &path,
                                     const char *data, size_t len)
 {
-    memset(&t->th_buf, 0, sizeof(struct tar_header));
-
-    th_set_type(t, S_IFREG | 0600);
-    th_set_user(t, 0);
-    th_set_group(t, 0);
-    th_set_mode(t, 0600);
-    th_set_size(t, len);
-    th_set_mtime(t, time(NULL));
-    th_set_path(t, const_cast<char *>(path.c_str()));
-    th_finish(t);
-
-    if (th_write(t) != 0)
-        throw IOException("Error writing tar header");
+    struct tar_header header;
+    memset(&header, 0, sizeof(header));
+
+    assert(path.size() < 100);
+    memcpy(header.name, path.data(), path.size());
+    sprintf(header.mode, "%07o", 0600);
+    sprintf(header.uid, "%07o", 0);
+    sprintf(header.gid, "%07o", 0);
+    sprintf(header.size, "%011o", len);
+    sprintf(header.mtime, "%011o", (int)time(NULL));
+    header.typeflag = '0';
+    strcpy(header.magic, "ustar  ");
+    strcpy(header.uname, "root");
+    strcpy(header.gname, "root");
+
+    memset(header.chksum, ' ', sizeof(header.chksum));
+    int checksum = 0;
+    for (int i = 0; i < TAR_BLOCK_SIZE; i++) {
+        checksum += ((uint8_t *)&header)[i];
+    }
+    sprintf(header.chksum, "%06o", checksum);
 
-    size += T_BLOCKSIZE;
+    tar_write((const char *)&header, TAR_BLOCK_SIZE);
 
     if (len == 0)
         return;
 
-    size_t blocks = (len + T_BLOCKSIZE - 1) / T_BLOCKSIZE;
-    size_t padding = blocks * T_BLOCKSIZE - len;
-
-    for (size_t i = 0; i < blocks - 1; i++) {
-        if (tar_block_write(t, &data[i * T_BLOCKSIZE]) == -1)
-            throw IOException("Error writing tar block");
-    }
+    tar_write(data, len);
 
-    char block[T_BLOCKSIZE];
-    memset(block, 0, sizeof(block));
-    memcpy(block, &data[T_BLOCKSIZE * (blocks - 1)], T_BLOCKSIZE - padding);
-    if (tar_block_write(t, block) == -1)
-        throw IOException("Error writing final tar block");
-
-    size += blocks * T_BLOCKSIZE;
+    char padbuf[TAR_BLOCK_SIZE];
+    size_t blocks = (len + TAR_BLOCK_SIZE - 1) / TAR_BLOCK_SIZE;
+    size_t padding = blocks * TAR_BLOCK_SIZE - len;
+    memset(padbuf, 0, padding);
+    tar_write(padbuf, padding);
 }
 
 /* Estimate the size based on the size of the actual output file on disk.
- * However, the filter may not have written all data yet, and in the event that
- * it is buffering data to a large extent, also use */
+ * However, it might be the case that the filter program is buffering all its
+ * data, and might potentially not write a single byte until we have closed
+ * our end of the pipe. If we don't do so until we see data written, we have
+ * a problem. So, arbitrarily pick an upper bound on the compression ratio
+ * that the filter will achieve (128:1), and return a size estimate which is
+ * the larger of a) bytes actually seen written to disk, and b) input
+ * bytes/128. */
 size_t Tarfile::size_estimate()
 {
     struct stat statbuf;
 
-    if (fstat(real_fd, &statbuf) == 0) {
-        size_t disk_size = statbuf.st_size;
-
-        if (disk_size >= size / 128)
-            return disk_size;
-    }
+    if (fstat(real_fd, &statbuf) == 0)
+        return max((int64_t)statbuf.st_size, (int64_t)(size / 128));
 
+    /* Couldn't stat the file on disk, so just return the actual number of
+     * bytes, before compression. */
     return size;
 }
 
 static const size_t SEGMENT_SIZE = 4 * 1024 * 1024;
 
+static map<string, int64_t> group_sizes;
+
 ObjectReference TarSegmentStore::write_object(const char *data, size_t len,
                                               const std::string &group)
 {
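
The rewritten internal_write_object() above builds a ustar header by hand, checksums it, and pads object data out to whole 512-byte blocks, replacing libtar's th_set_*()/th_write() calls. The sketch below restates the header layout, checksum rule, and padding arithmetic that code relies on; in the patch, struct tar_header and TAR_BLOCK_SIZE are assumed to come from a cumulus header that is not part of this diff, so the definitions here are illustrative stand-ins rather than the project's own.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

#define TAR_BLOCK_SIZE 512

/* Standard ustar field sizes; the char arrays sum to exactly 512 bytes. */
struct tar_header {
    char name[100];
    char mode[8];
    char uid[8];
    char gid[8];
    char size[12];
    char mtime[12];
    char chksum[8];
    char typeflag;
    char linkname[100];
    char magic[8];      /* magic + version stored together */
    char uname[32];
    char gname[32];
    char devmajor[8];
    char devminor[8];
    char prefix[155];
    char padding[12];
};

/* The checksum is the byte sum of the whole block with the chksum field
 * itself treated as eight spaces, stored as an octal string. */
static void set_checksum(struct tar_header *h)
{
    memset(h->chksum, ' ', sizeof(h->chksum));
    unsigned int sum = 0;
    for (int i = 0; i < TAR_BLOCK_SIZE; i++)
        sum += ((uint8_t *)h)[i];
    sprintf(h->chksum, "%06o", sum);
}

int main()
{
    /* sizeof(struct tar_header) must equal one tar block, as the patch asserts. */
    printf("header size: %zu\n", sizeof(struct tar_header));

    struct tar_header h;
    memset(&h, 0, sizeof(h));
    strcpy(h.name, "segment/00000000");
    size_t len = 1234;
    sprintf(h.size, "%011o", (unsigned)len);
    sprintf(h.mtime, "%011lo", (unsigned long)time(NULL));
    h.typeflag = '0';
    strcpy(h.magic, "ustar  ");
    set_checksum(&h);
    printf("chksum field: %.8s\n", h.chksum);

    /* File data is then padded with zero bytes to a whole number of blocks:
     * for 1234 bytes, 3 blocks are used and 302 bytes of padding follow. */
    size_t blocks = (len + TAR_BLOCK_SIZE - 1) / TAR_BLOCK_SIZE;
    printf("blocks: %zu, padding: %zu\n", blocks, blocks * TAR_BLOCK_SIZE - len);
    return 0;
}
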
@@ -198,10 +230,10 @@ ObjectReference TarSegmentStore::write_object(const char *data, size_t len,
 
         segment = new segment_info;
         segment->name = generate_uuid();
-
-        string filename = path + "/" + segment->name + ".tar.bz2";
-        segment->file = new Tarfile(filename, segment->name);
-
+        segment->basename = segment->name + ".tar";
+        segment->basename += filter_extension;
+        segment->fullname = path + "/" + segment->basename;
+        segment->file = new Tarfile(segment->fullname, segment->name);
         segment->count = 0;
 
         segments[group] = segment;
@@ -216,6 +248,8 @@ ObjectReference TarSegmentStore::write_object(const char *data, size_t len,
     segment->file->write_object(id, data, len);
     segment->count++;
 
+    group_sizes[group] += len;
+
     ObjectReference ref(segment->name, id_buf);
 
     // If this segment meets or exceeds the size target, close it so that
@@ -232,11 +266,29 @@ void TarSegmentStore::sync()
         close_segment(segments.begin()->first);
 }
 
+void TarSegmentStore::dump_stats()
+{
+    printf("Data written:\n");
+    for (map<string, int64_t>::iterator i = group_sizes.begin();
+         i != group_sizes.end(); ++i) {
+        printf("    %s: %lld\n", i->first.c_str(), i->second);
+    }
+}
+
 void TarSegmentStore::close_segment(const string &group)
 {
     struct segment_info *segment = segments[group];
 
     delete segment->file;
+
+    if (db != NULL) {
+        SHA1Checksum segment_checksum;
+        if (segment_checksum.process_file(segment->fullname.c_str())) {
+            string checksum = segment_checksum.checksum_str();
+            db->SetSegmentChecksum(segment->name, segment->basename, checksum);
+        }
+    }
+
    segments.erase(segments.find(group));
    delete segment;
 }
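
Taken together, the TarSegmentStore hunks add per-group byte accounting (group_sizes, dump_stats()) on top of the existing per-group segment handling in write_object(), and record a checksum for each finished segment when a database handle is present. The sketch below is a stripped-down, runnable model of just the grouping and accounting behaviour; FakeSegment and the free function write_object() here are hypothetical stand-ins, not the cumulus classes, and the checksum/database step is left out.

#include <cstdio>
#include <map>
#include <string>

static const size_t SEGMENT_SIZE = 4 * 1024 * 1024;

struct FakeSegment {
    std::string name;
    size_t bytes = 0;
    int count = 0;
};

static std::map<std::string, FakeSegment> segments;
static std::map<std::string, long long> group_sizes;

/* Returns "segment-name/object-id", analogous to an ObjectReference. */
static std::string write_object(const char *data, size_t len,
                                const std::string &group)
{
    (void)data;                                  /* a real store appends data to the segment */
    FakeSegment &seg = segments[group];          /* created on first use for this group */
    if (seg.name.empty())
        seg.name = group + "-segment";

    int id = seg.count++;
    seg.bytes += len;
    group_sizes[group] += len;

    std::string ref = seg.name + "/" + std::to_string(id);

    /* Mirror the size-target check: drop a full segment so the next object
     * written for this group starts a fresh one. */
    if (seg.bytes >= SEGMENT_SIZE)
        segments.erase(group);

    return ref;
}

int main()
{
    char buf[1024] = {0};
    for (int i = 0; i < 5; i++)
        write_object(buf, sizeof(buf), "metadata");
    write_object(buf, sizeof(buf), "data");

    /* Equivalent of dump_stats(): bytes written, broken down by group. */
    std::printf("Data written:\n");
    for (auto &g : group_sizes)
        std::printf("    %s: %lld\n", g.first.c_str(), g.second);
    return 0;
}
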