X-Git-Url: http://git.vrable.net/?a=blobdiff_plain;f=scandir.cc;h=49d8c329351ee92d55ddcbf5e006a210850c96e7;hb=6bb973b965a30832c3e2d9f6a24e80d3309ef89e;hp=325a7c9831630d710c7eb4797cb95ff29e0384f7;hpb=9dbc03e5108e468457ff3e337e49618716853fe5;p=cumulus.git

diff --git a/scandir.cc b/scandir.cc
index 325a7c9..49d8c32 100644
--- a/scandir.cc
+++ b/scandir.cc
@@ -12,6 +12,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
@@ -24,6 +25,7 @@
 #include 
 #include "localdb.h"
+#include "metadata.h"
 #include "store.h"
 #include "sha1.h"
 #include "statcache.h"
@@ -43,13 +45,12 @@ using std::ostream;
 static const char lbs_version[] = LBS_STRINGIFY(LBS_VERSION);
 static TarSegmentStore *tss = NULL;
+static MetadataWriter *metawriter = NULL;
 /* Buffer for holding a single block of data read from a file. */
 static const size_t LBS_BLOCK_SIZE = 1024 * 1024;
 static char *block_buf;
-static const size_t LBS_METADATA_BLOCK_SIZE = 65536;
-
 /* Local database, which tracks objects written in this and previous
  * invocations to help in creating incremental snapshots. */
 LocalDb *db;
@@ -58,13 +59,6 @@ LocalDb *db;
  * skipping files which have not changed. */
 StatCache *statcache;
-/* Contents of the root object. This will contain a set of indirect links to
- * the metadata objects. */
-std::ostringstream metadata_root;
-
-/* Buffer for building up metadata. */
-std::ostringstream metadata;
-
 /* Keep track of all segments which are needed to reconstruct the snapshot. */
 std::set<string> segment_list;
@@ -77,28 +71,11 @@ std::list<string> searches;         // Directories we don't want to save, but
 bool relative_paths = true;
-/* Ensure contents of metadata are flushed to an object. */
-void metadata_flush()
+/* Ensure that the given segment is listed as a dependency of the current
+ * snapshot. */
+void add_segment(const string& segment)
 {
-    string m = metadata.str();
-    if (m.size() == 0)
-        return;
-
-    /* Write current metadata information to a new object. */
-    LbsObject *meta = new LbsObject;
-    meta->set_group("metadata");
-    meta->set_data(m.data(), m.size());
-    meta->write(tss);
-    meta->checksum();
-
-    /* Write a reference to this block in the root. */
-    ObjectReference ref = meta->get_ref();
-    metadata_root << "@" << ref.to_string() << "\n";
-    segment_list.insert(ref.get_segment());
-
-    delete meta;
-
-    metadata.str("");
+    segment_list.insert(segment);
 }
 /* Read data from a file descriptor and return the amount of data read. A
@@ -135,6 +112,7 @@ int64_t dumpfile(int fd, dictionary &file_info, const string &path,
 {
     int64_t size = 0;
     list<ObjectReference> object_list;
+    const char *status = NULL;  /* Status indicator printed out */
     /* Look up this file in the old stat cache, if we can. If the stat
      * information indicates that the file has not changed, do not bother
@@ -152,6 +130,7 @@ int64_t dumpfile(int fd, dictionary &file_info, const string &path,
             const ObjectReference &ref = *i;
             if (!db->IsAvailable(ref)) {
                 cached = false;
+                status = "repack";
                 break;
             }
         }
@@ -173,8 +152,6 @@ int64_t dumpfile(int fd, dictionary &file_info, const string &path,
     /* If the file is new or changed, we must read in the contents a block at a
      * time. */
     if (!cached) {
-        printf(" [new]\n");
-
         SHA1Checksum hash;
         while (true) {
             ssize_t bytes = file_read(fd, block_buf, LBS_BLOCK_SIZE);
@@ -199,6 +176,7 @@ int64_t dumpfile(int fd, dictionary &file_info, const string &path,
             // Store a copy of the object if one does not yet exist
             if (ref.get_segment().size() == 0) {
                 LbsObject *o = new LbsObject;
+                int object_group;
                 /* We might still have seen this checksum before, if the object
                  * was stored at some time in the past, but we have decided to
                  *
@@ -211,10 +189,21 @@ int64_t dumpfile(int fd, dictionary &file_info, const string &path,
                  * Additionally, keep track of the age of the data by looking
                  * up the age of the block which was expired and using that
                  * instead of the current time. */
-                if (db->IsOldObject(block_csum, bytes, &block_age))
-                    o->set_group("compacted");
-                else
+                if (db->IsOldObject(block_csum, bytes,
+                                    &block_age, &object_group)) {
+                    if (object_group == 0) {
+                        o->set_group("data");
+                    } else {
+                        char group[32];
+                        sprintf(group, "compacted-%d", object_group);
+                        o->set_group(group);
+                    }
+                    if (status == NULL)
+                        status = "partial";
+                } else {
                     o->set_group("data");
+                    status = "new";
+                }
                 o->set_data(block_buf, bytes);
                 o->write(tss);
@@ -227,11 +216,17 @@ int64_t dumpfile(int fd, dictionary &file_info, const string &path,
             segment_list.insert(ref.get_segment());
             db->UseObject(ref);
             size += bytes;
+
+            if (status == NULL)
+                status = "old";
         }
         file_info["checksum"] = hash.checksum_str();
     }
+    if (status != NULL)
+        printf(" [%s]\n", status);
+
     statcache->Save(path, &stat_buf, file_info["checksum"], object_list);
     /* For files that only need to be broken apart into a few objects, store
@@ -295,6 +290,13 @@ void dump_inode(const string& path, // Path within snapshot
         file_info["group"] += " (" + uri_encode(grp->gr_name) + ")";
     }
+    if (stat_buf.st_nlink > 1 && (stat_buf.st_mode & S_IFMT) != S_IFDIR) {
+        file_info["links"] = encode_int(stat_buf.st_nlink);
+        file_info["inode"] = encode_int(major(stat_buf.st_dev))
+            + "/" + encode_int(minor(stat_buf.st_dev))
+            + "/" + encode_int(stat_buf.st_ino);
+    }
+
     char inode_type;
     switch (stat_buf.st_mode & S_IFMT) {
@@ -322,7 +324,7 @@ void dump_inode(const string& path, // Path within snapshot
             fprintf(stderr, "error reading symlink: %m\n");
         } else if (len <= stat_buf.st_size) {
             buf[len] = '\0';
-            file_info["contents"] = uri_encode(buf);
+            file_info["target"] = uri_encode(buf);
         } else if (len > stat_buf.st_size) {
             fprintf(stderr, "error reading symlink: name truncated\n");
         }
@@ -330,11 +332,10 @@ void dump_inode(const string& path, // Path within snapshot
         delete[] buf;
         break;
     case S_IFREG:
-        inode_type = '-';
+        inode_type = 'f';
         file_size = dumpfile(fd, file_info, path, stat_buf);
         file_info["size"] = encode_int(file_size);
-        close(fd);
         if (file_size < 0)
             return;             // error occurred; do not dump file
@@ -356,13 +357,7 @@ void dump_inode(const string& path, // Path within snapshot
     file_info["type"] = string(1, inode_type);
-    metadata << "name: " << uri_encode(path) << "\n";
-    dict_output(metadata, file_info);
-    metadata << "\n";
-
-    // Break apart metadata listing if it becomes too large.
-    if (metadata.str().size() > LBS_METADATA_BLOCK_SIZE)
-        metadata_flush();
+    metawriter->add(path, file_info);
 }
 void scanfile(const string& path, bool include)
@@ -543,6 +538,7 @@ void usage(const char *program)
 {
     fprintf(
         stderr,
+        "LBS %s\n\n"
         "Usage: %s [OPTION]... --dest=DEST PATHS...\n"
         "Produce backup snapshot of files in SOURCE and store to DEST.\n"
         "\n"
@@ -555,17 +551,19 @@
         " --filter-extension=EXT\n"
         "     string to append to segment files\n"
         "     (defaults to \".bz2\")\n"
+        " --signature-filter=COMMAND\n"
+        "     program through which to filter descriptor\n"
         " --scheme=NAME optional name for this snapshot\n",
-        program
+        lbs_version, program
     );
 }
 int main(int argc, char *argv[])
 {
-    string backup_source = ".";
     string backup_dest = "";
     string localdb_dir = "";
     string backup_scheme = "";
+    string signature_filter = "";
     while (1) {
         static struct option long_options[] = {
@@ -575,6 +573,7 @@
             {"filter-extension", 1, 0, 0}, // 3
             {"dest", 1, 0, 0}, // 4
             {"scheme", 1, 0, 0}, // 5
+            {"signature-filter", 1, 0, 0}, // 6
             {NULL, 0, 0, 0},
         };
@@ -607,6 +606,9 @@ int main(int argc, char *argv[])
         case 5: // --scheme
            backup_scheme = optarg;
            break;
+        case 6: // --signature-filter
+           signature_filter = optarg;
+           break;
         default:
            fprintf(stderr, "Unhandled long option!\n");
            return 1;
@@ -617,20 +619,14 @@ int main(int argc, char *argv[])
         }
     }
-    if (argc < optind + 2) {
+    if (optind == argc) {
         usage(argv[0]);
         return 1;
     }
     searches.push_back(".");
-    if (optind == argc) {
-        add_include(".");
-    } else {
-        for (int i = optind; i < argc; i++)
-            add_include(argv[i]);
-    }
-
-    backup_source = argv[optind];
+    for (int i = optind; i < argc; i++)
+        add_include(argv[i]);
     if (backup_dest == "") {
         fprintf(stderr,
@@ -666,7 +662,6 @@ int main(int argc, char *argv[])
         printf(" %s\n", i->c_str());
     }
-    tss = new TarSegmentStore(backup_dest);
     block_buf = new char[LBS_BLOCK_SIZE];
     /* Store the time when the backup started, so it can be included in the
@@ -686,55 +681,118 @@ int main(int argc, char *argv[])
     db->Open(database_path.c_str(), desc_buf,
              backup_scheme.size() ? backup_scheme.c_str() : NULL);
+    tss = new TarSegmentStore(backup_dest, db);
+
+    metawriter = new MetadataWriter(tss);
+
     /* Initialize the stat cache, for skipping over unchanged files. */
     statcache = new StatCache;
-    statcache->Open(localdb_dir.c_str(), desc_buf);
+    statcache->Open(localdb_dir.c_str(), desc_buf,
+                    backup_scheme.size() ? backup_scheme.c_str() : NULL);
     scanfile(".", false);
-    metadata_flush();
-    const string md = metadata_root.str();
-
-    LbsObject *root = new LbsObject;
-    root->set_group("metadata");
-    root->set_data(md.data(), md.size());
-    root->write(tss);
-    root->checksum();
-    segment_list.insert(root->get_ref().get_segment());
-
-    string backup_root = root->get_ref().to_string();
-    delete root;
-
-    db->Close();
+    ObjectReference root_ref = metawriter->close();
+    add_segment(root_ref.get_segment());
+    string backup_root = root_ref.to_string();
     statcache->Close();
     delete statcache;
+    delete metawriter;
+
     tss->sync();
     tss->dump_stats();
     delete tss;
+    /* Write out a checksums file which lists the checksums for all the
+     * segments included in this snapshot. The format is designed so that it
+     * may be easily verified using the sha1sums command. */
+    const char csum_type[] = "sha1";
+    string checksum_filename = backup_dest + "/snapshot-";
+    if (backup_scheme.size() > 0)
+        checksum_filename += backup_scheme + "-";
+    checksum_filename = checksum_filename + desc_buf + "."
+        + csum_type + "sums";
+    FILE *checksums = fopen(checksum_filename.c_str(), "w");
+    if (checksums != NULL) {
+        for (std::set<string>::iterator i = segment_list.begin();
+             i != segment_list.end(); ++i) {
+            string seg_path, seg_csum;
+            if (db->GetSegmentChecksum(*i, &seg_path, &seg_csum)) {
+                const char *raw_checksum = NULL;
+                if (strncmp(seg_csum.c_str(), csum_type,
+                            strlen(csum_type)) == 0) {
+                    raw_checksum = seg_csum.c_str() + strlen(csum_type);
+                    if (*raw_checksum == '=')
+                        raw_checksum++;
+                    else
+                        raw_checksum = NULL;
+                }
+
+                if (raw_checksum != NULL)
+                    fprintf(checksums, "%s *%s\n",
+                            raw_checksum, seg_path.c_str());
+            }
+        }
+        fclose(checksums);
+    } else {
+        fprintf(stderr, "ERROR: Unable to write checksums file: %m\n");
+    }
+
+    db->Close();
+
     /* Write a backup descriptor file, which says which segments are needed and
      * where to start to restore this snapshot. The filename is based on the
-     * current time. */
+     * current time. If a signature filter program was specified, filter the
+     * data through that to give a chance to sign the descriptor contents. */
     string desc_filename = backup_dest + "/snapshot-";
     if (backup_scheme.size() > 0)
         desc_filename += backup_scheme + "-";
     desc_filename = desc_filename + desc_buf + ".lbs";
-    std::ofstream descriptor(desc_filename.c_str());
-    descriptor << "Format: LBS Snapshot v0.2\n";
-    descriptor << "Producer: LBS " << lbs_version << "\n";
+    int descriptor_fd = open(desc_filename.c_str(), O_WRONLY | O_CREAT, 0666);
+    if (descriptor_fd < 0) {
+        fprintf(stderr, "Unable to open descriptor output file: %m\n");
+        return 1;
+    }
+    pid_t signature_pid = 0;
+    if (signature_filter.size() > 0) {
+        int new_fd = spawn_filter(descriptor_fd, signature_filter.c_str(),
+                                  &signature_pid);
+        close(descriptor_fd);
+        descriptor_fd = new_fd;
+    }
+    FILE *descriptor = fdopen(descriptor_fd, "w");
+
+    fprintf(descriptor, "Format: LBS Snapshot v0.6\n");
+    fprintf(descriptor, "Producer: LBS %s\n", lbs_version);
     strftime(desc_buf, sizeof(desc_buf), "%Y-%m-%d %H:%M:%S %z", &time_buf);
-    descriptor << "Date: " << desc_buf << "\n";
+    fprintf(descriptor, "Date: %s\n", desc_buf);
     if (backup_scheme.size() > 0)
-        descriptor << "Scheme: " << backup_scheme << "\n";
-    descriptor << "Root: " << backup_root << "\n";
+        fprintf(descriptor, "Scheme: %s\n", backup_scheme.c_str());
+    fprintf(descriptor, "Root: %s\n", backup_root.c_str());
+
+    SHA1Checksum checksum_csum;
+    if (checksum_csum.process_file(checksum_filename.c_str())) {
+        string csum = checksum_csum.checksum_str();
+        fprintf(descriptor, "Checksums: %s\n", csum.c_str());
+    }
-    descriptor << "Segments:\n";
+    fprintf(descriptor, "Segments:\n");
     for (std::set<string>::iterator i = segment_list.begin();
          i != segment_list.end(); ++i) {
-        descriptor << " " << *i << "\n";
+        fprintf(descriptor, " %s\n", i->c_str());
+    }
+
+    fclose(descriptor);
+
+    if (signature_pid) {
+        int status;
+        waitpid(signature_pid, &status, 0);
+
+        if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
+            throw IOException("Signature filter process error");
        }
    }
    return 0;
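
For reference, the snapshot descriptor emitted by the fprintf calls added above is a small
plain-text file of headers followed by the segment list. A rough sketch of its shape (all
values below are illustrative placeholders, not output from a real run; the Scheme and
Checksums lines only appear when a scheme was given and the checksums file could be read):

    Format: LBS Snapshot v0.6
    Producer: LBS <version>
    Date: <YYYY-MM-DD HH:MM:SS +ZZZZ>
    Scheme: <scheme name>
    Root: <reference to the root metadata object>
    Checksums: <SHA-1 digest of the snapshot-*.sha1sums file>
    Segments:
     <segment name>
     <segment name>

The companion snapshot-*.sha1sums file written just before it contains one
"<hex digest> *<segment path>" line per segment, so segment data can be verified with
standard sha1sum-style tooling, as the comment in the new code notes.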