/* Cumulus: Smart Filesystem Backup to Dumb Servers
 * Copyright (C) 2006-2009 The Regents of the University of California
 * Copyright (C) 2012 Google Inc.
 * Written by Michael Vrable <mvrable@cs.ucsd.edu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
/* Main entry point for Cumulus.  Contains logic for traversing the filesystem
 * and constructing a backup. */

#include <sys/sysmacros.h>
#include <sys/types.h>

#include "third_party/sha1.h"
/* Version information.  This will be filled in by the Makefile. */
#ifndef CUMULUS_VERSION
#define CUMULUS_VERSION Unknown
#endif

#define CUMULUS_STRINGIFY(s) CUMULUS_STRINGIFY2(s)
#define CUMULUS_STRINGIFY2(s) #s
static const char cumulus_version[] = CUMULUS_STRINGIFY(CUMULUS_VERSION);
static RemoteStore *remote = NULL;
static TarSegmentStore *tss = NULL;
static MetadataWriter *metawriter = NULL;

/* Buffer for holding a single block of data read from a file. */
static const size_t LBS_BLOCK_SIZE = 1024 * 1024;
static char *block_buf;

/* Local database, which tracks objects written in this and previous
 * invocations to help in creating incremental snapshots. */

/* Keep track of all segments which are needed to reconstruct the snapshot. */
std::set<string> segment_list;

/* Snapshot intent: 1=daily, 7=weekly, etc.  This is not used directly, but is
 * stored in the local database and can help guide segment cleaning and
 * snapshot expiration policies. */
double snapshot_intent = 1.0;

/* Selection of files to include/exclude in the snapshot. */
PathFilterList filter_rules;

bool flag_rebuild_statcache = false;

/* Whether verbose output is enabled. */
bool verbose = false;
/* Ensure that the given segment is listed as a dependency of the current
 * snapshot. */
void add_segment(const string& segment)
{
    segment_list.insert(segment);
}
/* Attempts to open a regular file read-only, but with safety checks for files
 * that might not be fully trusted. */
int safe_open(const string& path, struct stat *stat_buf)

    /* Be paranoid when opening the file.  We have no guarantee that the
     * file was not replaced between the stat() call above and the open()
     * call below, so we might not even be opening a regular file.  We
     * supply flags to open to guard against various conditions before
     * we can perform an lstat to check that the file is still a regular
     * file:
     *   - O_NOFOLLOW: in the event the file was replaced by a symlink
     *   - O_NONBLOCK: prevents open() from blocking if the file was
     *     replaced by a fifo
     * We also add in O_NOATIME, since this may reduce disk writes (for
     * inode updates).  However, O_NOATIME may result in EPERM, so if the
     * initial open fails, try again without O_NOATIME. */
    fd = open(path.c_str(), O_RDONLY|O_NOATIME|O_NOFOLLOW|O_NONBLOCK);

        fd = open(path.c_str(), O_RDONLY|O_NOFOLLOW|O_NONBLOCK);

        fprintf(stderr, "Unable to open file %s: %m\n", path.c_str());
    /* Drop the use of the O_NONBLOCK flag; we only wanted that for file
     * open. */
    long flags = fcntl(fd, F_GETFL);
    fcntl(fd, F_SETFL, flags & ~O_NONBLOCK);

    /* Re-check file attributes, storing them into stat_buf if that is
     * non-NULL. */
    struct stat internal_stat_buf;
    if (stat_buf == NULL)
        stat_buf = &internal_stat_buf;

    /* Perform the stat call again, and check that we still have a regular
     * file. */
    if (fstat(fd, stat_buf) < 0) {
        fprintf(stderr, "fstat: %m\n");

    if ((stat_buf->st_mode & S_IFMT) != S_IFREG) {
        fprintf(stderr, "file is no longer a regular file!\n");
/* Read data from a file descriptor and return the amount of data read.  A
 * short read (less than the requested size) will only occur if end-of-file is
 * reached. */
ssize_t file_read(int fd, char *buf, size_t maxlen)

    size_t bytes_read = 0;

        ssize_t res = read(fd, buf, maxlen);

            fprintf(stderr, "error reading file: %m\n");
        } else if (res == 0) {
/* Read the contents of a file (specified by an open file descriptor) and copy
 * the data to the store.  Returns the size of the file (number of bytes
 * dumped), or -1 on error. */
int64_t dumpfile(int fd, dictionary &file_info, const string &path,
                 struct stat& stat_buf)

    list<string> object_list;
    const char *status = NULL;          /* Status indicator printed out */

    /* Look up this file in the old stat cache, if we can.  If the stat
     * information indicates that the file has not changed, do not bother
     * re-reading the entire contents.  Even if the information has been
     * changed, we can use the list of old blocks in the search for a sub-block
     * incremental representation. */
    list<ObjectReference> old_blocks;

    bool found = metawriter->find(path);
        old_blocks = metawriter->get_blocks();

        && !flag_rebuild_statcache
        && metawriter->is_unchanged(&stat_buf)) {

        /* If any of the blocks in the object have been expired, then we should
         * fall back to fully reading in the file. */
        for (list<ObjectReference>::const_iterator i = old_blocks.begin();
             i != old_blocks.end(); ++i) {
            const ObjectReference &ref = *i;
            if (!db->IsAvailable(ref)) {

        /* If everything looks okay, use the cached information */
            file_info["checksum"] = metawriter->get_checksum();
            for (list<ObjectReference>::const_iterator i = old_blocks.begin();
                 i != old_blocks.end(); ++i) {
                const ObjectReference &ref = *i;
                object_list.push_back(ref.to_string());
                    add_segment(ref.get_segment());
            size = stat_buf.st_size;
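            /* Note: on a clean stat-cache hit the file contents are never
             * re-read; the object references recorded by the previous snapshot
             * are reused as-is, and only their containing segments are added
             * to this snapshot's segment list. */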
    /* If the file is new or changed, we must read in the contents a block at a
     * time. */

    Hash *hash = Hash::New();

    subfile.load_old_blocks(old_blocks);

        ssize_t bytes = file_read(fd, block_buf, LBS_BLOCK_SIZE);

            fprintf(stderr, "Backup contents for %s may be incorrect\n",

        hash->update(block_buf, bytes);

        // Sparse file processing: if we read a block of all zeroes, encode
        // it specially.
        bool all_zero = true;
        for (int i = 0; i < bytes; i++) {
            if (block_buf[i] != 0) {

        // Either find a copy of this block in an already-existing segment,
        // or index it so it can be re-used in the future
        double block_age = 0.0;

        SHA1Checksum block_hash;
        block_hash.process(block_buf, bytes);
        string block_csum = block_hash.checksum_str();

            ref = ObjectReference(ObjectReference::REF_ZERO);
            ref.set_range(0, bytes);

            ref = db->FindObject(block_csum, bytes);
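        /* At this point ref is either an all-zero reference (REF_ZERO, which
         * records only a length, so runs of zeroes need not be stored at all),
         * or the result of looking up the block's SHA-1 checksum in the local
         * database; a null reference means the block is not yet stored and
         * must be written out below. */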
        list<ObjectReference> refs;

        // Store a copy of the object if one does not yet exist
            LbsObject *o = new LbsObject;

            /* We might still have seen this checksum before, if the object
             * was stored at some time in the past, but we have decided to
             * clean the segment the object was originally stored in
             * (FindObject will not return such objects).  When rewriting
             * the object contents, put it in a separate group, so that old
             * objects get grouped together.  The hope is that these old
             * objects will continue to be used in the future, and we
             * obtain segments which will continue to be well-utilized.
             * Additionally, keep track of the age of the data by looking
             * up the age of the block which was expired and using that
             * instead of the current time. */
            if (db->IsOldObject(block_csum, bytes,
                                &block_age, &object_group)) {
                if (object_group == 0) {
                    o->set_group("data");
                    sprintf(group, "compacted-%d", object_group);
                o->set_group("data");

            subfile.analyze_new_block(block_buf, bytes);
            refs = subfile.create_incremental(tss, o, block_age);

            if (flag_rebuild_statcache && ref.is_normal()) {
                subfile.analyze_new_block(block_buf, bytes);
                subfile.store_analyzed_signatures(ref);

        while (!refs.empty()) {
            ref = refs.front(); refs.pop_front();
            object_list.push_back(ref.to_string());
                add_segment(ref.get_segment());
    file_info["checksum"] = hash->digest_str();

    // Sanity check: if we are rebuilding the statcache, but the file looks
    // like it hasn't changed, then the newly-computed checksum should match
    // the checksum in the statcache.  If not, there may be disk corruption,
    // so report a warning.
    if (flag_rebuild_statcache) {
            && metawriter->is_unchanged(&stat_buf)
            && file_info["checksum"] != metawriter->get_checksum()) {
                    "Warning: Checksum for %s does not match expected value\n"
                    metawriter->get_checksum().c_str(),
                    file_info["checksum"].c_str());

    if (verbose && status != NULL)
        printf(" [%s]\n", status);

    string blocklist = "";
    for (list<string>::iterator i = object_list.begin();
         i != object_list.end(); ++i) {
        if (i != object_list.begin())

    file_info["data"] = blocklist;

/* Look up a user/group and convert it to string form (either strictly numeric
 * or numeric plus symbolic).  Caches the results of the call to
 * getpwuid/getgrgid. */
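/* Illustrative example of the encoding below: uid 1000 belonging to user
 * "alice" would come out roughly as "1000 (alice)", and as just "1000" if the
 * passwd lookup fails (this assumes encode_int() renders the id in plain
 * decimal form). */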
string user_to_string(uid_t uid) {
    static map<uid_t, string> user_cache;
    map<uid_t, string>::const_iterator i = user_cache.find(uid);
    if (i != user_cache.end())

    string result = encode_int(uid);
    struct passwd *pwd = getpwuid(uid);
    if (pwd != NULL && pwd->pw_name != NULL) {
        result += " (" + uri_encode(pwd->pw_name) + ")";

    user_cache[uid] = result;

string group_to_string(gid_t gid) {
    static map<gid_t, string> group_cache;
    map<gid_t, string>::const_iterator i = group_cache.find(gid);
    if (i != group_cache.end())

    string result = encode_int(gid);
    struct group *grp = getgrgid(gid);
    if (grp != NULL && grp->gr_name != NULL) {
        result += " (" + uri_encode(grp->gr_name) + ")";

    group_cache[gid] = result;
/* Dump a specified filesystem object (file, directory, etc.) based on its
 * inode information.  If the object is a regular file, an open filehandle is
 * provided. */
void dump_inode(const string& path,         // Path within snapshot
                const string& fullpath,     // Path to object in filesystem
                struct stat& stat_buf,      // Results of stat() call
                int fd)                     // Open filehandle if regular file

    dictionary file_info;

        printf("%s\n", path.c_str());
    metawriter->find(path);

    file_info["name"] = uri_encode(path);
    file_info["mode"] = encode_int(stat_buf.st_mode & 07777, 8);
    file_info["ctime"] = encode_int(stat_buf.st_ctime);
    file_info["mtime"] = encode_int(stat_buf.st_mtime);
    file_info["user"] = user_to_string(stat_buf.st_uid);
    file_info["group"] = group_to_string(stat_buf.st_gid);
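    /* Heuristic: a file whose ctime or mtime falls within the last 30 seconds
     * is flagged "volatile", since its contents may well still be changing
     * while the backup runs; directories are exempt from this check. */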
    time_t now = time(NULL);
    if (now - stat_buf.st_ctime < 30 || now - stat_buf.st_mtime < 30)
        if ((stat_buf.st_mode & S_IFMT) != S_IFDIR)
            file_info["volatile"] = "1";

    if (stat_buf.st_nlink > 1 && (stat_buf.st_mode & S_IFMT) != S_IFDIR) {
        file_info["links"] = encode_int(stat_buf.st_nlink);
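        /* For hard-linked files, also record a device/inode identifier;
         * presumably this lets entries which point at the same inode be
         * matched up again when the snapshot is restored. */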
        file_info["inode"] = encode_int(major(stat_buf.st_dev))
            + "/" + encode_int(minor(stat_buf.st_dev))
            + "/" + encode_int(stat_buf.st_ino);

    switch (stat_buf.st_mode & S_IFMT) {

        inode_type = ((stat_buf.st_mode & S_IFMT) == S_IFBLK) ? 'b' : 'c';
        file_info["device"] = encode_int(major(stat_buf.st_rdev))
            + "/" + encode_int(minor(stat_buf.st_rdev));

        /* Use the reported file size to allocate a buffer large enough to read
         * the symlink.  Allocate slightly more space, so that we ask for more
         * bytes than we expect and so can check for truncation. */
        buf = new char[stat_buf.st_size + 2];
        len = readlink(fullpath.c_str(), buf, stat_buf.st_size + 1);
            fprintf(stderr, "error reading symlink: %m\n");
        } else if (len <= stat_buf.st_size) {
            file_info["target"] = uri_encode(buf);
        } else if (len > stat_buf.st_size) {
            fprintf(stderr, "error reading symlink: name truncated\n");

        file_size = dumpfile(fd, file_info, path, stat_buf);
        file_info["size"] = encode_int(file_size);
            return;             // error occurred; do not dump file

        if (file_size != stat_buf.st_size) {
            fprintf(stderr, "Warning: Size of %s changed during reading\n",
            file_info["volatile"] = "1";

        fprintf(stderr, "Unknown inode type: mode=%x\n", stat_buf.st_mode);

    file_info["type"] = string(1, inode_type);

    metawriter->add(file_info);

/* Converts a path to the normalized form used in the metadata log.  Paths are
 * written as relative (without any leading slashes).  The root directory is
 * referred to as ".". */
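/* Illustrative examples of that mapping: the root directory "/" is written as
 * ".", and a path such as "/etc/fstab" is written as "etc/fstab". */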
string metafile_path(const string& path)

    const char *newpath = path.c_str();

    if (*newpath == '\0')

void try_merge_filter(const string& path, const string& basedir)

    struct stat stat_buf;
    if (lstat(path.c_str(), &stat_buf) < 0)
    if ((stat_buf.st_mode & S_IFMT) != S_IFREG)
    int fd = safe_open(path, NULL);

    /* As a very crude limit on the complexity of merge rules, only read up to
     * one block (1 MB) worth of data.  If the file seems like it might be
     * larger than that, don't parse the rules in it. */
    ssize_t bytes = file_read(fd, block_buf, LBS_BLOCK_SIZE);

    if (bytes < 0 || bytes >= static_cast<ssize_t>(LBS_BLOCK_SIZE - 1)) {
        /* TODO: Add more strict resource limits on merge files? */
                "Unable to read filter merge file (possibly size too large)\n");

    filter_rules.merge_patterns(metafile_path(path), basedir,
                                string(block_buf, bytes));

void scanfile(const string& path)

    struct stat stat_buf;

    string output_path = metafile_path(path);

    if (lstat(path.c_str(), &stat_buf) < 0) {
        fprintf(stderr, "lstat(%s): %m\n", path.c_str());

    bool is_directory = ((stat_buf.st_mode & S_IFMT) == S_IFDIR);
    if (!filter_rules.is_included(output_path, is_directory))

    if ((stat_buf.st_mode & S_IFMT) == S_IFREG) {
        fd = safe_open(path, &stat_buf);

    dump_inode(output_path, path, stat_buf, fd);

    /* If we hit a directory, now that we've written the directory itself,
     * recursively scan the directory. */
    DIR *dir = opendir(path.c_str());

        fprintf(stderr, "Error reading directory %s: %m\n",

    vector<string> contents;
    while ((ent = readdir(dir)) != NULL) {
        string filename(ent->d_name);
        if (filename == "." || filename == "..")
        contents.push_back(filename);

    sort(contents.begin(), contents.end());
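    /* Sorting the directory entries presumably keeps the traversal (and hence
     * the metadata log) in a stable order from one run to the next, which
     * should help the incremental metadata comparison. */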
    /* First pass through the directory items: look for any filter rules to
     * merge and do so. */
    for (vector<string>::iterator i = contents.begin();
         i != contents.end(); ++i) {
        else if (path == "/")
            filename = "/" + *i;
        else
            filename = path + "/" + *i;
        if (filter_rules.is_mergefile(metafile_path(filename))) {
                printf("Merging directory filter rules %s\n",
            try_merge_filter(filename, output_path);

    /* Second pass: recursively scan all items in the directory for backup;
     * scanfile() will check if the item should be included or not. */
    for (vector<string>::iterator i = contents.begin();
         i != contents.end(); ++i) {
        const string& filename = *i;
        else if (path == "/")
            scanfile("/" + filename);
        else
            scanfile(path + "/" + filename);

    filter_rules.restore();
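    /* filter_rules.restore() presumably pops any per-directory rules that were
     * merged during the first pass above, so that they do not leak out into
     * sibling directories. */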
void usage(const char *program)

        "Usage: %s [OPTION]... --dest=DEST PATHS...\n"
        "Produce backup snapshot of files in SOURCE and store to DEST.\n"

        "  --dest=PATH          path where backup is to be written\n"
        "  --upload-script=COMMAND\n"
        "                       program to invoke for each backup file generated\n"
        "  --exclude=PATTERN    exclude files matching PATTERN from snapshot\n"
        "  --include=PATTERN    include files matching PATTERN in snapshot\n"
        "  --dir-merge=PATTERN  parse files matching PATTERN to read additional\n"
        "                       subtree-specific include/exclude rules during backup\n"
        "  --localdb=PATH       local backup metadata is stored in PATH\n"
        "  --tmpdir=PATH        path for temporarily storing backup files\n"
        "                           (defaults to TMPDIR environment variable or /tmp)\n"
        "  --filter=COMMAND     program through which to filter segment data\n"
        "                           (defaults to \"bzip2 -c\")\n"
        "  --filter-extension=EXT\n"
        "                       string to append to segment files\n"
        "                           (defaults to \".bz2\")\n"
        "  --signature-filter=COMMAND\n"
        "                       program through which to filter descriptor\n"
        "  --scheme=NAME        optional name for this snapshot\n"
        "  --intent=FLOAT       intended backup type: 1=daily, 7=weekly, ...\n"
        "                           (defaults to \"1\")\n"
        "  --full-metadata      do not re-use metadata from previous backups\n"
        "  --rebuild-statcache  re-read all file data to verify statcache\n"
        "  -v --verbose         list files as they are backed up\n"

        "Exactly one of --dest or --upload-script must be specified.\n",
        cumulus_version, program
int main(int argc, char *argv[])

    string backup_dest = "", backup_script = "";
    string localdb_dir = "";
    string backup_scheme = "";
    string signature_filter = "";

    string tmp_dir = "/tmp";
    if (getenv("TMPDIR") != NULL)
        tmp_dir = getenv("TMPDIR");

    static struct option long_options[] = {
        {"localdb", 1, 0, 0},           // 0
        {"filter", 1, 0, 0},            // 1
        {"filter-extension", 1, 0, 0},  // 2
        {"dest", 1, 0, 0},              // 3
        {"scheme", 1, 0, 0},            // 4
        {"signature-filter", 1, 0, 0},  // 5
        {"intent", 1, 0, 0},            // 6
        {"full-metadata", 0, 0, 0},     // 7
        {"tmpdir", 1, 0, 0},            // 8
        {"upload-script", 1, 0, 0},     // 9
        {"rebuild-statcache", 0, 0, 0}, // 10
        {"include", 1, 0, 0},           // 11
        {"exclude", 1, 0, 0},           // 12
        {"dir-merge", 1, 0, 0},         // 13
        // Aliases for short options
        {"verbose", 0, 0, 'v'},

        int c = getopt_long(argc, argv, "v", long_options, &long_index);

            switch (long_index) {
                localdb_dir = optarg;
                filter_program = optarg;
            case 2:     // --filter-extension
                filter_extension = optarg;
                backup_dest = optarg;
                backup_scheme = optarg;
            case 5:     // --signature-filter
                signature_filter = optarg;
                snapshot_intent = atof(optarg);
                if (snapshot_intent <= 0)
            case 7:     // --full-metadata
                flag_full_metadata = true;
            case 9:     // --upload-script
                backup_script = optarg;
            case 10:    // --rebuild-statcache
                flag_rebuild_statcache = true;
            case 11:    // --include
                filter_rules.add_pattern(PathFilterList::INCLUDE, optarg, "");
            case 12:    // --exclude
                filter_rules.add_pattern(PathFilterList::EXCLUDE, optarg, "");
            case 13:    // --dir-merge
                filter_rules.add_pattern(PathFilterList::DIRMERGE, optarg, "");
                fprintf(stderr, "Unhandled long option!\n");
    if (optind == argc) {

    if (backup_dest == "" && backup_script == "") {
                "Error: Backup destination must be specified using --dest= or --upload-script=\n");

    if (backup_dest != "" && backup_script != "") {
                "Error: Cannot specify both --dest= and --upload-script=\n");

    // Default for --localdb is the same as --dest
    if (localdb_dir == "") {
        localdb_dir = backup_dest;
    }
    if (localdb_dir == "") {
                "Error: Must specify local database path with --localdb=\n");

    block_buf = new char[LBS_BLOCK_SIZE];

    /* Initialize the remote storage layer.  If using an upload script, create
     * a temporary directory for staging files.  Otherwise, write backups
     * directly to the destination directory. */
    if (backup_script != "") {
        tmp_dir = tmp_dir + "/lbs." + generate_uuid();
        if (mkdir(tmp_dir.c_str(), 0700) < 0) {
            fprintf(stderr, "Cannot create temporary directory %s: %m\n",
        remote = new RemoteStore(tmp_dir, backup_script);
        remote = new RemoteStore(backup_dest);

    /* Store the time when the backup started, so it can be included in the
     * snapshot descriptor. */
    struct tm time_buf_local, time_buf_utc;
    localtime_r(&now, &time_buf_local);
    gmtime_r(&now, &time_buf_utc);
    strftime(desc_buf, sizeof(desc_buf), "%Y%m%dT%H%M%S", &time_buf_utc);
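    /* desc_buf now holds a compact UTC timestamp, e.g. "20121110T143052",
     * which is used below to name the snapshot's checksum and descriptor
     * files. */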
    /* Open the local database which tracks all objects that are stored
     * remotely, for efficient incrementals.  Provide it with the name of this
     * snapshot. */
    string database_path = localdb_dir + "/localdb.sqlite";
    db->Open(database_path.c_str(), desc_buf, backup_scheme.c_str(),

    tss = new TarSegmentStore(remote, db);

    /* Initialize the stat cache, for skipping over unchanged files. */
    metawriter = new MetadataWriter(tss, localdb_dir.c_str(), desc_buf,
                                    backup_scheme.c_str());

    for (int i = optind; i < argc; i++) {

    ObjectReference root_ref = metawriter->close();
    add_segment(root_ref.get_segment());
    string backup_root = root_ref.to_string();

    /* Write out a checksums file which lists the checksums for all the
     * segments included in this snapshot.  The format is designed so that it
     * may be easily verified using the sha1sum command. */
    const char csum_type[] = "sha1";
    string checksum_filename = "snapshot-";
    if (backup_scheme.size() > 0)
        checksum_filename += backup_scheme + "-";
    checksum_filename = checksum_filename + desc_buf + "." + csum_type + "sums";
    RemoteFile *checksum_file = remote->alloc_file(checksum_filename,
    FILE *checksums = fdopen(checksum_file->get_fd(), "w");

    for (std::set<string>::iterator i = segment_list.begin();
         i != segment_list.end(); ++i) {
        string seg_path, seg_csum;
        if (db->GetSegmentChecksum(*i, &seg_path, &seg_csum)) {
            const char *raw_checksum = NULL;
            if (strncmp(seg_csum.c_str(), csum_type,
                        strlen(csum_type)) == 0) {
                raw_checksum = seg_csum.c_str() + strlen(csum_type);
                if (*raw_checksum == '=')

            if (raw_checksum != NULL)
                fprintf(checksums, "%s *%s\n",
                        raw_checksum, seg_path.c_str());
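            /* Each output line has the form "<hex digest> *<segment path>",
             * which is the format that `sha1sum -c` accepts for verification. */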
    SHA1Checksum checksum_csum;

    checksum_filename = checksum_file->get_local_path();
    if (checksum_csum.process_file(checksum_filename.c_str())) {
        csum = checksum_csum.checksum_str();

    checksum_file->send();

    /* All other files should be flushed to remote storage before writing the
     * backup descriptor below, so that it is not possible to have a backup
     * descriptor written out depending on non-existent (not yet written)
     * data. */

    /* Write a backup descriptor file, which says which segments are needed and
     * where to start to restore this snapshot.  The filename is based on the
     * current time.  If a signature filter program was specified, filter the
     * data through that to give a chance to sign the descriptor contents. */
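    /* For illustration only, the descriptor produced below is a short text
     * file of header lines, roughly of the form (field values are examples,
     * and optional fields are omitted when empty):
     *
     *     Format: Cumulus Snapshot v0.11
     *     Producer: Cumulus <version>
     *     Date: 2012-11-10 14:30:52 -0800
     *     Scheme: <backup scheme name>
     *     Backup-Intent: 1
     *     Root: <object reference of the metadata root>
     *     Checksums: <checksum of the segment checksums file>
     *     Segments:
     *      <one segment name per line>
     */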
    string desc_filename = "snapshot-";
    if (backup_scheme.size() > 0)
        desc_filename += backup_scheme + "-";
    desc_filename = desc_filename + desc_buf + ".lbs";

    RemoteFile *descriptor_file = remote->alloc_file(desc_filename,
    int descriptor_fd = descriptor_file->get_fd();
    if (descriptor_fd < 0) {
        fprintf(stderr, "Unable to open descriptor output file: %m\n");

    pid_t signature_pid = 0;
    if (signature_filter.size() > 0) {
        int new_fd = spawn_filter(descriptor_fd, signature_filter.c_str(),
        close(descriptor_fd);
        descriptor_fd = new_fd;

    FILE *descriptor = fdopen(descriptor_fd, "w");

    fprintf(descriptor, "Format: Cumulus Snapshot v0.11\n");
    fprintf(descriptor, "Producer: Cumulus %s\n", cumulus_version);
    strftime(desc_buf, sizeof(desc_buf), "%Y-%m-%d %H:%M:%S %z",
    fprintf(descriptor, "Date: %s\n", desc_buf);
    if (backup_scheme.size() > 0)
        fprintf(descriptor, "Scheme: %s\n", backup_scheme.c_str());
    fprintf(descriptor, "Backup-Intent: %g\n", snapshot_intent);
    fprintf(descriptor, "Root: %s\n", backup_root.c_str());

    if (csum.size() > 0) {
        fprintf(descriptor, "Checksums: %s\n", csum.c_str());

    fprintf(descriptor, "Segments:\n");
    for (std::set<string>::iterator i = segment_list.begin();
         i != segment_list.end(); ++i) {
        fprintf(descriptor, " %s\n", i->c_str());

        waitpid(signature_pid, &status, 0);

        if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
            fatal("Signature filter process error");

    descriptor_file->send();

    if (backup_script != "") {
        if (rmdir(tmp_dir.c_str()) < 0) {
                    "Warning: Cannot delete temporary directory %s: %m\n",