/* Cumulus: Efficient Filesystem Backup to the Cloud
 * Copyright (C) 2006-2009, 2012 The Cumulus Developers
 * See the AUTHORS file for a list of contributors.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
/* Main entry point for Cumulus.  Contains logic for traversing the filesystem
 * and constructing a backup. */
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <grp.h>
#include <pwd.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>

#include <algorithm>
#include <list>
#include <map>
#include <set>
#include <string>
#include <vector>

#include "exclude.h"
#include "hash.h"
#include "localdb.h"
#include "metadata.h"
#include "remote.h"
#include "store.h"
#include "subfile.h"
#include "util.h"
#include "third_party/sha1.h"

using std::list;
using std::map;
using std::sort;
using std::string;
using std::vector;
/* Version information.  This will be filled in by the Makefile. */
#ifndef CUMULUS_VERSION
#define CUMULUS_VERSION Unknown
#endif
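/* Two levels of macro are needed so that CUMULUS_VERSION is expanded to its
 * value before being stringified; applying # in a single step would produce
 * the literal text "CUMULUS_VERSION" instead of the version number. */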
#define CUMULUS_STRINGIFY(s) CUMULUS_STRINGIFY2(s)
#define CUMULUS_STRINGIFY2(s) #s
static const char cumulus_version[] = CUMULUS_STRINGIFY(CUMULUS_VERSION);
static RemoteStore *remote = NULL;
static TarSegmentStore *tss = NULL;
static MetadataWriter *metawriter = NULL;
/* Buffer for holding a single block of data read from a file. */
static const size_t LBS_BLOCK_SIZE = 1024 * 1024;
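/* Files are read and indexed in units of this block size: each block is
 * checksummed separately, so duplicate blocks can be detected and re-used
 * (see dumpfile below). */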
static char *block_buf;

/* Local database, which tracks objects written in this and previous
 * invocations to help in creating incremental snapshots. */
LocalDb *db;
/* Keep track of all segments which are needed to reconstruct the snapshot. */
std::set<string> segment_list;
/* Snapshot intent: 1=daily, 7=weekly, etc.  This is not used directly, but is
 * stored in the local database and can help guide segment cleaning and
 * snapshot expiration policies. */
double snapshot_intent = 1.0;
/* Selection of files to include/exclude in the snapshot. */
PathFilterList filter_rules;
bool flag_full_metadata = false;
bool flag_rebuild_statcache = false;

/* Whether verbose output is enabled. */
static bool verbose = false;
/* Ensure that the given segment is listed as a dependency of the current
 * snapshot. */
void add_segment(const string& segment)
{
    segment_list.insert(segment);
}
/* Attempts to open a regular file read-only, but with safety checks for files
 * that might not be fully trusted. */
int safe_open(const string& path, struct stat *stat_buf)
{
    int fd;
    /* Be paranoid when opening the file.  We have no guarantee that the
     * file was not replaced between the stat() call above and the open()
     * call below, so we might not even be opening a regular file.  We
     * supply flags to open to guard against various conditions before
     * we can perform an fstat to check that the file is still a regular
     * file:
     *   - O_NOFOLLOW: in the event the file was replaced by a symlink
     *   - O_NONBLOCK: prevents open() from blocking if the file was
     *     replaced by a fifo
     * We also add in O_NOATIME, since this may reduce disk writes (for
     * inode updates).  However, O_NOATIME may result in EPERM, so if the
     * initial open fails, try again without O_NOATIME. */
    fd = open(path.c_str(), O_RDONLY|O_NOATIME|O_NOFOLLOW|O_NONBLOCK);
    if (fd < 0)
        fd = open(path.c_str(), O_RDONLY|O_NOFOLLOW|O_NONBLOCK);
    if (fd < 0) {
        fprintf(stderr, "Unable to open file %s: %m\n", path.c_str());
        return -1;
    }
    /* Drop the use of the O_NONBLOCK flag; we only wanted that for file
     * opening. */
    long flags = fcntl(fd, F_GETFL);
    fcntl(fd, F_SETFL, flags & ~O_NONBLOCK);
    /* Re-check file attributes, storing them into stat_buf if that is
     * non-NULL. */
    struct stat internal_stat_buf;
    if (stat_buf == NULL)
        stat_buf = &internal_stat_buf;
    /* Perform the stat call again, and check that we still have a regular
     * file. */
    if (fstat(fd, stat_buf) < 0) {
        fprintf(stderr, "fstat: %m\n");
        close(fd);
        return -1;
    }

    if ((stat_buf->st_mode & S_IFMT) != S_IFREG) {
        fprintf(stderr, "file is no longer a regular file!\n");
        close(fd);
        return -1;
    }

    return fd;
}
/* Read data from a file descriptor and return the amount of data read.  A
 * short read (less than the requested size) will only occur if end-of-file is
 * reached. */
ssize_t file_read(int fd, char *buf, size_t maxlen)
{
    size_t bytes_read = 0;

    while (true) {
        ssize_t res = read(fd, buf, maxlen);
        if (res < 0) {
            if (errno == EINTR)
                continue;       /* Interrupted by a signal; retry the read */
            fprintf(stderr, "error reading file: %m\n");
            return -1;
        } else if (res == 0) {
            break;
        } else {
            bytes_read += res;
            buf += res;
            maxlen -= res;
        }
    }

    return bytes_read;
}
/* Read the contents of a file (specified by an open file descriptor) and copy
 * the data to the store.  Returns the size of the file (number of bytes
 * dumped), or -1 on error. */
int64_t dumpfile(int fd, dictionary &file_info, const string &path,
                 struct stat& stat_buf)
{
    int64_t size = 0;
    list<string> object_list;
    const char *status = NULL;          /* Status indicator printed out */
    /* Look up this file in the old stat cache, if we can.  If the stat
     * information indicates that the file has not changed, do not bother
     * re-reading the entire contents.  Even if the information has been
     * changed, we can use the list of old blocks in the search for a
     * sub-block incremental representation. */
    bool cached = false;
    list<ObjectReference> old_blocks;

    bool found = metawriter->find(path);
    if (found)
        old_blocks = metawriter->get_blocks();

    if (found
        && !flag_rebuild_statcache
        && metawriter->is_unchanged(&stat_buf)) {
        cached = true;
        /* If any of the blocks in the object have been expired, then we
         * should fall back to fully reading in the file. */
        for (list<ObjectReference>::const_iterator i = old_blocks.begin();
             i != old_blocks.end(); ++i) {
            const ObjectReference &ref = *i;
            if (!db->IsAvailable(ref)) {
                cached = false;
                status = "repack";
                break;
            }
        }
    }
    /* If everything looks okay, use the cached information */
    if (cached) {
        file_info["checksum"] = metawriter->get_checksum();
        for (list<ObjectReference>::const_iterator i = old_blocks.begin();
             i != old_blocks.end(); ++i) {
            const ObjectReference &ref = *i;
            object_list.push_back(ref.to_string());
            if (ref.is_normal())
                add_segment(ref.get_segment());
            db->UseObject(ref);
        }
        size = stat_buf.st_size;
    }
    /* If the file is new or changed, we must read in the contents a block at
     * a time. */
    if (!cached) {
        Hash *hash = Hash::New();
        Subfile subfile(db);
        subfile.load_old_blocks(old_blocks);
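        /* The old block list lets the subfile code search for sub-block
         * matches against data already in the store, so a mostly-unchanged
         * file can be written as references into existing blocks rather than
         * being stored again in full. */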
        while (true) {
            ssize_t bytes = file_read(fd, block_buf, LBS_BLOCK_SIZE);
            if (bytes == 0)
                break;
            if (bytes < 0) {
                fprintf(stderr, "Backup contents for %s may be incorrect\n",
                        path.c_str());
                break;
            }

            hash->update(block_buf, bytes);
            size += bytes;
            // Sparse file processing: if we read a block of all zeroes,
            // encode that explicitly.
            bool all_zero = true;
            for (int i = 0; i < bytes; i++) {
                if (block_buf[i] != 0) {
                    all_zero = false;
                    break;
                }
            }
            // Either find a copy of this block in an already-existing
            // segment, or index it so it can be re-used in the future
            double block_age = 0.0;
            ObjectReference ref;

            SHA1Checksum block_hash;
            block_hash.process(block_buf, bytes);
            string block_csum = block_hash.checksum_str();

            if (all_zero) {
                ref = ObjectReference(ObjectReference::REF_ZERO);
                ref.set_range(0, bytes);
            } else {
                ref = db->FindObject(block_csum, bytes);
            }
            list<ObjectReference> refs;

            // Store a copy of the object if one does not yet exist
            if (ref.is_null()) {
                LbsObject *o = new LbsObject;
                int object_group;

                /* We might still have seen this checksum before, if the object
                 * was stored at some time in the past, but we have decided to
                 * clean the segment the object was originally stored in
                 * (FindObject will not return such objects).  When rewriting
                 * the object contents, put it in a separate group, so that old
                 * objects get grouped together.  The hope is that these old
                 * objects will continue to be used in the future, and we
                 * obtain segments which will continue to be well-utilized.
                 * Additionally, keep track of the age of the data by looking
                 * up the age of the block which was expired and using that
                 * instead of the current time. */
                if (db->IsOldObject(block_csum, bytes,
                                    &block_age, &object_group)) {
                    if (object_group == 0) {
                        o->set_group("data");
                    } else {
                        char group[32];
                        sprintf(group, "compacted-%d", object_group);
                        o->set_group(group);
                    }
                    if (status == NULL)
                        status = "partial";
                } else {
                    o->set_group("data");
                    status = "new";
                }

                subfile.analyze_new_block(block_buf, bytes);
                refs = subfile.create_incremental(tss, o, block_age);
            } else {
                if (flag_rebuild_statcache && ref.is_normal()) {
                    subfile.analyze_new_block(block_buf, bytes);
                    subfile.store_analyzed_signatures(ref);
                }
                refs.push_back(ref);
            }
            while (!refs.empty()) {
                ref = refs.front(); refs.pop_front();
                object_list.push_back(ref.to_string());
                if (ref.is_normal())
                    add_segment(ref.get_segment());
                db->UseObject(ref);
            }

            /* file_read returns a short count only at end-of-file, so a
             * partial block means the whole file has been read. */
            if (bytes < static_cast<ssize_t>(LBS_BLOCK_SIZE))
                break;
        }

        file_info["checksum"] = hash->digest_str();
        delete hash;
    }
    // Sanity check: if we are rebuilding the statcache, but the file looks
    // like it hasn't changed, then the newly-computed checksum should match
    // the checksum in the statcache.  If not, we have possible disk corruption
    // and report a warning.
    if (flag_rebuild_statcache) {
        if (found
            && metawriter->is_unchanged(&stat_buf)
            && file_info["checksum"] != metawriter->get_checksum()) {
            fprintf(stderr,
                    "Warning: Checksum for %s does not match expected value\n"
                    "    expected: %s\n"
                    "    actual:   %s\n",
                    path.c_str(),
                    metawriter->get_checksum().c_str(),
                    file_info["checksum"].c_str());
        }
    }
    if (verbose && status != NULL)
        printf("    [%s]\n", status);
    string blocklist = "";
    for (list<string>::iterator i = object_list.begin();
         i != object_list.end(); ++i) {
        if (i != object_list.begin())
            blocklist += "\n    ";
        blocklist += *i;
    }
    file_info["data"] = blocklist;

    return size;
}
/* Look up a user/group and convert it to string form (either strictly numeric
 * or numeric plus symbolic).  Caches the results of the call to
 * getpwuid/getgrgid. */
string user_to_string(uid_t uid) {
    static map<uid_t, string> user_cache;
    map<uid_t, string>::const_iterator i = user_cache.find(uid);
    if (i != user_cache.end())
        return i->second;

    string result = encode_int(uid);
    struct passwd *pwd = getpwuid(uid);
    if (pwd != NULL && pwd->pw_name != NULL) {
        result += " (" + uri_encode(pwd->pw_name) + ")";
    }
    user_cache[uid] = result;
    return result;
}
string group_to_string(gid_t gid) {
    static map<gid_t, string> group_cache;
    map<gid_t, string>::const_iterator i = group_cache.find(gid);
    if (i != group_cache.end())
        return i->second;

    string result = encode_int(gid);
    struct group *grp = getgrgid(gid);
    if (grp != NULL && grp->gr_name != NULL) {
        result += " (" + uri_encode(grp->gr_name) + ")";
    }
    group_cache[gid] = result;
    return result;
}
/* Dump a specified filesystem object (file, directory, etc.) based on its
 * inode information.  If the object is a regular file, an open filehandle is
 * provided. */
void dump_inode(const string& path,         // Path within snapshot
                const string& fullpath,     // Path to object in filesystem
                struct stat& stat_buf,      // Results of stat() call
                int fd)                     // Open filehandle if regular file
{
    char *buf;
    dictionary file_info;
    int64_t file_size;
    ssize_t len;

    if (verbose)
        printf("%s\n", path.c_str());
    metawriter->find(path);
    file_info["name"] = uri_encode(path);
    file_info["mode"] = encode_int(stat_buf.st_mode & 07777, 8);
    file_info["ctime"] = encode_int(stat_buf.st_ctime);
    file_info["mtime"] = encode_int(stat_buf.st_mtime);
    file_info["user"] = user_to_string(stat_buf.st_uid);
    file_info["group"] = group_to_string(stat_buf.st_gid);
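    /* Mark very recently-changed files as "volatile": they may well have been
     * changing while the snapshot was being taken, so the recorded contents
     * and metadata should be treated with some suspicion. */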
    time_t now = time(NULL);
    if (now - stat_buf.st_ctime < 30 || now - stat_buf.st_mtime < 30)
        if ((stat_buf.st_mode & S_IFMT) != S_IFDIR)
            file_info["volatile"] = "1";
    if (stat_buf.st_nlink > 1 && (stat_buf.st_mode & S_IFMT) != S_IFDIR) {
        file_info["links"] = encode_int(stat_buf.st_nlink);
    }
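    /* The (device major, device minor, inode number) triple uniquely
     * identifies a file, which lets the restore tool recognize when two
     * metadata entries refer to the same hard-linked object. */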
    file_info["inode"] = encode_int(major(stat_buf.st_dev))
        + "/" + encode_int(minor(stat_buf.st_dev))
        + "/" + encode_int(stat_buf.st_ino);
    char inode_type;

    switch (stat_buf.st_mode & S_IFMT) {
    case S_IFIFO:
        inode_type = 'p';
        break;
    case S_IFSOCK:
        inode_type = 's';
        break;
    case S_IFBLK:
    case S_IFCHR:
        inode_type = ((stat_buf.st_mode & S_IFMT) == S_IFBLK) ? 'b' : 'c';
        file_info["device"] = encode_int(major(stat_buf.st_rdev))
            + "/" + encode_int(minor(stat_buf.st_rdev));
        break;
    case S_IFLNK:
        inode_type = 'l';

        /* Use the reported file size to allocate a buffer large enough to read
         * the symlink.  Allocate slightly more space, so that we ask for more
         * bytes than we expect and so check for truncation. */
        buf = new char[stat_buf.st_size + 2];
        len = readlink(fullpath.c_str(), buf, stat_buf.st_size + 1);
        if (len < 0) {
            fprintf(stderr, "error reading symlink: %m\n");
        } else if (len <= stat_buf.st_size) {
            buf[len] = '\0';
            file_info["target"] = uri_encode(buf);
        } else if (len > stat_buf.st_size) {
            fprintf(stderr, "error reading symlink: name truncated\n");
        }

        delete[] buf;
        break;
    case S_IFREG:
        inode_type = 'f';

        file_size = dumpfile(fd, file_info, path, stat_buf);
        file_info["size"] = encode_int(file_size);

        if (file_size < 0)
            return;             // error occurred; do not dump file

        if (file_size != stat_buf.st_size) {
            fprintf(stderr, "Warning: Size of %s changed during reading\n",
                    path.c_str());
            file_info["volatile"] = "1";
        }

        break;
    case S_IFDIR:
        inode_type = 'd';
        break;

    default:
        fprintf(stderr, "Unknown inode type: mode=%x\n", stat_buf.st_mode);
        return;
    }

    file_info["type"] = string(1, inode_type);

    metawriter->add(file_info);
}
/* Converts a path to the normalized form used in the metadata log.  Paths are
 * written as relative (without any leading slashes).  The root directory is
 * referred to as ".". */
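/* For example, "/" maps to "." and "/home/user" maps to "home/user". */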
string metafile_path(const string& path)
{
    const char *newpath = path.c_str();
    if (*newpath == '/')
        newpath++;
    if (*newpath == '\0')
        newpath = ".";
    return newpath;
}
void try_merge_filter(const string& path, const string& basedir)
{
    struct stat stat_buf;
    if (lstat(path.c_str(), &stat_buf) < 0)
        return;
    if ((stat_buf.st_mode & S_IFMT) != S_IFREG)
        return;
    int fd = safe_open(path, NULL);
    if (fd < 0)
        return;

    /* As a very crude limit on the complexity of merge rules, only read up to
     * one block (1 MB) worth of data.  If the file seems like it might be
     * larger than that, don't parse the rules in it. */
    ssize_t bytes = file_read(fd, block_buf, LBS_BLOCK_SIZE);
    close(fd);
    if (bytes < 0 || bytes >= static_cast<ssize_t>(LBS_BLOCK_SIZE - 1)) {
        /* TODO: Add more strict resource limits on merge files? */
        fprintf(stderr,
                "Unable to read filter merge file (possibly size too large)\n");
        return;
    }
    filter_rules.merge_patterns(metafile_path(path), basedir,
                                string(block_buf, bytes));
}
void scanfile(const string& path)
{
    int fd = -1;
    struct stat stat_buf;

    string output_path = metafile_path(path);

    if (lstat(path.c_str(), &stat_buf) < 0) {
        fprintf(stderr, "lstat(%s): %m\n", path.c_str());
        return;
    }

    bool is_directory = ((stat_buf.st_mode & S_IFMT) == S_IFDIR);
    if (!filter_rules.is_included(output_path, is_directory))
        return;

    if ((stat_buf.st_mode & S_IFMT) == S_IFREG) {
        fd = safe_open(path, &stat_buf);
        if (fd < 0)
            return;
    }

    dump_inode(output_path, path, stat_buf, fd);

    if (fd >= 0)
        close(fd);
    /* If we hit a directory, now that we've written the directory itself,
     * recursively scan the directory. */
    if (is_directory) {
        DIR *dir = opendir(path.c_str());
        if (dir == NULL) {
            fprintf(stderr, "Error reading directory %s: %m\n",
                    path.c_str());
            return;
        }

        struct dirent *ent;
        vector<string> contents;
        while ((ent = readdir(dir)) != NULL) {
            string filename(ent->d_name);
            if (filename == "." || filename == "..")
                continue;
            contents.push_back(filename);
        }

        closedir(dir);
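        /* Sort the directory entries so that the metadata log is produced in
         * a deterministic order; this also improves the odds of re-using
         * metadata from the previous snapshot. */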
        sort(contents.begin(), contents.end());

        /* Save the current filter state; rules merged below apply only within
         * this subtree and are undone by restore() at the end. */
        filter_rules.save();
        /* First pass through the directory items: look for any filter rules to
         * merge and do so. */
        for (vector<string>::iterator i = contents.begin();
             i != contents.end(); ++i) {
            string filename;
            if (path == ".")
                filename = *i;
            else if (path == "/")
                filename = "/" + *i;
            else
                filename = path + "/" + *i;
            if (filter_rules.is_mergefile(metafile_path(filename))) {
                if (verbose)
                    printf("Merging directory filter rules %s\n",
                           filename.c_str());
                try_merge_filter(filename, output_path);
            }
        }
        /* Second pass: recursively scan all items in the directory for backup;
         * scanfile() will check if the item should be included or not. */
        for (vector<string>::iterator i = contents.begin();
             i != contents.end(); ++i) {
            const string& filename = *i;
            if (path == ".")
                scanfile(filename);
            else if (path == "/")
                scanfile("/" + filename);
            else
                scanfile(path + "/" + filename);
        }
        filter_rules.restore();
    }
}
void usage(const char *program)
{
    fprintf(
        stderr,
        "Cumulus %s\n\n"
        "Usage: %s [OPTION]... --dest=DEST PATHS...\n"
        "Produce backup snapshot of files in SOURCE and store to DEST.\n"
        "\n"
        "Options:\n"
        "  --dest=PATH          path where backup is to be written\n"
        "  --upload-script=COMMAND\n"
        "                       program to invoke for each backup file generated\n"
        "  --exclude=PATTERN    exclude files matching PATTERN from snapshot\n"
        "  --include=PATTERN    include files matching PATTERN in snapshot\n"
        "  --dir-merge=PATTERN  parse files matching PATTERN to read additional\n"
        "                       subtree-specific include/exclude rules during backup\n"
        "  --localdb=PATH       local backup metadata is stored in PATH\n"
        "  --tmpdir=PATH        path for temporarily storing backup files\n"
        "                           (defaults to TMPDIR environment variable or /tmp)\n"
        "  --filter=COMMAND     program through which to filter segment data\n"
        "                           (defaults to \"bzip2 -c\")\n"
        "  --filter-extension=EXT\n"
        "                       string to append to segment files\n"
        "                           (defaults to \".bz2\")\n"
        "  --signature-filter=COMMAND\n"
        "                       program through which to filter descriptor\n"
        "  --scheme=NAME        optional name for this snapshot\n"
        "  --intent=FLOAT       intended backup type: 1=daily, 7=weekly, ...\n"
        "                           (defaults to \"1\")\n"
        "  --full-metadata      do not re-use metadata from previous backups\n"
        "  --rebuild-statcache  re-read all file data to verify statcache\n"
        "  -v --verbose         list files as they are backed up\n"
        "\n"
        "Exactly one of --dest or --upload-script must be specified.\n",
        cumulus_version, program
    );
}
int main(int argc, char *argv[])
{
    string backup_dest = "", backup_script = "";
    string localdb_dir = "";
    string backup_scheme = "";
    string signature_filter = "";

    string tmp_dir = "/tmp";
    if (getenv("TMPDIR") != NULL)
        tmp_dir = getenv("TMPDIR");
    static struct option long_options[] = {
        {"localdb", 1, 0, 0},           // 0
        {"filter", 1, 0, 0},            // 1
        {"filter-extension", 1, 0, 0},  // 2
        {"dest", 1, 0, 0},              // 3
        {"scheme", 1, 0, 0},            // 4
        {"signature-filter", 1, 0, 0},  // 5
        {"intent", 1, 0, 0},            // 6
        {"full-metadata", 0, 0, 0},     // 7
        {"tmpdir", 1, 0, 0},            // 8
        {"upload-script", 1, 0, 0},     // 9
        {"rebuild-statcache", 0, 0, 0}, // 10
        {"include", 1, 0, 0},           // 11
        {"exclude", 1, 0, 0},           // 12
        {"dir-merge", 1, 0, 0},         // 13
        // Aliases for short options
        {"verbose", 0, 0, 'v'},
        {NULL, 0, 0, 0},
    };
    while (1) {
        int long_index;
        int c = getopt_long(argc, argv, "v", long_options, &long_index);
        if (c == -1)
            break;

        if (c == 0) {
            switch (long_index) {
            case 0:     // --localdb
                localdb_dir = optarg;
                break;
            case 1:     // --filter
                filter_program = optarg;
                break;
            case 2:     // --filter-extension
                filter_extension = optarg;
                break;
            case 3:     // --dest
                backup_dest = optarg;
                break;
            case 4:     // --scheme
                backup_scheme = optarg;
                break;
            case 5:     // --signature-filter
                signature_filter = optarg;
                break;
            case 6:     // --intent
                snapshot_intent = atof(optarg);
                if (snapshot_intent <= 0)
                    snapshot_intent = 1;
                break;
            case 7:     // --full-metadata
                flag_full_metadata = true;
                break;
            case 8:     // --tmpdir
                tmp_dir = optarg;
                break;
            case 9:     // --upload-script
                backup_script = optarg;
                break;
            case 10:    // --rebuild-statcache
                flag_rebuild_statcache = true;
                break;
            case 11:    // --include
                filter_rules.add_pattern(PathFilterList::INCLUDE, optarg, "");
                break;
            case 12:    // --exclude
                filter_rules.add_pattern(PathFilterList::EXCLUDE, optarg, "");
                break;
            case 13:    // --dir-merge
                filter_rules.add_pattern(PathFilterList::DIRMERGE, optarg, "");
                break;
            default:
                fprintf(stderr, "Unhandled long option!\n");
                usage(argv[0]);
                return 1;
            }
        } else {
            switch (c) {
            case 'v':
                verbose = true;
                break;
            default:
                usage(argv[0]);
                return 1;
            }
        }
    }
    if (optind == argc) {
        usage(argv[0]);
        return 1;
    }
    if (backup_dest == "" && backup_script == "") {
        fprintf(stderr,
                "Error: Backup destination must be specified using --dest= or --upload-script=\n");
        usage(argv[0]);
        return 1;
    }

    if (backup_dest != "" && backup_script != "") {
        fprintf(stderr,
                "Error: Cannot specify both --dest= and --upload-script=\n");
        usage(argv[0]);
        return 1;
    }
    // Default for --localdb is the same as --dest
    if (localdb_dir == "") {
        localdb_dir = backup_dest;
    }
    if (localdb_dir == "") {
        fprintf(stderr,
                "Error: Must specify local database path with --localdb=\n");
        usage(argv[0]);
        return 1;
    }
    block_buf = new char[LBS_BLOCK_SIZE];
    /* Initialize the remote storage layer.  If using an upload script, create
     * a temporary directory for staging files.  Otherwise, write backups
     * directly to the destination directory. */
    if (backup_script != "") {
        tmp_dir = tmp_dir + "/lbs." + generate_uuid();
        if (mkdir(tmp_dir.c_str(), 0700) < 0) {
            fprintf(stderr, "Cannot create temporary directory %s: %m\n",
                    tmp_dir.c_str());
            return 1;
        }
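        /* The self-assignment below is a named-argument idiom: the expression
         * evaluates to backup_script while documenting which constructor
         * parameter is being supplied. */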
        remote = new RemoteStore(tmp_dir, backup_script=backup_script);
    } else {
        remote = new RemoteStore(backup_dest);
    }
    /* Store the time when the backup started, so it can be included in the
     * snapshot name. */
    time_t now;
    struct tm time_buf_local, time_buf_utc;
    char desc_buf[256];
    time(&now);
    localtime_r(&now, &time_buf_local);
    gmtime_r(&now, &time_buf_utc);
    strftime(desc_buf, sizeof(desc_buf), "%Y%m%dT%H%M%S", &time_buf_utc);
    /* Open the local database which tracks all objects that are stored
     * remotely, for efficient incrementals.  Provide it with the name of this
     * snapshot. */
    string database_path = localdb_dir + "/localdb.sqlite";
    db = new LocalDb;
    db->Open(database_path.c_str(), desc_buf, backup_scheme.c_str(),
             snapshot_intent);
    tss = new TarSegmentStore(remote, db);
    /* Initialize the stat cache, for skipping over unchanged files. */
    metawriter = new MetadataWriter(tss, localdb_dir.c_str(), desc_buf,
                                    backup_scheme.c_str());
    for (int i = optind; i < argc; i++) {
        scanfile(argv[i]);
    }
    ObjectReference root_ref = metawriter->close();
    add_segment(root_ref.get_segment());
    string backup_root = root_ref.to_string();

    delete metawriter;

    tss->sync();
    tss->dump_stats();
    delete tss;
    /* Write out a checksums file which lists the checksums for all the
     * segments included in this snapshot.  The format is designed so that it
     * may be easily verified using the sha1sums command. */
    const char csum_type[] = "sha1";
    string checksum_filename = "snapshot-";
    if (backup_scheme.size() > 0)
        checksum_filename += backup_scheme + "-";
    checksum_filename = checksum_filename + desc_buf + "." + csum_type + "sums";
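    /* The result has the form "snapshot-SCHEME-YYYYMMDDTHHMMSS.sha1sums";
     * the scheme component is omitted when no --scheme was given. */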
    RemoteFile *checksum_file = remote->alloc_file(checksum_filename,
                                                   "checksums");
    FILE *checksums = fdopen(checksum_file->get_fd(), "w");
    for (std::set<string>::iterator i = segment_list.begin();
         i != segment_list.end(); ++i) {
        string seg_path, seg_csum;
        if (db->GetSegmentChecksum(*i, &seg_path, &seg_csum)) {
            const char *raw_checksum = NULL;

            /* Segment checksums are stored in the local database in the form
             * "sha1=<hex digest>"; strip off the algorithm prefix. */
            if (strncmp(seg_csum.c_str(), csum_type,
                        strlen(csum_type)) == 0) {
                raw_checksum = seg_csum.c_str() + strlen(csum_type);
                if (*raw_checksum == '=')
                    raw_checksum++;
                else
                    raw_checksum = NULL;
            }

            if (raw_checksum != NULL)
                fprintf(checksums, "%s *%s\n",
                        raw_checksum, seg_path.c_str());
        }
    }
    fclose(checksums);
    /* Compute the checksum of the checksums file itself, so it can be
     * recorded in the backup descriptor. */
    string csum;
    SHA1Checksum checksum_csum;
    checksum_filename = checksum_file->get_local_path();
    if (checksum_csum.process_file(checksum_filename.c_str())) {
        csum = checksum_csum.checksum_str();
    }

    checksum_file->send();
    /* All other files should be flushed to remote storage before writing the
     * backup descriptor below, so that it is not possible to have a backup
     * descriptor written out depending on non-existent (not yet written)
     * files. */
    remote->sync();
    /* Write a backup descriptor file, which says which segments are needed and
     * where to start to restore this snapshot.  The filename is based on the
     * current time.  If a signature filter program was specified, filter the
     * data through that to give a chance to sign the descriptor contents. */
    string desc_filename = "snapshot-";
    if (backup_scheme.size() > 0)
        desc_filename += backup_scheme + "-";
    desc_filename = desc_filename + desc_buf + ".lbs";
    RemoteFile *descriptor_file = remote->alloc_file(desc_filename,
                                                     "snapshots");
    int descriptor_fd = descriptor_file->get_fd();
    if (descriptor_fd < 0) {
        fprintf(stderr, "Unable to open descriptor output file: %m\n");
        return 1;
    }
    pid_t signature_pid = 0;
    if (signature_filter.size() > 0) {
        int new_fd = spawn_filter(descriptor_fd, signature_filter.c_str(),
                                  &signature_pid);
        close(descriptor_fd);
        descriptor_fd = new_fd;
    }
    FILE *descriptor = fdopen(descriptor_fd, "w");
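    /* The descriptor is a short header of "Key: value" lines; a hypothetical
     * example:
     *
     *     Format: Cumulus Snapshot v0.11
     *     Producer: Cumulus v0.11
     *     Date: 2012-01-01 12:00:00 +0000
     *     Root: 5a19b6c7-.../00000000
     *     Segments:
     *         5a19b6c7-...
     */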
    fprintf(descriptor, "Format: Cumulus Snapshot v0.11\n");
    fprintf(descriptor, "Producer: Cumulus %s\n", cumulus_version);
    strftime(desc_buf, sizeof(desc_buf), "%Y-%m-%d %H:%M:%S %z",
             &time_buf_local);
    fprintf(descriptor, "Date: %s\n", desc_buf);
    if (backup_scheme.size() > 0)
        fprintf(descriptor, "Scheme: %s\n", backup_scheme.c_str());
    fprintf(descriptor, "Backup-Intent: %g\n", snapshot_intent);
    fprintf(descriptor, "Root: %s\n", backup_root.c_str());

    if (csum.size() > 0) {
        fprintf(descriptor, "Checksums: %s\n", csum.c_str());
    }

    fprintf(descriptor, "Segments:\n");
    for (std::set<string>::iterator i = segment_list.begin();
         i != segment_list.end(); ++i) {
        fprintf(descriptor, "    %s\n", i->c_str());
    }

    fclose(descriptor);
    if (signature_pid) {
        int status;
        waitpid(signature_pid, &status, 0);

        if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
            fatal("Signature filter process error");
        }
    }

    descriptor_file->send();

    remote->sync();
    delete remote;
    if (backup_script != "") {
        if (rmdir(tmp_dir.c_str()) < 0) {
            fprintf(stderr,
                    "Warning: Cannot delete temporary directory %s: %m\n",
                    tmp_dir.c_str());
        }
    }

    return 0;
}