-/* Recursively descend the filesystem and visit each file. */
+/* Cumulus: Smart Filesystem Backup to Dumb Servers
+ *
+ * Copyright (C) 2006-2008 The Regents of the University of California
+ * Written by Michael Vrable <mvrable@cs.ucsd.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/* Main entry point for Cumulus. Contains logic for traversing the filesystem
+ * and constructing a backup. */
#include <dirent.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
+#include <string.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <sys/types.h>
+#include <sys/wait.h>
#include <unistd.h>
#include <algorithm>
#include <vector>
#include "localdb.h"
+#include "metadata.h"
+#include "remote.h"
#include "store.h"
#include "sha1.h"
-#include "statcache.h"
+#include "subfile.h"
#include "util.h"
using std::list;
using std::ostream;
/* Version information. This will be filled in by the Makefile. */
-#ifndef LBS_VERSION
-#define LBS_VERSION Unknown
+#ifndef CUMULUS_VERSION
+#define CUMULUS_VERSION Unknown
#endif
-#define LBS_STRINGIFY(s) LBS_STRINGIFY2(s)
-#define LBS_STRINGIFY2(s) #s
-static const char lbs_version[] = LBS_STRINGIFY(LBS_VERSION);
+#define CUMULUS_STRINGIFY(s) CUMULUS_STRINGIFY2(s)
+#define CUMULUS_STRINGIFY2(s) #s
+static const char cumulus_version[] = CUMULUS_STRINGIFY(CUMULUS_VERSION);
+static RemoteStore *remote = NULL;
static TarSegmentStore *tss = NULL;
+static MetadataWriter *metawriter = NULL;
/* Buffer for holding a single block of data read from a file. */
static const size_t LBS_BLOCK_SIZE = 1024 * 1024;
static char *block_buf;
-static const size_t LBS_METADATA_BLOCK_SIZE = 65536;
-
/* Local database, which tracks objects written in this and previous
* invocations to help in creating incremental snapshots. */
LocalDb *db;
-/* Stat cache, which stored data locally to speed the backup process by quickly
- * skipping files which have not changed. */
-StatCache *statcache;
-
-/* Contents of the root object. This will contain a set of indirect links to
- * the metadata objects. */
-std::ostringstream metadata_root;
-
-/* Buffer for building up metadata. */
-std::ostringstream metadata;
-
/* Keep track of all segments which are needed to reconstruct the snapshot. */
std::set<string> segment_list;
+/* Snapshot intent: 1=daily, 7=weekly, etc. This is not used directly, but is
+ * stored in the local database and can help guide segment cleaning and
+ * snapshot expiration policies. */
+double snapshot_intent = 1.0;
+
/* Selection of files to include/exclude in the snapshot. */
std::list<string> includes; // Paths in which files should be saved
std::list<string> excludes; // Paths which will not be saved
+std::list<string> excluded_names; // Names which will not be saved
+ // (matched against the final path component)
std::list<string> searches; // Directories we don't want to save, but
// do want to descend searching for data
// in included paths
bool relative_paths = true;
-/* Ensure contents of metadata are flushed to an object. */
-void metadata_flush()
-{
- string m = metadata.str();
- if (m.size() == 0)
- return;
-
- /* Write current metadata information to a new object. */
- LbsObject *meta = new LbsObject;
- meta->set_group("metadata");
- meta->set_data(m.data(), m.size());
- meta->write(tss);
- meta->checksum();
+bool flag_rebuild_statcache = false;
- /* Write a reference to this block in the root. */
- ObjectReference ref = meta->get_ref();
- metadata_root << "@" << ref.to_string() << "\n";
- segment_list.insert(ref.get_segment());
+/* Whether verbose output is enabled. */
+bool verbose = false;
- delete meta;
-
- metadata.str("");
+/* Ensure that the given segment is listed as a dependency of the current
+ * snapshot. */
+void add_segment(const string& segment)
+{
+ segment_list.insert(segment);
}
/* Read data from a file descriptor and return the amount of data read. A
/* Look up this file in the old stat cache, if we can. If the stat
* information indicates that the file has not changed, do not bother
- * re-reading the entire contents. */
+ * re-reading the entire contents. Even if the information has changed, we
+ * can still use the list of old blocks in the search for a sub-block
+ * incremental representation. */
bool cached = false;
+ list<ObjectReference> old_blocks;
- if (statcache->Find(path, &stat_buf)) {
+ bool found = metawriter->find(path);
+ if (found)
+ old_blocks = metawriter->get_blocks();
+
+ if (found
+ && !flag_rebuild_statcache
+ && metawriter->is_unchanged(&stat_buf)) {
cached = true;
- const list<ObjectReference> &blocks = statcache->get_blocks();
/* If any of the blocks in the object have been expired, then we should
* fall back to fully reading in the file. */
- for (list<ObjectReference>::const_iterator i = blocks.begin();
- i != blocks.end(); ++i) {
+ for (list<ObjectReference>::const_iterator i = old_blocks.begin();
+ i != old_blocks.end(); ++i) {
const ObjectReference &ref = *i;
if (!db->IsAvailable(ref)) {
cached = false;
/* If everything looks okay, use the cached information */
if (cached) {
- file_info["checksum"] = statcache->get_checksum();
- for (list<ObjectReference>::const_iterator i = blocks.begin();
- i != blocks.end(); ++i) {
+ file_info["checksum"] = metawriter->get_checksum();
+ for (list<ObjectReference>::const_iterator i = old_blocks.begin();
+ i != old_blocks.end(); ++i) {
const ObjectReference &ref = *i;
object_list.push_back(ref.to_string());
- segment_list.insert(ref.get_segment());
+ if (ref.is_normal())
+ add_segment(ref.get_segment());
db->UseObject(ref);
}
size = stat_buf.st_size;
* time. */
if (!cached) {
SHA1Checksum hash;
+ Subfile subfile(db);
+ subfile.load_old_blocks(old_blocks);
+
while (true) {
ssize_t bytes = file_read(fd, block_buf, LBS_BLOCK_SIZE);
if (bytes == 0)
hash.process(block_buf, bytes);
+ // Sparse file processing: if we read a block of all zeroes, encode
+ // that explicitly.
+ bool all_zero = true;
+ for (int i = 0; i < bytes; i++) {
+ if (block_buf[i] != 0) {
+ all_zero = false;
+ break;
+ }
+ }
+
// Either find a copy of this block in an already-existing segment,
// or index it so it can be re-used in the future
double block_age = 0.0;
+ ObjectReference ref;
+
SHA1Checksum block_hash;
block_hash.process(block_buf, bytes);
string block_csum = block_hash.checksum_str();
- ObjectReference ref = db->FindObject(block_csum, bytes);
+
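+ // A block of all zeroes is recorded as a special zero reference that
+ // carries only a length; no object data needs to be written to a segment.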
+ if (all_zero) {
+ ref = ObjectReference(ObjectReference::REF_ZERO);
+ ref.set_range(0, bytes);
+ } else {
+ ref = db->FindObject(block_csum, bytes);
+ }
+
+ list<ObjectReference> refs;
// Store a copy of the object if one does not yet exist
- if (ref.get_segment().size() == 0) {
+ if (ref.is_null()) {
LbsObject *o = new LbsObject;
+ int object_group;
/* We might still have seen this checksum before, if the object
* was stored at some time in the past, but we have decided to
* Additionally, keep track of the age of the data by looking
* up the age of the block which was expired and using that
* instead of the current time. */
- if (db->IsOldObject(block_csum, bytes, &block_age)) {
- o->set_group("compacted");
+ if (db->IsOldObject(block_csum, bytes,
+ &block_age, &object_group)) {
+ if (object_group == 0) {
+ o->set_group("data");
+ } else {
+ char group[32];
+ sprintf(group, "compacted-%d", object_group);
+ o->set_group(group);
+ }
if (status == NULL)
status = "partial";
} else {
status = "new";
}
- o->set_data(block_buf, bytes);
- o->write(tss);
- ref = o->get_ref();
- db->StoreObject(ref, block_csum, bytes, block_age);
- delete o;
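+ /* Rather than always storing the full block, hand it to the Subfile
+ * module: using the signatures loaded from the old blocks it can emit a
+ * mix of references into existing objects plus any genuinely new data,
+ * giving a sub-block incremental encoding. */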
+ subfile.analyze_new_block(block_buf, bytes);
+ refs = subfile.create_incremental(tss, o, block_age);
+ } else {
+ if (flag_rebuild_statcache && ref.is_normal()) {
+ subfile.analyze_new_block(block_buf, bytes);
+ subfile.store_analyzed_signatures(ref);
+ }
+ refs.push_back(ref);
}
- object_list.push_back(ref.to_string());
- segment_list.insert(ref.get_segment());
- db->UseObject(ref);
+ while (!refs.empty()) {
+ ref = refs.front(); refs.pop_front();
+ object_list.push_back(ref.to_string());
+ if (ref.is_normal())
+ add_segment(ref.get_segment());
+ db->UseObject(ref);
+ }
size += bytes;
if (status == NULL)
file_info["checksum"] = hash.checksum_str();
}
- if (status != NULL)
- printf(" [%s]\n", status);
-
- statcache->Save(path, &stat_buf, file_info["checksum"], object_list);
-
- /* For files that only need to be broken apart into a few objects, store
- * the list of objects directly. For larger files, store the data
- * out-of-line and provide a pointer to the indrect object. */
- if (object_list.size() < 8) {
- string blocklist = "";
- for (list<string>::iterator i = object_list.begin();
- i != object_list.end(); ++i) {
- if (i != object_list.begin())
- blocklist += " ";
- blocklist += *i;
- }
- file_info["data"] = blocklist;
- } else {
- string blocklist = "";
- for (list<string>::iterator i = object_list.begin();
- i != object_list.end(); ++i) {
- blocklist += *i + "\n";
+ // Sanity check: if we are rebuilding the statcache but the file looks
+ // unchanged, the newly-computed checksum should match the one recorded in
+ // the statcache. A mismatch may indicate disk corruption, so report a
+ // warning.
+ if (flag_rebuild_statcache) {
+ if (found
+ && metawriter->is_unchanged(&stat_buf)
+ && file_info["checksum"] != metawriter->get_checksum()) {
+ fprintf(stderr,
+ "Warning: Checksum for %s does not match expected value\n"
+ " expected: %s\n"
+ " actual: %s\n",
+ path.c_str(),
+ metawriter->get_checksum().c_str(),
+ file_info["checksum"].c_str());
}
+ }
- LbsObject *i = new LbsObject;
- i->set_group("metadata");
- i->set_data(blocklist.data(), blocklist.size());
- i->write(tss);
- file_info["data"] = "@" + i->get_name();
- segment_list.insert(i->get_ref().get_segment());
- delete i;
+ if (verbose && status != NULL)
+ printf(" [%s]\n", status);
+
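+ /* The "data" field lists the object references that make up the file
+ * contents; the newline-plus-space separator places each reference after
+ * the first on its own continuation line in the metadata log. */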
+ string blocklist = "";
+ for (list<string>::iterator i = object_list.begin();
+ i != object_list.end(); ++i) {
+ if (i != object_list.begin())
+ blocklist += "\n ";
+ blocklist += *i;
}
+ file_info["data"] = blocklist;
return size;
}
int64_t file_size;
ssize_t len;
- printf("%s\n", path.c_str());
+ if (verbose)
+ printf("%s\n", path.c_str());
+ metawriter->find(path);
+ file_info["name"] = uri_encode(path);
file_info["mode"] = encode_int(stat_buf.st_mode & 07777, 8);
+ file_info["ctime"] = encode_int(stat_buf.st_ctime);
file_info["mtime"] = encode_int(stat_buf.st_mtime);
file_info["user"] = encode_int(stat_buf.st_uid);
file_info["group"] = encode_int(stat_buf.st_gid);
+ time_t now = time(NULL);
+ if (now - stat_buf.st_ctime < 30 || now - stat_buf.st_mtime < 30)
+ if ((stat_buf.st_mode & S_IFMT) != S_IFDIR)
+ file_info["volatile"] = "1";
+
struct passwd *pwd = getpwuid(stat_buf.st_uid);
- if (pwd != NULL) {
+ if (pwd != NULL && pwd->pw_name != NULL) {
file_info["user"] += " (" + uri_encode(pwd->pw_name) + ")";
}
struct group *grp = getgrgid(stat_buf.st_gid);
- if (pwd != NULL) {
+ if (grp != NULL && grp->gr_name != NULL) {
file_info["group"] += " (" + uri_encode(grp->gr_name) + ")";
}
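+ /* Record the link count (for non-directories with multiple links) and the
+ * device/inode numbers so that hard-linked files can be recognized when
+ * the snapshot is later processed. */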
+ if (stat_buf.st_nlink > 1 && (stat_buf.st_mode & S_IFMT) != S_IFDIR) {
+ file_info["links"] = encode_int(stat_buf.st_nlink);
+ }
+
+ file_info["inode"] = encode_int(major(stat_buf.st_dev))
+ + "/" + encode_int(minor(stat_buf.st_dev))
+ + "/" + encode_int(stat_buf.st_ino);
+
char inode_type;
switch (stat_buf.st_mode & S_IFMT) {
fprintf(stderr, "error reading symlink: %m\n");
} else if (len <= stat_buf.st_size) {
buf[len] = '\0';
- file_info["contents"] = uri_encode(buf);
+ file_info["target"] = uri_encode(buf);
} else if (len > stat_buf.st_size) {
fprintf(stderr, "error reading symlink: name truncated\n");
}
delete[] buf;
break;
case S_IFREG:
- inode_type = '-';
+ inode_type = 'f';
file_size = dumpfile(fd, file_info, path, stat_buf);
file_info["size"] = encode_int(file_size);
- close(fd);
if (file_size < 0)
return; // error occurred; do not dump file
if (file_size != stat_buf.st_size) {
fprintf(stderr, "Warning: Size of %s changed during reading\n",
path.c_str());
+ file_info["volatile"] = "1";
}
break;
file_info["type"] = string(1, inode_type);
- metadata << "name: " << uri_encode(path) << "\n";
- dict_output(metadata, file_info);
- metadata << "\n";
-
- // Break apart metadata listing if it becomes too large.
- if (metadata.str().size() > LBS_METADATA_BLOCK_SIZE)
- metadata_flush();
+ metawriter->add(file_info);
}
void scanfile(const string& path, bool include)
for (list<string>::iterator i = includes.begin();
i != includes.end(); ++i) {
if (path == *i) {
- printf("Including %s\n", path.c_str());
include = true;
}
}
for (list<string>::iterator i = excludes.begin();
i != excludes.end(); ++i) {
if (path == *i) {
- printf("Excluding %s\n", path.c_str());
include = false;
}
}
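+ /* --exclude-name matches are made against only the final path component,
+ * so a listed name excludes any file or directory with that name, wherever
+ * it appears in the tree. */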
+ if (excluded_names.size() > 0) {
+ std::string name = path;
+ std::string::size_type last_slash = name.rfind('/');
+ if (last_slash != std::string::npos) {
+ name.replace(0, last_slash + 1, "");
+ }
+
+ for (list<string>::iterator i = excluded_names.begin();
+ i != excluded_names.end(); ++i) {
+ if (name == *i) {
+ include = false;
+ }
+ }
+ }
+
for (list<string>::iterator i = searches.begin();
i != searches.end(); ++i) {
if (path == *i) {
- printf("Scanning %s\n", path.c_str());
scan_only = true;
}
}
* themselves are excluded from being backed up. */
void add_include(const char *path)
{
- printf("Add: %s\n", path);
/* Was an absolute path specified? If so, we'll need to start scanning
* from the root directory. Make sure that the user was consistent in
* providing either all relative paths or all absolute paths. */
{
fprintf(
stderr,
+ "Cumulus %s\n\n"
"Usage: %s [OPTION]... --dest=DEST PATHS...\n"
"Produce backup snapshot of files in SOURCE and store to DEST.\n"
"\n"
"Options:\n"
- " --dest=PATH path where backup is to be written [REQUIRED]\n"
+ " --dest=PATH path where backup is to be written\n"
+ " --upload-script=COMMAND\n"
+ " program to invoke for each backup file generated\n"
" --exclude=PATH exclude files in PATH from snapshot\n"
+ " --exclude-name=NAME exclude files called NAME from snapshot\n"
" --localdb=PATH local backup metadata is stored in PATH\n"
+ " --tmpdir=PATH path for temporarily storing backup files\n"
+ " (defaults to TMPDIR environment variable or /tmp)\n"
" --filter=COMMAND program through which to filter segment data\n"
" (defaults to \"bzip2 -c\")\n"
" --filter-extension=EXT\n"
" string to append to segment files\n"
" (defaults to \".bz2\")\n"
- " --scheme=NAME optional name for this snapshot\n",
- program
+ " --signature-filter=COMMAND\n"
+ " program though which to filter descriptor\n"
+ " --scheme=NAME optional name for this snapshot\n"
+ " --intent=FLOAT intended backup type: 1=daily, 7=weekly, ...\n"
+ " (defaults to \"1\")\n"
+ " --full-metadata do not re-use metadata from previous backups\n"
+ " --rebuild-statcache re-read all file data to verify statcache\n"
+ " -v --verbose list files as they are backed up\n"
+ "\n"
+ "Exactly one of --dest or --upload-script must be specified.\n",
+ cumulus_version, program
);
}
int main(int argc, char *argv[])
{
- string backup_dest = "";
+ string backup_dest = "", backup_script = "";
string localdb_dir = "";
string backup_scheme = "";
+ string signature_filter = "";
+
+ string tmp_dir = "/tmp";
+ if (getenv("TMPDIR") != NULL)
+ tmp_dir = getenv("TMPDIR");
while (1) {
static struct option long_options[] = {
{"filter-extension", 1, 0, 0}, // 3
{"dest", 1, 0, 0}, // 4
{"scheme", 1, 0, 0}, // 5
+ {"signature-filter", 1, 0, 0}, // 6
+ {"intent", 1, 0, 0}, // 7
+ {"full-metadata", 0, 0, 0}, // 8
+ {"tmpdir", 1, 0, 0}, // 9
+ {"upload-script", 1, 0, 0}, // 10
+ {"rebuild-statcache", 0, 0, 0}, // 11
+ {"exclude-name", 1, 0, 0}, // 12
+ // Aliases for short options
+ {"verbose", 0, 0, 'v'},
{NULL, 0, 0, 0},
};
int long_index;
- int c = getopt_long(argc, argv, "", long_options, &long_index);
+ int c = getopt_long(argc, argv, "v", long_options, &long_index);
if (c == -1)
break;
case 5: // --scheme
backup_scheme = optarg;
break;
+ case 6: // --signature-filter
+ signature_filter = optarg;
+ break;
+ case 7: // --intent
+ snapshot_intent = atof(optarg);
+ if (snapshot_intent <= 0)
+ snapshot_intent = 1;
+ break;
+ case 8: // --full-metadata
+ flag_full_metadata = true;
+ break;
+ case 9: // --tmpdir
+ tmp_dir = optarg;
+ break;
+ case 10: // --upload-script
+ backup_script = optarg;
+ break;
+ case 11: // --rebuild-statcache
+ flag_rebuild_statcache = true;
+ break;
+ case 12: // --exclude-name
+ excluded_names.push_back(optarg);
+ break;
default:
fprintf(stderr, "Unhandled long option!\n");
return 1;
}
} else {
- usage(argv[0]);
- return 1;
+ switch (c) {
+ case 'v':
+ verbose = true;
+ break;
+ default:
+ usage(argv[0]);
+ return 1;
+ }
}
}
for (int i = optind; i < argc; i++)
add_include(argv[i]);
- if (backup_dest == "") {
+ if (backup_dest == "" && backup_script == "") {
fprintf(stderr,
- "Error: Backup destination must be specified with --dest=\n");
+ "Error: Backup destination must be specified using --dest= or --upload-script=\n");
+ usage(argv[0]);
+ return 1;
+ }
+
+ if (backup_dest != "" && backup_script != "") {
+ fprintf(stderr,
+ "Error: Cannot specify both --dest= and --upload-script=\n");
usage(argv[0]);
return 1;
}
if (localdb_dir == "") {
localdb_dir = backup_dest;
}
-
- // Dump paths for debugging/informational purposes
- {
- list<string>::const_iterator i;
-
- printf("LBS Version: %s\n", lbs_version);
-
- printf("--dest=%s\n--localdb=%s\n\n",
- backup_dest.c_str(), localdb_dir.c_str());
-
- printf("Includes:\n");
- for (i = includes.begin(); i != includes.end(); ++i)
- printf(" %s\n", i->c_str());
-
- printf("Excludes:\n");
- for (i = excludes.begin(); i != excludes.end(); ++i)
- printf(" %s\n", i->c_str());
-
- printf("Searching:\n");
- for (i = searches.begin(); i != searches.end(); ++i)
- printf(" %s\n", i->c_str());
+ if (localdb_dir == "") {
+ fprintf(stderr,
+ "Error: Must specify local database path with --localdb=\n");
+ usage(argv[0]);
+ return 1;
}
- tss = new TarSegmentStore(backup_dest);
block_buf = new char[LBS_BLOCK_SIZE];
+ /* Initialize the remote storage layer. If using an upload script, create
+ * a temporary directory for staging files. Otherwise, write backups
+ * directly to the destination directory. */
+ if (backup_script != "") {
+ tmp_dir = tmp_dir + "/lbs." + generate_uuid();
+ if (mkdir(tmp_dir.c_str(), 0700) < 0) {
+ fprintf(stderr, "Cannot create temporary directory %s: %m\n",
+ tmp_dir.c_str());
+ return 1;
+ }
+ remote = new RemoteStore(tmp_dir, backup_script);
+ } else {
+ remote = new RemoteStore(backup_dest);
+ }
+
/* Store the time when the backup started, so it can be included in the
* snapshot name. */
time_t now;
* snapshot. */
string database_path = localdb_dir + "/localdb.sqlite";
db = new LocalDb;
- db->Open(database_path.c_str(), desc_buf,
- backup_scheme.size() ? backup_scheme.c_str() : NULL);
+ db->Open(database_path.c_str(), desc_buf, backup_scheme.c_str(),
+ snapshot_intent);
+
+ tss = new TarSegmentStore(remote, db);
/* Initialize the stat cache, for skipping over unchanged files. */
- statcache = new StatCache;
- statcache->Open(localdb_dir.c_str(), desc_buf,
- backup_scheme.size() ? backup_scheme.c_str() : NULL);
+ metawriter = new MetadataWriter(tss, localdb_dir.c_str(), desc_buf,
+ backup_scheme.c_str());
scanfile(".", false);
- metadata_flush();
- const string md = metadata_root.str();
-
- LbsObject *root = new LbsObject;
- root->set_group("metadata");
- root->set_data(md.data(), md.size());
- root->write(tss);
- root->checksum();
- segment_list.insert(root->get_ref().get_segment());
-
- string backup_root = root->get_ref().to_string();
- delete root;
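+ /* Close out the metadata log; the returned reference to its root object
+ * is what the backup descriptor's "Root:" field will point at. */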
+ ObjectReference root_ref = metawriter->close();
+ add_segment(root_ref.get_segment());
+ string backup_root = root_ref.to_string();
- db->Close();
-
- statcache->Close();
- delete statcache;
+ delete metawriter;
tss->sync();
tss->dump_stats();
delete tss;
+ /* Write out a checksums file which lists the checksums for all the
+ * segments included in this snapshot. The format is designed so that it
+ * may be easily verified using the sha1sum command. */
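+ /* As a hypothetical example, a snapshot stamped 20080101T120000 with no
+ * scheme name yields snapshot-20080101T120000.sha1sums, which can later be
+ * checked against the stored segments with:
+ * sha1sum -c snapshot-20080101T120000.sha1sums */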
+ const char csum_type[] = "sha1";
+ string checksum_filename = "snapshot-";
+ if (backup_scheme.size() > 0)
+ checksum_filename += backup_scheme + "-";
+ checksum_filename = checksum_filename + desc_buf + "." + csum_type + "sums";
+ RemoteFile *checksum_file = remote->alloc_file(checksum_filename,
+ "checksums");
+ FILE *checksums = fdopen(checksum_file->get_fd(), "w");
+
+ for (std::set<string>::iterator i = segment_list.begin();
+ i != segment_list.end(); ++i) {
+ string seg_path, seg_csum;
+ if (db->GetSegmentChecksum(*i, &seg_path, &seg_csum)) {
+ const char *raw_checksum = NULL;
+ if (strncmp(seg_csum.c_str(), csum_type,
+ strlen(csum_type)) == 0) {
+ raw_checksum = seg_csum.c_str() + strlen(csum_type);
+ if (*raw_checksum == '=')
+ raw_checksum++;
+ else
+ raw_checksum = NULL;
+ }
+
+ if (raw_checksum != NULL)
+ fprintf(checksums, "%s *%s\n",
+ raw_checksum, seg_path.c_str());
+ }
+ }
+ fclose(checksums);
+
+ SHA1Checksum checksum_csum;
+ string csum;
+ checksum_filename = checksum_file->get_local_path();
+ if (checksum_csum.process_file(checksum_filename.c_str())) {
+ csum = checksum_csum.checksum_str();
+ }
+
+ checksum_file->send();
+
+ db->Close();
+
+ /* All other files should be flushed to remote storage before the backup
+ * descriptor below is written, so that the descriptor can never refer to
+ * files which do not yet exist on the remote side. */
+ remote->sync();
+
/* Write a backup descriptor file, which says which segments are needed and
* where to start to restore this snapshot. The filename is based on the
- * current time. */
- string desc_filename = backup_dest + "/snapshot-";
+ * current time. If a signature filter program was specified, filter the
+ * data through that to give a chance to sign the descriptor contents. */
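+ /* Sketch of the resulting descriptor (values here are hypothetical; the
+ * Scheme: and Checksums: lines appear only when applicable):
+ *
+ * Format: LBS Snapshot v0.8
+ * Producer: Cumulus <version>
+ * Date: 2008-01-01 12:00:00 -0800
+ * Backup-Intent: 1
+ * Root: <segment>/<object>
+ * Segments:
+ * <segment name>
+ * ... */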
+ string desc_filename = "snapshot-";
if (backup_scheme.size() > 0)
desc_filename += backup_scheme + "-";
desc_filename = desc_filename + desc_buf + ".lbs";
- std::ofstream descriptor(desc_filename.c_str());
- descriptor << "Format: LBS Snapshot v0.2\n";
- descriptor << "Producer: LBS " << lbs_version << "\n";
+ RemoteFile *descriptor_file = remote->alloc_file(desc_filename,
+ "snapshots");
+ int descriptor_fd = descriptor_file->get_fd();
+ if (descriptor_fd < 0) {
+ fprintf(stderr, "Unable to open descriptor output file: %m\n");
+ return 1;
+ }
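+ /* When a signature filter was given (for example something like
+ * "gpg --clearsign"), the descriptor text is piped through it on its way
+ * to the output file, so the data stored remotely is the filter's signed
+ * output. */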
+ pid_t signature_pid = 0;
+ if (signature_filter.size() > 0) {
+ int new_fd = spawn_filter(descriptor_fd, signature_filter.c_str(),
+ &signature_pid);
+ close(descriptor_fd);
+ descriptor_fd = new_fd;
+ }
+ FILE *descriptor = fdopen(descriptor_fd, "w");
+
+ fprintf(descriptor, "Format: LBS Snapshot v0.8\n");
+ fprintf(descriptor, "Producer: Cumulus %s\n", cumulus_version);
strftime(desc_buf, sizeof(desc_buf), "%Y-%m-%d %H:%M:%S %z", &time_buf);
- descriptor << "Date: " << desc_buf << "\n";
+ fprintf(descriptor, "Date: %s\n", desc_buf);
if (backup_scheme.size() > 0)
- descriptor << "Scheme: " << backup_scheme << "\n";
- descriptor << "Root: " << backup_root << "\n";
+ fprintf(descriptor, "Scheme: %s\n", backup_scheme.c_str());
+ fprintf(descriptor, "Backup-Intent: %g\n", snapshot_intent);
+ fprintf(descriptor, "Root: %s\n", backup_root.c_str());
+
+ if (csum.size() > 0) {
+ fprintf(descriptor, "Checksums: %s\n", csum.c_str());
+ }
- descriptor << "Segments:\n";
+ fprintf(descriptor, "Segments:\n");
for (std::set<string>::iterator i = segment_list.begin();
i != segment_list.end(); ++i) {
- descriptor << " " << *i << "\n";
+ fprintf(descriptor, " %s\n", i->c_str());
+ }
+
+ fclose(descriptor);
+
+ if (signature_pid) {
+ int status;
+ waitpid(signature_pid, &status, 0);
+
+ if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
+ fatal("Signature filter process error");
+ }
+ }
+
+ descriptor_file->send();
+
+ remote->sync();
+ delete remote;
+
+ if (backup_script != "") {
+ if (rmdir(tmp_dir.c_str()) < 0) {
+ fprintf(stderr,
+ "Warning: Cannot delete temporary directory %s: %m\n",
+ tmp_dir.c_str());
+ }
}
return 0;