/* Recursively descend the filesystem and visit each file. */
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
-#include <sys/types.h>
+#include <getopt.h>
+#include <grp.h>
+#include <pwd.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
#include <sys/stat.h>
+#include <sys/types.h>
#include <unistd.h>
#include <algorithm>
+#include <fstream>
+#include <iostream>
+#include <list>
+#include <set>
+#include <sstream>
#include <string>
#include <vector>
+#include "format.h"
+#include "localdb.h"
#include "store.h"
#include "sha1.h"
+#include "statcache.h"
+using std::list;
using std::string;
using std::vector;
+using std::ostream;
-static SegmentStore *segment_store;
-static OutputStream *info_dump = NULL;
-
-static SegmentPartitioner *index_segment, *data_segment;
+static TarSegmentStore *tss = NULL;
/* Buffer for holding a single block of data read from a file. */
-static const int LBS_BLOCK_SIZE = 1024 * 1024;
+static const size_t LBS_BLOCK_SIZE = 1024 * 1024;
static char *block_buf;
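+
+/* Metadata records accumulate in a buffer and are flushed to a new object
+ * once the accumulated size exceeds this threshold. */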
+static const size_t LBS_METADATA_BLOCK_SIZE = 65536;
+
+/* Local database, which tracks objects written in this and previous
+ * invocations to help in creating incremental snapshots. */
+LocalDb *db;
+
+/* Stat cache, which stores data locally to speed the backup process by quickly
+ * skipping files which have not changed. */
+StatCache *statcache;
+
+/* Contents of the root object. This will contain a set of indirect links to
+ * the metadata objects. */
+std::ostringstream metadata_root;
+
+/* Buffer for building up metadata. */
+std::ostringstream metadata;
+
+/* Keep track of all segments which are needed to reconstruct the snapshot. */
+std::set<string> segment_list;
+
void scandir(const string& path);
-/* Converts time to microseconds since the epoch. */
-int64_t encode_time(time_t time)
+/* Selection of files to exclude from the snapshot. */
+std::list<string> excludes;
+
+/* Ensure contents of metadata are flushed to an object. */
+void metadata_flush()
{
- return (int64_t)time * 1000000;
+ string m = metadata.str();
+ if (m.size() == 0)
+ return;
+
+ /* Write current metadata information to a new object. */
+ LbsObject *meta = new LbsObject;
+ meta->set_group("metadata");
+ meta->set_data(m.data(), m.size());
+ meta->write(tss);
+ meta->checksum();
+
+ /* Write a reference to this block in the root. */
+ ObjectReference ref = meta->get_ref();
+ metadata_root << "@" << ref.to_string() << "\n";
+ segment_list.insert(ref.get_segment());
+
+ delete meta;
+
+ metadata.str("");
}
/* Read data from a file descriptor and return the amount of data read. A
}
/* Read the contents of a file (specified by an open file descriptor) and copy
- * the data to the store. */
-void dumpfile(int fd, dictionary &file_info)
+ * the data to the store. Returns the size of the file (number of bytes
+ * dumped), or -1 on error. */
+int64_t dumpfile(int fd, dictionary &file_info, const string &path)
{
struct stat stat_buf;
fstat(fd, &stat_buf);
int64_t size = 0;
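+    /* References to the objects which will hold this file's data. */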
+ list<string> object_list;
if ((stat_buf.st_mode & S_IFMT) != S_IFREG) {
- printf("file is no longer a regular file!\n");
- return;
+ fprintf(stderr, "file is no longer a regular file!\n");
+ return -1;
}
/* The index data consists of a sequence of pointers to the data blocks
* that actually comprise the file data. This level of indirection is used
* so that the same data block can be used in multiple files, or multiple
* versions of the same file. */
- struct uuid segment_uuid;
- int object_id;
- OutputStream *index_data = index_segment->new_object(&segment_uuid,
- &object_id);
-
SHA1Checksum hash;
while (true) {
- struct uuid block_segment_uuid;
- int block_object_id;
-
size_t bytes = file_read(fd, block_buf, LBS_BLOCK_SIZE);
if (bytes == 0)
break;
hash.process(block_buf, bytes);
- OutputStream *block = data_segment->new_object(&block_segment_uuid,
- &block_object_id);
- block->write(block_buf, bytes);
- index_data->write_uuid(block_segment_uuid);
- index_data->write_u32(block_object_id);
+ // Either find a copy of this block in an already-existing segment, or
+ // index it so it can be re-used in the future
+ double block_age = 0.0;
+ SHA1Checksum block_hash;
+ block_hash.process(block_buf, bytes);
+ string block_csum = block_hash.checksum_str();
+ ObjectReference ref = db->FindObject(block_csum, bytes);
+
+ // Store a copy of the object if one does not yet exist
+ if (ref.get_segment().size() == 0) {
+ LbsObject *o = new LbsObject;
+
+ /* We might still have seen this checksum before, if the object was
+ * stored at some time in the past, but we have decided to clean
+ * the segment the object was originally stored in (FindObject will
+ * not return such objects). When rewriting the object contents,
+ * put it in a separate group, so that old objects get grouped
+ * together. The hope is that these old objects will continue to
+ * be used in the future, and we obtain segments which will
+ * continue to be well-utilized. Additionally, keep track of the
+ * age of the data by looking up the age of the block which was
+ * expired and using that instead of the current time. */
+ if (db->IsOldObject(block_csum, bytes, &block_age))
+ o->set_group("compacted");
+ else
+ o->set_group("data");
+
+ o->set_data(block_buf, bytes);
+ o->write(tss);
+ ref = o->get_ref();
+ db->StoreObject(ref, block_csum, bytes, block_age);
+ delete o;
+ }
+
+ object_list.push_back(ref.to_string());
+ segment_list.insert(ref.get_segment());
+ db->UseObject(ref);
size += bytes;
}
- file_info["sha1"] = string((const char *)hash.checksum(),
- hash.checksum_size());
- file_info["data"] = encode_objref(segment_uuid, object_id);
+ file_info["checksum"] = hash.checksum_str();
+
+ statcache->Save(path, &stat_buf, file_info["checksum"], object_list);
+
+ /* For files that only need to be broken apart into a few objects, store
+ * the list of objects directly. For larger files, store the data
+ * out-of-line and provide a pointer to the indirect object. */
+ if (object_list.size() < 8) {
+ string blocklist = "";
+ for (list<string>::iterator i = object_list.begin();
+ i != object_list.end(); ++i) {
+ if (i != object_list.begin())
+ blocklist += " ";
+ blocklist += *i;
+ }
+ file_info["data"] = blocklist;
+ } else {
+ string blocklist = "";
+ for (list<string>::iterator i = object_list.begin();
+ i != object_list.end(); ++i) {
+ blocklist += *i + "\n";
+ }
+
+ LbsObject *i = new LbsObject;
+ i->set_group("metadata");
+ i->set_data(blocklist.data(), blocklist.size());
+ i->write(tss);
+ file_info["data"] = "@" + i->get_name();
+ segment_list.insert(i->get_ref().get_segment());
+ delete i;
+ }
+
+ return size;
}
void scanfile(const string& path)
struct stat stat_buf;
char *buf;
ssize_t len;
+ int64_t file_size;
+ list<string> refs;
// Set to true if the item is a directory and we should recursively scan
bool recurse = false;
+    // Check this file against the exclusion list to see if it should be
+    // skipped
+ for (list<string>::iterator i = excludes.begin();
+ i != excludes.end(); ++i) {
+ if (path == *i) {
+ printf("Excluding %s\n", path.c_str());
+ return;
+ }
+ }
+
dictionary file_info;
lstat(path.c_str(), &stat_buf);
printf("%s\n", path.c_str());
- file_info["mode"] = encode_u16(stat_buf.st_mode & 07777);
- file_info["atime"] = encode_u64(encode_time(stat_buf.st_atime));
- file_info["ctime"] = encode_u64(encode_time(stat_buf.st_ctime));
- file_info["mtime"] = encode_u64(encode_time(stat_buf.st_mtime));
- file_info["user"] = encode_u32(stat_buf.st_uid);
- file_info["group"] = encode_u32(stat_buf.st_gid);
+ file_info["mode"] = encode_int(stat_buf.st_mode & 07777);
+ file_info["mtime"] = encode_int(stat_buf.st_mtime);
+ file_info["user"] = encode_int(stat_buf.st_uid);
+ file_info["group"] = encode_int(stat_buf.st_gid);
+
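+    /* Record the user and group names alongside the numeric IDs, when they
+     * can be resolved. */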
+ struct passwd *pwd = getpwuid(stat_buf.st_uid);
+ if (pwd != NULL) {
+ file_info["user"] += " (" + uri_encode(pwd->pw_name) + ")";
+ }
+
+ struct group *grp = getgrgid(stat_buf.st_gid);
+    if (grp != NULL) {
+ file_info["group"] += " (" + uri_encode(grp->gr_name) + ")";
+ }
char inode_type;
buf = new char[stat_buf.st_size + 2];
len = readlink(path.c_str(), buf, stat_buf.st_size + 1);
if (len < 0) {
- printf("error reading symlink: %m\n");
+ fprintf(stderr, "error reading symlink: %m\n");
} else if (len <= stat_buf.st_size) {
buf[len] = '\0';
- printf(" contents=%s\n", buf);
+ file_info["contents"] = uri_encode(buf);
} else if (len > stat_buf.st_size) {
- printf("error reading symlink: name truncated\n");
+ fprintf(stderr, "error reading symlink: name truncated\n");
}
- file_info["contents"] = buf;
-
delete[] buf;
break;
case S_IFREG:
* - O_NONBLOCK: prevents open() from blocking if the file was
* replaced by a fifo
* We also add in O_NOATIME, since this may reduce disk writes (for
- * inode updates). */
+ * inode updates). However, O_NOATIME may result in EPERM, so if the
+ * initial open fails, try again without O_NOATIME. */
fd = open(path.c_str(), O_RDONLY|O_NOATIME|O_NOFOLLOW|O_NONBLOCK);
+ if (fd < 0) {
+ fd = open(path.c_str(), O_RDONLY|O_NOFOLLOW|O_NONBLOCK);
+ }
+ if (fd < 0) {
+ fprintf(stderr, "Unable to open file %s: %m\n", path.c_str());
+ return;
+ }
/* Drop the use of the O_NONBLOCK flag; we only wanted that for file
* open. */
flags = fcntl(fd, F_GETFL);
fcntl(fd, F_SETFL, flags & ~O_NONBLOCK);
- file_info["size"] = encode_u64(stat_buf.st_size);
- dumpfile(fd, file_info);
+ file_size = dumpfile(fd, file_info, path);
+ file_info["size"] = encode_int(file_size);
close(fd);
+ if (file_size < 0)
+ return; // error occurred; do not dump file
+
+ if (file_size != stat_buf.st_size) {
+ fprintf(stderr, "Warning: Size of %s changed during reading\n",
+ path.c_str());
+ }
+
break;
case S_IFDIR:
inode_type = 'd';
file_info["type"] = string(1, inode_type);
- info_dump->write_string(path);
- info_dump->write_dictionary(file_info);
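+    /* Each file is recorded as a text block: a "name:" line giving the
+     * URI-encoded path, the attributes gathered in file_info (written by
+     * dict_output), and a blank line as a separator. */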
+ metadata << "name: " << uri_encode(path) << "\n";
+ dict_output(metadata, file_info);
+ metadata << "\n";
+
+ // Break apart metadata listing if it becomes too large.
+ if (metadata.str().size() > LBS_METADATA_BLOCK_SIZE)
+ metadata_flush();
// If we hit a directory, now that we've written the directory itself,
// recursively scan the directory.
DIR *dir = opendir(path.c_str());
if (dir == NULL) {
- printf("Error: %m\n");
+ fprintf(stderr, "Error: %m\n");
return;
}
for (vector<string>::iterator i = contents.begin();
i != contents.end(); ++i) {
const string& filename = *i;
- scanfile(path + "/" + filename);
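+            // Avoid a leading "./" on stored paths when scanning the
+            // current directory.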
+ if (path == ".")
+ scanfile(filename);
+ else
+ scanfile(path + "/" + filename);
}
closedir(dir);
}
+void usage(const char *program)
+{
+ fprintf(stderr,
+ "Usage: %s [OPTION]... SOURCE DEST\n"
+ "Produce backup snapshot of files in SOURCE and store to DEST.\n"
+ "\n"
+ "Options:\n"
+ " --exclude=PATH exclude files in PATH from snapshot\n"
+ " --localdb=PATH local backup metadata is stored in PATH\n",
+ program);
+}
+
int main(int argc, char *argv[])
{
- block_buf = new char[LBS_BLOCK_SIZE];
+ string backup_source = ".";
+ string backup_dest = ".";
+ string localdb_dir = "";
+
+ while (1) {
+ static struct option long_options[] = {
+ {"localdb", 1, 0, 0}, // 0
+ {"exclude", 1, 0, 0}, // 1
+ {"filter", 1, 0, 0}, // 2
+ {"filter-extension", 1, 0, 0}, // 3
+ {NULL, 0, 0, 0},
+ };
+
+ int long_index;
+ int c = getopt_long(argc, argv, "", long_options, &long_index);
+
+ if (c == -1)
+ break;
+
+ if (c == 0) {
+ switch (long_index) {
+ case 0: // --localdb
+ localdb_dir = optarg;
+ break;
+ case 1: // --exclude
+ excludes.push_back(optarg);
+ break;
+ case 2: // --filter
+ filter_program = optarg;
+ break;
+ case 3: // --filter-extension
+ filter_extension = optarg;
+ break;
+ default:
+ fprintf(stderr, "Unhandled long option!\n");
+ return 1;
+ }
+ } else {
+ usage(argv[0]);
+ return 1;
+ }
+ }
+
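+    /* Expect at least the SOURCE and DEST paths as positional arguments. */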
+ if (argc < optind + 2) {
+ usage(argv[0]);
+ return 1;
+ }
- segment_store = new SegmentStore(".");
- SegmentWriter *sw = segment_store->new_segment();
- info_dump = sw->new_object(NULL);
+ backup_source = argv[optind];
+ backup_dest = argv[argc - 1];
- index_segment = new SegmentPartitioner(segment_store);
- data_segment = new SegmentPartitioner(segment_store);
+ if (localdb_dir == "") {
+ localdb_dir = backup_dest;
+ }
+
+ printf("Source: %s\nDest: %s\nDatabase: %s\n\n",
+ backup_source.c_str(), backup_dest.c_str(), localdb_dir.c_str());
+
+ tss = new TarSegmentStore(backup_dest);
+ block_buf = new char[LBS_BLOCK_SIZE];
- string uuid = SegmentWriter::format_uuid(sw->get_uuid());
- printf("Backup UUID: %s\n", uuid.c_str());
+ /* Store the time when the backup started, so it can be included in the
+ * snapshot name. */
+ time_t now;
+ struct tm time_buf;
+ char desc_buf[256];
+ time(&now);
+ localtime_r(&now, &time_buf);
+ strftime(desc_buf, sizeof(desc_buf), "%Y%m%dT%H%M%S", &time_buf);
+
+ /* Open the local database which tracks all objects that are stored
+ * remotely, for efficient incrementals. Provide it with the name of this
+ * snapshot. */
+ string database_path = localdb_dir + "/localdb.sqlite";
+ db = new LocalDb;
+ db->Open(database_path.c_str(), desc_buf);
+
+ /* Initialize the stat cache, for skipping over unchanged files. */
+ statcache = new StatCache;
+ statcache->Open(localdb_dir.c_str(), desc_buf);
try {
scanfile(".");
fprintf(stderr, "IOException: %s\n", e.getError().c_str());
}
- delete index_segment;
- delete data_segment;
- delete sw;
+ metadata_flush();
+ const string md = metadata_root.str();
+
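+    /* Write out the root object, which holds the indirect references to all
+     * of the metadata objects. */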
+ LbsObject *root = new LbsObject;
+ root->set_group("metadata");
+ root->set_data(md.data(), md.size());
+ root->write(tss);
+ root->checksum();
+ segment_list.insert(root->get_ref().get_segment());
+
+ /* Write a backup descriptor file, which says which segments are needed and
+ * where to start to restore this snapshot. The filename is based on the
+ * current time. */
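+    /* The descriptor is plain text, roughly:
+     *
+     *     Format: LBS Snapshot v0.1
+     *     Date: <timestamp>
+     *     Root: <reference to the root metadata object>
+     *     Segments:
+     *         <segment name>
+     *         ...
+     */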
+ string desc_filename = backup_dest + "/snapshot-" + desc_buf + ".lbs";
+ std::ofstream descriptor(desc_filename.c_str());
+
+ descriptor << "Format: LBS Snapshot v0.1\n";
+ strftime(desc_buf, sizeof(desc_buf), "%Y-%m-%d %H:%M:%S %z", &time_buf);
+ descriptor << "Date: " << desc_buf << "\n";
+ descriptor << "Root: " << root->get_ref().to_string() << "\n";
+
+ delete root;
+
+ descriptor << "Segments:\n";
+ for (std::set<string>::iterator i = segment_list.begin();
+ i != segment_list.end(); ++i) {
+ descriptor << " " << *i << "\n";
+ }
+
+ db->Close();
+
+ statcache->Close();
+ delete statcache;
+
+ tss->sync();
+ delete tss;
return 0;
}