/* Recursively descend the filesystem and visit each file. */
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
-#include <sys/types.h>
+#include <getopt.h>
+#include <grp.h>
+#include <pwd.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
#include <sys/stat.h>
+#include <sys/types.h>
#include <unistd.h>
#include <algorithm>
-#include <string>
-#include <list>
-#include <vector>
-#include <iostream>
#include <fstream>
-#include <sstream>
+#include <iostream>
+#include <list>
#include <set>
+#include <sstream>
+#include <string>
+#include <vector>
#include "format.h"
+#include "localdb.h"
#include "store.h"
#include "sha1.h"
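+/* Approximate size limit for a metadata block; presumably the metadata buffer
+ * is flushed to a new object (see metadata_flush below) once it grows past
+ * this size. */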
static const size_t LBS_METADATA_BLOCK_SIZE = 65536;
+/* Local database, which tracks objects written in this and previous
+ * invocations to help in creating incremental snapshots. */
+LocalDb *db;
+
/* Contents of the root object. This will contain a set of indirect links to
* the metadata objects. */
std::ostringstream metadata_root;
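+
+/* Recursively scan the contents of a directory (defined below). */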
void scandir(const string& path);
+/* Selection of files to include/exclude in the snapshot. */
+std::list<string> excludes;
+
/* Ensure contents of metadata are flushed to an object. */
void metadata_flush()
{
/* Write current metadata information to a new object. */
LbsObject *meta = new LbsObject;
- meta->set_group("root");
+ meta->set_group("metadata");
meta->set_data(m.data(), m.size());
meta->write(tss);
meta->checksum();
}
/* Read the contents of a file (specified by an open file descriptor) and copy
- * the data to the store. */
-void dumpfile(int fd, dictionary &file_info)
+ * the data to the store. Returns the size of the file (number of bytes
+ * dumped), or -1 on error. */
+int64_t dumpfile(int fd, dictionary &file_info)
{
struct stat stat_buf;
fstat(fd, &stat_buf);
if ((stat_buf.st_mode & S_IFMT) != S_IFREG) {
fprintf(stderr, "file is no longer a regular file!\n");
- return;
+ return -1;
}
/* The index data consists of a sequence of pointers to the data blocks
hash.process(block_buf, bytes);
- // tarstore processing
- LbsObject *o = new LbsObject;
- o->set_group("data");
- o->set_data(block_buf, bytes);
- o->write(tss);
- object_list.push_back(o->get_name());
- segment_list.insert(o->get_ref().get_segment());
- delete o;
+ // Either find a copy of this block in an already-existing segment, or
+ // index it so it can be re-used in the future
+ SHA1Checksum block_hash;
+ block_hash.process(block_buf, bytes);
+ string block_csum = block_hash.checksum_str();
+ ObjectReference ref = db->FindObject(block_csum, bytes);
+
+ // Store a copy of the object if one does not yet exist
+ if (ref.get_segment().size() == 0) {
+ LbsObject *o = new LbsObject;
+
+ /* We might still have seen this checksum before, if the object was
+ * stored at some time in the past, but we have decided to clean
+ * the segment the object was originally stored in (FindObject will
+ * not return such objects). When rewriting the object contents,
+ * put it in a separate group, so that old objects get grouped
+ * together. The hope is that these old objects will continue to
+ * be used in the future, and we obtain segments which will
+ * continue to be well-utilized. */
+ if (db->IsOldObject(block_csum, bytes))
+ o->set_group("compacted");
+ else
+ o->set_group("data");
+
+ o->set_data(block_buf, bytes);
+ o->write(tss);
+ ref = o->get_ref();
+ db->StoreObject(ref, block_csum, bytes);
+ delete o;
+ }
+ object_list.push_back(ref.to_string());
+ segment_list.insert(ref.get_segment());
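+    // Record in the local database that this snapshot uses the object,
+    // whether it was newly written above or reused from an earlier segment.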
+ db->UseObject(ref);
size += bytes;
}
}
LbsObject *i = new LbsObject;
- i->set_group("indirect");
+ i->set_group("metadata");
i->set_data(blocklist.data(), blocklist.size());
i->write(tss);
file_info["data"] = "@" + i->get_name();
segment_list.insert(i->get_ref().get_segment());
delete i;
}
+
+ return size;
}
void scanfile(const string& path)
struct stat stat_buf;
char *buf;
ssize_t len;
+ int64_t file_size;
list<string> refs;
// Set to true if the item is a directory and we should recursively scan
bool recurse = false;
+ // Check this file against the include/exclude list to see if it should be
+ // considered
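+    // (Only paths that exactly match an exclude entry are skipped; there is
+    // no prefix or wildcard matching here.)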
+ for (list<string>::iterator i = excludes.begin();
+ i != excludes.end(); ++i) {
+ if (path == *i) {
+ printf("Excluding %s\n", path.c_str());
+ return;
+ }
+ }
+
dictionary file_info;
lstat(path.c_str(), &stat_buf);
printf("%s\n", path.c_str());
- metadata << "name: " << uri_encode(path) << "\n";
-
file_info["mode"] = encode_int(stat_buf.st_mode & 07777);
- file_info["atime"] = encode_int(stat_buf.st_atime);
- file_info["ctime"] = encode_int(stat_buf.st_ctime);
file_info["mtime"] = encode_int(stat_buf.st_mtime);
file_info["user"] = encode_int(stat_buf.st_uid);
file_info["group"] = encode_int(stat_buf.st_gid);
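+    /* Where possible, annotate the numeric uid/gid recorded above with the
+     * corresponding user/group name for readability. */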
+ struct passwd *pwd = getpwuid(stat_buf.st_uid);
+ if (pwd != NULL) {
+ file_info["user"] += " (" + uri_encode(pwd->pw_name) + ")";
+ }
+
+ struct group *grp = getgrgid(stat_buf.st_gid);
+    if (grp != NULL) {
+ file_info["group"] += " (" + uri_encode(grp->gr_name) + ")";
+ }
+
char inode_type;
switch (stat_buf.st_mode & S_IFMT) {
* - O_NONBLOCK: prevents open() from blocking if the file was
* replaced by a fifo
* We also add in O_NOATIME, since this may reduce disk writes (for
- * inode updates). */
+ * inode updates). However, O_NOATIME may result in EPERM, so if the
+ * initial open fails, try again without O_NOATIME. */
fd = open(path.c_str(), O_RDONLY|O_NOATIME|O_NOFOLLOW|O_NONBLOCK);
+ if (fd < 0) {
+ fd = open(path.c_str(), O_RDONLY|O_NOFOLLOW|O_NONBLOCK);
+ }
+ if (fd < 0) {
+ fprintf(stderr, "Unable to open file %s: %m\n", path.c_str());
+ return;
+ }
/* Drop the use of the O_NONBLOCK flag; we only wanted that for file
* open. */
flags = fcntl(fd, F_GETFL);
fcntl(fd, F_SETFL, flags & ~O_NONBLOCK);
- file_info["size"] = encode_int(stat_buf.st_size);
- dumpfile(fd, file_info);
+ file_size = dumpfile(fd, file_info);
+ file_info["size"] = encode_int(file_size);
close(fd);
+ if (file_size < 0)
+        return;             // error occurred; do not record this file
+
+ if (file_size != stat_buf.st_size) {
+ fprintf(stderr, "Warning: Size of %s changed during reading\n",
+ path.c_str());
+ }
+
break;
case S_IFDIR:
inode_type = 'd';
file_info["type"] = string(1, inode_type);
+ metadata << "name: " << uri_encode(path) << "\n";
dict_output(metadata, file_info);
metadata << "\n";
for (vector<string>::iterator i = contents.begin();
i != contents.end(); ++i) {
const string& filename = *i;
- scanfile(path + "/" + filename);
+ if (path == ".")
+ scanfile(filename);
+ else
+ scanfile(path + "/" + filename);
}
closedir(dir);
}
-int main(int argc, char *argv[])
+void usage(const char *program)
{
- block_buf = new char[LBS_BLOCK_SIZE];
+ fprintf(stderr,
+ "Usage: %s [OPTION]... SOURCE DEST\n"
+ "Produce backup snapshot of files in SOURCE and store to DEST.\n"
+ "\n"
+ "Options:\n"
+ " --exclude=PATH exclude files in PATH from snapshot\n"
+            "  --localdb=PATH         local backup metadata is stored in PATH\n"
+            "  --filter=COMMAND       program through which to filter segment data\n"
+            "  --filter-extension=EXT extension to append to segment filenames\n",
+ program);
+}
+int main(int argc, char *argv[])
+{
+ string backup_source = ".";
string backup_dest = ".";
+ string localdb_dir = "";
+
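+    /* Parse command-line options.  All options are long options: getopt_long
+     * returns 0 for each and reports which one matched via long_index. */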
+ while (1) {
+ static struct option long_options[] = {
+ {"localdb", 1, 0, 0}, // 0
+ {"exclude", 1, 0, 0}, // 1
+ {"filter", 1, 0, 0}, // 2
+ {"filter-extension", 1, 0, 0}, // 3
+ {NULL, 0, 0, 0},
+ };
- if (argc > 1)
- backup_dest = argv[1];
+ int long_index;
+ int c = getopt_long(argc, argv, "", long_options, &long_index);
+
+ if (c == -1)
+ break;
+
+ if (c == 0) {
+ switch (long_index) {
+ case 0: // --localdb
+ localdb_dir = optarg;
+ break;
+ case 1: // --exclude
+ excludes.push_back(optarg);
+ break;
+ case 2: // --filter
+ filter_program = optarg;
+ break;
+ case 3: // --filter-extension
+ filter_extension = optarg;
+ break;
+ default:
+ fprintf(stderr, "Unhandled long option!\n");
+ return 1;
+ }
+ } else {
+ usage(argv[0]);
+ return 1;
+ }
+ }
+
+ if (argc < optind + 2) {
+ usage(argv[0]);
+ return 1;
+ }
+
+ backup_source = argv[optind];
+ backup_dest = argv[argc - 1];
+
+ if (localdb_dir == "") {
+ localdb_dir = backup_dest;
+ }
+
+ printf("Source: %s, Dest: %s\n",
+ backup_source.c_str(), backup_dest.c_str());
tss = new TarSegmentStore(backup_dest);
+ block_buf = new char[LBS_BLOCK_SIZE];
- string desc_filename = backup_dest + "/snapshot.lbs";
+ /* Write a backup descriptor file, which says which segments are needed and
+ * where to start to restore this snapshot. The filename is based on the
+ * current time. */
+ time_t now;
+ struct tm time_buf;
+ char desc_buf[256];
+ time(&now);
+ localtime_r(&now, &time_buf);
+ strftime(desc_buf, sizeof(desc_buf), "%Y%m%dT%H%M%S", &time_buf);
+ string desc_filename = backup_dest + "/" + desc_buf + ".lbs";
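+    // e.g. "20070501T123456.lbs" inside backup_dest (timestamp is illustrative)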
std::ofstream descriptor(desc_filename.c_str());
+ /* Open the local database which tracks all objects that are stored
+ * remotely, for efficient incrementals. Provide it with the name of this
+ * snapshot. */
+    string database_path = localdb_dir + "/localdb.sqlite";
+ db = new LocalDb;
+ db->Open(database_path.c_str(), desc_buf);
+
try {
scanfile(".");
} catch (IOException e) {
const string md = metadata_root.str();
LbsObject *root = new LbsObject;
- root->set_group("root");
+ root->set_group("metadata");
root->set_data(md.data(), md.size());
root->write(tss);
root->checksum();
segment_list.insert(root->get_ref().get_segment());
- descriptor << "root: " << root->get_ref().to_string() << "\n\n";
+ descriptor << "Format: LBS Snapshot v0.1\n";
+ strftime(desc_buf, sizeof(desc_buf), "%Y-%m-%d %H:%M:%S %z", &time_buf);
+ descriptor << "Date: " << desc_buf << "\n";
+ descriptor << "Root: " << root->get_ref().to_string() << "\n";
delete root;
- descriptor << "segments:\n";
+ descriptor << "Segments:\n";
for (std::set<string>::iterator i = segment_list.begin();
i != segment_list.end(); ++i) {
descriptor << " " << *i << "\n";
}
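+    /* The descriptor written above ends up looking roughly like the following
+     * (values are illustrative):
+     *
+     *     Format: LBS Snapshot v0.1
+     *     Date: 2007-05-01 12:34:56 -0700
+     *     Root: <reference to the root metadata object>
+     *     Segments:
+     *         <segment name>
+     *         <segment name>
+     */
+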
+ db->Close();
+
tss->sync();
delete tss;