1 /* Recursively descend the filesystem and visit each file. */
13 #include <sys/types.h>
29 #include "statcache.h"
/* Global store handle for writing objects into tar-format segments. */
36 static TarSegmentStore *tss = NULL;
38 /* Buffer for holding a single block of data read from a file. */
39 static const size_t LBS_BLOCK_SIZE = 1024 * 1024;
40 static char *block_buf;
/* Once the in-memory metadata stream exceeds this many bytes it is flushed
 * out to its own object (checked in scanfile after each entry is written). */
42 static const size_t LBS_METADATA_BLOCK_SIZE = 65536;
44 /* Local database, which tracks objects written in this and previous
45  * invocations to help in creating incremental snapshots. */
48 /* Stat cache, which stores data locally to speed the backup process by quickly
49  * skipping files which have not changed. */
52 /* Contents of the root object. This will contain a set of indirect links to
53  * the metadata objects. */
54 std::ostringstream metadata_root;
56 /* Buffer for building up metadata. */
57 std::ostringstream metadata;
59 /* Keep track of all segments which are needed to reconstruct the snapshot. */
60 std::set<string> segment_list;
/* Forward declaration: scanfile() descends into directories via scandir(). */
62 void scandir(const string& path);
64 /* Selection of files to include/exclude in the snapshot. */
65 std::list<string> excludes;
67 /* Ensure contents of metadata are flushed to an object. */
    /* Snapshot everything buffered so far in the `metadata` stream. */
70     string m = metadata.str();
74     /* Write current metadata information to a new object. */
75     LbsObject *meta = new LbsObject;
76     meta->set_group("metadata");
77     meta->set_data(m.data(), m.size());
    /* NOTE(review): `meta` is raw-new'd; the call that writes it out (and any
     * ownership transfer/cleanup) is not visible at this point -- confirm it
     * is not leaked. */
81     /* Write a reference to this block in the root. */
82     ObjectReference ref = meta->get_ref();
83     metadata_root << "@" << ref.to_string() << "\n";
    /* Record the containing segment so the snapshot descriptor can list it. */
84     segment_list.insert(ref.get_segment());
91 /* Read data from a file descriptor and return the amount of data read. A
92  * short read (less than the requested size) will only occur if end-of-file is
 * reached. */
94 size_t file_read(int fd, char *buf, size_t maxlen)
96     size_t bytes_read = 0;
        /* Issue one read(); a negative result is treated as a fatal I/O
         * error and reported via exception rather than a return code. */
99         ssize_t res = read(fd, buf, maxlen);
103             throw IOException("file_read: error reading");
        /* res == 0 signals end-of-file: return the bytes read so far. */
104         } else if (res == 0) {
116 /* Read the contents of a file (specified by an open file descriptor) and copy
117  * the data to the store. Returns the size of the file (number of bytes
118  * dumped), or -1 on error. */
119 int64_t dumpfile(int fd, dictionary &file_info, const string &path)
121     struct stat stat_buf;
    /* NOTE(review): fstat's return value is unchecked; a failure would leave
     * stat_buf uninitialized. */
122     fstat(fd, &stat_buf);
124     list<string> object_list;
    /* Refuse to dump anything that is not a regular file; the fd may refer to
     * a replaced path (see the paranoia comments in scanfile). */
126     if ((stat_buf.st_mode & S_IFMT) != S_IFREG) {
127         fprintf(stderr, "file is no longer a regular file!\n");
131     /* The index data consists of a sequence of pointers to the data blocks
132      * that actually comprise the file data. This level of indirection is used
133      * so that the same data block can be used in multiple files, or multiple
134      * versions of the same file. */
        /* Read one fixed-size chunk; a short read signals end-of-file. */
137         size_t bytes = file_read(fd, block_buf, LBS_BLOCK_SIZE);
        /* Fold this chunk into the running whole-file checksum. */
141         hash.process(block_buf, bytes);
143         // Either find a copy of this block in an already-existing segment, or
144         // index it so it can be re-used in the future
145         SHA1Checksum block_hash;
146         block_hash.process(block_buf, bytes);
147         string block_csum = block_hash.checksum_str();
148         ObjectReference ref = db->FindObject(block_csum, bytes);
150         // Store a copy of the object if one does not yet exist
        /* An empty segment name marks a "not found" result from FindObject. */
151         if (ref.get_segment().size() == 0) {
152             LbsObject *o = new LbsObject;
154             /* We might still have seen this checksum before, if the object was
155              * stored at some time in the past, but we have decided to clean
156              * the segment the object was originally stored in (FindObject will
157              * not return such objects). When rewriting the object contents,
158              * put it in a separate group, so that old objects get grouped
159              * together. The hope is that these old objects will continue to
160              * be used in the future, and we obtain segments which will
161              * continue to be well-utilized. */
162             if (db->IsOldObject(block_csum, bytes))
163                 o->set_group("compacted");
165                 o->set_group("data");
167             o->set_data(block_buf, bytes);
            /* Register the newly written block so future runs can deduplicate
             * against it. */
170             db->StoreObject(ref, block_csum, bytes);
174         object_list.push_back(ref.to_string());
175         segment_list.insert(ref.get_segment());
180     file_info["checksum"] = hash.checksum_str();
    /* Record stat data + checksum + block list so an unchanged file can be
     * skipped quickly on the next run. */
182     statcache->Save(path, &stat_buf, file_info["checksum"], object_list);
184     /* For files that only need to be broken apart into a few objects, store
185      * the list of objects directly. For larger files, store the data
186      * out-of-line and provide a pointer to the indirect object. */
187     if (object_list.size() < 8) {
188         string blocklist = "";
189         for (list<string>::iterator i = object_list.begin();
190              i != object_list.end(); ++i) {
            /* Separator goes between entries, not before the first one. */
191             if (i != object_list.begin())
195         file_info["data"] = blocklist;
        /* Large file: write the newline-separated block list into its own
         * metadata object. */
197         string blocklist = "";
198         for (list<string>::iterator i = object_list.begin();
199              i != object_list.end(); ++i) {
200             blocklist += *i + "\n";
203         LbsObject *i = new LbsObject;
204         i->set_group("metadata");
205         i->set_data(blocklist.data(), blocklist.size());
        /* The "@" prefix marks an indirect reference to the out-of-line list
         * (the same convention used for metadata links in the root object). */
207         file_info["data"] = "@" + i->get_name();
208         segment_list.insert(i->get_ref().get_segment());
/* Examine a single filesystem entry: apply the exclude list, gather inode
 * metadata, dump regular-file contents via dumpfile(), and append the
 * resulting metadata entry to the in-memory listing. */
215 void scanfile(const string& path)
219     struct stat stat_buf;
225     // Set to true if the item is a directory and we should recursively scan
226     bool recurse = false;
228     // Check this file against the include/exclude list to see if it should be
230     for (list<string>::iterator i = excludes.begin();
231          i != excludes.end(); ++i) {
233             printf("Excluding %s\n", path.c_str());
238     dictionary file_info;
    /* NOTE(review): lstat's return value is unchecked; a vanished file would
     * leave stat_buf stale/uninitialized. */
240     lstat(path.c_str(), &stat_buf);
242     printf("%s\n", path.c_str());
    /* Encode basic inode metadata; "mode" holds permission bits only -- the
     * file type is stored separately in "type" below. */
244     file_info["mode"] = encode_int(stat_buf.st_mode & 07777);
245     file_info["mtime"] = encode_int(stat_buf.st_mtime);
246     file_info["user"] = encode_int(stat_buf.st_uid);
247     file_info["group"] = encode_int(stat_buf.st_gid);
    /* Append human-readable user/group names when the IDs resolve. */
249     struct passwd *pwd = getpwuid(stat_buf.st_uid);
251         file_info["user"] += " (" + uri_encode(pwd->pw_name) + ")";
254     struct group *grp = getgrgid(stat_buf.st_gid);
256         file_info["group"] += " (" + uri_encode(grp->gr_name) + ")";
    /* Dispatch on the inode type; symlink and regular-file handling follow. */
261     switch (stat_buf.st_mode & S_IFMT) {
277         /* Use the reported file size to allocate a buffer large enough to read
278          * the symlink. Allocate slightly more space, so that we ask for more
279          * bytes than we expect and so check for truncation. */
280         buf = new char[stat_buf.st_size + 2];
281         len = readlink(path.c_str(), buf, stat_buf.st_size + 1);
283             fprintf(stderr, "error reading symlink: %m\n");
        /* NOTE(review): readlink does not NUL-terminate; a terminator is
         * presumably written on an intervening line -- confirm before relying
         * on uri_encode(buf) below. */
284         } else if (len <= stat_buf.st_size) {
286             file_info["contents"] = uri_encode(buf);
287         } else if (len > stat_buf.st_size) {
288             fprintf(stderr, "error reading symlink: name truncated\n");
296         /* Be paranoid when opening the file. We have no guarantee that the
297          * file was not replaced between the stat() call above and the open()
298          * call below, so we might not even be opening a regular file. That
299          * the file descriptor refers to a regular file is checked in
300          * dumpfile(). But we also supply flags to open to guard against
301          * various conditions before we can perform that verification:
302          * - O_NOFOLLOW: in the event the file was replaced by a symlink
303          * - O_NONBLOCK: prevents open() from blocking if the file was
305          * We also add in O_NOATIME, since this may reduce disk writes (for
306          * inode updates). However, O_NOATIME may result in EPERM, so if the
307          * initial open fails, try again without O_NOATIME. */
308         fd = open(path.c_str(), O_RDONLY|O_NOATIME|O_NOFOLLOW|O_NONBLOCK);
310             fd = open(path.c_str(), O_RDONLY|O_NOFOLLOW|O_NONBLOCK);
313             fprintf(stderr, "Unable to open file %s: %m\n", path.c_str());
317         /* Drop the use of the O_NONBLOCK flag; we only wanted that for file
         * opens, not for the reads which follow. */
319         flags = fcntl(fd, F_GETFL);
320         fcntl(fd, F_SETFL, flags & ~O_NONBLOCK);
322         file_size = dumpfile(fd, file_info, path);
323         file_info["size"] = encode_int(file_size);
327             return; // error occurred; do not dump file
        /* Warn (but continue) if the file changed size while being read. */
329         if (file_size != stat_buf.st_size) {
330             fprintf(stderr, "Warning: Size of %s changed during reading\n",
341         fprintf(stderr, "Unknown inode type: mode=%x\n", stat_buf.st_mode);
    /* Record the inode type as a single-character code. */
345     file_info["type"] = string(1, inode_type);
    /* Emit this entry into the in-memory metadata stream. */
347     metadata << "name: " << uri_encode(path) << "\n";
348     dict_output(metadata, file_info);
351     // Break apart metadata listing if it becomes too large.
352     if (metadata.str().size() > LBS_METADATA_BLOCK_SIZE)
355     // If we hit a directory, now that we've written the directory itself,
356     // recursively scan the directory.
/* Read the entries of directory `path` and feed each one to scanfile(),
 * visiting them in sorted order for a deterministic scan. */
361 void scandir(const string& path)
363     DIR *dir = opendir(path.c_str());
        /* opendir failure: report errno (%m) and skip this directory. */
366         fprintf(stderr, "Error: %m\n");
    /* Collect all names first so they can be sorted before visiting. */
371     vector<string> contents;
372     while ((ent = readdir(dir)) != NULL) {
373         string filename(ent->d_name);
        /* Skip the self/parent pseudo-entries. */
374         if (filename == "." || filename == "..")
376         contents.push_back(filename);
    /* Sorted order makes the metadata listing deterministic across runs. */
379     sort(contents.begin(), contents.end());
381     for (vector<string>::iterator i = contents.begin();
382          i != contents.end(); ++i) {
383         const string& filename = *i;
387         scanfile(path + "/" + filename);
    /* NOTE(review): no closedir(dir) is visible in this fragment -- confirm
     * the directory handle is released. */
/* Print command-line usage information; `program` is the invocation name
 * (typically argv[0]) interpolated into the first line. */
393 void usage(const char *program)
396         "Usage: %s [OPTION]... SOURCE DEST\n"
397         "Produce backup snapshot of files in SOURCE and store to DEST.\n"
400         "  --exclude=PATH       exclude files in PATH from snapshot\n"
401         "  --localdb=PATH       local backup metadata is stored in PATH\n",
/* Entry point: parse options, initialize the segment store, local database,
 * and stat cache, scan the source tree, then write the root object and the
 * snapshot descriptor file. */
405 int main(int argc, char *argv[])
407     string backup_source = ".";
408     string backup_dest = ".";
409     string localdb_dir = "";
    /* Long options are dispatched below by their index in this table. */
412     static struct option long_options[] = {
413         {"localdb", 1, 0, 0},           // 0
414         {"exclude", 1, 0, 0},           // 1
415         {"filter", 1, 0, 0},            // 2
416         {"filter-extension", 1, 0, 0},  // 3
421         int c = getopt_long(argc, argv, "", long_options, &long_index);
        /* All options are long options; dispatch on the matched index. */
427             switch (long_index) {
429                 localdb_dir = optarg;
432                 excludes.push_back(optarg);
435                 filter_program = optarg;
437             case 3:     // --filter-extension
438                 filter_extension = optarg;
441                 fprintf(stderr, "Unhandled long option!\n");
    /* At least two positional arguments (SOURCE and DEST) are required. */
450     if (argc < optind + 2) {
455     backup_source = argv[optind];
456     backup_dest = argv[argc - 1];
    /* Default: keep the local database alongside the backup destination. */
458     if (localdb_dir == "") {
459         localdb_dir = backup_dest;
462     printf("Source: %s\nDest: %s\nDatabase: %s\n\n",
463            backup_source.c_str(), backup_dest.c_str(), localdb_dir.c_str());
465     tss = new TarSegmentStore(backup_dest);
    /* Shared block buffer used by dumpfile() for all file reads. */
466     block_buf = new char[LBS_BLOCK_SIZE];
468     /* Store the time when the backup started, so it can be included in the
     * snapshot descriptor. */
474     localtime_r(&now, &time_buf);
475     strftime(desc_buf, sizeof(desc_buf), "%Y%m%dT%H%M%S", &time_buf);
477     /* Open the local database which tracks all objects that are stored
478      * remotely, for efficient incrementals. Provide it with the name of this
     * snapshot. */
480     string database_path = localdb_dir + "/localdb.sqlite";
482     db->Open(database_path.c_str(), desc_buf);
484     /* Initialize the stat cache, for skipping over unchanged files. */
485     statcache = new StatCache;
486     statcache->Open(localdb_dir.c_str(), desc_buf);
    /* NOTE(review): exception is caught by value; `catch (const IOException&)`
     * would avoid a copy and potential slicing. */
490     } catch (IOException e) {
491         fprintf(stderr, "IOException: %s\n", e.getError().c_str());
    /* Write the root object, which indexes all the metadata objects. */
495     const string md = metadata_root.str();
497     LbsObject *root = new LbsObject;
498     root->set_group("metadata");
499     root->set_data(md.data(), md.size());
502     segment_list.insert(root->get_ref().get_segment());
504     /* Write a backup descriptor file, which says which segments are needed and
505      * where to start to restore this snapshot. The filename is based on the
     * snapshot start time. */
507     string desc_filename = backup_dest + "/snapshot-" + desc_buf + ".lbs";
508     std::ofstream descriptor(desc_filename.c_str());
510     descriptor << "Format: LBS Snapshot v0.1\n";
    /* desc_buf is reused here: the same start time reformatted human-readably. */
511     strftime(desc_buf, sizeof(desc_buf), "%Y-%m-%d %H:%M:%S %z", &time_buf);
512     descriptor << "Date: " << desc_buf << "\n";
513     descriptor << "Root: " << root->get_ref().to_string() << "\n";
    /* Every segment referenced anywhere in the snapshot must be listed so a
     * restore knows the complete set of files to fetch. */
517     descriptor << "Segments:\n";
518     for (std::set<string>::iterator i = segment_list.begin();
519          i != segment_list.end(); ++i) {
520         descriptor << "    " << *i << "\n";