1 /* Recursively descend the filesystem and visit each file. */
13 #include <sys/types.h>
35 static TarSegmentStore *tss = NULL;
/* (tss is assigned in main() once the backup destination is known.) */
37 /* Buffer for holding a single block of data read from a file. */
38 static const size_t LBS_BLOCK_SIZE = 1024 * 1024;
39 static char *block_buf;
/* The in-progress metadata listing is flushed to a new object once it grows
 * past this many bytes (see the size check in scanfile()). */
41 static const size_t LBS_METADATA_BLOCK_SIZE = 65536;
43 /* Local database, which tracks objects written in this and previous
44 * invocations to help in creating incremental snapshots. */
47 /* Contents of the root object. This will contain a set of indirect links to
48 * the metadata objects. */
49 std::ostringstream metadata_root;
51 /* Buffer for building up metadata. */
52 std::ostringstream metadata;
54 /* Keep track of all segments which are needed to reconstruct the snapshot. */
55 std::set<string> segment_list;
/* Forward declaration: scanfile() recurses into directories via scandir(). */
57 void scandir(const string& path);
59 /* Selection of files to include/exclude in the snapshot. */
60 std::list<string> excludes;
62 /* Ensure contents of metadata are flushed to an object. */
/* Snapshot the accumulated metadata buffer... */
65 string m = metadata.str();
69 /* Write current metadata information to a new object. */
70 LbsObject *meta = new LbsObject;
71 meta->set_group("root");
72 meta->set_data(m.data(), m.size());
76 /* Write a reference to this block in the root. */
77 ObjectReference ref = meta->get_ref();
/* Indirect references in the root listing are prefixed with '@'. */
78 metadata_root << "@" << ref.to_string() << "\n";
/* Record the containing segment so the snapshot descriptor can list it. */
79 segment_list.insert(ref.get_segment());
86 /* Read data from a file descriptor and return the amount of data read. A
87 * short read (less than the requested size) will only occur if end-of-file is
89 size_t file_read(int fd, char *buf, size_t maxlen)
91 size_t bytes_read = 0;
/* read(2) may return fewer bytes than requested, so the surrounding code
 * accumulates into bytes_read until maxlen is reached or EOF is seen. */
94 ssize_t res = read(fd, buf, maxlen);
/* A read error is fatal for this file. */
98 throw IOException("file_read: error reading");
/* res == 0 signals end-of-file. */
99 } else if (res == 0) {
111 /* Read the contents of a file (specified by an open file descriptor) and copy
112 * the data to the store. Returns the size of the file (number of bytes
113 * dumped), or -1 on error. */
114 int64_t dumpfile(int fd, dictionary &file_info)
116 struct stat stat_buf;
/* NOTE(review): fstat's return value is unchecked — stat_buf could be left
 * uninitialized if the call fails; confirm this is acceptable. */
117 fstat(fd, &stat_buf);
/* One entry per data block; serialized into file_info["data"] below. */
119 list<string> object_list;
/* Re-verify the type here: the file could have been replaced by something
 * other than a regular file between lstat() in scanfile() and the open(). */
121 if ((stat_buf.st_mode & S_IFMT) != S_IFREG) {
122 fprintf(stderr, "file is no longer a regular file!\n");
126 /* The index data consists of a sequence of pointers to the data blocks
127 * that actually comprise the file data. This level of indirection is used
128 * so that the same data block can be used in multiple files, or multiple
129 * versions of the same file. */
132 size_t bytes = file_read(fd, block_buf, LBS_BLOCK_SIZE);
/* Running checksum over the entire file contents (stored below). */
136 hash.process(block_buf, bytes);
138 // Either find a copy of this block in an already-existing segment, or
139 // index it so it can be re-used in the future
140 SHA1Checksum block_hash;
141 block_hash.process(block_buf, bytes);
142 string block_csum = block_hash.checksum_str();
/* Look up (checksum, length) in the local database to de-duplicate. */
143 ObjectReference ref = db->FindObject(block_csum, bytes);
145 // Store a copy of the object if one does not yet exist
/* An empty segment name means FindObject() found no prior copy. */
146 if (ref.get_segment().size() == 0) {
147 LbsObject *o = new LbsObject;
148 o->set_group("data");
149 o->set_data(block_buf, bytes);
/* Index the new object so future runs can reuse this block. */
152 db->StoreObject(ref, block_csum, bytes);
156 object_list.push_back(ref.to_string());
157 segment_list.insert(ref.get_segment());
162 file_info["checksum"] = hash.checksum_str();
164 /* For files that only need to be broken apart into a few objects, store
165 * the list of objects directly. For larger files, store the data
166 * out-of-line and provide a pointer to the indirect object. */
167 if (object_list.size() < 8) {
168 string blocklist = "";
169 for (list<string>::iterator i = object_list.begin();
170 i != object_list.end(); ++i) {
171 if (i != object_list.begin())
175 file_info["data"] = blocklist;
/* Out-of-line case: build a newline-separated block list... */
177 string blocklist = "";
178 for (list<string>::iterator i = object_list.begin();
179 i != object_list.end(); ++i) {
180 blocklist += *i + "\n";
/* ...store it as its own "indirect" object, and reference that object with
 * a leading '@' so readers know to dereference it. */
183 LbsObject *i = new LbsObject;
184 i->set_group("indirect");
185 i->set_data(blocklist.data(), blocklist.size());
187 file_info["data"] = "@" + i->get_name();
188 segment_list.insert(i->get_ref().get_segment());
195 void scanfile(const string& path)
199 struct stat stat_buf;
205 // Set to true if the item is a directory and we should recursively scan
206 bool recurse = false;
208 // Check this file against the include/exclude list to see if it should be
210 for (list<string>::iterator i = excludes.begin();
211 i != excludes.end(); ++i) {
213 printf("Excluding %s\n", path.c_str());
218 dictionary file_info;
/* NOTE(review): lstat's return value is unchecked; on failure stat_buf may
 * be uninitialized — confirm. */
220 lstat(path.c_str(), &stat_buf);
222 printf("%s\n", path.c_str());
/* Common metadata recorded for every inode type. */
224 file_info["mode"] = encode_int(stat_buf.st_mode & 07777);
225 file_info["mtime"] = encode_int(stat_buf.st_mtime);
226 file_info["user"] = encode_int(stat_buf.st_uid);
227 file_info["group"] = encode_int(stat_buf.st_gid);
/* When the uid/gid resolves to a name, append it in parentheses so the
 * listing remains human-readable. */
229 struct passwd *pwd = getpwuid(stat_buf.st_uid);
231 file_info["user"] += " (" + uri_encode(pwd->pw_name) + ")";
234 struct group *grp = getgrgid(stat_buf.st_gid);
236 file_info["group"] += " (" + uri_encode(grp->gr_name) + ")";
/* Type-specific handling follows. */
241 switch (stat_buf.st_mode & S_IFMT) {
257 /* Use the reported file size to allocate a buffer large enough to read
258 * the symlink. Allocate slightly more space, so that we ask for more
259 * bytes than we expect and so check for truncation. */
260 buf = new char[stat_buf.st_size + 2];
261 len = readlink(path.c_str(), buf, stat_buf.st_size + 1);
263 fprintf(stderr, "error reading symlink: %m\n");
264 } else if (len <= stat_buf.st_size) {
/* NOTE(review): readlink() does not NUL-terminate the buffer; this relies
 * on buf[len] being set to 0 in elided code — confirm. */
266 file_info["contents"] = uri_encode(buf);
267 } else if (len > stat_buf.st_stat_size == 0 ? 0 : stat_buf.st_size) {
268 fprintf(stderr, "error reading symlink: name truncated\n");
276 /* Be paranoid when opening the file. We have no guarantee that the
277 * file was not replaced between the stat() call above and the open()
278 * call below, so we might not even be opening a regular file. That
279 * the file descriptor refers to a regular file is checked in
280 * dumpfile(). But we also supply flags to open to guard against
281 * various conditions before we can perform that verification:
282 * - O_NOFOLLOW: in the event the file was replaced by a symlink
283 * - O_NONBLOCK: prevents open() from blocking if the file was
285 * We also add in O_NOATIME, since this may reduce disk writes (for
286 * inode updates). However, O_NOATIME may result in EPERM, so if the
287 * initial open fails, try again without O_NOATIME. */
288 fd = open(path.c_str(), O_RDONLY|O_NOATIME|O_NOFOLLOW|O_NONBLOCK);
/* Retry without O_NOATIME, which requires ownership or privilege. */
290 fd = open(path.c_str(), O_RDONLY|O_NOFOLLOW|O_NONBLOCK);
293 fprintf(stderr, "Unable to open file %s: %m\n", path.c_str());
297 /* Drop the use of the O_NONBLOCK flag; we only wanted that for file
299 flags = fcntl(fd, F_GETFL);
300 fcntl(fd, F_SETFL, flags & ~O_NONBLOCK);
302 file_size = dumpfile(fd, file_info);
303 file_info["size"] = encode_int(file_size);
307 return; // error occurred; do not dump file
/* A size mismatch means the file changed while being read; the dumped data
 * may be internally inconsistent. */
309 if (file_size != stat_buf.st_size) {
310 fprintf(stderr, "Warning: Size of %s changed during reading\n",
321 fprintf(stderr, "Unknown inode type: mode=%x\n", stat_buf.st_mode);
/* Single-character type code selected by the switch above. */
325 file_info["type"] = string(1, inode_type);
/* Append this file's record to the in-progress metadata listing. */
327 metadata << "name: " << uri_encode(path) << "\n";
328 dict_output(metadata, file_info);
331 // Break apart metadata listing if it becomes too large.
332 if (metadata.str().size() > LBS_METADATA_BLOCK_SIZE)
335 // If we hit a directory, now that we've written the directory itself,
336 // recursively scan the directory.
341 void scandir(const string& path)
343 DIR *dir = opendir(path.c_str());
346 fprintf(stderr, "Error: %m\n");
/* Collect entry names first so they can be visited in sorted order, which
 * keeps snapshot output deterministic across runs. */
351 vector<string> contents;
352 while ((ent = readdir(dir)) != NULL) {
353 string filename(ent->d_name);
/* Skip the self and parent entries. */
354 if (filename == "." || filename == "..")
356 contents.push_back(filename);
359 sort(contents.begin(), contents.end());
/* Visit each entry; scanfile() recurses back into scandir() for
 * subdirectories. */
361 for (vector<string>::iterator i = contents.begin();
362 i != contents.end(); ++i) {
363 const string& filename = *i;
367 scanfile(path + "/" + filename);
/* NOTE(review): no closedir(dir) is visible here — confirm the directory
 * stream is closed so a handle is not leaked per scanned directory. */
373 void usage(const char *program)
/* Print a brief command-line synopsis and option summary. */
376 "Usage: %s [OPTION]... SOURCE DEST\n"
377 "Produce backup snapshot of files in SOURCE and store to DEST.\n"
380 " --exclude=PATH exclude files in PATH from snapshot\n"
381 " --localdb=PATH local backup metadata is stored in PATH\n",
385 int main(int argc, char *argv[])
387 string backup_source = ".";
388 string backup_dest = ".";
389 string localdb_dir = "";
392 static struct option long_options[] = {
393 {"localdb", 1, 0, 0}, // 0
394 {"exclude", 1, 0, 0}, // 1
399 int c = getopt_long(argc, argv, "", long_options, &long_index);
405 switch (long_index) {
407 localdb_dir = optarg;
410 excludes.push_back(optarg);
413 fprintf(stderr, "Unhandled long option!\n");
422 if (argc < optind + 2) {
427 backup_source = argv[optind];
428 backup_dest = argv[argc - 1];
430 if (localdb_dir == "") {
431 localdb_dir = backup_dest;
434 printf("Source: %s, Dest: %s\n",
435 backup_source.c_str(), backup_dest.c_str());
437 tss = new TarSegmentStore(backup_dest);
438 block_buf = new char[LBS_BLOCK_SIZE];
440 /* Write a backup descriptor file, which says which segments are needed and
441 * where to start to restore this snapshot. The filename is based on the
447 localtime_r(&now, &time_buf);
448 strftime(desc_buf, sizeof(desc_buf), "%Y%m%dT%H%M%S", &time_buf);
449 string desc_filename = backup_dest + "/" + desc_buf + ".lbs";
450 std::ofstream descriptor(desc_filename.c_str());
452 /* Open the local database which tracks all objects that are stored
453 * remotely, for efficient incrementals. Provide it with the name of this
455 string database_path = backup_dest + "/localdb.sqlite";
457 db->Open(database_path.c_str(), desc_buf);
461 } catch (IOException e) {
462 fprintf(stderr, "IOException: %s\n", e.getError().c_str());
466 const string md = metadata_root.str();
468 LbsObject *root = new LbsObject;
469 root->set_group("root");
470 root->set_data(md.data(), md.size());
474 segment_list.insert(root->get_ref().get_segment());
475 descriptor << "Root: " << root->get_ref().to_string() << "\n";
476 strftime(desc_buf, sizeof(desc_buf), "%Y-%m-%d %H:%M:%S %z", &time_buf);
477 descriptor << "Date: " << desc_buf << "\n";
481 descriptor << "Segments:\n";
482 for (std::set<string>::iterator i = segment_list.begin();
483 i != segment_list.end(); ++i) {
484 descriptor << " " << *i << "\n";