1 /* Recursively descend the filesystem and visit each file. */
11 #include <sys/types.h>
/* Global handle to the segment store all objects are written through;
 * created in main() from the backup destination directory. */
33 static TarSegmentStore *tss = NULL;
35 /* Buffer for holding a single block of data read from a file. */
36 static const size_t LBS_BLOCK_SIZE = 1024 * 1024;
37 static char *block_buf;
/* Accumulated metadata is flushed out to a new object once it grows past
 * this many bytes (see the size check in scanfile). */
39 static const size_t LBS_METADATA_BLOCK_SIZE = 65536;
41 /* Contents of the root object. This will contain a set of indirect links to
42 * the metadata objects. */
43 std::ostringstream metadata_root;
45 /* Buffer for building up metadata. */
46 std::ostringstream metadata;
48 /* Keep track of all segments which are needed to reconstruct the snapshot. */
49 std::set<string> segment_list;
/* Forward declaration: scanfile() and scandir() are mutually recursive. */
51 void scandir(const string& path);
53 /* Ensure contents of metadata are flushed to an object. */
/* NOTE(review): the signature line of this routine is elided from this
 * listing. The visible body snapshots the buffered metadata text, writes
 * it out as a "root"-group object, and appends an indirect reference to
 * that object into metadata_root. */
56 string m = metadata.str();
60 /* Write current metadata information to a new object. */
61 LbsObject *meta = new LbsObject;
62 meta->set_group("root");
63 meta->set_data(m.data(), m.size());
67 /* Write a reference to this block in the root. */
68 ObjectReference ref = meta->get_ref();
/* The "@" prefix marks an indirect reference to be followed on restore. */
69 metadata_root << "@" << ref.to_string() << "\n";
/* Record the segment holding this object so the snapshot descriptor can
 * list every segment needed for a restore. */
70 segment_list.insert(ref.get_segment());
77 /* Read data from a file descriptor and return the amount of data read. A
78 * short read (less than the requested size) will only occur if end-of-file is
80 size_t file_read(int fd, char *buf, size_t maxlen)
82 size_t bytes_read = 0;
85 ssize_t res = read(fd, buf, maxlen);
/* A negative result from read() indicates an I/O error (the surrounding
 * condition and retry/EINTR handling, if any, are elided in this listing). */
89 throw IOException("file_read: error reading");
/* res == 0 from read() means end-of-file was reached. */
90 } else if (res == 0) {
102 /* Read the contents of a file (specified by an open file descriptor) and copy
103 * the data to the store. Returns the size of the file (number of bytes
104 * dumped), or -1 on error. */
105 int64_t dumpfile(int fd, dictionary &file_info)
107 struct stat stat_buf;
/* NOTE(review): the return value of fstat() is unchecked; on failure
 * stat_buf would be used uninitialized — confirm intent. */
108 fstat(fd, &stat_buf);
/* Names of the data objects making up this file, in order. */
110 list<string> object_list;
/* Re-verify the type via the descriptor: the file may have been replaced
 * between the caller's lstat() and open(). */
112 if ((stat_buf.st_mode & S_IFMT) != S_IFREG) {
113 fprintf(stderr, "file is no longer a regular file!\n");
117 /* The index data consists of a sequence of pointers to the data blocks
118 * that actually comprise the file data. This level of indirection is used
119 * so that the same data block can be used in multiple files, or multiple
120 * versions of the same file. */
/* Read one fixed-size block per iteration (the loop header is elided in
 * this listing); a short read signals end-of-file per file_read(). */
123 size_t bytes = file_read(fd, block_buf, LBS_BLOCK_SIZE);
/* Fold this block into the running whole-file checksum. */
127 hash.process(block_buf, bytes);
129 // tarstore processing: store the block as a "data"-group object
130 LbsObject *o = new LbsObject;
131 o->set_group("data");
132 o->set_data(block_buf, bytes);
/* Remember the object name for the block list, and the segment holding it
 * for the snapshot descriptor. */
134 object_list.push_back(o->get_name());
135 segment_list.insert(o->get_ref().get_segment());
141 file_info["checksum"] = hash.checksum_str();
143 /* For files that only need to be broken apart into a few objects, store
144 * the list of objects directly. For larger files, store the data
145 * out-of-line and provide a pointer to the indirect object. */
146 if (object_list.size() < 8) {
/* Inline case: join the object names directly into the "data" field. */
147 string blocklist = "";
148 for (list<string>::iterator i = object_list.begin();
149 i != object_list.end(); ++i) {
150 if (i != object_list.begin())
154 file_info["data"] = blocklist;
/* Out-of-line case: write the newline-separated block list as a separate
 * "indirect"-group object. */
156 string blocklist = "";
157 for (list<string>::iterator i = object_list.begin();
158 i != object_list.end(); ++i) {
159 blocklist += *i + "\n";
162 LbsObject *i = new LbsObject;
163 i->set_group("indirect");
164 i->set_data(blocklist.data(), blocklist.size());
/* "@" marks an indirect reference to be followed at restore time. */
166 file_info["data"] = "@" + i->get_name();
167 segment_list.insert(i->get_ref().get_segment());
/* Examine a single filesystem entry: record its metadata attributes, dump
 * regular-file contents to the store, and arrange to recurse into
 * directories. */
174 void scanfile(const string& path)
178 struct stat stat_buf;
184 // Set to true if the item is a directory and we should recursively scan
185 bool recurse = false;
/* Key/value attributes for this entry, serialized into the metadata log. */
187 dictionary file_info;
/* lstat() so symlinks themselves are described, not their targets.
 * NOTE(review): the return value is unchecked — confirm intent. */
189 lstat(path.c_str(), &stat_buf);
191 printf("%s\n", path.c_str());
/* Paths are URI-encoded so arbitrary bytes survive the text format. */
193 metadata << "name: " << uri_encode(path) << "\n";
195 file_info["mode"] = encode_int(stat_buf.st_mode & 07777);
196 file_info["mtime"] = encode_int(stat_buf.st_mtime);
197 file_info["user"] = encode_int(stat_buf.st_uid);
198 file_info["group"] = encode_int(stat_buf.st_gid);
/* Append symbolic user/group names where the IDs resolve; the numeric
 * values recorded above remain authoritative. */
200 struct passwd *pwd = getpwuid(stat_buf.st_uid);
202 file_info["user"] += " (" + uri_encode(pwd->pw_name) + ")";
205 struct group *grp = getgrgid(stat_buf.st_gid);
207 file_info["group"] += " (" + uri_encode(grp->gr_name) + ")";
/* Dispatch on the inode type; most case labels are elided in this listing. */
212 switch (stat_buf.st_mode & S_IFMT) {
228 /* Use the reported file size to allocate a buffer large enough to read
229 * the symlink. Allocate slightly more space, so that we ask for more
230 * bytes than we expect and so check for truncation. */
231 buf = new char[stat_buf.st_size + 2];
232 len = readlink(path.c_str(), buf, stat_buf.st_size + 1);
/* %m is a glibc printf extension that prints strerror(errno). */
234 fprintf(stderr, "error reading symlink: %m\n");
235 } else if (len <= stat_buf.st_size) {
/* NOTE(review): readlink() does not NUL-terminate buf; the terminator is
 * presumably written on a line elided here — verify before relying on
 * uri_encode(buf) reading a proper C string. */
237 file_info["contents"] = uri_encode(buf);
238 } else if (len > stat_buf.st_size) {
/* More bytes came back than st_size promised: target changed under us. */
239 fprintf(stderr, "error reading symlink: name truncated\n");
247 /* Be paranoid when opening the file. We have no guarantee that the
248 * file was not replaced between the stat() call above and the open()
249 * call below, so we might not even be opening a regular file. That
250 * the file descriptor refers to a regular file is checked in
251 * dumpfile(). But we also supply flags to open to guard against
252 * various conditions before we can perform that verification:
253 * - O_NOFOLLOW: in the event the file was replaced by a symlink
254 * - O_NONBLOCK: prevents open() from blocking if the file was
256 * We also add in O_NOATIME, since this may reduce disk writes (for
258 fd = open(path.c_str(), O_RDONLY|O_NOATIME|O_NOFOLLOW|O_NONBLOCK);
260 /* Drop the use of the O_NONBLOCK flag; we only wanted that for file
262 flags = fcntl(fd, F_GETFL);
263 fcntl(fd, F_SETFL, flags & ~O_NONBLOCK);
265 file_size = dumpfile(fd, file_info);
266 file_info["size"] = encode_int(file_size);
270 return; // error occurred; do not dump file
/* A size mismatch means the file was modified while it was being read. */
272 if (file_size != stat_buf.st_size) {
273 fprintf(stderr, "Warning: Size of %s changed during reading\n",
284 fprintf(stderr, "Unknown inode type: mode=%x\n", stat_buf.st_mode);
/* Single-character type code (set in the switch cases elided above). */
288 file_info["type"] = string(1, inode_type);
290 dict_output(metadata, file_info);
293 // Break apart metadata listing if it becomes too large.
294 if (metadata.str().size() > LBS_METADATA_BLOCK_SIZE)
297 // If we hit a directory, now that we've written the directory itself,
298 // recursively scan the directory.
/* Scan the contents of a directory, visiting each entry via scanfile()
 * (which in turn recurses back here for subdirectories). */
303 void scandir(const string& path)
305 DIR *dir = opendir(path.c_str());
/* %m is a glibc printf extension that prints strerror(errno). */
308 fprintf(stderr, "Error: %m\n");
/* Collect entry names first, skipping "." and "..", so they can be
 * sorted before being visited. */
313 vector<string> contents;
314 while ((ent = readdir(dir)) != NULL) {
315 string filename(ent->d_name);
316 if (filename == "." || filename == "..")
318 contents.push_back(filename);
/* readdir() order is arbitrary; sort so traversal is deterministic. */
321 sort(contents.begin(), contents.end());
323 for (vector<string>::iterator i = contents.begin();
324 i != contents.end(); ++i) {
325 const string& filename = *i;
326 scanfile(path + "/" + filename);
/* Entry point: set up the segment store, scan the filesystem, then write a
 * backup descriptor naming the root object and all required segments.
 * (The function continues past the end of this excerpt.) */
332 int main(int argc, char *argv[])
/* One shared read buffer, reused by dumpfile() for every file. */
334 block_buf = new char[LBS_BLOCK_SIZE];
/* Destination directory for the backup; defaults to the current directory
 * and may be overridden by the first command-line argument. */
336 string backup_dest = ".";
339 backup_dest = argv[1];
341 tss = new TarSegmentStore(backup_dest);
343 /* Write a backup descriptor file, which says which segments are needed and
344 * where to start to restore this snapshot. The filename is based on the
/* Timestamp-derived descriptor name, e.g. 20070101T120000.lbs */
350 localtime_r(&now, &time_buf);
351 strftime(desc_buf, sizeof(desc_buf), "%Y%m%dT%H%M%S", &time_buf);
352 string desc_filename = backup_dest + "/" + desc_buf + ".lbs";
353 std::ofstream descriptor(desc_filename.c_str());
/* NOTE(review): IOException is caught by value; catching by const
 * reference would avoid a copy and possible slicing — flagged only,
 * code left unchanged. */
357 } catch (IOException e) {
358 fprintf(stderr, "IOException: %s\n", e.getError().c_str());
/* Store the root object (the indirect links to all metadata objects) and
 * record the segment it lives in. */
362 const string md = metadata_root.str();
364 LbsObject *root = new LbsObject;
365 root->set_group("root");
366 root->set_data(md.data(), md.size());
370 segment_list.insert(root->get_ref().get_segment());
371 descriptor << "Root: " << root->get_ref().to_string() << "\n";
/* Same timestamp, reformatted human-readably for the descriptor. */
372 strftime(desc_buf, sizeof(desc_buf), "%Y-%m-%d %H:%M:%S %z", &time_buf);
373 descriptor << "Date: " << desc_buf << "\n";
/* List every segment needed to reconstruct this snapshot (the loop
 * continues past the end of this excerpt). */
377 descriptor << "Segments:\n";
378 for (std::set<string>::iterator i = segment_list.begin();
379 i != segment_list.end(); ++i) {
380 descriptor << "    " << *i << "\n";