#include <algorithm>
#include <string>
#include <vector>
+#include <iostream>
+#include <sstream>
#include "store.h"
+#include "tarstore.h"
#include "sha1.h"
using std::string;
using std::vector;
+using std::ostream;
+/* Old-style segment store; during this transition every data block is
+ * written to it as well as to the tarstore (see dumpfile). */
+static SegmentStore *segment_store;
static OutputStream *info_dump = NULL;
-void scandir(const string& path);
+/* New tar-based segment store; receives a copy of every data block and the
+ * final text-metadata "root" object. */
+static TarSegmentStore *tss = NULL;
+
+/* Partitioners packing index (DREF) and data (DATA) objects into segments. */
+static SegmentPartitioner *index_segment, *data_segment;
+
+/* Buffer for holding a single block of data read from a file. */
+static const int LBS_BLOCK_SIZE = 1024 * 1024;
+static char *block_buf;
+
+/* Forward declaration: scanfile and scandir are mutually recursive. */
+void scandir(const string& path, std::ostream& metadata);
/* Converts time to microseconds since the epoch. */
int64_t encode_time(time_t time)
return (int64_t)time * 1000000;
}
-void dumpfile(int fd, dictionary &file_info)
+/* Fill buf with up to maxlen bytes read from the given file descriptor,
+ * transparently retrying when the call is interrupted by a signal.  Returns
+ * the number of bytes actually read; the result is smaller than maxlen only
+ * when end-of-file is reached first.  An I/O error raises IOException. */
+size_t file_read(int fd, char *buf, size_t maxlen)
+{
+    size_t total = 0;
+
+    while (maxlen > 0) {
+        ssize_t res = read(fd, buf, maxlen);
+
+        if (res == 0)               /* end-of-file */
+            break;
+
+        if (res < 0) {
+            if (errno == EINTR)     /* interrupted; just retry */
+                continue;
+            throw IOException("file_read: error reading");
+        }
+
+        total += res;
+        buf += res;
+        maxlen -= res;
+    }
+
+    return total;
+}
+
+/* Read the contents of a file (specified by an open file descriptor) and copy
+ * the data to the store. */
+void dumpfile(int fd, dictionary &file_info, ostream &metadata)
{
    struct stat stat_buf;
    fstat(fd, &stat_buf);
    int64_t size = 0;
-
-    char buf[4096];
+    /* Space-separated list of tarstore block ids making up this file; it is
+     * emitted to the text metadata stream once the file has been read. */
+    string segment_list = "data:";
    if ((stat_buf.st_mode & S_IFMT) != S_IFREG) {
        printf("file is no longer a regular file!\n");
        return;
    }
+    /* The index data consists of a sequence of pointers to the data blocks
+     * that actually comprise the file data. This level of indirection is used
+     * so that the same data block can be used in multiple files, or multiple
+     * versions of the same file. */
+    struct uuid segment_uuid;
+    int object_id;
+    OutputStream *index_data = index_segment->new_object(&segment_uuid,
+                                                         &object_id,
+                                                         "DREF");
+
+    /* Hash accumulates over the complete file contents; stored below. */
    SHA1Checksum hash;
    while (true) {
-        ssize_t res = read(fd, buf, sizeof(buf));
-        if (res < 0) {
-            if (errno == EINTR)
-                continue;
-            printf("Error while reading: %m\n");
-            return;
-        } else if (res == 0) {
+        struct uuid block_segment_uuid;
+        int block_object_id;
+
+        /* A short read here can only mean end-of-file (see file_read). */
+        size_t bytes = file_read(fd, block_buf, LBS_BLOCK_SIZE);
+        if (bytes == 0)
            break;
-        } else {
-            hash.process(buf, res);
-            size += res;
-        }
+
+        hash.process(block_buf, bytes);
+        /* Old-style store: write the block as its own object and append a
+         * (segment uuid, object id) pointer to the index object.
+         * NOTE(review): 'block' and 'index_data' are never deleted in this
+         * function -- presumably the partitioner owns the streams; verify. */
+        OutputStream *block = data_segment->new_object(&block_segment_uuid,
+                                                       &block_object_id,
+                                                       "DATA");
+        block->write(block_buf, bytes);
+        index_data->write_uuid(block_segment_uuid);
+        index_data->write_u32(block_object_id);
+
+        // tarstore processing
+        string blockid = tss->write_object(block_buf, bytes, "data");
+        segment_list += " " + blockid;
+
+        size += bytes;
    }
    file_info["sha1"] = string((const char *)hash.checksum(),
                               hash.checksum_size());
+    /* Pointer to the index object; readers follow it to the data blocks. */
+    file_info["data"] = encode_objref(segment_uuid, object_id);
+
+    metadata << segment_list << "\n";
}
-void scanfile(const string& path)
+/* Scan a single filesystem entry: write its attributes both to the binary
+ * info_dump stream and to the new text-format metadata stream, dump the
+ * contents of regular files, and recurse into directories. */
-void scanfile(const string& path)
+void scanfile(const string& path, ostream &metadata)
{
    int fd;
    long flags;
    printf("%s\n", path.c_str());
+    /* Text-format metadata: one "key: value" line per attribute.
+     * NOTE(review): the mode is streamed in decimal here while the binary
+     * dictionary stores the same 07777 bits -- confirm decimal is the
+     * intended text encoding (octal would be conventional). */
+    metadata << "name: " << path << "\n";
+    metadata << "mode: " << (stat_buf.st_mode & 07777) << "\n";
+    metadata << "atime: " << stat_buf.st_atime << "\n";
+    metadata << "ctime: " << stat_buf.st_ctime << "\n";
+    metadata << "mtime: " << stat_buf.st_mtime << "\n";
+    metadata << "user: " << stat_buf.st_uid << "\n";
+    metadata << "group: " << stat_buf.st_gid << "\n";
+
    file_info["mode"] = encode_u16(stat_buf.st_mode & 07777);
    file_info["atime"] = encode_u64(encode_time(stat_buf.st_atime));
    file_info["ctime"] = encode_u64(encode_time(stat_buf.st_ctime));
    fcntl(fd, F_SETFL, flags & ~O_NONBLOCK);
    file_info["size"] = encode_u64(stat_buf.st_size);
-    dumpfile(fd, file_info);
+    dumpfile(fd, file_info, metadata);
    close(fd);
        break;
    }
    file_info["type"] = string(1, inode_type);
+    metadata << "type: " << inode_type << "\n";
    info_dump->write_string(path);
    info_dump->write_dictionary(file_info);
+    /* A blank line terminates this entry in the text metadata stream. */
+    metadata << "\n";
+
    // If we hit a directory, now that we've written the directory itself,
    // recursively scan the directory.
    if (recurse)
-        scandir(path);
+        scandir(path, metadata);
}
+/* Scan an entire directory, forwarding the text metadata stream through to
+ * every entry scanned (scanfile recurses back here for subdirectories). */
-void scandir(const string& path)
+void scandir(const string& path, ostream &metadata)
{
    DIR *dir = opendir(path.c_str());
    for (vector<string>::iterator i = contents.begin();
         i != contents.end(); ++i) {
        const string& filename = *i;
-        scanfile(path + "/" + filename);
+        scanfile(path + "/" + filename, metadata);
    }
    closedir(dir);
int main(int argc, char *argv[])
{
-    struct uuid id = SegmentWriter::generate_uuid();
-    string filename = SegmentWriter::format_uuid(id);
-
-    printf("Backup UUID: %s\n", filename.c_str());
-    FILE *dump = fopen(filename.c_str(), "w");
-    if (dump == NULL) {
-        fprintf(stderr, "Cannot open file %s: %m\n", filename.c_str());
-        return 1;
-    }
+    /* One reusable block-sized read buffer shared by all of dumpfile. */
+    block_buf = new char[LBS_BLOCK_SIZE];
+
+    /* Both stores write into the current directory; dumpfile sends every
+     * data block to each of them. */
+    tss = new TarSegmentStore(".");
+    segment_store = new SegmentStore(".");
+    SegmentWriter *sw = segment_store->new_segment();
+    info_dump = sw->new_object(NULL, "ROOT");
+
+    index_segment = new SegmentPartitioner(segment_store);
+    data_segment = new SegmentPartitioner(segment_store);
-    FileOutputStream os(dump);
-    SegmentWriter sw(os, id);
-    info_dump = sw.new_object();
+    string uuid = SegmentWriter::format_uuid(sw->get_uuid());
+    printf("Backup UUID: %s\n", uuid.c_str());
+
+    /* Text metadata is accumulated in memory during the scan and stored as a
+     * single "root" object at the end of the backup. */
+    std::ostringstream metadata;
    try {
-        scanfile(".");
+        scanfile(".", metadata);
    } catch (IOException e) {
        fprintf(stderr, "IOException: %s\n", e.getError().c_str());
    }
+    const string md = metadata.str();
+    string root = tss->write_object(md.data(), md.size(), "root");
+
+    fprintf(stderr, "Metadata root is at %s\n", root.c_str());
+
+    tss->sync();
+    delete tss;
+
+    delete index_segment;
+    delete data_segment;
+    delete sw;
+    // NOTE(review): block_buf and info_dump are never freed; likely fine for
+    // a run-to-exit tool, but confirm who owns the info_dump stream.
+
    return 0;
}