/* Cumulus: Efficient Filesystem Backup to the Cloud
 * Copyright (C) 2008-2009 The Cumulus Developers
 * See the AUTHORS file for a list of contributors.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/* Backup data is stored in a collection of objects, which are grouped together
 * into segments for storage purposes.  This implementation of the object store
 * represents segments as TAR files and objects as files within them. */
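/* For example, a segment whose name is the UUID <uuid> becomes the file
 * "<uuid>.tar" plus the filter extension (e.g. "<uuid>.tar.bz2"), and the
 * objects inside it appear as TAR members "<uuid>/00000000", "<uuid>/00000001",
 * and so on; see Tarfile::write_object and TarSegmentStore::write_object
 * below. */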
#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/wait.h>

#include <algorithm>
#include <map>
#include <string>
#include <utility>

/* Cumulus headers; Tarfile, TarSegmentStore, LbsObject, RemoteFile,
 * ObjectReference, Hash, LocalDb, fatal(), and related helpers are declared
 * across these (exact placement follows the project tree). */
#include "hash.h"
#include "localdb.h"
#include "ref.h"
#include "remote.h"
#include "store.h"
#include "util.h"

using std::map;
using std::max;
using std::pair;
using std::string;

/* Default filter program is bzip2 */
const char *filter_program = "bzip2 -c";
const char *filter_extension = ".bz2";
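/* The filter command is run via "/bin/sh -c" (see spawn_filter below) and the
 * extension is appended to every segment's file name.  A gzip-based setup,
 * for example, would use "gzip -c" and ".gz" instead (illustrative
 * alternative; bzip2 is what is configured here). */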
Tarfile::Tarfile(RemoteFile *file, const string &segment)
    : size(0),
      segment_name(segment)
{
    assert(sizeof(struct tar_header) == TAR_BLOCK_SIZE);

    real_fd = file->get_fd();
    filter_fd = spawn_filter(real_fd, filter_program, &filter_pid);
}
Tarfile::~Tarfile()
{
    char buf[TAR_BLOCK_SIZE];

    /* Append the EOF marker: two blocks filled with nulls. */
    memset(buf, 0, sizeof(buf));
    tar_write(buf, TAR_BLOCK_SIZE);
    tar_write(buf, TAR_BLOCK_SIZE);

    if (close(filter_fd) != 0)
        fatal("Error closing Tarfile");

    /* ...and wait for the filter process to finish. */
    int status;
    waitpid(filter_pid, &status, 0);

    if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
        fatal("Filter process error");
    }
}
/* Launch a child process which can act as a filter (compress, encrypt, etc.)
 * on the TAR output.  The file descriptor to which output should be written
 * must be specified; the return value is the file descriptor which will be
 * attached to the standard input of the filter program. */
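/* With the defaults above, the data path for each segment is:
 *     Tarfile::tar_write()  -->  pipe  -->  "bzip2 -c" (child)  -->  fd_out
 * where fd_out is the descriptor of the segment file obtained from the
 * RemoteFile object. */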
int spawn_filter(int fd_out, const char *program, pid_t *filter_pid)
{
    int fds[2];
    pid_t pid;

    /* Create a pipe for communicating with the filter process. */
    if (pipe(fds) < 0)
        fatal("Unable to create pipe for filter");

    /* Create a child process which can exec() the filter program. */
    pid = fork();
    if (pid < 0)
        fatal("Unable to fork filter process");

    if (pid > 0) {
        /* Parent process: keep the write end of the pipe. */
        close(fds[0]);
        if (filter_pid != NULL)
            *filter_pid = pid;
        return fds[1];
    }

    /* Child process.  Rearrange file descriptors.  stdin is fds[0], stdout
     * is fd_out, stderr is unchanged. */
    close(fds[1]);
    if (dup2(fds[0], 0) < 0)
        exit(1);
    if (dup2(fd_out, 1) < 0)
        exit(1);

    /* Exec the filter program. */
    execlp("/bin/sh", "/bin/sh", "-c", program, NULL);

    /* Should not reach here except for error cases. */
    fprintf(stderr, "Could not exec filter: %m\n");
    exit(1);
}
void Tarfile::tar_write(const char *data, size_t len)
{
    /* Track the total number of uncompressed bytes fed to the filter. */
    size += len;

    while (len > 0) {
        int res = write(filter_fd, data, len);

        if (res < 0) {
            if (errno == EINTR)
                continue;
            fprintf(stderr, "Write error: %m\n");
            fatal("Write error");
        }

        len -= res;
        data += res;
    }
}
void Tarfile::write_object(int id, const char *data, size_t len)
{
    struct tar_header header;
    memset(&header, 0, sizeof(header));

    char buf[64];
    sprintf(buf, "%08x", id);
    string path = segment_name + "/" + buf;

    assert(path.size() < 100);
    memcpy(header.name, path.data(), path.size());
    sprintf(header.mode, "%07o", 0600);
    sprintf(header.uid, "%07o", 0);
    sprintf(header.gid, "%07o", 0);
    sprintf(header.size, "%011o", (int)len);
    sprintf(header.mtime, "%011o", (int)time(NULL));
    header.typeflag = '0';
    strcpy(header.magic, "ustar ");
    strcpy(header.uname, "root");
    strcpy(header.gname, "root");

    /* The header checksum is computed with the checksum field itself filled
     * with spaces. */
    memset(header.chksum, ' ', sizeof(header.chksum));
    int checksum = 0;
    for (int i = 0; i < TAR_BLOCK_SIZE; i++) {
        checksum += ((uint8_t *)&header)[i];
    }
    sprintf(header.chksum, "%06o", checksum);

    tar_write((const char *)&header, TAR_BLOCK_SIZE);

    if (len == 0)
        return;

    /* Write the object data, then pad with zeroes to a block boundary. */
    tar_write(data, len);

    char padbuf[TAR_BLOCK_SIZE];
    size_t blocks = (len + TAR_BLOCK_SIZE - 1) / TAR_BLOCK_SIZE;
    size_t padding = blocks * TAR_BLOCK_SIZE - len;
    memset(padbuf, 0, padding);
    tar_write(padbuf, padding);
}
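/* Worked example of the padding arithmetic above: a 600-byte object occupies
 * one 512-byte header block plus two 512-byte data blocks, i.e. 600 bytes of
 * data followed by 1024 - 600 = 424 bytes of zero padding. */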
/* Estimate the size based on the size of the actual output file on disk.
 * However, it might be the case that the filter program is buffering all its
 * data, and might potentially not write a single byte until we have closed
 * our end of the pipe.  If we don't do so until we see data written, we have
 * a problem.  So, arbitrarily pick an upper bound on the compression ratio
 * that the filter will achieve (128:1), and return a size estimate which is
 * the larger of a) bytes actually seen written to disk, and b) input
 * bytes/128. */
size_t Tarfile::size_estimate()
{
    struct stat statbuf;

    if (fstat(real_fd, &statbuf) == 0)
        return max((int64_t)statbuf.st_size, (int64_t)(size / 128));

    /* Couldn't stat the file on disk, so just return the actual number of
     * bytes, before compression. */
    return size;
}
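/* Together with SEGMENT_SIZE below, the 128:1 bound means a segment is forced
 * closed after at most 128 * 4 MiB = 512 MiB of uncompressed input, even if
 * the filter process has not yet flushed a single byte to disk. */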
static const size_t SEGMENT_SIZE = 4 * 1024 * 1024;

/* Backup size summary: segment type -> (uncompressed size, compressed size) */
static map<string, pair<int64_t, int64_t> > group_sizes;
ObjectReference TarSegmentStore::write_object(const char *data, size_t len,
                                              const std::string &group,
                                              const std::string &checksum,
                                              double age)
{
    struct segment_info *segment;

    // Find the segment into which the object should be written, looking up by
    // group.  If no segment exists yet, create one.
    if (segments.find(group) == segments.end()) {
        segment = new segment_info;

        segment->name = generate_uuid();
        segment->group = group;
        segment->basename = segment->name + ".tar";
        segment->basename += filter_extension;
        segment->count = 0;
        segment->data_size = 0;
        // Metadata segments go under "segments0"; the directory name used for
        // all other groups is assumed to be "segments1".
        segment->rf = remote->alloc_file(segment->basename,
                                         group == "metadata" ? "segments0"
                                                             : "segments1");
        segment->file = new Tarfile(segment->rf, segment->name);

        segments[group] = segment;
    } else {
        segment = segments[group];
    }

    int id = segment->count;
    char id_buf[64];
    sprintf(id_buf, "%08x", id);

    segment->file->write_object(id, data, len);
    segment->count++;
    segment->data_size += len;

    group_sizes[group].first += len;

    ObjectReference ref(segment->name, id_buf);
    ref.set_range(0, len, true);
    if (checksum.size() > 0)
        ref.set_checksum(checksum);
    db->StoreObject(ref, age);

    // If this segment meets or exceeds the size target, close it so that
    // future objects will go into a new segment.
    if (segment->file->size_estimate() >= SEGMENT_SIZE)
        close_segment(group);

    return ref;
}
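/* The usual caller is LbsObject::write() at the end of this file.  A direct
 * call looks roughly like the following (buffer and store names illustrative):
 *
 *     ObjectReference ref = store.write_object(buf, buflen, "data", "", 0.0);
 *
 * Objects sharing a group accumulate in one segment until its size estimate
 * reaches SEGMENT_SIZE, at which point the segment is closed and a fresh one
 * is started for the next object. */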
void TarSegmentStore::sync()
{
    while (!segments.empty())
        close_segment(segments.begin()->first);
}
void TarSegmentStore::dump_stats()
{
    printf("Data written:\n");
    for (map<string, pair<int64_t, int64_t> >::iterator i = group_sizes.begin();
         i != group_sizes.end(); ++i) {
        printf("    %s: %lld (%lld compressed)\n", i->first.c_str(),
               (long long)i->second.first, (long long)i->second.second);
    }
}
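/* The report printed above looks like (numbers purely illustrative):
 *
 *     Data written:
 *         data: 104857600 (23068672 compressed)
 *         metadata: 1048576 (262144 compressed)
 */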
void TarSegmentStore::close_segment(const string &group)
{
    struct segment_info *segment = segments[group];

    /* Deleting the Tarfile flushes the filter and writes the end-of-archive marker. */
    delete segment->file;

    int64_t disk_size = 0;
    struct stat stat_buf;
    if (stat(segment->rf->get_local_path().c_str(), &stat_buf) == 0) {
        disk_size = stat_buf.st_size;
        group_sizes[segment->group].second += disk_size;
    }

    string checksum;
    SHA1Checksum segment_checksum;
    if (segment_checksum.process_file(segment->rf->get_local_path().c_str())) {
        checksum = segment_checksum.checksum_str();
    }

    db->SetSegmentMetadata(segment->name, segment->basename, checksum,
                           group, segment->data_size, disk_size);

    segments.erase(segments.find(group));
    delete segment;
}
string TarSegmentStore::object_reference_to_segment(const string &object)
{
    return object;
}
LbsObject::LbsObject()
    : group(""), age(0.0), data(NULL), data_len(0), written(false)
{
}

LbsObject::~LbsObject()
{
}
void LbsObject::set_data(const char *d, size_t len, const char *checksum)
{
    data = d;
    data_len = len;

    if (checksum != NULL) {
        this->checksum = checksum;
    } else {
        /* No precomputed checksum was supplied, so compute one now. */
        Hash *hash = Hash::New();
        hash->update(data, data_len);
        this->checksum = hash->digest_str();
        delete hash;
    }
}
void LbsObject::write(TarSegmentStore *store)
{
    assert(data != NULL);
    assert(!written);

    ref = store->write_object(data, data_len, group, checksum, age);
    written = true;
}
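/* Minimal sketch of the object-level interface above (buffer and store names
 * are illustrative):
 *
 *     LbsObject o;
 *     o.set_data(buf, buflen, NULL);   // checksum computed via Hash::New()
 *     o.write(&segment_store);         // stores the data, records a reference
 */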