+ memset(&t->th_buf, 0, sizeof(struct tar_header));
+
+ th_set_type(t, S_IFREG | 0600);
+ th_set_user(t, 0);
+ th_set_group(t, 0);
+ th_set_mode(t, 0600);
+ th_set_size(t, len);
+ th_set_mtime(t, time(NULL));
+ th_set_path(t, const_cast<char *>(path.c_str()));
+ th_finish(t);
+
+ if (th_write(t) != 0)
+ throw IOException("Error writing tar header");
+
+ size += T_BLOCKSIZE;
+
+ if (len == 0)
+ return;
+
+ size_t blocks = (len + T_BLOCKSIZE - 1) / T_BLOCKSIZE;
+ size_t padding = blocks * T_BLOCKSIZE - len;
+
+ for (size_t i = 0; i < blocks - 1; i++) {
+ if (tar_block_write(t, &data[i * T_BLOCKSIZE]) == -1)
+ throw IOException("Error writing tar block");
+ }
+
+ char block[T_BLOCKSIZE];
+ memset(block, 0, sizeof(block));
+ memcpy(block, &data[T_BLOCKSIZE * (blocks - 1)], T_BLOCKSIZE - padding);
+ if (tar_block_write(t, block) == -1)
+ throw IOException("Error writing final tar block");
+
+ size += blocks * T_BLOCKSIZE;
+}
+
+/* Estimate the size based on the size of the actual output file on disk.
+ * However, it might be the case that the filter program is buffering all its
+ * data, and might potentially not write a single byte until we have closed
+ * our end of the pipe. If we were to wait for data to appear before closing
+ * our end of the pipe, we could deadlock. So, arbitrarily pick an upper
+ * bound on the compression ratio
+ * that the filter will achieve (128:1), and return a size estimate which is
+ * the larger of a) bytes actually seen written to disk, and b) input
+ * bytes/128. */
+size_t Tarfile::size_estimate()
+{
+ struct stat statbuf;
+
+ if (fstat(real_fd, &statbuf) == 0)
+ return max((int64_t)statbuf.st_size, (int64_t)(size / 128));
+
+ /* Couldn't stat the file on disk, so just return the actual number of
+ * bytes, before compression. */
+ return size;