# Proof-of-concept/reference decoder for LBS-format backup snapshots.
#
# This decoder aims to decompress an LBS snapshot. It is not meant to be
# particularly efficient, but should be a small and portable tool for doing so
# (important for recovering from data loss). It is also meant to serve as a
# check on the snapshot tool and data format itself, and serve as documentation
# for the format.
#
# This decoder does not understand TAR archives; it assumes that all segments
# in the snapshot have already been decompressed, and that objects are
# available simply as files in the filesystem. This simplifies the design.
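#
# (To produce that layout, the segment archives must first be unpacked, for
# example with tar and gunzip; the exact steps depend on how the segments were
# compressed and stored.)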
#
# Limitations: Since this code is probably using 32-bit arithmetic, files
# larger than 2-4 GB may not be properly handled.
#
# Copyright (C) 2007 Michael Vrable

use strict;
use Digest::SHA1;
use File::Basename;

my $OBJECT_DIR;                 # Where are the unpacked objects available?
my $DEST_DIR = ".";             # Where should restored files be placed?
my $RECURSION_LIMIT = 3;        # Bound on recursive object references
my $VERBOSE = 0;                # Set to 1 to enable debugging messages
############################ CHECKSUM VERIFICATION ############################
# A very simple layer for verifying checksums. Checksums may be used on object
# references directly, and can also be used to verify entire reconstructed
# files.
#
# A checksum to verify is given in the form "algorithm=hexdigest". Given such
# a string, we can construct a "verifier" object. Bytes can be incrementally
# added to the verifier, and at the end a test can be made to see if the
# checksum matches. The caller need not know what algorithm is used. However,
# at the moment we only support SHA-1 for computing digests (algorithm name
# "sha1").
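#
# For example (digest value invented for illustration), a checksum string
# looks like:
#   sha1=2fd4e1c67a2d28fced849ee1bb76e7391b93eb12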
sub verifier_create {
    my $checksum = shift;

    if ($checksum !~ m/^(\w+)=([0-9a-f]+)$/) {
        die "Malformed checksum: $checksum";
    }
    my ($algorithm, $hash) = ($1, $2);
    if ($algorithm ne 'sha1') {
        die "Unsupported checksum algorithm: $algorithm";
    }

    my %verifier = (
        ALGORITHM => $algorithm,
        HASH => $hash,
        DIGESTER => new Digest::SHA1
    );
    return \%verifier;
}

sub verifier_add_bytes {
    my $verifier = shift;
    my $digester = $verifier->{DIGESTER};
    my $data = shift;

    $digester->add($data);
}

sub verifier_check {
    my $verifier = shift;
    my $digester = $verifier->{DIGESTER};

    my $newhash = $digester->hexdigest();
    if ($VERBOSE && $verifier->{HASH} ne $newhash) {
        print STDERR "Verification failure: ",
            $newhash, " != ", $verifier->{HASH}, "\n";
    }
    return ($verifier->{HASH} eq $newhash);
}
################################ OBJECT ACCESS ################################
# The base of the decompressor is the object reference layer. See ref.h for a
# description of the format for object references. These functions will parse
# an object reference, locate the object data from the filesystem, perform any
# necessary integrity checks (if a checksum is included), and return the object
# data.
sub load_ref {
    my $ref_str = shift;

    # Check for special objects before attempting general parsing.
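    # A reference of the form "zero[start+length]" stands for a run of zero
    # bytes: nothing is read from disk, and only the length component matters.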
    if ($ref_str =~ m/^zero\[(\d+)\+(\d+)\]$/) {
        return "\0" x ($2 + 0);
    }

    # Try to parse the object reference string into constituent pieces. The
    # format is segment/object(checksum)[range]. Both the checksum and range
    # are optional.
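    # An illustrative (made-up) reference with both optional parts present:
    #   00112233-4455-6677-8899-aabbccddeeff/000000f2(sha1=<hexdigest>)[0+4096]
    # names bytes 0..4095 of object 000000f2 in that segment, to be checked
    # against the given SHA-1 digest.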
    if ($ref_str !~ m/^([-0-9a-f]+)\/([0-9a-f]+)(\(\S+\))?(\[\S+\])?$/) {
        die "Malformed object reference: $ref_str";
    }

    my ($segment, $object, $checksum, $range) = ($1, $2, $3, $4);

    # Next, use the segment/object components to locate and read the object
    # contents from disk.
    open OBJECT, "<", "$OBJECT_DIR/$segment/$object"
        or die "Unable to open object $OBJECT_DIR/$segment/$object: $!";
    my $contents = join '', <OBJECT>;
    close OBJECT;

    # If a checksum was specified in the object reference, verify the object
    # integrity by computing a checksum of the read data and comparing.
    if (defined $checksum) {
        $checksum =~ m/^\((\S+)\)$/;
        my $verifier = verifier_create($1);
        verifier_add_bytes($verifier, $contents);
        if (!verifier_check($verifier)) {
            die "Integrity check for object $ref_str failed";
        }
    }
    # If a range was specified, then only a subset of the bytes of the object
    # are desired. Extract just the desired bytes.
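    # The range has the form "[start+length]"; for example (values made up),
    # "[0+4096]" selects 4096 bytes starting at offset 0.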
    if (defined $range) {
        if ($range !~ m/^\[(\d+)\+(\d+)\]$/) {
            die "Malformed object range: $range";
        }

        my $object_size = length $contents;
        my ($start, $length) = ($1 + 0, $2 + 0);
        if ($start >= $object_size || $start + $length > $object_size) {
            die "Object range $range falls outside object bounds "
                . "(actual size $object_size)";
        }

        $contents = substr $contents, $start, $length;
    }

    return $contents;
}
############################### FILE PROCESSING ###############################
# Process the metadata for a single file. process_file is the main entry
# point; it should be given a list of file metadata key/value pairs.
# iterate_objects is a helper function used to iterate over the set of object
# references that contain the file data for a regular file.
# Convert a numeric string to an integer. A string with a leading "0" is
# treated as octal; anything else is treated as decimal.
sub parse_int {
    my $str = shift;
    if ($str =~ /^0/) {
        return oct($str);
    } else {
        return $str + 0;
    }
}

# Decode a percent-encoded string (used for filenames and symlink targets).
sub uri_decode {
    my $str = shift;
    $str =~ s/%([0-9a-f]{2})/chr(hex($1))/ge;
    return $str;
}
sub iterate_objects {
    my $callback = shift;       # Function to be called for each reference
    my $arg = shift;            # Argument passed to callback
    my $text = shift;           # Whitespace-separated list of object references

    # Simple limit to guard against cycles in the object references
    my $recursion_level = shift || 0;
    if ($recursion_level >= $RECURSION_LIMIT) {
        die "Recursion limit reached";
    }

    # Split the provided text at whitespace boundaries to produce the list of
    # object references. If any of these start with "@", then we have an
    # indirect reference, and must look up that object and call iterate_objects
    # with the contents.
    my $obj;
    foreach $obj (split /\s+/, $text) {
        next if $obj eq '';     # Skip empty tokens from leading whitespace
        if ($obj =~ /^@(\S+)$/) {
            my $indirect = load_ref($1);
            iterate_objects($callback, $arg, $indirect, $recursion_level + 1);
        } else {
            &$callback($arg, $obj);
        }
    }
}
sub obj_callback {
    my $state = shift;
    my $obj = shift;

    my $data = load_ref($obj);
    print FILE $data
        or die "Error writing file data: $!";
    verifier_add_bytes($state->{VERIFIER}, $data);
    $state->{BYTES} += length($data);
}

# Extract the contents of a regular file by concatenating all the objects that
# comprise it.
sub unpack_file {
    my $name = shift;
    my %info = @_;
    my %state = ();

    if (!defined $info{data}) {
        die "File contents not specified for $name";
    }
    if (!defined $info{checksum} || !defined $info{size}) {
        die "File $name is missing checksum or size";
    }

    $info{size} = parse_int($info{size});

    # Open the file to be recreated. The data will be written out by the call
    # to iterate_objects.
    open FILE, ">", "$DEST_DIR/$name"
        or die "Cannot write file $name: $!";

    # Set up state so that we can incrementally compute the checksum and length
    # of the reconstructed data. Then iterate over all objects in the file.
    $state{VERIFIER} = verifier_create($info{checksum});
    $state{BYTES} = 0;
    iterate_objects(\&obj_callback, \%state, $info{data});

    close FILE;

    # Verify that the reconstructed object matches the size/checksum we were
    # given.
    if (!verifier_check($state{VERIFIER}) || $state{BYTES} != $info{size}) {
        die "File reconstruction failed for $name: size or checksum differs";
    }
}
sub process_file {
    my %info = @_;

    if (!defined($info{name})) {
        die "Filename not specified in metadata block";
    }

    my $type = $info{type};

    my $filename = uri_decode($info{name});
    print "$filename\n" if $VERBOSE;

    # Restore the specified file. How to do so depends upon the file type, so
    # dispatch based on that.
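    # The single-character type codes used below are: '-' or 'f' for a regular
    # file, 'd' directory, 'l' symlink, 'p' pipe, 's' socket, 'c' character
    # device, and 'b' block device.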
    my $dest = "$DEST_DIR/$filename";
    if ($type eq '-' || $type eq 'f') {
        # Regular file
        unpack_file($filename, %info);
    } elsif ($type eq 'd') {
        # Directory
        if ($filename ne '.') {
            mkdir $dest or die "Cannot create directory $filename: $!";
        }
    } elsif ($type eq 'l') {
        # Symbolic link
        my $target = $info{target} || $info{contents};
        if (!defined($target)) {
            die "Symlink $filename has no value specified";
        }
        $target = uri_decode($target);
        symlink $target, $dest
            or die "Cannot create symlink $filename: $!";

        # TODO: We can't properly restore all metadata for symbolic links
        # (attempts to do so below will change metadata for the pointed-to
        # file). This should be later fixed, but for now we simply return
        # before getting to the restore metadata step below.
        return;
    } elsif ($type eq 'p' || $type eq 's' || $type eq 'c' || $type eq 'b') {
        # Pipe, socket, character device, block device.
        # TODO: Handle these cases.
        print STDERR "Ignoring special file $filename of type $type\n";
        return;
    } else {
        die "Unknown file type '$type' for file $filename";
    }
    # Restore mode, ownership, and any other metadata for the file. This is
    # split out from the code above since the code is the same regardless of
    # file type.
    my $mtime = $info{mtime} || time();
    utime time(), $mtime, $dest
        or warn "Unable to update mtime for $dest";

    my $uid = -1;
    my $gid = -1;
    if (defined $info{user}) {
        my @items = split /\s/, $info{user};
        $uid = parse_int($items[0]) if exists $items[0];
    }
    if (defined $info{group}) {
        my @items = split /\s/, $info{group};
        $gid = parse_int($items[0]) if exists $items[0];
    }
    chown $uid, $gid, $dest
        or warn "Unable to change ownership for $dest";

    if (defined $info{mode}) {
        my $mode = parse_int($info{mode});
        chmod $mode, $dest
            or warn "Unable to change permissions for $dest";
    }
}
########################### METADATA LIST PROCESSING ##########################
# Process the file metadata listing provided, and as information for each file
# is extracted, pass it to process_file. This will recursively follow indirect
# references to other metadata objects.
sub process_metadata {
    my ($metadata, $recursion_level) = @_;

    # Check recursion; this will prevent us from infinitely recursing on an
    # indirect reference which loops back to itself.
    $recursion_level ||= 0;
    if ($recursion_level >= $RECURSION_LIMIT) {
        die "Recursion limit reached";
    }
    # Split the metadata into lines, then start processing each line. There
    # are two primary cases:
    #   - Lines starting with "@" are indirect references to other metadata
    #     objects. Recursively process that object before continuing.
    #   - Other lines should come in groups separated by a blank line; these
    #     contain metadata for a single file that should be passed to
    #     process_file.
    # Note that blocks of metadata about a file cannot span a boundary between
    # metadata objects.
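    # An illustrative (made-up) metadata block for one file might look like:
    #   name: home/user/notes.txt
    #   type: -
    #   mode: 0644
    #   size: 1234
    #   checksum: sha1=<hexdigest>
    #   data: <object references>
    # The key names match those consulted by process_file and unpack_file; the
    # values here are invented for illustration.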
    my %info = ();
    my $line;
    my $last_key;
    foreach $line (split /\n/, $metadata) {
        # If we find a blank line or a reference to another block, process any
        # data for the previous file first.
        if ($line eq '' || $line =~ m/^@/) {
            process_file(%info) if %info;
            %info = ();
            undef $last_key;
            next if $line eq '';
        }

        # Recursively handle indirect metadata blocks.
        if ($line =~ m/^@(\S+)$/) {
            print "Indirect: $1\n" if $VERBOSE;
            my $indirect = load_ref($1);
            process_metadata($indirect, $recursion_level + 1);
            next;
        }

        # Try to parse the data as "key: value" pairs of file metadata. Also
        # handle continuation lines, which start with whitespace and continue
        # the previous "key: value" pair.
        if ($line =~ m/^(\w+):\s*(.*)$/) {
            $info{$1} = $2;
            $last_key = $1;
        } elsif ($line =~ /^\s/ && defined $last_key) {
            $info{$last_key} .= $line;
        } else {
            print STDERR "Junk in file metadata section: $line\n";
        }
    }

    # Process any last file metadata which has not already been processed.
    process_file(%info) if %info;
}
############################### MAIN ENTRY POINT ##############################
# Program start. We expect to be called with a single argument, which is the
# name of the backup descriptor file written by a backup pass. This will name
# the root object in the snapshot, from which we can reach all other data we
# need.

# Parse command-line arguments. The first (required) is the name of the
# snapshot descriptor file. The backup objects are assumed to be stored in the
# same directory as the descriptor. The second (optional) argument is the
# directory where the restored files should be written; it defaults to ".".
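# For example (paths invented for illustration), restoring into /tmp/restore
# might look like:
#   perl <this script> /backups/snapshot-descriptor /tmp/restore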
my $descriptor = $ARGV[0];
unless (defined($descriptor) && -r $descriptor) {
    print STDERR "Usage: $0 <snapshot file>\n";
    exit 1;
}

if (defined($ARGV[1])) {
    $DEST_DIR = $ARGV[1];
}

$OBJECT_DIR = dirname($descriptor);
print "Source directory: $OBJECT_DIR\n" if $VERBOSE;
# Read the snapshot descriptor to find the root object. Parse it to get a set
# of key/value pairs.
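# The descriptor is a small text file of "key: value" lines; at a minimum it
# names the root metadata object, for example (illustrative values):
#   Root: 00112233-4455-6677-8899-aabbccddeeff/00000000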
open DESCRIPTOR, "<", $descriptor
    or die "Cannot open backup descriptor file $descriptor: $!";
my %descriptor = ();
my ($line, $last_key);
while (defined($line = <DESCRIPTOR>)) {
    # Any lines of the form "key: value" should be inserted into the
    # %descriptor dictionary. Any continuation line (a line starting with
    # whitespace) will append text to the previous key's value. Ignore other
    # lines.
    chomp $line;

    if ($line =~ m/^(\w+):\s*(.*)$/) {
        $descriptor{$1} = $2;
        $last_key = $1;
    } elsif ($line =~ /^\s/ && defined $last_key) {
        $descriptor{$last_key} .= $line;
    } else {
        undef $last_key;
        print STDERR "Ignoring line in backup descriptor: $line\n";
    }
}
close DESCRIPTOR;
# A valid backup descriptor should at the very least specify the root metadata
# object.
if (!exists $descriptor{Root}) {
    die "Expected 'Root:' specification in backup descriptor file";
}
my $root = $descriptor{Root};
# Set the umask to something restrictive. As we unpack files, we'll originally
# write the files/directories without setting the permissions, so be
# conservative and ensure that they can't be read. Afterwards, we'll properly
# fix up permissions.
umask 0077;

# Start processing metadata stored in the root to recreate the files.
print "Root object: $root\n" if $VERBOSE;
my $contents = load_ref($root);
process_metadata($contents);