X-Git-Url: http://git.vrable.net/?a=blobdiff_plain;f=restore.pl;h=ad1f5949840fc1ef9caa20e7a4816d6b6fff1e36;hb=27cae99a5071a5c9b5a91831ef0b81aa3a3f17f9;hp=1322e1e006355bc2f90472382a4ebf934eb3145b;hpb=fd58e0c9a15cf28adaf545adc31b3d5531c9f899;p=cumulus.git

diff --git a/restore.pl b/restore.pl
index 1322e1e..ad1f594 100755
--- a/restore.pl
+++ b/restore.pl
@@ -12,17 +12,23 @@
 # in the snapshot have already been decompressed, and that objects are
 # available simply as files in the filesystem. This simplifies the design.
 #
+# Limitations: Since this code is probably using 32-bit arithmetic, files
+# larger than 2-4 GB may not be properly handled.
+#
 # Copyright (C) 2007 Michael Vrable
 
 use strict;
 use Digest::SHA1;
 use File::Basename;
 
-my $OBJECT_DIR = ".";           # Directory where objects are unpacked
+my $OBJECT_DIR;                 # Where are the unpacked objects available?
+my $DEST_DIR = ".";             # Where should restored files be placed?
 my $RECURSION_LIMIT = 3;        # Bound on recursive object references
 
+my $VERBOSE = 0;                # Set to 1 to enable debugging messages
+
 ############################ CHECKSUM VERIFICATION ############################
-# A very simple later for verifying checksums. Checksums may be used on object
+# A very simple layer for verifying checksums. Checksums may be used on object
 # references directly, and can also be used to verify entire reconstructed
 # files.
 #
@@ -65,7 +71,7 @@ sub verifier_check {
     my $digester = $verifier->{DIGESTER};
 
     my $newhash = $digester->hexdigest();
-    if ($verifier->{HASH} ne $newhash) {
+    if ($VERBOSE && $verifier->{HASH} ne $newhash) {
         print STDERR "Verification failure: ",
             $newhash, " != ", $verifier->{HASH}, "\n";
     }
@@ -134,6 +140,15 @@ sub load_ref {
 # iterate_objects is a helper function used to iterate over the set of object
 # references that contain the file data for a regular file.
 
+sub parse_int {
+    my $str = shift;
+    if ($str =~ /^0/) {
+        return oct($str);
+    } else {
+        return $str + 0;
+    }
+}
+
 sub uri_decode {
     my $str = shift;
     $str =~ s/%([0-9a-f]{2})/chr(hex($1))/ge;
@@ -160,7 +175,7 @@ sub iterate_objects {
         next if $obj eq "";
         if ($obj =~ /^@(\S+)$/) {
             my $indirect = load_ref($1);
-            iterate_objects($callback, $arg, $1, $recursion_level + 1);
+            iterate_objects($callback, $arg, $indirect, $recursion_level + 1);
         } else {
             &$callback($arg, $obj);
         }
@@ -168,26 +183,121 @@
 }
 
 sub obj_callback {
-    my $verifier = shift;
+    my $state = shift;
     my $obj = shift;
     my $data = load_ref($obj);
-    print "    ", $obj, " (size ", length($data), ")\n";
-    verifier_add_bytes($verifier, $data);
+    print FILE $data
+        or die "Error writing file data: $!";
+    verifier_add_bytes($state->{VERIFIER}, $data);
+    $state->{BYTES} += length($data);
+}
+
+# Extract the contents of a regular file by concatenating all the objects that
+# comprise it.
+sub unpack_file {
+    my $name = shift;
+    my %info = @_;
+    my %state = ();
+
+    if (!defined $info{data}) {
+        die "File contents not specified for $name";
+    }
+    if (!defined $info{checksum} || !defined $info{size}) {
+        die "File $name is missing checksum or size";
+    }
+
+    $info{size} = parse_int($info{size});
+
+    # Open the file to be recreated. The data will be written out by the call
+    # to iterate_objects.
+    open FILE, ">", "$DEST_DIR/$name"
+        or die "Cannot write file $name: $!";
+
+    # Set up state so that we can incrementally compute the checksum and length
+    # of the reconstructed data. Then iterate over all objects in the file.
+    $state{VERIFIER} = verifier_create($info{checksum});
+    $state{BYTES} = 0;
+    iterate_objects(\&obj_callback, \%state, $info{data});
+
+    close FILE;
+
+    # Verify that the reconstructed object matches the size/checksum we were
+    # given.
+    if (!verifier_check($state{VERIFIER}) || $state{BYTES} != $info{size}) {
+        die "File reconstruction failed for $name: size or checksum differs";
+    }
 }
 
 sub process_file {
     my %info = @_;
 
-    # TODO
-    print "process_file: ", uri_decode($info{name}), "\n";
+    if (!defined($info{name})) {
+        die "Filename not specified in metadata block";
+    }
 
-    if (defined $info{data}) {
-        my $verifier = verifier_create($info{checksum});
+    my $type = $info{type};
+
+    my $filename = uri_decode($info{name});
+    print "$filename\n" if $VERBOSE;
+
+    # Restore the specified file. How to do so depends upon the file type, so
+    # dispatch based on that.
+    my $dest = "$DEST_DIR/$filename";
+    if ($type eq '-') {
+        # Regular file
+        unpack_file($filename, %info);
+    } elsif ($type eq 'd') {
+        # Directory
+        if ($filename ne '.') {
+            mkdir $dest or die "Cannot create directory $filename: $!";
+        }
+    } elsif ($type eq 'l') {
+        # Symlink
+        if (!defined($info{contents})) {
+            die "Symlink $filename has no value specified";
+        }
+        my $contents = uri_decode($info{contents});
+        symlink $contents, $dest
+            or die "Cannot create symlink $filename: $!";
+
+        # TODO: We can't properly restore all metadata for symbolic links
+        # (attempts to do so below will change metadata for the pointed-to
+        # file). This should be later fixed, but for now we simply return
+        # before getting to the restore metadata step below.
+        return;
+    } elsif ($type eq 'p' || $type eq 's' || $type eq 'c' || $type eq 'b') {
+        # Pipe, socket, character device, block device.
+        # TODO: Handle these cases.
+        print STDERR "Ignoring special file $filename of type $type\n";
+        return;
+    } else {
+        die "Unknown file type '$type' for file $filename";
+    }
 
-        iterate_objects(\&obj_callback, $verifier, $info{data});
+    # Restore mode, ownership, and any other metadata for the file. This is
+    # split out from the code above since the code is the same regardless of
+    # file type.
+    my $mtime = $info{mtime} || time();
+    utime time(), $mtime, $dest
+        or warn "Unable to update mtime for $dest";
+
+    my $uid = -1;
+    my $gid = -1;
+    if (defined $info{user}) {
+        my @items = split /\s/, $info{user};
+        $uid = parse_int($items[0]) if exists $items[0];
+    }
+    if (defined $info{group}) {
+        my @items = split /\s/, $info{group};
+        $gid = parse_int($items[0]) if exists $items[0];
+    }
+    chown $uid, $gid, $dest
+        or warn "Unable to change ownership for $dest";
 
-        print "    checksum: ", (verifier_check($verifier) ? "pass" : "fail"),
-            " ", $info{checksum}, "\n";
+    if (defined $info{mode}) {
+        my $mode = parse_int($info{mode});
+        chmod $mode, $dest
+            or warn "Unable to change permissions for $dest";
     }
 }
 
@@ -216,26 +326,33 @@ sub process_metadata {
     # metadata objects.
     my %info = ();
     my $line;
+    my $last_key;
     foreach $line (split /\n/, $metadata) {
         # If we find a blank line or a reference to another block, process any
        # data for the previous file first.
         if ($line eq '' || $line =~ m/^@/) {
             process_file(%info) if %info;
             %info = ();
+            undef $last_key;
             next if $line eq '';
         }
 
         # Recursively handle indirect metadata blocks.
         if ($line =~ m/^@(\S+)$/) {
-            print "Indirect: $1\n";
+            print "Indirect: $1\n" if $VERBOSE;
             my $indirect = load_ref($1);
             process_metadata($indirect, $recursion_level + 1);
             next;
         }
 
-        # Try to parse the data as "key: value" pairs of file metadata.
-        if ($line =~ m/^(\w+):\s+(.*)\s*$/) {
+        # Try to parse the data as "key: value" pairs of file metadata. Also
+        # handle continuation lines, which start with whitespace and continue
+        # the previous "key: value" pair.
+        if ($line =~ m/^(\w+):\s*(.*)$/) {
             $info{$1} = $2;
+            $last_key = $1;
+        } elsif ($line =~ /^\s/ && defined $last_key) {
+            $info{$last_key} .= $line;
         } else {
             print STDERR "Junk in file metadata section: $line\n";
         }
@@ -251,25 +368,62 @@ sub process_metadata {
 # the root object in the snapshot, from which we can reach all other data we
 # need.
 
+# Parse command-line arguments. The first (required) is the name of the
+# snapshot descriptor file. The backup objects are assumed to be stored in the
+# same directory as the descriptor. The second (optional) argument is the
+# directory where the restored files should be written; it defaults to ".".
 my $descriptor = $ARGV[0];
 unless (defined($descriptor) && -r $descriptor) {
     print STDERR "Usage: $0 <snapshot-descriptor>\n";
     exit 1;
 }
 
+if (defined($ARGV[1])) {
+    $DEST_DIR = $ARGV[1];
+}
+
 $OBJECT_DIR = dirname($descriptor);
-print "Source directory: $OBJECT_DIR\n";
+print "Source directory: $OBJECT_DIR\n" if $VERBOSE;
 
+# Read the snapshot descriptor to find the root object. Parse it to get a set
+# of key/value pairs.
 open DESCRIPTOR, "<", $descriptor
     or die "Cannot open backup descriptor file $descriptor: $!";
-my $line = <DESCRIPTOR>;
-if ($line !~ m/^root: (\S+)$/) {
-    die "Expected 'root:' specification in backup descriptor file";
+my %descriptor = ();
+my ($line, $last_key);
+while (defined($line = <DESCRIPTOR>)) {
+    # Any lines of the form "key: value" should be inserted into the
+    # %descriptor dictionary. Any continuation line (a line starting with
+    # whitespace) will append text to the previous key's value. Ignore other
+    # lines.
+    chomp $line;
+
+    if ($line =~ m/^(\w+):\s*(.*)$/) {
+        $descriptor{$1} = $2;
+        $last_key = $1;
+    } elsif ($line =~ /^\s/ && defined $last_key) {
+        $descriptor{$last_key} .= $line;
+    } else {
+        undef $last_key;
+        print STDERR "Ignoring line in backup descriptor: $line\n";
+    }
+}
+
+# A valid backup descriptor should at the very least specify the root metadata
+# object.
+if (!exists $descriptor{Root}) {
+    die "Expected 'Root:' specification in backup descriptor file";
 }
-my $root = $1;
+my $root = $descriptor{Root};
 close DESCRIPTOR;
 
-print "Root object: $root\n";
+# Set the umask to something restrictive. As we unpack files, we'll initially
+# write the files/directories without setting the permissions, so be
+# conservative and ensure that they can't be read. Afterwards, we'll properly
+# fix up permissions.
+umask 077;
 
+# Start processing metadata stored in the root to recreate the files.
+print "Root object: $root\n" if $VERBOSE;
 my $contents = load_ref($root);
 process_metadata($contents);
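
Note (not part of the patch above): both the new descriptor-reading loop and the metadata parser in this commit use the same "Key: value" convention with whitespace-indented continuation lines. The standalone Perl sketch below illustrates only that parsing pattern; the sample descriptor text and key values are invented for illustration and do not come from a real cumulus snapshot.

#!/usr/bin/perl -w
#
# Illustrative sketch of the "key: value" / continuation-line parsing used in
# the patch above, applied to a made-up descriptor string.

use strict;

my $sample = <<'END';
Format: demo-descriptor
Root: segment0/object0
Segments: segment0
 segment1
END

my (%descriptor, $last_key);
foreach my $line (split /\n/, $sample) {
    if ($line =~ m/^(\w+):\s*(.*)$/) {
        # Start of a new "key: value" pair.
        $descriptor{$1} = $2;
        $last_key = $1;
    } elsif ($line =~ /^\s/ && defined $last_key) {
        # Continuation line: append to the most recent key's value.
        $descriptor{$last_key} .= $line;
    } else {
        undef $last_key;
    }
}

print "$_ => $descriptor{$_}\n" foreach sort keys %descriptor;

Run against the sample input, this prints Format, Root, and Segments entries, with the continuation line " segment1" folded into the Segments value; restore.pl applies the same pattern first to the snapshot descriptor and then to each metadata block.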