--- /dev/null
+#!/usr/bin/perl -w\r
+#\r
+# Copyright (c) 2002-2003\r
+# The President and Fellows of Harvard College.\r
+#\r
+# Redistribution and use in source and binary forms, with or without\r
+# modification, are permitted provided that the following conditions\r
+# are met:\r
+# 1. Redistributions of source code must retain the above copyright\r
+# notice, this list of conditions and the following disclaimer.\r
+# 2. Redistributions in binary form must reproduce the above copyright\r
+# notice, this list of conditions and the following disclaimer in the\r
+# documentation and/or other materials provided with the distribution.\r
+# 3. Neither the name of the University nor the names of its contributors\r
+# may be used to endorse or promote products derived from this software\r
+# without specific prior written permission.\r
+#\r
+# THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND\r
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\r
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\r
+# ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE\r
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\r
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\r
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\r
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\r
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\r
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\r
+# SUCH DAMAGE.\r
+#\r
+# $Id: nfsscan,v 1.18 2003/07/28 14:27:16 ellard Exp $\r
+\r
# Locate the directory this script was invoked from, so the helper
# libraries that ship alongside it can be loaded via "require" below.
# NOTE: if $0 contains no slash the match fails, $1 stays undef, and we
# fall back to the current directory.
$ProgDir = $0;
$ProgDir =~ /(^.*)\//;
$ProgDir = $1;
if (!$ProgDir) {
	$ProgDir = ".";
}

# Project-local helper libraries (must live next to this script).
require "$ProgDir/nfsdump.pl";
require "$ProgDir/userUtils.pl";
require "$ProgDir/hier.pl";
require "$ProgDir/counts.pl";
require "$ProgDir/latency.pl";
require "$ProgDir/key.pl";
require "$ProgDir/common.pl";

use Getopt::Std;

# Default aggregation interval for cumulative statistics, overridable
# with -t.  A value of 0 means "process the whole trace as one interval".
$INTERVAL = 5 * 60;		# in seconds (5 minutes)
# Every distinct aggregation key (see key::makeKey) observed in the trace.
%KeysSeen = ();

# Include/exclude filters populated from -u/-U, -g/-G, -c/-C.
# Empty arrays mean "no filtering on that dimension".
@ADD_USERS = ();
@DEL_USERS = ();
@ADD_GROUPS = ();
@DEL_GROUPS = ();
@ADD_CLIENTS = ();
@DEL_CLIENTS = ();

# Which analyses to run; counts are always on, the rest are enabled by
# command-line flags in parseArgs().
$DO_COUNTS = 1;
$DO_LATENCY = 0;
$DO_FILES = 0;
$DO_PATHS = 0;
$DO_SQUEEZE = 0;

# Server file-handle layout (-F): e.g. 'advfs' or 'netapp'.
$FH_TYPE = 'unknown';

# Current interval boundaries and the timestamp of the line being processed.
$END_TIME = -1;
$START_TIME = -1;
$NOW = -1;

# Which fields participate in the aggregation key (-B [CFUG]).
$UseClient = 0;
$UseFH = 0;
$UseUID = 0;
$UseGID = 0;
$OMIT_ZEROS = 0;

$OutFileBaseName = undef;

# Pending-record garbage collection: stale XID records are purged every
# $PRUNE_INTERVAL seconds (see prunePending and friends).
$nextPruneTime = -1;
$PRUNE_INTERVAL = 1 * 60;	# One minute.

# &&&
# Is this really the right default set of operations?

# Default "interesting" NFS operations, overridable with -O.  TOTAL and
# INTERESTING are pseudo-ops tallied for every counted line.
$DEF_OPLIST = 'read,write,lookup,getattr,access,create,remove';
@OPLIST = ('TOTAL', 'INTERESTING', 
	   split (/,/, $DEF_OPLIST));
%OPARRAY = ();
+\r
# Help text shown for -h or on a usage error.  Interpolates $0 and the
# default $INTERVAL, so it must be built after $INTERVAL is set.
# FIX: corrected the "cummulative" typo in the -t description.
$Usage =<< ".";

Usage: $0 [options] [trace1 [trace2 ...]]

If no trace files are specified, then the trace is read from stdin.

Command line options:

-h	Print usage message and exit.

-B [CFUG]	Compute per-Client, per-File, per-User, or per-Group info.

-c c1[,c2]*	Include only activity performed by the specified clients.

-C c1[,c2]*	Exclude activity performed by the specified clients.

-d	Compute per-directory statistics.  This implicitly
	enables -BF so that per-file info is computed.

-f	Do file info tracking.  This implicitly enables -BF so
	that per-File info is computed.

-F fhtype	Specify the file handle type used by the server.
	(advfs or netapp)

-g g1[,g2]*	Include only activity performed by the specified groups.

-G g1[,g2]*	Exclude activity performed by the specified groups.

-l	Record average operation latency.

-o basename	Write output to files starting with the specified
	basename.  The "Count" table goes to basename.cnt,
	"Latency" to basename.lat, and "File" to basename.fil.
	The default is to write all output to stdout.

-O op[,op]*	Specify the list of "interesting" operations.
	The default list is:

	read,write,lookup,getattr,access,create,remove

	If the first op starts with +, then the specified list
	of ops is appended to the default list.  The special
	pseudo-ops readM and writeM represent the number of
	bytes read and written, expressed in MB.

-t interval	Time interval for cumulative statistics (such as
	operation count).  The default is $INTERVAL seconds. 
	If set to 0, then the entire trace is processed.  By
	default, time is specified in seconds, but if the last
	character of the interval is any of s, m, h, or d,
	then the interval is interpreted as seconds, minutes,
	hours, or days.

-u u1[,u2]*	Include only activity performed by the specified users.

-U u1[,u2]*	Exclude activity performed by the specified users.

-Z	Omit count and latency lines that have a zero total
	operation count.
.


main ();
+\r
# Top-level driver: parse options (which also opens the output streams),
# print table headers for the enabled analyses, clear the counters,
# remove scratch files left over from a previous run, and then consume
# the trace.
sub main {

    parseArgs ();

    counts::printTitle (*OUT_COUNTS) if $DO_COUNTS;

    latency::printTitle (*OUT_LATENCY) if $DO_LATENCY;

    counts::resetOpCounts ();

    # Discard stale intermediate files from an earlier invocation.
    system ('rm -f noattrdirdiscard noattrdir-root');

    readTrace ();
}
+\r
# Parse the command line, set the many configuration globals, and open
# the output streams.  Exits on a usage error or -h.
#
# BUG FIXES relative to the original:
#   1. -c and -C were documented in $Usage but missing from the getopts
#      spec string, so $opt_c/$opt_C could never be set.
#   2. The -C (exclude clients) handler split $opt_c instead of $opt_C.
#   3. The -t unit-suffix parser matched against $INTERVAL (a number,
#      which can never contain [smhd]) instead of $opt_t, so suffixes
#      like "5m" were never honored.
sub parseArgs {

    # Preserve the exact invocation so it can be echoed into the output
    # files for reproducibility.
    my $cmdline = "$0 " . join (' ', @ARGV);

    my $Options = "B:c:C:dfF:g:G:hlO:o:t:u:U:SR:Z";
    if (! getopts ($Options)) {
	print STDERR "$0: Incorrect usage.\n";
	print STDERR $Usage;
	exit (1);
    }
    if (defined $opt_h) {
	print $Usage;
	exit (0);
    }

    #RFS: hard-coded overrides; command-line input not needed here.
    $opt_o = "test";
    $opt_f = 1;
    $opt_t = 0;
    #$opt_F = 'RFSNN'; # advfs or netapp

    # -B: choose which fields participate in the aggregation key.
    if (defined $opt_B) {
	$UseClient = ($opt_B =~ /C/);
	$UseFH = ($opt_B =~ /F/);
	$UseUID = ($opt_B =~ /U/);
	$UseGID = ($opt_B =~ /G/);
    }

    if (defined $opt_o) {
	$OutFileBaseName = $opt_o;
    }

    # -O: replace the interesting-op list, or append to the default list
    # when the argument starts with '+'.
    if (defined $opt_O) {
	if ($opt_O =~ /^\+(.*)/) {
	    @OPLIST = (@OPLIST, split (/,/, $1));
	}
	else {
	    @OPLIST = ('TOTAL', 'INTERESTING', split (/,/, $opt_O));
	}
	# Error checking?
    }

    if (defined $opt_l) {
	$DO_LATENCY = 1;
    }

    # -t: interval length, optionally suffixed with s/m/h/d.
    if (defined $opt_t) {
	if ($opt_t =~ /^([0-9]+)([smhd])$/) {
	    my $n = $1;
	    my $unit = $2;
	    my %secs_per = ('s' => 1,
			    'm' => 60,
			    'h' => 60 * 60,
			    'd' => 24 * 60 * 60);

	    $INTERVAL = $n * $secs_per{$unit};
	}
	else {
	    $INTERVAL = $opt_t;
	}
    }

    $DO_PATHS = (defined $opt_d);
    $DO_FILES = (defined $opt_f);
    $DO_SQUEEZE = (defined $opt_S);
    $OMIT_ZEROS = (defined $opt_Z);

    $TIME_ROUNDING = (defined $opt_R) ? $opt_R : 0;

    if (defined $opt_F) {
	$FH_TYPE = $opt_F;
    }

    # Client / group / user include-exclude filters.
    if (defined $opt_c) {
	@ADD_CLIENTS = split (/,/, $opt_c);
    }
    if (defined $opt_C) {
	@DEL_CLIENTS = split (/,/, $opt_C);
    }

    if (defined $opt_g) {
	@ADD_GROUPS = groups2gids (split (/,/, $opt_g));
    }
    if (defined $opt_G) {
	@DEL_GROUPS = groups2gids (split (/,/, $opt_G));
    }

    if (defined $opt_u) {
	@ADD_USERS = logins2uids (split (/,/, $opt_u));
    }
    if (defined $opt_U) {
	@DEL_USERS = logins2uids (split (/,/, $opt_U));
    }


    # Now that we know what options the user asked for, initialize
    # things accordingly.

    # Per-file and per-path analyses both key on the file handle.
    if ($DO_PATHS || $DO_FILES) {
	$UseFH = 1;
    }

    if ($DO_LATENCY) {
	latency::init (@OPLIST);
    }

    if ($DO_COUNTS) {
	counts::init (@OPLIST);
    }

    # Open one output stream per enabled table, or send everything to
    # stdout when no basename was given.  Each stream records the
    # command line as its first line.
    if (defined $OutFileBaseName) {
	if ($DO_COUNTS) {
	    open (OUT_COUNTS, ">$OutFileBaseName.cnt") ||
		    die "Can't create $OutFileBaseName.cnt.";
	    print OUT_COUNTS "#cmdline $cmdline\n";
	}
	if ($DO_LATENCY) {
	    open (OUT_LATENCY, ">$OutFileBaseName.lat") ||
		    die "Can't create $OutFileBaseName.lat.";
	    print OUT_LATENCY "#cmdline $cmdline\n";
	}
	if ($DO_FILES) {
	    open (OUT_FILES, ">$OutFileBaseName.fil") ||
		    die "Can't create $OutFileBaseName.fil.";
	    print OUT_FILES "#cmdline $cmdline\n";
	}
	if ($DO_PATHS) {
	    open (OUT_PATHS, ">$OutFileBaseName.pat") ||
		    die "Can't create $OutFileBaseName.pat.";
	    print OUT_PATHS "#cmdline $cmdline\n";
	}
    }
    else {
	*OUT_COUNTS = *STDOUT;
	*OUT_LATENCY = *STDOUT;
	*OUT_FILES = *STDOUT;
	*OUT_PATHS = *STDOUT;

	print STDOUT "#cmdline $cmdline\n";
    }

    # Fast membership lookup for "is this op interesting?".
    foreach my $op ( @OPLIST ) {
	$OPARRAY{$op} = 1;
    }

    return ;
}
+\r
# Main trace-processing loop: reads nfsdump lines from @ARGV files (or
# stdin), filters them, accumulates per-key counters, and emits one
# table row per $INTERVAL seconds.  The order of the tests below is
# deliberate -- see the NOTE in the loop body.
sub readTrace {
    my (@args) = @_;		# unused; kept for call compatibility

    while (my $line = <>) {

	# Progress indicator: report every 1000 lines to stderr.
	# NOTE(review): 'eq 0' is a string comparison of a numeric
	# expression; it behaves like '== 0' here but '==' would be
	# the conventional spelling.
	$hier::rfsLineNum++;
	if ( ($hier::rfsLineNum % 1000) eq 0) {
	    print STDERR "$hier::rfsLineNum\n";
	}


	# Truncated packets carry no usable fields; skip them outright.
	if ($line =~ /SHORT\ PACKET/) {
	    next;
	}

	my ($proto, $op, $xid, $client, $now, $response) =
		nfsd::nfsDumpParseLineHeader ($line);
	$NOW = $now;

	# NOTE: This next bit of logic requires a little
	# extra attention.  We want to discard lines as
	# quickly as we can if they're not "interesting". 
	# However, different lines are interesting in
	# different contexts, so the order of the tests and
	# the manner in which they are interspersed with
	# subroutine calls to pluck info from the lines is
	# very important.

	# Check whether it is a line that we should prune and
	# ignore, because of the filters.
	
	next if (($op eq 'C3' || $op eq 'C2') &&
		 ! pruneCall ($line, $client));

	# Feed the file-hierarchy tracker before keying, so parent
	# links exist when per-path stats are later rolled up.
	if ($DO_PATHS || $DO_FILES) {
	    hier::processLine ($line,
		    $proto, $op, $xid, $client,
		    $now, $response, $FH_TYPE);
	}

	# Build the aggregation key from whichever dimensions were
	# enabled with -B; undef means the line cannot be keyed.
	my $key = key::makeKey ($line, $proto, $op,
		$xid, $client, $now,
		$UseClient, $UseFH, $UseUID, $UseGID,
		$FH_TYPE);
	if (! defined $key) {
	    next ;
	}
	$KeysSeen{$key} = 1;

	# Count everything towards the total, but only
	# do the rest of the processing for things
	# that are "interesting".

	if ($proto eq 'C3' || $proto eq 'C2') {
	    $counts::OpCounts{"$key,TOTAL"}++;
	    $counts::KeysSeen{$key} = 1;

	    next if (! exists $OPARRAY{$op});

	    $counts::OpCounts{"$key,$op"}++;
	    $counts::OpCounts{"$key,INTERESTING"}++;
	}

	# Byte-count pseudo-ops: only tracked when requested via -O.
	if ($op eq 'read' && exists $OPARRAY{'readM'}) {
	    doReadSize ($line, $proto, $op, $key, $client, $xid, $response, $now);
	}

	if ($op eq 'write' && exists $OPARRAY{'writeM'}) {
	    doWriteSize ($line, $proto, $op, $key, $client, $xid, $response, $now);
	}

	if ($DO_LATENCY) {
	    latency::update ($key, $proto, $op,
		    $xid, $client, $now);
	}

	# First counted line establishes the interval boundaries.
	if ($END_TIME < 0) {
	    $START_TIME = findStartTime ($NOW, $TIME_ROUNDING);
	    $END_TIME = $START_TIME + $INTERVAL;
	}

	# Note that this is a loop, because if the interval is
	# short enough, or the system is very idle (or there's
	# a filter in place that makes it look idle), entire
	# intervals can go by without anything happening at
	# all.  Some tools can get confused if intervals are
	# missing from the table, so we emit them anyway.

	while (($INTERVAL > 0) && ($NOW >= $END_TIME)) {
	    printAll ($START_TIME);

	    counts::resetOpCounts ();
	    latency::resetOpCounts ();

	    $START_TIME += $INTERVAL;
	    $END_TIME = $START_TIME + $INTERVAL;
	}

	# Periodically discard pending call records whose replies
	# never arrived, so they don't accumulate without bound.
	if ($now > $nextPruneTime) {
	    key::prunePending ($now - $PRUNE_INTERVAL);
	    latency::prunePending ($now - $PRUNE_INTERVAL);

	    prunePending ($now - $PRUNE_INTERVAL);

	    $nextPruneTime = $now + $PRUNE_INTERVAL;
	}
    }

    # Squeeze out the last little bit, if there's anything that we
    # counted but did not emit.  If DO_SQUEEZE is true, then
    # always do this.  Otherwise, only squeeze out the results of
    # the last interval if the interval is "almost" complete (ie
    # within 10 seconds of the end).

    if (($NOW > $START_TIME) && ($DO_SQUEEZE || (($END_TIME - $NOW) < 10))) {
	printAll ($START_TIME);
	counts::resetOpCounts ();
    }

    print "#T endtime = $NOW\n";

}
+\r
# Emit one row for the interval that began at $start_time on each of
# the output streams whose analysis is enabled.
sub printAll {
    my ($start_time) = @_;

    counts::printOps ($start_time, *OUT_COUNTS) if $DO_COUNTS;

    latency::printOps ($start_time, *OUT_LATENCY) if $DO_LATENCY;

    hier::printAll ($start_time, *OUT_FILES) if $DO_FILES;

    printPaths ($start_time, *OUT_PATHS) if $DO_PATHS;
}
+\r
# Decide whether a call line survives the -u/-U, -g/-G, -c/-C filters.
# Returns 1 to keep the line, 0 to discard it.  A line missing a euid
# or egid field is discarded whenever user/group filtering is active.
#
# BUG FIX: $client was interpolated into the match pattern unescaped,
# so metacharacters in client names matched literally-different names
# (e.g. the dots in "10.0.0.1" matched any character, letting
# "10x0x0x1" pass a filter for "10.0.0.1").  Now quoted with \Q...\E.
# The uid/gid values come from hex() and are purely numeric, so they
# need no quoting.
sub pruneCall {
    my ($line, $client) = @_;

    if (@ADD_USERS > 0 || @DEL_USERS > 0) {
	my $c_uid = nfsd::nfsDumpParseLineField ($line, 'euid');
	if (! defined ($c_uid)) {
	    return 0;
	}
	$c_uid = hex ($c_uid);

	if (@ADD_USERS && !grep (/^$c_uid$/, @ADD_USERS)) {
	    return 0;
	}
	if (@DEL_USERS && grep (/^$c_uid$/, @DEL_USERS)) {
	    return 0;
	}
    }

    if (@ADD_GROUPS > 0 || @DEL_GROUPS > 0) {
	my $c_gid = nfsd::nfsDumpParseLineField ($line, 'egid');
	if (! defined ($c_gid)) {
	    return 0;
	}
	$c_gid = hex ($c_gid);

	if (@ADD_GROUPS && !grep (/^$c_gid$/, @ADD_GROUPS)) {
	    return 0;
	}
	if (@DEL_GROUPS && grep (/^$c_gid$/, @DEL_GROUPS)) {
	    return 0;
	}
    }

    if (@ADD_CLIENTS > 0 || @DEL_CLIENTS > 0) {
	if (@ADD_CLIENTS && !grep (/^\Q$client\E$/, @ADD_CLIENTS)) {
	    return 0;
	}
	if (@DEL_CLIENTS && grep (/^\Q$client\E$/, @DEL_CLIENTS)) {
	    return 0;
	}
    }

    return 1;
}
+\r
# Per-path accumulators used by buildDirPath/printPaths:
#   %PathOpCounts -- keyed "fh,op", op counts rolled up the hierarchy.
#   %PathsSeen    -- set of file handles (files and their ancestors) seen.
%PathOpCounts = ();
%PathsSeen = ();
+\r
# Fold the per-key operation counts for file handle $fh into
# %PathOpCounts, propagating each count up through every ancestor
# directory recorded in %hier::fh2Parent, and mark every handle
# touched in %PathsSeen.  Ancestor walks are capped at 20 levels to
# guard against cycles or corrupt parent links.
sub buildDirPath {
    my ($fh, $key) = @_;

    foreach my $op ( @OPLIST ) {
	my $opcnt = exists $counts::OpCounts{"$key,$op"}
		? $counts::OpCounts{"$key,$op"}
		: 0;

	$PathOpCounts{"$fh,$op"} = $opcnt;

	$PathsSeen{$fh} = 1;

	my $depth = 0;
	my $ancestor = $fh;
	while (defined ($ancestor = $hier::fh2Parent{$ancestor})) {

	    # Suspiciously deep hierarchy: warn and bail out of the walk.
	    if ($depth++ > 20) {
		print "Really long path ($fh)\n";
		last;
	    }

	    if (exists $PathOpCounts{"$ancestor,$op"}) {
		$PathOpCounts{"$ancestor,$op"} += $opcnt;
	    }
	    else {
		$PathOpCounts{"$ancestor,$op"} = $opcnt;
	    }
	    $PathsSeen{$ancestor} = 1;
	}
    }

    return ;
}
+\r
# Write the per-directory/per-file table for the interval starting at
# $start_time to the $out stream: one header line, then one "D" row per
# file handle seen, with its rolled-up count for every op in @OPLIST.
# Resets %PathOpCounts entries to zero as it emits them.
sub printPaths {
    my ($start_time, $out) = @_;

    # Column header: fixed fields followed by one column per op.
    my $str = "#D time Dir/File dircnt path fh";
    foreach my $op ( @OPLIST ) {
	$str .= " $op";
    }
    $str .= "\n";

    print $out $str;

    # Rebuild the set of paths from scratch for this interval.
    undef %PathsSeen;

    # Keys have the form "client,fh,euid,egid" (see key::makeKey);
    # only the fh component matters here.
    foreach my $key ( keys %KeysSeen ) {
	my ($client_id, $fh, $euid, $egid) = split (/,/, $key);

	buildDirPath ($fh, $key);
    }

    foreach my $fh ( keys %PathsSeen ) {
	my ($path, $cnt) = hier::findPath ($fh);

	# A zero-length path means the handle could not be resolved.
	if ($cnt == 0) {
	    $path = ".";
	}

	# 'D' for directories, 'F' for everything else; the value 2 in
	# %hier::fhIsDir is what hier.pl uses to flag directories.
	my $type = (exists $hier::fhIsDir{$fh} && $hier::fhIsDir{$fh}==2) ? 'D' : 'F';

	my $str = "$cnt $type $path $fh ";

	foreach my $op ( @OPLIST ) {
	    my $cnt;

	    if (exists $PathOpCounts{"$fh,$op"}) {
		$cnt = $PathOpCounts{"$fh,$op"};
	    }
	    else {
		# Should not happen: buildDirPath fills in every op.
		print "Missing $fh $op\n";
		$cnt = 0;
	    }

	    $str .= " $cnt";

	    $PathOpCounts{"$fh,$op"} = 0;	# &&& reset
	}

	print $out "D $start_time $str\n";
    }
}
+\r
# Pending read requests awaiting their reply, indexed by "client-xid":
# the aggregation key of the call and the time the call was seen.
# Entries are removed when the reply arrives (doReadSize) or when they
# go stale (prunePending).
%uxid2key = ();
%uxid2time = ();
+\r
# Accumulate read byte counts under the "readM" pseudo-op.  A read's
# size is only known from the reply, so call packets (C2/C3) are
# remembered by client+xid and matched up when the corresponding reply
# arrives.  Replies without a pending call, or with a non-OK status,
# are ignored.
sub doReadSize {
    my ($line, $proto, $op, $key, $client, $xid, $response, $time) = @_;

    my $uxid = "$client-$xid";

    if ($proto eq 'C3' || $proto eq 'C2') {
	# Call packet: stash the key and timestamp for the reply.
	$uxid2key{$uxid} = $key;
	$uxid2time{$uxid} = $time;
	return ;
    }

    # Reply packet: only meaningful if we saw the call and it succeeded.
    return unless exists $uxid2key{$uxid};
    return unless $response eq 'OK';

    $key = delete $uxid2key{$uxid};
    delete $uxid2time{$uxid};

    my $count = hex (nfsd::nfsDumpParseLineField ($line, 'count'));

    $counts::OpCounts{"$key,readM"} += $count;
}
+\r
# Note that we always just assume that writes succeed, because on most
# systems they virtually always do.  If you're tracing a system where
# your users are constantly filling up the disk or exceeding their
# quotas, then you will need to fix this.
#
# Accumulates write byte counts under the "writeM" pseudo-op, taken
# directly from the call packet ('count' for v3, 'tcount' for v2).
#
# BUG FIX: the diagnostic used printf with the raw trace line as the
# FORMAT string, so any '%' in the packet dump was interpreted as a
# conversion; now printed verbatim with print.  Also guards hex()
# against an undefined field (previously warned; the accumulated value,
# 0, is unchanged).
sub doWriteSize {
    my ($line, $proto, $op, $key, $client, $xid, $response, $time) = @_;

    if ($proto eq 'C3' || $proto eq 'C2') {

	my $tag = ($proto eq 'C3') ? 'count' : 'tcount';

	my $count = nfsd::nfsDumpParseLineField ($line, $tag);

	if (! $count) {
	    print "WEIRD count $line\n";
	}

	$count = defined ($count) ? hex ($count) : 0;

	$counts::OpCounts{"$key,writeM"} += $count;
    }
}
+\r
+\r
+# Purge all the pending XID records dated earlier than $when (which is\r
+# typically at least $PRUNE_INTERVAL seconds ago). This is important\r
+# because otherwise missing XID records can pile up, eating a lot of\r
+# memory. \r
+ \r
# Purge all the pending XID records dated earlier than $when (which is
# typically at least $PRUNE_INTERVAL seconds ago).  This is important
# because otherwise missing XID records can pile up, eating a lot of
# memory.
sub prunePending {
    my ($when) = @_;

    # Collect the stale uxids first, then drop them from both maps
    # with hash slices.
    my @stale = grep { $uxid2time{$_} < $when } keys %uxid2time;

    delete @uxid2key{@stale};
    delete @uxid2time{@stale};

    return ;
}
+\r