--- /dev/null
+These runs are similar to the later 20110226/ runs, but use the older,
+slower 80 GB disk in the proxy.
+
+One major difference is that all OPS targets use the same working set
+size for the files, rather than having it scale up with the load. In
+total there is ~10 GB of data, but the working set is around 3.0-3.5 GB.
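+
+To view the comparison plots, the accompanying gnuplot scripts can be
+run directly. A minimal invocation (assuming an interactive terminal
+such as wxt; the script name below is just a placeholder):
+
+    gnuplot -persist plot-summary.gnuplot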
--- /dev/null
+set xlabel "Requests Operations per Second"
+set ylabel "Achieved Operations per Second"
+
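+# Assumed sfssum.* column layout: column 1 is the requested op/s, column 2
+# the achieved op/s (picked up by the implicit default "using 1:2"), and
+# column 3 the average latency in ms.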
+set term wxt 0
+plot "sfssum.20110214-native" with linespoints title "Linux knfsd", \
+ "sfssum.20110215-bluesky-512M" with linespoints title "BlueSky (512 MB Cache)", \
+ "sfssum.20110214-bluesky-4G" with linespoints title "BlueSky (4 GB Cache)"
+
+set term wxt 1
+set ylabel "Latency (ms)"
+plot "sfssum.20110214-native" using 1:3 with linespoints title "Linux knfsd", \
+ "sfssum.20110215-bluesky-512M" using 1:3 with linespoints title "BlueSky (512 MB Cache)", \
+ "sfssum.20110214-bluesky-4G" using 1:3 with linespoints title "BlueSky (4 GB Cache)"
--- /dev/null
+set xlabel "Requests Operations per Second"
+set ylabel "Achieved Operations per Second"
+
+set term wxt 0
+plot "sfssum.20110225-native" with linespoints title "Linux knfsd", \
+ "sfssum.20110226-s3-bigcache" with linespoints title "BlueSky (64 GB Cache)", \
+ "sfssum.20110226-s3-smallcache" with linespoints title "BlueSky (1 GB Cache)"
+
+set term wxt 1
+set ylabel "Latency (ms)"
+plot "sfssum.20110225-native" using 1:3 with linespoints title "Linux knfsd", \
+ "sfssum.20110226-s3-bigcache" using 1:3 with linespoints title "BlueSky (64 GB Cache)", \
+ "sfssum.20110226-s3-smallcache" using 1:3 with linespoints title "BlueSky (1 GB Cache)"
+
+set term wxt 2
+set ylabel "Working Set Size (GB)"
+plot "sfssum.20110225-native" using 1:($9*0.3/1024.0**2) with linespoints notitle
--- /dev/null
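+# Plot the values in column 2 of profile.times against the sample index
+# (gnuplot pseudo-column 0), i.e. in the order the samples were recorded.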
+plot "profile.times" using 0:2 with linespoints notitle