Run 2to3 over Python sources.
author     Michael Vrable <vrable@cs.hmc.edu>
           Tue, 28 Jan 2014 16:24:56 +0000 (08:24 -0800)
committer  Michael Vrable <vrable@cs.hmc.edu>
           Sat, 1 Feb 2014 17:18:56 +0000 (09:18 -0800)
13 files changed:
cumulus-sync
python/cumulus/__init__.py
python/cumulus/cmd_util.py
python/cumulus/config.py
python/cumulus/main.py
python/cumulus/metadata.py
python/cumulus/rebuild_database.py
python/cumulus/retention.py
python/cumulus/store/__init__.py
python/cumulus/store/file.py
python/cumulus/store/ftp.py
python/cumulus/store/s3.py
python/cumulus/store/sftp.py

diff --git a/cumulus-sync b/cumulus-sync
index 19d4aea..4953091 100755 (executable)
@@ -43,14 +43,14 @@ for s in snapshots:
     items_required.add(s)
     d = cumulus.parse_full(source.load_snapshot(s))
     items_required.update(d['Segments'].split())
-print "Required:", len(items_required)
+print("Required:", len(items_required))
 
 files_present = set()
 for filetype in cumulus.SEARCH_PATHS:
     for (name, path) in store2.list_generic(filetype):
         items_required.discard(name)
         files_present.add(path)
-print "Files already present:", len(sorted(files_present))
+print("Files already present:", len(sorted(files_present)))
 
 files_required = []
 items_found = set()
@@ -62,5 +62,5 @@ for filetype in cumulus.SEARCH_PATHS:
 files_required.sort()
 
 for i, f in enumerate(files_required):
-    print "[%d/%d] %s" % (i + 1, len(files_required), f)
+    print("[%d/%d] %s" % (i + 1, len(files_required), f))
     store2.raw_backend.put(f, store1.raw_backend.get(f))
diff --git a/python/cumulus/__init__.py b/python/cumulus/__init__.py
index 02f978e..8dc4c98 100644 (file)
@@ -26,7 +26,7 @@ various parts of a Cumulus archive:
   - reading and maintaining the local object database
 """
 
-from __future__ import division
+
 import hashlib
 import itertools
 import os
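
Editorial note, not part of the commit: 2to3 drops the __future__ import because the converted code targets Python 3, where / already performs true division; expressions such as total_bytes / target_buckets later in this file rely on that. A quick illustration with stand-in values:

    total_bytes, target_buckets = 7, 2
    assert total_bytes / target_buckets == 3.5    # true division on Python 3
    assert total_bytes // target_buckets == 3     # floor division is spelled //
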
@@ -34,7 +34,7 @@ import re
 import sqlite3
 import tarfile
 import tempfile
-import thread
+import _thread
 
 import cumulus.store
 import cumulus.store.file
@@ -264,7 +264,7 @@ class BackendWrapper(object):
 
         store may either be a Store object or URL.
         """
-        if type(backend) in (str, unicode):
+        if type(backend) in (str, str):
             if backend.find(":") >= 0:
                 self._backend = cumulus.store.open(backend)
             else:
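
Editorial note: the unicode fixer rewrites names mechanically, which leaves the redundant (str, str) tuple above, and the exact type() comparison still rejects str subclasses. A hedged sketch of a possible follow-up cleanup; looks_like_url is an illustrative name, not from the source:

    def looks_like_url(backend):
        # isinstance() accepts str subclasses and avoids the duplicated tuple.
        return isinstance(backend, str) and ":" in backend
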
@@ -295,10 +295,10 @@ class BackendWrapper(object):
     def prefetch_generic(self):
         """Calls scan on directories to prefetch file metadata."""
         directories = set()
-        for typeinfo in SEARCH_PATHS.values():
+        for typeinfo in list(SEARCH_PATHS.values()):
             directories.update(typeinfo.directories())
         for d in directories:
-            print "Prefetch", d
+            print("Prefetch", d)
             self._backend.scan(d)
 
 class CumulusStore:
@@ -374,7 +374,7 @@ class CumulusStore:
                 dst.write(block)
             src.close()
             dst.close()
-        thread.start_new_thread(copy_thread, (filehandle, input))
+        _thread.start_new_thread(copy_thread, (filehandle, input))
         return output
 
     def get_segment(self, segment):
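
Editorial note: 2to3 only renames thread to _thread, so the low-level start_new_thread API stays. If a later change preferred the higher-level interface, a rough, hypothetical equivalent would be the following; start_copy is not from the source:

    import threading

    def start_copy(copy_func, src, dst):
        # Run copy_func(src, dst) in a daemon thread, mirroring the
        # _thread.start_new_thread(copy_thread, (filehandle, input)) call above.
        t = threading.Thread(target=copy_func, args=(src, dst), daemon=True)
        t.start()
        return t
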
@@ -481,7 +481,7 @@ def parse(lines, terminate=None):
 
 def parse_full(lines):
     try:
-        return parse(lines).next()
+        return next(parse(lines))
     except StopIteration:
         return {}
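
Editorial note: Python 3 renames the iterator method to __next__, so generator.next() no longer exists; the next() builtin used in the rewrite works on both 2.6+ and 3.x. A minimal illustration of the same pattern; first_or_default is illustrative, not from the source:

    def first_or_default(iterable, default=None):
        # Same try/except StopIteration shape as parse_full() above.
        try:
            return next(iter(iterable))
        except StopIteration:
            return default
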
 
@@ -566,7 +566,7 @@ class MetadataItem:
     @staticmethod
     def decode_device(s):
         """Decode a device major/minor number."""
-        (major, minor) = map(MetadataItem.decode_int, s.split("/"))
+        (major, minor) = list(map(MetadataItem.decode_int, s.split("/")))
         return (major, minor)
 
     class Items: pass
@@ -578,7 +578,7 @@ class MetadataItem:
         self.object_store = object_store
         self.keys = []
         self.items = self.Items()
-        for (k, v) in fields.items():
+        for (k, v) in list(fields.items()):
             if k in self.field_types:
                 decoder = self.field_types[k]
                 setattr(self.items, k, decoder(v))
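
Editorial note: 2to3 wraps .items(), .values(), and map() in list() whenever it cannot prove the result is consumed only once. For a read-only loop such as this one the wrapper is harmless but unnecessary. Illustration with stand-in data:

    fields = {"name": "file", "size": "10"}
    # Iterating the view directly is enough; list(fields.items()) only matters
    # when the dictionary is mutated inside the loop.
    for k, v in fields.items():
        pass
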
@@ -736,7 +736,7 @@ class LocalDatabase:
                 can_delete = True
 
             if can_delete and not first:
-                print "Delete snapshot %d (%s)" % (id, name)
+                print("Delete snapshot %d (%s)" % (id, name))
                 cur.execute("delete from snapshots where snapshotid = ?",
                             (id,))
             first = False
@@ -942,11 +942,11 @@ class LocalDatabase:
         target_size = max(2 * segment_size_estimate,
                           total_bytes / target_buckets)
 
-        print "segment_size:", segment_size_estimate
-        print "distribution:", distribution
-        print "total_bytes:", total_bytes
-        print "target_buckets:", target_buckets
-        print "min, target size:", min_size, target_size
+        print("segment_size:", segment_size_estimate)
+        print("distribution:", distribution)
+        print("total_bytes:", total_bytes)
+        print("target_buckets:", target_buckets)
+        print("min, target size:", min_size, target_size)
 
         # Chosen cutoffs.  Each bucket consists of objects with age greater
         # than one cutoff value, but not greater than the next largest cutoff.
@@ -976,7 +976,7 @@ class LocalDatabase:
             cutoffs.append(-1)
         cutoffs.append(-1)
 
-        print "cutoffs:", cutoffs
+        print("cutoffs:", cutoffs)
 
         # Update the database to assign each object to the appropriate bucket.
         cutoffs.reverse()
diff --git a/python/cumulus/cmd_util.py b/python/cumulus/cmd_util.py
index 2e163cf..0827cb0 100644 (file)
@@ -36,7 +36,7 @@ def check_version(format):
 # environment variable.
 def get_passphrase():
     ENV_KEY = 'LBS_GPG_PASSPHRASE'
-    if not os.environ.has_key(ENV_KEY):
+    if ENV_KEY not in os.environ:
         os.environ[ENV_KEY] = getpass.getpass()
 
 def cmd_prune_db(args):
@@ -64,8 +64,8 @@ def cmd_clean(args, clean_threshold=7.0):
     # Expire segments which are poorly-utilized.
     for s in db.get_segment_cleaning_list():
         if s.cleaning_benefit > clean_threshold:
-            print "Cleaning segment %d (benefit %.2f)" % (s.id,
-                                                          s.cleaning_benefit)
+            print("Cleaning segment %d (benefit %.2f)" % (s.id,
+                                                          s.cleaning_benefit))
             db.mark_segment_expired(s)
         else:
             break
@@ -77,7 +77,7 @@ def cmd_list_snapshots(args):
         Syntax: $0 --data=DATADIR list-snapshots
     """
     store = cumulus.CumulusStore(options.store)
-    for s in sorted(store.list_snapshots()): print s
+    for s in sorted(store.list_snapshots()): print(s)
 
 def cmd_list_snapshot_sizes(args):
     """ List size of data needed for each snapshot.
@@ -104,7 +104,7 @@ def cmd_list_snapshot_sizes(args):
             remcount += 1
         size += added - removed
         previous = segments
-        print "%s: %.3f +%.3f -%.3f (+%d/-%d segments)" % (s, size / 1024.0**2, added / 1024.0**2, removed / 1024.0**2, addcount, remcount)
+        print("%s: %.3f +%.3f -%.3f (+%d/-%d segments)" % (s, size / 1024.0**2, added / 1024.0**2, removed / 1024.0**2, addcount, remcount))
 
 def cmd_garbage_collect(args):
     """ Search for any files which are not needed by any current
@@ -120,7 +120,7 @@ def cmd_garbage_collect(args):
         referenced.add(s)
         referenced.update(d['Segments'].split())
 
-    print referenced
+    print(referenced)
 
     to_delete = []
     to_preserve = []
@@ -131,12 +131,12 @@ def cmd_garbage_collect(args):
             else:
                 to_delete.append(path)
 
-    print to_preserve
-    print to_delete
+    print(to_preserve)
+    print(to_delete)
 
     raw_backend = backend.raw_backend
     for f in to_delete:
-        print "Delete:", f
+        print("Delete:", f)
         if not options.dry_run:
             raw_backend.delete(f)
 cmd_gc = cmd_garbage_collect
@@ -149,8 +149,8 @@ def cmd_read_snapshots(snapshots):
     for s in snapshots:
         d = cumulus.parse_full(store.load_snapshot(s))
         check_version(d['Format'])
-        print d
-        print d['Segments'].split()
+        print(d)
+        print(d['Segments'].split())
     store.cleanup()
 
 def cmd_read_metadata(args):
@@ -179,14 +179,14 @@ def cmd_verify_snapshots(snapshots):
     store = cumulus.CumulusStore(options.store)
     for s in snapshots:
         cumulus.accessed_segments.clear()
-        print "#### Snapshot", s
+        print("#### Snapshot", s)
         d = cumulus.parse_full(store.load_snapshot(s))
         check_version(d['Format'])
-        print "## Root:", d['Root']
+        print("## Root:", d['Root'])
         metadata = cumulus.iterate_metadata(store, d['Root'])
         for m in metadata:
             if m.fields['type'] not in ('-', 'f'): continue
-            print "%s [%d bytes]" % (m.fields['name'], int(m.fields['size']))
+            print("%s [%d bytes]" % (m.fields['name'], int(m.fields['size'])))
             verifier = cumulus.ChecksumVerifier(m.fields['checksum'])
             size = 0
             for block in m.data():
@@ -203,11 +203,11 @@ def cmd_verify_snapshots(snapshots):
         # doesn't contain duplicates.
         listed_segments = set(d['Segments'].split())
         if cumulus.accessed_segments - listed_segments:
-            print "Error: Some segments not listed in descriptor!"
-            print sorted(list(cumulus.accessed_segments - listed_segments))
+            print("Error: Some segments not listed in descriptor!")
+            print(sorted(list(cumulus.accessed_segments - listed_segments)))
         if listed_segments - cumulus.accessed_segments :
-            print "Warning: Extra unused segments listed in descriptor!"
-            print sorted(list(listed_segments - cumulus.accessed_segments))
+            print("Warning: Extra unused segments listed in descriptor!")
+            print(sorted(list(listed_segments - cumulus.accessed_segments)))
     store.cleanup()
 
 def cmd_restore_snapshot(args):
@@ -232,7 +232,7 @@ def cmd_restore_snapshot(args):
         return False
 
     def warn(m, msg):
-        print "Warning: %s: %s" % (m.items.name, msg)
+        print("Warning: %s: %s" % (m.items.name, msg))
 
     # Phase 1: Read the complete metadata log and create directory structure.
     metadata_items = []
@@ -262,16 +262,16 @@ def cmd_restore_snapshot(args):
 
         try:
             if not os.path.isdir(path):
-                print "mkdir:", path
+                print("mkdir:", path)
                 os.makedirs(path)
-        except Exception, e:
+        except Exception as e:
             warn(m, "Error creating directory structure: %s" % (e,))
             continue
 
     # Phase 2: Restore files, ordered by how data is stored in segments.
     def restore_file(pathname, m):
         assert m.items.type in ('-', 'f')
-        print "extract:", pathname
+        print("extract:", pathname)
         destpath = os.path.join(destdir, pathname)
 
         file = open(destpath, 'wb')
@@ -290,13 +290,13 @@ def cmd_restore_snapshot(args):
 
     while metadata_segments:
         (segment, items) = metadata_segments.popitem()
-        print "+ Segment", segment
+        print("+ Segment", segment)
         for pathname in sorted(items):
             if pathname in metadata_paths:
                 restore_file(pathname, metadata_paths[pathname])
                 del metadata_paths[pathname]
 
-    print "+ Remaining files"
+    print("+ Remaining files")
     while metadata_paths:
         (pathname, m) = metadata_paths.popitem()
         restore_file(pathname, m)
@@ -304,7 +304,7 @@ def cmd_restore_snapshot(args):
     # Phase 3: Restore special files (symlinks, devices).
     # Phase 4: Restore directory permissions and modification times.
     for (pathname, m) in reversed(metadata_items):
-        print "permissions:", pathname
+        print("permissions:", pathname)
         destpath = os.path.join(destdir, pathname)
         (path, filename) = os.path.split(destpath)
 
@@ -326,9 +326,9 @@ def cmd_restore_snapshot(args):
                 os.mkfifo(destpath)
             elif m.items.type in ('c', 'b'):
                 if m.items.type == 'c':
-                    mode = 0600 | stat.S_IFCHR
+                    mode = 0o600 | stat.S_IFCHR
                 else:
-                    mode = 0600 | stat.S_IFBLK
+                    mode = 0o600 | stat.S_IFBLK
                 os.mknod(destpath, mode, os.makedev(*m.items.device))
             elif m.items.type == 's':
                 pass        # TODO: Implement
@@ -336,7 +336,7 @@ def cmd_restore_snapshot(args):
                 warn(m, "Unknown type code: " + m.items.type)
                 continue
 
-        except Exception, e:
+        except Exception as e:
             warn(m, "Error restoring: %s" % (e,))
             continue
 
@@ -344,7 +344,7 @@ def cmd_restore_snapshot(args):
             uid = m.items.user[0]
             gid = m.items.group[0]
             os.lchown(destpath, uid, gid)
-        except Exception, e:
+        except Exception as e:
             warn(m, "Error restoring file ownership: %s" % (e,))
 
         if m.items.type == 'l':
@@ -352,12 +352,12 @@ def cmd_restore_snapshot(args):
 
         try:
             os.chmod(destpath, m.items.mode)
-        except Exception, e:
+        except Exception as e:
             warn(m, "Error restoring file permissions: %s" % (e,))
 
         try:
             os.utime(destpath, (time.time(), m.items.mtime))
-        except Exception, e:
+        except Exception as e:
             warn(m, "Error restoring file timestamps: %s" % (e,))
 
     store.cleanup()
@@ -365,7 +365,7 @@ def cmd_restore_snapshot(args):
 def main(argv):
     usage = ["%prog [option]... command [arg]...", "", "Commands:"]
     cmd = method = None
-    for cmd, method in globals().iteritems():
+    for cmd, method in globals().items():
         if cmd.startswith ('cmd_'):
             usage.append(cmd[4:].replace('_', '-') + ':' + method.__doc__)
     parser = OptionParser(usage="\n".join(usage))
@@ -391,6 +391,6 @@ def main(argv):
     if method:
         method (args)
     else:
-        print "Unknown command:", cmd
+        print("Unknown command:", cmd)
         parser.print_usage()
         sys.exit(1)
diff --git a/python/cumulus/config.py b/python/cumulus/config.py
index cf721bf..32c20da 100644 (file)
@@ -22,7 +22,7 @@ See the Cumulus documentation for a description of the configuration file
 format.
 """
 
-import ConfigParser
+import configparser
 import datetime
 import re
 
@@ -41,7 +41,7 @@ def _build_retention_engine(spec):
     for s in spec.split():
         m = class_re.match(s)
         if not m:
-            print "Invalid retain spec:", s
+            print("Invalid retain spec:", s)
             continue
         period = datetime.timedelta()
         classname = m.group(1)
@@ -58,7 +58,7 @@ def _build_retention_engine(spec):
 class CumulusConfig(object):
     def __init__(self, filename):
         """Parse a Cumulus backup configuration from the specified file."""
-        self._config = ConfigParser.RawConfigParser()
+        self._config = configparser.RawConfigParser()
         self._config.readfp(open(filename))
 
     def get_global(self, key):
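
Editorial note: readfp() survives the module rename but is a deprecated spelling in Python 3 (read_file() is the documented replacement), and the bare open() call above never closes its handle. A hedged follow-up sketch; load_config is illustrative, not from the source:

    import configparser

    def load_config(filename):
        parser = configparser.RawConfigParser()
        with open(filename) as f:       # close the handle deterministically
            parser.read_file(f)         # non-deprecated replacement for readfp()
        return parser
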
diff --git a/python/cumulus/main.py b/python/cumulus/main.py
index 62f2665..af50e26 100644 (file)
@@ -47,13 +47,13 @@ def prune_backups(backup_config, scheme):
             expired_snapshots.append(snapshot)
     # The most recent snapshot is never removed.
     if expired_snapshots: expired_snapshots.pop()
-    print expired_snapshots
+    print(expired_snapshots)
 
     # TODO: Clean up the expiration part...
     for snapshot in expired_snapshots:
         store.store.delete("snapshot", "snapshot-%s.lbs" % snapshot)
 
-    print "Collecting garbage..."
+    print("Collecting garbage...")
     options = FakeOptions()
     options.store = backup_config.get_global("dest")
     options.dry_run = False
@@ -91,10 +91,10 @@ def prune_localdb(backup_config, scheme, next_snapshot=None):
         retention.consider_snapshot(next_snapshot)
     retained = set(retention.last_snapshots().values())
     retained.add(snapshots[-1])
-    print retention.last_snapshots()
-    print retained
+    print(retention.last_snapshots())
+    print(retained)
     for s in snapshots:
-        print s, s in retained
+        print(s, s in retained)
 
     evicted = [s for s in snapshots if s not in retained]
     for s in evicted:
@@ -105,7 +105,7 @@ def prune_localdb(backup_config, scheme, next_snapshot=None):
 def main(argv):
     backup_config = config.CumulusConfig(argv[1])
     for scheme in backup_config.backup_schemes():
-        print scheme
+        print(scheme)
         #prune_backups(backup_config, scheme)
         prune_localdb(backup_config, scheme, datetime.datetime.utcnow())
         #prune_localdb(backup_config, scheme, datetime.datetime(2013, 1, 1))
diff --git a/python/cumulus/metadata.py b/python/cumulus/metadata.py
index c133b39..ac92ec4 100644 (file)
@@ -45,7 +45,7 @@ class Metadata:
         lines = self._load(ref)[n:]
 
         try:
-            return cumulus.parse(lines, lambda l: len(l) == 0).next()
+            return next(cumulus.parse(lines, lambda l: len(l) == 0))
         except StopIteration:
             return {}
 
@@ -195,6 +195,6 @@ if __name__ == '__main__':
 
     metadata = Metadata(store, root)
     ptr = metadata.search(['home', 'mvrable', 'docs'])
-    print ptr
-    print metadata._read(ptr)
+    print(ptr)
+    print(metadata._read(ptr))
     store.cleanup()
diff --git a/python/cumulus/rebuild_database.py b/python/cumulus/rebuild_database.py
index 10a5f9a..a4af640 100755 (executable)
@@ -113,7 +113,7 @@ class Chunker(object):
     def compute_breaks(self, buf):
         breaks = [0]
         signature = self.window_init()
-        for i in xrange(len(buf)):
+        for i in range(len(buf)):
             self.window_update(signature, ord(buf[i]))
             block_len = i - breaks[-1] + 1
             if ((signature[0] % self.TARGET_CHUNK_SIZE == self.BREAKMARK_VALUE
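
Editorial note: 2to3 converts xrange to range but does nothing about bytes versus str. If buf comes from a file opened in binary mode, buf[i] is already an int on Python 3 and ord() raises TypeError. A defensive helper, sketched here; byte_at is not from the source:

    def byte_at(buf, i):
        # Return the integer byte value at position i for bytes or str input.
        b = buf[i]
        return b if isinstance(b, int) else ord(b)
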
@@ -163,9 +163,9 @@ class Chunker(object):
                 n -= i
 
         position = 0
-        for next_start, (size, digest) in sorted(signatures.iteritems()):
+        for next_start, (size, digest) in sorted(signatures.items()):
             if next_start < position:
-                print "Warning: overlapping signatures, ignoring"
+                print("Warning: overlapping signatures, ignoring")
                 continue
             skip(next_start - position)
             records.append(struct.pack(">H", size) + digest)
@@ -177,7 +177,7 @@ class Chunker(object):
         """Loads signatures from the binary format stored in the database."""
         entry_size = 2 + self.hash_size
         if len(signatures) % entry_size != 0:
-            print "Warning: Invalid signatures to load"
+            print("Warning: Invalid signatures to load")
             return {}
 
         null_digest = "\x00" * self.hash_size
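
Editorial note: a related bytes/str gap is the sentinel above. hashlib digests are bytes on Python 3, so a str built from "\x00" never compares equal to digest() output; the sentinel presumably needs to become bytes in a later pass. Illustration:

    import hashlib

    hash_size = hashlib.sha1().digest_size    # 20 bytes for SHA-1
    null_digest = b"\x00" * hash_size         # bytes, matching digest() output
    assert null_digest != hashlib.sha1(b"data").digest()
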
@@ -250,11 +250,11 @@ class DatabaseRebuilder(object):
             if metadata.items.type not in ("-", "f"): continue
             try:
                 path = os.path.join(reference_path, metadata.items.name)
-                print "Path:", path
+                print("Path:", path)
                 # TODO: Check file size for early abort if different
                 self.rebuild_file(open(path), metadata)
             except IOError as e:
-                print e
+                print(e)
                 pass  # Ignore the file
 
         self.database.commit()
@@ -275,7 +275,7 @@ class DatabaseRebuilder(object):
 
     def insert_segment_info(self, segment, info):
         id = self.segment_to_id(segment)
-        for k, v in info.items():
+        for k, v in list(info.items()):
             self.cursor.execute("update segments set " + k + " = ? "
                                 "where segmentid = ?",
                                 (v, id))
@@ -323,10 +323,10 @@ class DatabaseRebuilder(object):
                 subblock[k] = self.chunker.dump_signatures(subblock[k])
             self.store_checksums(checksums, subblock)
         else:
-            print "Checksum mismatch"
+            print("Checksum mismatch")
 
     def store_checksums(self, block_checksums, subblock_signatures):
-        for (segment, object), (size, checksum) in block_checksums.iteritems():
+        for (segment, object), (size, checksum) in block_checksums.items():
             segmentid = self.segment_to_id(segment)
             self.cursor.execute(
                 """insert or ignore into block_index(segmentid, object)
@@ -440,8 +440,8 @@ if __name__ == "__main__":
                 os.path.relpath(f, topdir))
             if metadata:
                 for (k, v) in sorted(metadata.items()):
-                    print "%s: %s" % (k, v)
-                print
+                    print("%s: %s" % (k, v))
+                print()
         sys.exit(0)
 
     # Sample code to rebuild the segments table from metadata--needs to be
diff --git a/python/cumulus/retention.py b/python/cumulus/retention.py
index b0e0078..e447b54 100644 (file)
@@ -132,7 +132,7 @@ class RetentionEngine(object):
 
         self._labels = set()
         retain = False
-        for (backup_class, retention_period) in self._policies.iteritems():
+        for (backup_class, retention_period) in self._policies.items():
             partition = _backup_classes[backup_class](timestamp_policy)
             last_snapshot = self._last_snapshots[backup_class]
             if self._last_snapshots[backup_class][0] != partition:
@@ -153,4 +153,4 @@ class RetentionEngine(object):
     def last_snapshots(self):
         """Returns the most recent snapshot in each backup class."""
         return dict((k, v[1]) for (k, v)
-                    in self._last_snapshots.iteritems() if v[2])
+                    in self._last_snapshots.items() if v[2])
diff --git a/python/cumulus/store/__init__.py b/python/cumulus/store/__init__.py
index 7488b2f..e368c57 100644 (file)
@@ -16,7 +16,7 @@
 # with this program; if not, write to the Free Software Foundation, Inc.,
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 
-import exceptions, re, urlparse
+import exceptions, re, urllib.parse
 
 type_patterns = {
     'checksums': re.compile(r"^snapshot-(.*)\.(\w+)sums$"),
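
Editorial note: one conversion 2to3 misses is the stand-alone exceptions module, which was removed in Python 3 (built-in exceptions now live in builtins), so the rewritten import line still fails at load time. A later cleanup would presumably keep only:

    import re
    import urllib.parse

    # urlparse() behaves the same under its new module path.
    assert urllib.parse.urlparse("s3://bucket/prefix").scheme == "s3"
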
@@ -39,7 +39,7 @@ class Store (object):
         if cls != Store:
             return super(Store, cls).__new__(cls, url, **kw)
         (scheme, netloc, path, params, query, fragment) \
-            = urlparse.urlparse(url)
+            = urllib.parse.urlparse(url)
 
         try:
             cumulus = __import__('cumulus.store.%s' % scheme, globals())
@@ -53,7 +53,7 @@ class Store (object):
             obj.fragment = fragment
             return obj
         except ImportError:
-            raise NotImplementedError, "Scheme %s not implemented" % scheme
+            raise NotImplementedError("Scheme %s not implemented" % scheme)
 
     def list(self, path):
         raise NotImplementedError
diff --git a/python/cumulus/store/file.py b/python/cumulus/store/file.py
index e2da34a..6d23a58 100644 (file)
@@ -56,6 +56,6 @@ class FileStore(cumulus.store.Store):
             stat = os.stat(os.path.join(self.prefix, path))
             return {'size': stat.st_size}
         except OSError:
-            raise cumulus.store.NotFoundError, path
+            raise cumulus.store.NotFoundError(path)
 
 Store = FileStore
diff --git a/python/cumulus/store/ftp.py b/python/cumulus/store/ftp.py
index 95dd14e..ba2e4e7 100644 (file)
@@ -98,16 +98,16 @@ class FtpStore (Store):
             self.ftp.sendcmd ('TYPE I')
             size = self.ftp.size (fn)
             self.ftp.sendcmd ('TYPE A')
-        except all_errors, err:
-            print err
+        except all_errors as err:
+            print(err)
             pass
         if size is not None:
             return {'size': size}
-        print "nlst: %s" % fn, size
+        print("nlst: %s" % fn, size)
         l = self.ftp.nlst (fn)
         if l:
             return {'size': 42}
-        raise NotFoundError(type, name)
+        raise NotFoundError(type, name)
 
     def sync (self):
         """ After a get command at end of transfer a 2XX reply is still
@@ -120,7 +120,7 @@ class FtpStore (Store):
             if not self.synced:
                 self.ftp.voidresp()
             self.ftp.sendcmd ('TYPE A')
-        except error_temp, err :
+        except error_temp as err :
             if not err.message.startswith ('421') :
                 raise
             self.connect ()
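
Editorial note: the except syntax is fixed here, but err.message is another Python 2-ism 2to3 leaves alone; BaseException lost its message attribute in Python 3, so the startswith('421') check would raise AttributeError. A hedged replacement; is_service_unavailable is illustrative, not from the source:

    from ftplib import error_temp

    def is_service_unavailable(err):
        # str(err) (or err.args[0]) carries the server reply text in Python 3.
        return isinstance(err, error_temp) and str(err).startswith("421")
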
diff --git a/python/cumulus/store/s3.py b/python/cumulus/store/s3.py
index 7d8aaaf..4898583 100644 (file)
@@ -33,7 +33,7 @@ def throw_notfound(method):
             return method(*args, **kwargs)
         except S3ResponseError as e:
             if e.status == 404:
-                print "Got a 404:", e
+                print("Got a 404:", e)
                 raise cumulus.store.NotFoundError(e)
             else:
                 raise
diff --git a/python/cumulus/store/sftp.py b/python/cumulus/store/sftp.py
index f781861..bbc4aff 100644 (file)
@@ -17,7 +17,7 @@
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 
 #needed for python 2.5
-from __future__ import with_statement
+
 
 from paramiko import Transport, SFTPClient, RSAKey, DSSKey
 from paramiko.config import SSHConfig
@@ -74,18 +74,18 @@ class SFTPStore(Store):
 
         host_keys = paramiko.util.load_host_keys(os.path.expanduser('~/.ssh/known_hosts'))
         try:
-            self.hostkey = host_keys[self.config['hostkeyalias']].values()[0]
+            self.hostkey = list(host_keys[self.config['hostkeyalias']].values())[0]
         except:
-            print str(self.config)
+            print(str(self.config))
             raise
 
 
-        if(self.config.has_key('identityfile')):
+        if('identityfile' in self.config):
             key_file = os.path.expanduser(self.config['identityfile'])
             #not really nice but i don't see a cleaner way atm...
             try:
                 self.auth_key = RSAKey (key_file)
-            except SSHException, e:
+            except SSHException as e:
                 if e.message == 'Unable to parse file':
                     self.auth_key = DSAKey (key_file)
                 else:
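
Editorial note: two leftovers in this hunk that 2to3 cannot catch: e.message no longer exists in Python 3, and the fallback names DSAKey even though the import at the top of this file provides DSSKey, so that branch would raise NameError if ever reached. A hedged sketch of a corrected fallback; load_private_key is illustrative, not from the source:

    from paramiko import DSSKey, RSAKey, SSHException

    def load_private_key(key_file):
        # Try RSA first, then fall back to DSS, for an unencrypted key file.
        try:
            return RSAKey.from_private_key_file(key_file)
        except SSHException:
            return DSSKey.from_private_key_file(key_file)
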
@@ -111,7 +111,7 @@ class SFTPStore(Store):
         return "%s/%s" % (self.path,  name)
 
     def list(self, type):
-        return filter(type_patterns[type].match, self.client.listdir(self.path))
+        return list(filter(type_patterns[type].match, self.client.listdir(self.path)))
 
     def get(self, type, name):
         return self.client.open(self.__build_fn(name), mode = 'rb')