--- /dev/null
+#!/usr/bin/python
+#
+# Utility for managing Cumulus archives.
+
+import getpass, os, stat, sys, time
+from optparse import OptionParser
+import lbs
+
+# We support up to "LBS Snapshot v0.6" formats, but are also limited by the lbs
+# module.
+FORMAT_VERSION = min(lbs.FORMAT_VERSION, (0, 6))
+
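+# parse_metadata_version converts a format string such as "LBS Snapshot v0.6"
+# into a comparable tuple such as (0, 6), so supported versions can be
+# checked with ordinary tuple comparison.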
+def check_version(format):
+ ver = lbs.parse_metadata_version(format)
+ if ver > FORMAT_VERSION:
+ raise RuntimeError("Unsupported LBS format: " + format)
+
+parser = OptionParser(usage="%prog [option]... command [arg]...")
+parser.add_option("-v", action="store_true", dest="verbose", default=False,
+ help="increase verbosity")
+parser.add_option("--store", dest="store",
+ help="specify path to backup data store")
+parser.add_option("--localdb", dest="localdb",
+ help="specify path to local database")
+parser.add_option("--intent", dest="intent", default=1.0,
+ help="give expected next snapshot type when cleaning")
+(options, args) = parser.parse_args(sys.argv[1:])
+
+# Read a passphrase from the user and store it in the LBS_GPG_PASSPHRASE
+# environment variable.
+def get_passphrase():
+ ENV_KEY = 'LBS_GPG_PASSPHRASE'
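+    # The passphrase is exported via the environment, presumably so that
+    # child processes (such as gpg) can read it; prompt only if unset.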
+    if ENV_KEY not in os.environ:
+ os.environ[ENV_KEY] = getpass.getpass()
+
+# Delete old snapshots from the local database, though do not actually schedule
+# any segment cleaning.
+# Syntax: $0 --localdb=LOCALDB prune-db
+def cmd_prune_db():
+ db = lbs.LocalDatabase(options.localdb)
+
+    # Delete old snapshots from the local database, using the same
+    # per-scheme garbage collection as "clean".
+    intent = float(options.intent)
+    for s in db.list_schemes():
+        db.garbage_collect(s, intent)
+    db.commit()
+
+# Run the segment cleaner.
+# Syntax: $0 --localdb=LOCALDB [--intent=INTENT] clean
+def cmd_clean(clean_threshold=7.0):
+ db = lbs.LocalDatabase(options.localdb)
+
+ # Delete old snapshots from the local database.
+ intent = float(options.intent)
+ for s in db.list_schemes():
+ db.garbage_collect(s, intent)
+
+    # Expire segments which are poorly utilized.  The cleaning list is
+    # assumed to be sorted by decreasing benefit, so stop at the first
+    # segment that falls below the threshold.
+ for s in db.get_segment_cleaning_list():
+ if s.cleaning_benefit > clean_threshold:
+ print "Cleaning segment %d (benefit %.2f)" % (s.id,
+ s.cleaning_benefit)
+ db.mark_segment_expired(s)
+ else:
+ break
+ db.balance_expired_objects()
+ db.commit()
+
+# List snapshots stored.
+# Syntax: $0 --store=DATADIR list-snapshots
+def cmd_list_snapshots():
+ store = lbs.LowlevelDataStore(options.store)
+ for s in sorted(store.list_snapshots()):
+ print s
+
+# List the size of the data needed by each snapshot, in MiB: the total,
+# plus how much was added and removed relative to the previous snapshot.
+# Syntax: $0 --store=DATADIR list-snapshot-sizes
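+# A sample output line (the values here are hypothetical):
+#   snapshot-20080101T120000: 142.312 +12.004 -3.551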
+def cmd_list_snapshot_sizes():
+ lowlevel = lbs.LowlevelDataStore(options.store)
+ store = lbs.ObjectStore(lowlevel)
+ previous = set()
+ for s in sorted(lowlevel.list_snapshots()):
+ d = lbs.parse_full(store.load_snapshot(s))
+ check_version(d['Format'])
+ segments = d['Segments'].split()
+ (size, added, removed) = (0, 0, 0)
+ for seg in segments:
+ segsize = lowlevel.lowlevel_stat(seg + ".tar.gpg")['size']
+ size += segsize
+ if seg not in previous: added += segsize
+ for seg in previous:
+ if seg not in segments:
+ removed += lowlevel.lowlevel_stat(seg + ".tar.gpg")['size']
+ previous = set(segments)
+        print "%s: %.3f +%.3f -%.3f" % (s, size / 1024.0**2,
+                                        added / 1024.0**2,
+                                        removed / 1024.0**2)
+
+# Build checksum list for objects in the given segments, or all segments if
+# none are specified.
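+# Each output line has the form SEGMENT/OBJECT:LENGTH:CHECKSUM.
+# Syntax: $0 --store=DATADIR object-sums [SEGMENT]...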
+def cmd_object_checksums(segments):
+ get_passphrase()
+ lowlevel = lbs.LowlevelDataStore(options.store)
+ store = lbs.ObjectStore(lowlevel)
+ if len(segments) == 0:
+ segments = sorted(lowlevel.list_segments())
+ for s in segments:
+ for (o, data) in store.load_segment(s):
+ csum = lbs.ChecksumCreator().update(data).compute()
+ print "%s/%s:%d:%s" % (s, o, len(data), csum)
+ store.cleanup()
+
+# Dump the parsed contents of one or more snapshot descriptors.
+# Syntax: $0 --store=DATADIR read-snapshots SNAPSHOT...
+def cmd_read_snapshots(snapshots):
+ get_passphrase()
+ lowlevel = lbs.LowlevelDataStore(options.store)
+ store = lbs.ObjectStore(lowlevel)
+ for s in snapshots:
+ d = lbs.parse_full(store.load_snapshot(s))
+ check_version(d['Format'])
+ print d
+ print d['Segments'].split()
+ store.cleanup()
+
+# Produce a flattened metadata dump from a snapshot.
+# Syntax: $0 --store=DATADIR read-metadata SNAPSHOT
+def cmd_read_metadata(snapshot):
+ get_passphrase()
+ lowlevel = lbs.LowlevelDataStore(options.store)
+ store = lbs.ObjectStore(lowlevel)
+ d = lbs.parse_full(store.load_snapshot(snapshot))
+ check_version(d['Format'])
+ metadata = lbs.read_metadata(store, d['Root'])
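+    # Copy the metadata log to stdout, squeezing any run of blank lines
+    # down to a single blank line (and dropping leading blanks entirely).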
+ blank = True
+ for l in metadata:
+ if l == '\n':
+ if blank: continue
+ blank = True
+ else:
+ blank = False
+ sys.stdout.write(l)
+ store.cleanup()
+
+# Verify snapshot integrity.
+# Syntax: $0 --store=DATADIR verify-snapshots SNAPSHOT...
+def cmd_verify_snapshots(snapshots):
+ get_passphrase()
+ lowlevel = lbs.LowlevelDataStore(options.store)
+ store = lbs.ObjectStore(lowlevel)
+ for s in snapshots:
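+        # lbs.accessed_segments is (presumably) updated by the lbs module
+        # as segments are read; clear it so each snapshot is measured on
+        # its own.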
+ lbs.accessed_segments.clear()
+ print "#### Snapshot", s
+ d = lbs.parse_full(store.load_snapshot(s))
+ check_version(d['Format'])
+ print "## Root:", d['Root']
+ metadata = lbs.iterate_metadata(store, d['Root'])
+ for m in metadata:
+ if m.fields['type'] not in ('-', 'f'): continue
+ print "%s [%d bytes]" % (m.fields['name'], int(m.fields['size']))
+ verifier = lbs.ChecksumVerifier(m.fields['checksum'])
+ size = 0
+ for block in m.data():
+ data = store.get(block)
+ verifier.update(data)
+ size += len(data)
+ if int(m.fields['size']) != size:
+ raise ValueError("File size does not match!")
+ if not verifier.valid():
+ raise ValueError("Bad checksum found")
+
+        # Verify that the list of segments included with the snapshot is
+        # accurate: it should cover every segment that was actually read,
+        # and should not list segments that were never needed.
+ listed_segments = set(d['Segments'].split())
+ if lbs.accessed_segments - listed_segments:
+ print "Error: Some segments not listed in descriptor!"
+ print sorted(list(lbs.accessed_segments - listed_segments))
+        if listed_segments - lbs.accessed_segments:
+ print "Warning: Extra unused segments listed in descriptor!"
+ print sorted(list(listed_segments - lbs.accessed_segments))
+ store.cleanup()
+
+# Restore a snapshot, or some subset of files from it.
+# Syntax: $0 --store=DATADIR restore-snapshot SNAPSHOT DESTDIR [PATH]...
+def cmd_restore_snapshot(args):
+ get_passphrase()
+ lowlevel = lbs.LowlevelDataStore(options.store)
+ store = lbs.ObjectStore(lowlevel)
+ snapshot = lbs.parse_full(store.load_snapshot(args[0]))
+ check_version(snapshot['Format'])
+ destdir = args[1]
+ paths = args[2:]
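+
+    # Hypothetical example: restore only etc/ and home/user from a snapshot:
+    #   $0 --store=/backup restore-snapshot snapshot-20080101T120000 \
+    #       /tmp/restore etc home/user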
+
+ def matchpath(path):
+ "Return true if the specified path should be included in the restore."
+
+ # No specification of what to restore => restore everything
+ if len(paths) == 0: return True
+
+ for p in paths:
+ if path == p: return True
+ if path.startswith(p + "/"): return True
+ return False
+
+ def warn(m, msg):
+ print "Warning: %s: %s" % (m.items.name, msg)
+
+ # Phase 1: Read the complete metadata log and create directory structure.
+ metadata_items = []
+ metadata_paths = {}
+ metadata_segments = {}
+ for m in lbs.iterate_metadata(store, snapshot['Root']):
+ pathname = os.path.normpath(m.items.name)
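+        # Strip any leading "/" so the name is treated as relative to
+        # destdir.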
+ while os.path.isabs(pathname):
+ pathname = pathname[1:]
+ if not matchpath(pathname): continue
+
+ destpath = os.path.join(destdir, pathname)
+ if m.items.type == 'd':
+ path = destpath
+ else:
+ (path, filename) = os.path.split(destpath)
+
+ metadata_items.append((pathname, m))
+ if m.items.type in ('-', 'f'):
+ metadata_paths[pathname] = m
+ for block in m.data():
+ (segment, object, checksum, slice) \
+ = lbs.ObjectStore.parse_ref(block)
+ if segment not in metadata_segments:
+ metadata_segments[segment] = set()
+ metadata_segments[segment].add(pathname)
+
+ try:
+ if not os.path.isdir(path):
+ print "mkdir:", path
+ os.makedirs(path)
+ except Exception, e:
+ warn(m, "Error creating directory structure: %s" % (e,))
+ continue
+
+    # Phase 2: Restore files, ordered by how data is stored in segments;
+    # grouping files by segment presumably lets each segment be fetched
+    # and decrypted just once.
+ def restore_file(pathname, m):
+ assert m.items.type in ('-', 'f')
+ print "extract:", pathname
+ destpath = os.path.join(destdir, pathname)
+
+        f = open(destpath, 'wb')
+ verifier = lbs.ChecksumVerifier(m.items.checksum)
+ size = 0
+ for block in m.data():
+ data = store.get(block)
+ verifier.update(data)
+ size += len(data)
+            f.write(data)
+        f.close()
+ if int(m.fields['size']) != size:
+ raise ValueError("File size does not match!")
+ if not verifier.valid():
+ raise ValueError("Bad checksum found")
+
+ while metadata_segments:
+ (segment, items) = metadata_segments.popitem()
+ print "+ Segment", segment
+ for pathname in sorted(items):
+ if pathname in metadata_paths:
+ restore_file(pathname, metadata_paths[pathname])
+ del metadata_paths[pathname]
+
+ print "+ Remaining files"
+ while metadata_paths:
+ (pathname, m) = metadata_paths.popitem()
+ restore_file(pathname, m)
+
+    # Phase 3: Restore special files (symlinks, devices).
+    # Phase 4: Restore directory permissions and modification times.
+    # Entries are processed in reverse order so that a directory's
+    # permissions and mtime are applied only after its contents have been
+    # restored (restoring a file would update the parent's mtime).
+ for (pathname, m) in reversed(metadata_items):
+ print "permissions:", pathname
+ destpath = os.path.join(destdir, pathname)
+ (path, filename) = os.path.split(destpath)
+
+ # TODO: Check for ../../../paths that might attempt to write outside
+ # the destination directory. Maybe also check attempts to follow
+ # symlinks pointing outside?
+
+ try:
+ if m.items.type in ('-', 'f', 'd'):
+ pass
+ elif m.items.type == 'l':
+ try:
+ target = m.items.target
+                except (AttributeError, KeyError):
+                    # Old (v0.2 format) name for 'target'.
+                    target = m.items.contents
+ os.symlink(target, destpath)
+ elif m.items.type == 'p':
+ os.mkfifo(destpath)
+ elif m.items.type in ('c', 'b'):
+ if m.items.type == 'c':
+ mode = 0600 | stat.S_IFCHR
+ else:
+ mode = 0600 | stat.S_IFBLK
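+                # Note: creating device nodes normally requires root
+                # privileges.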
+ os.mknod(destpath, mode, os.makedev(*m.items.device))
+            elif m.items.type == 's':
+                pass        # TODO: Restore sockets (not yet implemented).
+ else:
+ warn(m, "Unknown type code: " + m.items.type)
+ continue
+
+ except Exception, e:
+ warn(m, "Error restoring: %s" % (e,))
+ continue
+
+ try:
+ uid = m.items.user[0]
+ gid = m.items.group[0]
+ os.lchown(destpath, uid, gid)
+ except Exception, e:
+ warn(m, "Error restoring file ownership: %s" % (e,))
+
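+        # Permissions and timestamps of the symlink itself can't be set
+        # portably, so skip the remaining steps for symlinks.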
+ if m.items.type == 'l':
+ continue
+
+ try:
+ os.chmod(destpath, m.items.mode)
+ except Exception, e:
+ warn(m, "Error restoring file permissions: %s" % (e,))
+
+ try:
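+            # mtime comes from the snapshot metadata; atime is simply set
+            # to the current time.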
+ os.utime(destpath, (time.time(), m.items.mtime))
+ except Exception, e:
+ warn(m, "Error restoring file timestamps: %s" % (e,))
+
+ store.cleanup()
+
+if len(args) == 0:
+ parser.print_usage()
+ sys.exit(1)
+cmd = args[0]
+args = args[1:]
+if cmd == 'clean':
+ cmd_clean()
+elif cmd == 'prune-db':
+ cmd_prune_db()
+elif cmd == 'list-snapshots':
+ cmd_list_snapshots()
+elif cmd == 'object-sums':
+ cmd_object_checksums(args)
+elif cmd == 'read-snapshots':
+ cmd_read_snapshots(args)
+elif cmd == 'read-metadata':
+ cmd_read_metadata(args[0])
+elif cmd == 'list-snapshot-sizes':
+ cmd_list_snapshot_sizes()
+elif cmd == 'verify-snapshots':
+ cmd_verify_snapshots(args)
+elif cmd == 'restore-snapshot':
+ cmd_restore_snapshot(args)
+else:
+ print "Unknown command:", cmd
+ parser.print_usage()
+ sys.exit(1)