author    Andrew Scheller <github@loowis.durge.org>  2016-07-04 15:14:39 +0100
committer Andrew Scheller <github@loowis.durge.org>  2016-07-04 15:14:39 +0100
commit    119fd3e8d8c0297e1103c7d5e128edbaf737a006 (patch)
tree      0e85792028f63251b6f57c27c476d0280e6a293e
parent    249342f38762b223b7846526e4637553987f4006 (diff)
download  pyfilesystem-git-119fd3e8d8c0297e1103c7d5e128edbaf737a006.tar.gz
Minor whitespace tidyups

* convert all files to UNIX line-endings
* strip trailing whitespace
-rw-r--r--  fs/browsewin.py                          10
-rw-r--r--  fs/commands/fscat.py                     18
-rw-r--r--  fs/commands/fscp.py                     184
-rw-r--r--  fs/commands/fsinfo.py                    46
-rw-r--r--  fs/commands/fsmkdir.py                   16
-rw-r--r--  fs/commands/fsmv.py                      18
-rw-r--r--  fs/commands/fsrm.py                      24
-rw-r--r--  fs/commands/fstree.py                    28
-rw-r--r--  fs/contrib/bigfs/__init__.py             36
-rw-r--r--  fs/contrib/bigfs/subrangefile.py          6
-rw-r--r--  fs/contrib/davfs/util.py                  6
-rw-r--r--  fs/contrib/sqlitefs.py                  240
-rw-r--r--  fs/contrib/tahoelafs/__init__.py        830
-rw-r--r--  fs/contrib/tahoelafs/connection.py       24
-rw-r--r--  fs/contrib/tahoelafs/test_tahoelafs.py  104
-rw-r--r--  fs/contrib/tahoelafs/util.py            280
-rw-r--r--  fs/expose/django_storage.py               2
-rw-r--r--  fs/expose/fuse/fuse.py                  156
-rw-r--r--  fs/expose/fuse/fuse_ctypes.py           162
-rw-r--r--  fs/expose/importhook.py                   2
-rw-r--r--  fs/expose/serve/packetstream.py         120
-rw-r--r--  fs/expose/serve/server.py                86
-rw-r--r--  fs/expose/serve/threadpool.py            58
-rw-r--r--  fs/memoryfs.py                         1388
-rw-r--r--  fs/remotefs.py                          104
-rw-r--r--  fs/tests/test_multifs.py                 20
-rw-r--r--  fs/tests/test_s3fs.py                     6
-rw-r--r--  fs/tests/test_sqlitefs.py                 6
-rw-r--r--  fs/tests/test_utils.py                   66
-rw-r--r--  fs/tests/test_wrapfs.py                  12
-rw-r--r--  fs/tests/test_xattr.py                    4
-rw-r--r--  fs/tests/zipfs_binary_test.py            90
-rw-r--r--  fs/wrapfs/debugfs.py                    286
-rw-r--r--  fs/xattrs.py                              6
34 files changed, 2222 insertions(+), 2222 deletions(-)
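The commit message describes a purely mechanical pass: normalise every line ending to LF and strip trailing whitespace. For reference, a minimal Python sketch of an equivalent pass follows; it is illustrative only (no such script ships with this commit), and walking the fs/ tree is assumed from the diffstat above.

#!/usr/bin/env python
"""Illustrative tidy-up pass: convert CRLF/CR line endings to LF and
strip trailing whitespace. Not part of this commit."""
import os

def tidy(path):
    with open(path, 'rb') as f:
        data = f.read()
    # Normalise Windows (CRLF) and old Mac (CR) endings to UNIX (LF).
    lines = data.replace(b'\r\n', b'\n').replace(b'\r', b'\n').split(b'\n')
    # Strip trailing whitespace from every line.
    tidied = b'\n'.join(line.rstrip() for line in lines)
    if tidied != data:
        with open(path, 'wb') as f:
            f.write(tidied)

for root, dirs, files in os.walk('fs'):
    for name in files:
        if name.endswith('.py'):
            tidy(os.path.join(root, name))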
diff --git a/fs/browsewin.py b/fs/browsewin.py
index 9ce44fe..32f9b9c 100644
--- a/fs/browsewin.py
+++ b/fs/browsewin.py
@@ -37,7 +37,7 @@ class InfoFrame(wx.Frame):
for key in sorted(keys, key=lambda k:k.lower()):
self.list_ctrl.Append((key, unicode(info.get(key))))
-
+
self.Center()
@@ -109,10 +109,10 @@ class BrowseFrame(wx.Frame):
msg = "Failed to get directory listing for %s\n\nThe following error was reported:\n\n%s" % (path, e)
wx.MessageDialog(self, msg, "Error listing directory", wx.OK).ShowModal()
paths = []
-
-
+
+
#paths = [(self.fs.isdir(p), p) for p in self.fs.listdir(path, absolute=True)]
-
+
if self.hide_dotfiles:
paths = [p for p in paths if not isdotfile(p[1])]
@@ -173,7 +173,7 @@ class BrowseFrame(wx.Frame):
path = item_data["path"]
info = self.fs.getinfo(path)
- info_frame = InfoFrame(self, path, self.fs.desc(path), info)
+ info_frame = InfoFrame(self, path, self.fs.desc(path), info)
info_frame.Show()
info_frame.CenterOnParent()
diff --git a/fs/commands/fscat.py b/fs/commands/fscat.py
index 2849ce1..077a0e2 100644
--- a/fs/commands/fscat.py
+++ b/fs/commands/fscat.py
@@ -3,26 +3,26 @@ from fs.commands.runner import Command
import sys
class FSCat(Command):
-
+
usage = """fscat [OPTION]... [FILE]...
Concetanate FILE(s)"""
version = "1.0"
-
+
def do_run(self, options, args):
- count = 0
- for fs, path, is_dir in self.get_resources(args):
+ count = 0
+ for fs, path, is_dir in self.get_resources(args):
if is_dir:
self.error('%s is a directory\n' % path)
- return 1
+ return 1
self.output(fs.getcontents(path))
- count += 1
+ count += 1
if self.is_terminal() and count:
self.output('\n')
-
+
def run():
return FSCat().run()
-
+
if __name__ == "__main__":
sys.exit(run())
- \ No newline at end of file
+
diff --git a/fs/commands/fscp.py b/fs/commands/fscp.py
index 58d6c1a..6d61c46 100644
--- a/fs/commands/fscp.py
+++ b/fs/commands/fscp.py
@@ -8,52 +8,52 @@ import time
import threading
-class FileOpThread(threading.Thread):
-
+class FileOpThread(threading.Thread):
+
def __init__(self, action, name, dest_fs, queue, on_done, on_error):
- self.action = action
+ self.action = action
self.dest_fs = dest_fs
self.queue = queue
self.on_done = on_done
self.on_error = on_error
self.finish_event = threading.Event()
- super(FileOpThread, self).__init__()
-
- def run(self):
-
- while not self.finish_event.isSet():
+ super(FileOpThread, self).__init__()
+
+ def run(self):
+
+ while not self.finish_event.isSet():
try:
path_type, fs, path, dest_path = self.queue.get(timeout=0.1)
except queue.Empty:
continue
- try:
- if path_type == FScp.DIR:
+ try:
+ if path_type == FScp.DIR:
self.dest_fs.makedir(path, recursive=True, allow_recreate=True)
- else:
- self.action(fs, path, self.dest_fs, dest_path, overwrite=True)
- except Exception, e:
- self.on_error(e)
- self.queue.task_done()
- break
+ else:
+ self.action(fs, path, self.dest_fs, dest_path, overwrite=True)
+ except Exception, e:
+ self.on_error(e)
+ self.queue.task_done()
+ break
else:
- self.queue.task_done()
+ self.queue.task_done()
self.on_done(path_type, fs, path, self.dest_fs, dest_path)
-
-
+
+
class FScp(Command):
-
+
DIR, FILE = 0, 1
-
+
usage = """fscp [OPTION]... [SOURCE]... [DESTINATION]
Copy SOURCE to DESTINATION"""
-
+
def get_action(self):
if self.options.threads > 1:
return copyfile_non_atomic
else:
return copyfile
-
+
def get_verb(self):
return 'copying...'
@@ -62,40 +62,40 @@ Copy SOURCE to DESTINATION"""
optparse.add_option('-p', '--progress', dest='progress', action="store_true", default=False,
help="show progress", metavar="PROGRESS")
optparse.add_option('-t', '--threads', dest='threads', action="store", default=1,
- help="number of threads to use", type="int", metavar="THREAD_COUNT")
+ help="number of threads to use", type="int", metavar="THREAD_COUNT")
return optparse
-
+
def do_run(self, options, args):
-
- self.options = options
+
+ self.options = options
if len(args) < 2:
self.error("at least two filesystems required\n")
return 1
-
+
srcs = args[:-1]
- dst = args[-1]
-
+ dst = args[-1]
+
dst_fs, dst_path = self.open_fs(dst, writeable=True, create_dir=True)
-
+
if dst_path is not None and dst_fs.isfile(dst_path):
self.error('Destination must be a directory\n')
return 1
-
+
if dst_path:
dst_fs = dst_fs.makeopendir(dst_path)
- dst_path = None
-
+ dst_path = None
+
copy_fs_paths = []
-
- progress = options.progress
-
+
+ progress = options.progress
+
if progress:
sys.stdout.write(self.progress_bar(len(srcs), 0, 'scanning...'))
sys.stdout.flush()
-
- self.root_dirs = []
+
+ self.root_dirs = []
for i, fs_url in enumerate(srcs):
- src_fs, src_path = self.open_fs(fs_url)
+ src_fs, src_path = self.open_fs(fs_url)
if src_path is None:
src_path = '/'
@@ -103,44 +103,44 @@ Copy SOURCE to DESTINATION"""
if iswildcard(src_path):
for file_path in src_fs.listdir(wildcard=src_path, full=True):
copy_fs_paths.append((self.FILE, src_fs, file_path, file_path))
-
- else:
- if src_fs.isdir(src_path):
- self.root_dirs.append((src_fs, src_path))
+
+ else:
+ if src_fs.isdir(src_path):
+ self.root_dirs.append((src_fs, src_path))
src_sub_fs = src_fs.opendir(src_path)
for dir_path, file_paths in src_sub_fs.walk():
- if dir_path not in ('', '/'):
+ if dir_path not in ('', '/'):
copy_fs_paths.append((self.DIR, src_sub_fs, dir_path, dir_path))
sub_fs = src_sub_fs.opendir(dir_path)
- for file_path in file_paths:
+ for file_path in file_paths:
copy_fs_paths.append((self.FILE, sub_fs, file_path, pathjoin(dir_path, file_path)))
else:
if src_fs.exists(src_path):
copy_fs_paths.append((self.FILE, src_fs, src_path, src_path))
else:
self.error('%s is not a file or directory\n' % src_path)
- return 1
-
+ return 1
+
if progress:
sys.stdout.write(self.progress_bar(len(srcs), i + 1, 'scanning...'))
sys.stdout.flush()
-
+
if progress:
sys.stdout.write(self.progress_bar(len(copy_fs_paths), 0, self.get_verb()))
sys.stdout.flush()
-
- if self.options.threads > 1:
+
+ if self.options.threads > 1:
copy_fs_dirs = [r for r in copy_fs_paths if r[0] == self.DIR]
- copy_fs_paths = [r for r in copy_fs_paths if r[0] == self.FILE]
- for path_type, fs, path, dest_path in copy_fs_dirs:
- dst_fs.makedir(path, allow_recreate=True, recursive=True)
-
+ copy_fs_paths = [r for r in copy_fs_paths if r[0] == self.FILE]
+ for path_type, fs, path, dest_path in copy_fs_dirs:
+ dst_fs.makedir(path, allow_recreate=True, recursive=True)
+
self.lock = threading.RLock()
-
+
self.total_files = len(copy_fs_paths)
self.done_files = 0
-
- file_queue = queue.Queue()
+
+ file_queue = queue.Queue()
threads = [FileOpThread(self.get_action(),
'T%i' % i,
dst_fs,
@@ -148,59 +148,59 @@ Copy SOURCE to DESTINATION"""
self.on_done,
self.on_error)
for i in xrange(options.threads)]
-
+
for thread in threads:
thread.start()
-
+
self.action_errors = []
complete = False
- try:
- enqueue = file_queue.put
+ try:
+ enqueue = file_queue.put
for resource in copy_fs_paths:
enqueue(resource)
-
+
while not file_queue.empty():
time.sleep(0)
if self.any_error():
raise SystemExit
# Can't use queue.join here, or KeyboardInterrupt will not be
- # caught until the queue is finished
+ # caught until the queue is finished
#file_queue.join()
-
- except KeyboardInterrupt:
- options.progress = False
+
+ except KeyboardInterrupt:
+ options.progress = False
self.output("\nCancelling...\n")
-
+
except SystemExit:
- options.progress = False
-
+ options.progress = False
+
finally:
- sys.stdout.flush()
+ sys.stdout.flush()
for thread in threads:
- thread.finish_event.set()
+ thread.finish_event.set()
for thread in threads:
thread.join()
complete = True
if not self.any_error():
self.post_actions()
-
+
dst_fs.close()
-
+
if self.action_errors:
for error in self.action_errors:
- self.error(self.wrap_error(unicode(error)) + '\n')
+ self.error(self.wrap_error(unicode(error)) + '\n')
sys.stdout.flush()
else:
if complete and options.progress:
sys.stdout.write(self.progress_bar(self.total_files, self.done_files, ''))
sys.stdout.write('\n')
sys.stdout.flush()
-
+
def post_actions(self):
pass
-
- def on_done(self, path_type, src_fs, src_path, dst_fs, dst_path):
- self.lock.acquire()
+
+ def on_done(self, path_type, src_fs, src_path, dst_fs, dst_path):
+ self.lock.acquire()
try:
if self.options.verbose:
if path_type == self.DIR:
@@ -208,44 +208,44 @@ Copy SOURCE to DESTINATION"""
else:
print "%s -> %s" % (src_fs.desc(src_path), dst_fs.desc(dst_path))
elif self.options.progress:
- self.done_files += 1
+ self.done_files += 1
sys.stdout.write(self.progress_bar(self.total_files, self.done_files, self.get_verb()))
sys.stdout.flush()
finally:
self.lock.release()
-
- def on_error(self, e):
+
+ def on_error(self, e):
self.lock.acquire()
try:
self.action_errors.append(e)
finally:
self.lock.release()
-
- def any_error(self):
+
+ def any_error(self):
self.lock.acquire()
try:
return bool(self.action_errors)
finally:
self.lock.release()
-
+
def progress_bar(self, total, remaining, msg=''):
bar_width = 20
throbber = '|/-\\'
throb = throbber[remaining % len(throbber)]
done = float(remaining) / total
-
+
done_steps = int(done * bar_width)
- bar_steps = ('#' * done_steps).ljust(bar_width)
-
- msg = '%s %i%%' % (msg, int(done * 100.0))
+ bar_steps = ('#' * done_steps).ljust(bar_width)
+
+ msg = '%s %i%%' % (msg, int(done * 100.0))
msg = msg.ljust(20)
-
+
if total == remaining:
throb = ''
-
+
bar = '\r%s[%s] %s\r' % (throb, bar_steps, msg.lstrip())
return bar
-
+
def run():
return FScp().run()
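The threaded copy in FScp above is a standard producer/consumer arrangement: FileOpThread workers pull (path_type, fs, path, dest_path) tuples from a shared queue until a finish event is set. Reduced to a self-contained sketch, with all names hypothetical rather than fs API:

import threading
try:
    import queue                 # Python 3
except ImportError:
    import Queue as queue        # Python 2, which this codebase targets

def worker(jobs, finish_event):
    # Poll with a timeout so the finish event is checked regularly,
    # mirroring FileOpThread.run() above.
    while not finish_event.is_set():
        try:
            job = jobs.get(timeout=0.1)
        except queue.Empty:
            continue
        try:
            job()                # the copy/mkdir action
        finally:
            jobs.task_done()     # always account for the item

jobs = queue.Queue()
finish = threading.Event()
threads = [threading.Thread(target=worker, args=(jobs, finish))
           for _ in range(4)]
for t in threads:
    t.start()
for n in range(10):
    jobs.put(lambda: None)       # placeholder work items
jobs.join()                      # fscp polls instead, so Ctrl-C is caught
finish.set()
for t in threads:
    t.join()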
diff --git a/fs/commands/fsinfo.py b/fs/commands/fsinfo.py
index 943a28f..5cdbbac 100644
--- a/fs/commands/fsinfo.py
+++ b/fs/commands/fsinfo.py
@@ -4,10 +4,10 @@ import sys
from datetime import datetime
class FSInfo(Command):
-
+
usage = """fsinfo [OPTION]... [PATH]
Display information regarding an FS resource"""
-
+
def get_optparse(self):
optparse = super(FSInfo, self).get_optparse()
optparse.add_option('-k', '--key', dest='keys', action='append', default=[],
@@ -20,12 +20,12 @@ Display information regarding an FS resource"""
help="list directories only", metavar="DIRSONLY")
optparse.add_option('-f', '--filesonly', dest='filesonly', action="store_true", default=False,
help="list files only", metavar="FILESONLY")
- return optparse
-
-
+ return optparse
+
+
def do_run(self, options, args):
-
- def wrap_value(val):
+
+ def wrap_value(val):
if val.rstrip() == '\0':
return self.wrap_error('... missing ...')
return val
@@ -40,41 +40,41 @@ Display information regarding an FS resource"""
except:
text = repr(text)
return text
-
-
+
+
keys = options.keys or None
for fs, path, is_dir in self.get_resources(args,
files_only=options.filesonly,
- dirs_only=options.dirsonly):
+ dirs_only=options.dirsonly):
if not options.omit:
- if options.simple:
+ if options.simple:
file_line = u'%s\n' % self.wrap_filename(path)
else:
file_line = u'[%s] %s\n' % (self.wrap_filename(path), self.wrap_faded(fs.desc(path)))
- self.output(file_line)
+ self.output(file_line)
info = fs.getinfo(path)
-
+
for k, v in info.items():
if k.startswith('_'):
del info[k]
elif not isinstance(v, (basestring, int, long, float, bool, datetime)):
- del info[k]
-
- if keys:
+ del info[k]
+
+ if keys:
table = [(k, make_printable(info.get(k, '\0'))) for k in keys]
else:
keys = sorted(info.keys())
table = [(k, make_printable(info[k])) for k in sorted(info.keys())]
-
+
if options.simple:
for row in table:
self.output(row[-1] + '\n')
- else:
- self.output_table(table, {0:self.wrap_table_header, 1:wrap_value})
+ else:
+ self.output_table(table, {0:self.wrap_table_header, 1:wrap_value})
+
-
def run():
- return FSInfo().run()
-
+ return FSInfo().run()
+
if __name__ == "__main__":
- sys.exit(run())
+ sys.exit(run())
diff --git a/fs/commands/fsmkdir.py b/fs/commands/fsmkdir.py
index ad4324e..73c0a02 100644
--- a/fs/commands/fsmkdir.py
+++ b/fs/commands/fsmkdir.py
@@ -3,20 +3,20 @@ from fs.commands.runner import Command
import sys
class FSMkdir(Command):
-
+
usage = """fsmkdir [PATH]
Make a directory"""
version = "1.0"
-
+
def do_run(self, options, args):
-
- for fs_url in args:
- self.open_fs(fs_url, create_dir=True)
-
+
+ for fs_url in args:
+ self.open_fs(fs_url, create_dir=True)
+
def run():
return FSMkdir().run()
-
+
if __name__ == "__main__":
sys.exit(run())
- \ No newline at end of file
+
diff --git a/fs/commands/fsmv.py b/fs/commands/fsmv.py
index 9e96de6..5867a4c 100644
--- a/fs/commands/fsmv.py
+++ b/fs/commands/fsmv.py
@@ -5,26 +5,26 @@ from fs.commands import fscp
import sys
class FSmv(fscp.FScp):
-
+
usage = """fsmv [OPTION]... [SOURCE] [DESTINATION]
Move files from SOURCE to DESTINATION"""
-
+
def get_verb(self):
return 'moving...'
-
- def get_action(self):
- if self.options.threads > 1:
+
+ def get_action(self):
+ if self.options.threads > 1:
return movefile_non_atomic
else:
return movefile
-
+
def post_actions(self):
for fs, dirpath in self.root_dirs:
- if not contains_files(fs, dirpath):
+ if not contains_files(fs, dirpath):
fs.removedir(dirpath, force=True)
-
+
def run():
return FSmv().run()
-
+
if __name__ == "__main__":
sys.exit(run())
diff --git a/fs/commands/fsrm.py b/fs/commands/fsrm.py
index 5e2bd09..8985e64 100644
--- a/fs/commands/fsrm.py
+++ b/fs/commands/fsrm.py
@@ -6,10 +6,10 @@ from fs.commands.runner import Command
import sys
class FSrm(Command):
-
+
usage = """fsrm [OPTION]... [PATH]
Remove a file or directory at PATH"""
-
+
def get_optparse(self):
optparse = super(FSrm, self).get_optparse()
optparse.add_option('-f', '--force', dest='force', action='store_true', default=False,
@@ -19,13 +19,13 @@ Remove a file or directory at PATH"""
optparse.add_option('-r', '--recursive', dest='recursive', action='store_true', default=False,
help='remove directories and their contents recursively')
return optparse
-
+
def do_run(self, options, args):
-
+
interactive = options.interactive
verbose = options.verbose
-
- for fs, path, is_dir in self.get_resources(args):
+
+ for fs, path, is_dir in self.get_resources(args):
if interactive:
if is_dir:
msg = "remove directory '%s'?" % path
@@ -44,11 +44,11 @@ Remove a file or directory at PATH"""
else:
if verbose:
self.output("removed '%s'\n" % path)
-
-
-def run():
+
+
+def run():
return FSrm().run()
-
+
if __name__ == "__main__":
- sys.exit(run())
- \ No newline at end of file
+ sys.exit(run())
+
diff --git a/fs/commands/fstree.py b/fs/commands/fstree.py
index 3d2a7b9..bbb88b5 100644
--- a/fs/commands/fstree.py
+++ b/fs/commands/fstree.py
@@ -7,18 +7,18 @@ from fs.commands.runner import Command
from fs.utils import print_fs
class FSTree(Command):
-
+
usage = """fstree [OPTION]... [PATH]
Recursively display the contents of PATH in an ascii tree"""
-
+
def get_optparse(self):
- optparse = super(FSTree, self).get_optparse()
+ optparse = super(FSTree, self).get_optparse()
optparse.add_option('-l', '--level', dest='depth', type="int", default=5,
help="Descend only LEVEL directories deep (-1 for infinite)", metavar="LEVEL")
optparse.add_option('-g', '--gui', dest='gui', action='store_true', default=False,
help="browse the tree with a gui")
optparse.add_option('-a', '--all', dest='all', action='store_true', default=False,
- help="do not hide dot files")
+ help="do not hide dot files")
optparse.add_option('--dirsfirst', dest='dirsfirst', action='store_true', default=False,
help="List directories before files")
optparse.add_option('-P', dest="pattern", default=None,
@@ -26,13 +26,13 @@ Recursively display the contents of PATH in an ascii tree"""
optparse.add_option('-d', dest="dirsonly", default=False, action='store_true',
help="List directories only")
return optparse
-
- def do_run(self, options, args):
-
+
+ def do_run(self, options, args):
+
if not args:
args = ['.']
-
- for fs, path, is_dir in self.get_resources(args, single=True):
+
+ for fs, path, is_dir in self.get_resources(args, single=True):
if not is_dir:
self.error(u"'%s' is not a dir\n" % path)
return 1
@@ -62,13 +62,13 @@ Recursively display the contents of PATH in an ascii tree"""
return '%i %s' % (count, one)
else:
return '%i %s' % (count, many)
-
+
self.output("%s, %s\n" % (pluralize('directory', 'directories', dircount),
pluralize('file', 'files', filecount)))
-
+
def run():
- return FSTree().run()
-
+ return FSTree().run()
+
if __name__ == "__main__":
sys.exit(run())
-
+
diff --git a/fs/contrib/bigfs/__init__.py b/fs/contrib/bigfs/__init__.py
index 27d05be..fe47350 100644
--- a/fs/contrib/bigfs/__init__.py
+++ b/fs/contrib/bigfs/__init__.py
@@ -2,7 +2,7 @@
fs.contrib.bigfs
================
-A FS object that represents the contents of a BIG file
+A FS object that represents the contents of a BIG file
(C&C Generals, BfME C&C3, C&C Red Alert 3, C&C4 file format)
Written by Koen van de Sande
@@ -34,14 +34,14 @@ class BIGEntry:
return f
else:
return self.decompress(f, wrapAsFile=True)
-
+
def getcontents(self, baseFile):
f = SubrangeFile(baseFile, self.offset, self.storedSize)
if not self.isCompressed:
return f.read()
else:
return self.decompress(f, wrapAsFile=False)
-
+
def decompress(self, g, wrapAsFile=True):
buf = g.read(2)
magic = unpack(">H", buf)[0]
@@ -55,7 +55,7 @@ class BIGEntry:
outputSize = unpack(">I", "\0" + g.read(3))[0]
if magic & 0x100:
unknown1 = unpack(">I", "\0" + g.read(3))[0]
-
+
output = []
while True:
opcode = unpack("B", g.read(1))[0]
@@ -63,36 +63,36 @@ class BIGEntry:
# read second opcode
opcode2 = unpack("B", g.read(1))[0]
#print "0x80", toBits(opcode), toBits(opcode2), opcode & 0x03, (((opcode & 0x60) << 3) | opcode2) + Q, ((opcode & 0x1C) >> 2) + 2 + R
-
+
# copy at most 3 bytes to output stream (lowest 2 bits of opcode)
count = opcode & 0x03
for i in range(count):
output.append(g.read(1))
-
+
# you always have to look at least one byte, hence the +1
# use bit6 and bit5 (bit7=0 to trigger the if-statement) of opcode, and 8 bits of opcode2 (10-bits)
lookback = (((opcode & 0x60) << 3) | opcode2) + 1
-
+
# use bit4..2 of opcode
count = ((opcode & 0x1C) >> 2) + 3
-
+
for i in range(count):
output.append(output[-lookback])
elif not (opcode & 0x40): # opcode: bit7..6==10 to get here
opcode2 = unpack("B", g.read(1))[0]
opcode3 = unpack("B", g.read(1))[0]
#print "0x40", toBits(opcode), toBits(opcode2), toBits(opcode3)
-
+
# copy count bytes (upper 2 bits of opcode2)
count = opcode2 >> 6
for i in range(count):
output.append(g.read(1))
-
+
# look back again (lower 6 bits of opcode2, all 8 bits of opcode3, total 14-bits)
lookback = (((opcode2 & 0x3F) << 8) | opcode3) + 1
# lower 6 bits of opcode are the count to copy
count = (opcode & 0x3F) + 4
-
+
for i in range(count):
output.append(output[-lookback])
elif not (opcode & 0x20): # opcode: bit7..5=110 to get here
@@ -104,7 +104,7 @@ class BIGEntry:
count = opcode & 0x03
for i in range(count):
output.append(g.read(1))
-
+
# look back: bit4 of opcode, all bits of opcode2 and opcode3, total 17-bits
lookback = (((opcode & 0x10) >> 4) << 16) | (opcode2 << 8) | (opcode3) + 1
# bit3..2 of opcode and the whole of opcode4
@@ -129,15 +129,15 @@ class BIGEntry:
for i in range(count):
output.append(g.read(1))
#print "0xLO", toBits(opcode), count
-
+
if wrapAsFile:
return StringIO("".join(output))
else:
return "".join(output)
-
+
def __str__(self):
return "<BIGEntry %s offset=%d storedSize=%d isCompressed=%s realSize=%d in %s" % (self.filename, self.offset, self.storedSize, str(self.isCompressed), self.realSize, self.filenameBIG)
-
+
class _ExceptionProxy(object):
@@ -156,12 +156,12 @@ class _ExceptionProxy(object):
class BigFS(FS):
"""A FileSystem that represents a BIG file."""
-
+
_meta = { 'virtual' : False,
'read_only' : True,
'unicode_paths' : True,
'case_insensitive_paths' : False,
- 'network' : False,
+ 'network' : False,
}
def __init__(self, filename, mode="r", thread_synchronize=True):
@@ -260,7 +260,7 @@ class BigFS(FS):
@synchronize
def open(self, path, mode="r", **kwargs):
- path = normpath(relpath(path))
+ path = normpath(relpath(path))
if 'r' in mode:
if self.file_mode not in 'ra':
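The decompress() method above walks an opcode stream in EA's RefPack-style LZ format, and the bit-fiddling is easiest to follow in isolation. Below is a sketch of just the two-byte command branch (first opcode has bit 7 clear), using the same masks as the code; the helper names are illustrative, not repo code.

def refpack_two_byte(opcode, opcode2, read, output):
    """Two-byte RefPack command, opcode bit7 == 0 (sketch only).
    read(n) yields n input bytes; output is a bytearray of decoded data."""
    # Low 2 bits of the first opcode: up to 3 literal bytes copied through.
    for _ in range(opcode & 0x03):
        output += read(1)
    # Distance: bits 6-5 of opcode joined with all 8 bits of opcode2
    # (10 bits), plus 1 because a reference looks back at least one byte.
    lookback = (((opcode & 0x60) << 3) | opcode2) + 1
    # Length: bits 4-2 of opcode, with an implicit minimum of 3.
    count = ((opcode & 0x1C) >> 2) + 3
    # Copy byte-by-byte so overlapping references repeat earlier output.
    for _ in range(count):
        output.append(output[-lookback])

Called in a loop with output accumulated across commands, this reproduces the first branch of the opcode dispatch in decompress(); the three- and four-byte branches extend the same distance/length scheme with wider bit fields.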
diff --git a/fs/contrib/bigfs/subrangefile.py b/fs/contrib/bigfs/subrangefile.py
index e0ca1cb..3a99c55 100644
--- a/fs/contrib/bigfs/subrangefile.py
+++ b/fs/contrib/bigfs/subrangefile.py
@@ -28,7 +28,7 @@ class SubrangeFile:
self.startOffset = startOffset
self.fileSize = fileSize
self.seek(0)
-
+
def __str__(self):
return "<SubrangeFile: %s@%d size=%d>" % (self.name, self.startOffset, self.fileSize)
@@ -48,7 +48,7 @@ class SubrangeFile:
offset = 0
offset = self.startOffset + self.fileSize + offset
self.f.seek(offset)
-
+
def tell(self):
return self.f.tell() - self.startOffset
@@ -60,7 +60,7 @@ class SubrangeFile:
if self.tell() + iSize > self.fileSize:
iSize = self.fileSize - self.tell()
return iSize
-
+
def readline(self,size=None):
toRead = self.__maxSize(size)
return self.f.readline(toRead)
diff --git a/fs/contrib/davfs/util.py b/fs/contrib/davfs/util.py
index 0fd587a..1dd9e7d 100644
--- a/fs/contrib/davfs/util.py
+++ b/fs/contrib/davfs/util.py
@@ -42,7 +42,7 @@ def get_filesize(file):
raise AttributeError
return file.size
-
+
def file_chunks(f,chunk_size=1024*64):
"""Generator yielding chunks of a file.
@@ -108,7 +108,7 @@ class FakeReq:
def add_unredirected_header(self,header,value):
self.connection.putheader(header,value)
-
+
class FakeResp:
"""Compatability interface to use cookielib with raw httplib objects."""
@@ -178,5 +178,5 @@ if len(cookielib.parse_ns_headers([_test_cookie])) != 2:
result.append(pairs)
return result
cookielib.parse_ns_headers = parse_ns_headers
- assert len(cookielib.parse_ns_headers([_test_cookie])) == 2
+ assert len(cookielib.parse_ns_headers([_test_cookie])) == 2
diff --git a/fs/contrib/sqlitefs.py b/fs/contrib/sqlitefs.py
index b334d32..d6248e0 100644
--- a/fs/contrib/sqlitefs.py
+++ b/fs/contrib/sqlitefs.py
@@ -48,14 +48,14 @@ class SqliteFsFileBase(object):
self.closed = False
#real file like object. Most of the methods are passed to this object
self.real_stream= real_file
-
+
def close(self):
if not self.closed and self.real_stream is not None:
self._do_close()
self.fs._on_close(self)
self.real_stream.close()
self.closed = True
-
+
def __str__(self):
return "<SqliteFS File in %s %s>" % (self.fs, self.path)
@@ -70,18 +70,18 @@ class SqliteFsFileBase(object):
def flush(self):
self.real_stream.flush()
-
+
def __iter__(self):
raise OperationFailedError('__iter__', self.path)
-
+
def next(self):
raise OperationFailedError('next', self.path)
-
+
def readline(self, *args, **kwargs):
- raise OperationFailedError('readline', self.path)
-
+ raise OperationFailedError('readline', self.path)
+
def read(self, size=None):
- raise OperationFailedError('read', self.path)
+ raise OperationFailedError('read', self.path)
def seek(self, *args, **kwargs):
return self.real_stream.seek(*args, **kwargs)
@@ -91,20 +91,20 @@ class SqliteFsFileBase(object):
def truncate(self, *args, **kwargs):
raise OperationFailedError('truncate', self.path)
-
+
def write(self, data):
raise OperationFailedError('write', self.path)
-
+
def writelines(self, *args, **kwargs):
raise OperationFailedError('writelines', self.path)
-
+
def __enter__(self):
return self
def __exit__(self,exc_type,exc_value,traceback):
self.close()
return False
-
+
class SqliteWritableFile(SqliteFsFileBase):
'''
represents an sqlite file. Usually used for 'writing'. OnClose will
@@ -114,28 +114,28 @@ class SqliteWritableFile(SqliteFsFileBase):
super(SqliteWritableFile, self).__init__(fs, path, id)
#open a temp file and return that.
self.real_stream = tempfile.SpooledTemporaryFile(max_size='128*1000')
-
+
def _do_close(self):
#push the contents of the file to blob
self.fs._writeblob(self.id, self.real_stream)
-
+
def truncate(self, *args, **kwargs):
return self.real_stream.truncate(*args, **kwargs)
-
+
def write(self, data):
return self.real_stream.write(data)
def writelines(self, *args, **kwargs):
return self.real_stream.writelines(*args, **kwargs)
-
+
class SqliteReadableFile(SqliteFsFileBase):
def __init__(self,fs, path, id, real_file):
super(SqliteReadableFile, self).__init__(fs, path, id, real_file)
- assert(self.real_stream != None)
-
+ assert(self.real_stream != None)
+
def _do_close(self):
pass
-
+
def __iter__(self):
return iter(self.real_stream)
@@ -144,13 +144,13 @@ class SqliteReadableFile(SqliteFsFileBase):
def readline(self, *args, **kwargs):
return self.real_stream.readline(*args, **kwargs)
-
+
def read(self, size=None):
if( size==None):
size=-1
return self.real_stream.read(size)
-
+
class SqliteFS(FS):
'''
sqlite based file system to store the files in sqlite database as 'blobs'
@@ -161,12 +161,12 @@ class SqliteFS(FS):
id : file id
name : name of file
parent : id of parent directory for the file.
-
+
FsDirMetaData table:
name : name of the directory (wihtout parent directory names)
fullpath : full path of the directory including the parent directory name
parent_id : id of the parent directory
-
+
FsFileTable:
size : file size in bytes (this is actual file size). Blob size may be
different if compressed
@@ -176,49 +176,49 @@ class SqliteFS(FS):
last_modified : timestamp of last modification
author : who changed it last
content : blob where actual file contents are stored.
-
+
TODO : Need an open files table or a flag in sqlite database. To avoid
opening the file twice. (even from the different process or thread)
'''
-
+
def __init__(self, sqlite_filename):
super(SqliteFS, self).__init__()
self.dbpath =sqlite_filename
- self.dbcon =None
+ self.dbcon =None
self.__actual_query_cur = None
self.__actual_update_cur =None
self.open_files = []
-
+
def close(self):
'''
unlock all files. and close all open connections.
'''
- self.close_all()
+ self.close_all()
self._closedb()
super(SqliteFS,self).close()
-
+
def _initdb(self):
if( self.dbcon is None):
- self.dbcon = apsw.Connection(self.dbpath)
+ self.dbcon = apsw.Connection(self.dbpath)
self._create_tables()
-
+
@property
def _querycur(self):
assert(self.dbcon != None)
if( self.__actual_query_cur == None):
self.__actual_query_cur = self.dbcon.cursor()
return(self.__actual_query_cur)
-
+
@property
def _updatecur(self):
assert(self.dbcon != None)
if( self.__actual_update_cur == None):
self.__actual_update_cur = self.dbcon.cursor()
return(self.__actual_update_cur)
-
+
def _closedb(self):
self.dbcon.close()
-
+
def close_all(self):
'''
close all open files
@@ -226,7 +226,7 @@ class SqliteFS(FS):
openfiles = list(self.open_files)
for fileobj in openfiles:
fileobj.close()
-
+
def _create_tables(self):
cur = self._updatecur
cur.execute("CREATE TABLE IF NOT EXISTS FsFileMetaData(name text, fileid INTEGER, parent INTEGER)")
@@ -235,12 +235,12 @@ class SqliteFS(FS):
cur.execute("CREATE TABLE IF NOT EXISTS FsFileTable(type text, compression text, author TEXT, \
created timestamp, last_modified timestamp, last_accessed timestamp, \
locked BOOL, size INTEGER, contents BLOB)")
-
+
#if the root directory name is created
rootid = self._get_dir_id('/')
if( rootid is None):
cur.execute("INSERT INTO FsDirMetaData (name, fullpath) VALUES ('/','/')")
-
+
def _get_dir_id(self, dirpath):
'''
get the id for given directory path.
@@ -248,15 +248,15 @@ class SqliteFS(FS):
dirpath = remove_end_slash(dirpath)
if( dirpath== None or len(dirpath)==0):
dirpath = '/'
-
+
self._querycur.execute("SELECT rowid from FsDirMetaData where fullpath=?",(dirpath,))
dirid = None
dirrow = fetchone(self._querycur)
if( dirrow):
dirid = dirrow[0]
-
+
return(dirid)
-
+
def _get_file_id(self, dir_id, filename):
'''
get the file id from the path
@@ -269,10 +269,10 @@ class SqliteFS(FS):
if( row ):
file_id = row[0]
return(file_id)
-
+
def _get_file_contentid(self, file_id):
'''
- return the file content id from the 'content' table (i.e. FsFileTable)
+ return the file content id from the 'content' table (i.e. FsFileTable)
'''
assert(file_id != None)
content_id = None
@@ -281,7 +281,7 @@ class SqliteFS(FS):
assert(row != None)
content_id = row[0]
return(content_id)
-
+
def _create_file_entry(self, dirid, filename, **kwargs):
'''
create file entry in the file table
@@ -302,7 +302,7 @@ class SqliteFS(FS):
#self.dbcon.commit()
fileid = self.dbcon.last_insert_rowid()
return(fileid)
-
+
def _writeblob(self, fileid, stream):
'''
extract the data from stream and write it as blob.
@@ -314,15 +314,15 @@ class SqliteFS(FS):
blob_stream=self.dbcon.blobopen("main", "FsFileTable", "contents", fileid, True) # 1 is for read/write
stream.seek(0)
blob_stream.write(stream.read())
- blob_stream.close()
-
- def _on_close(self, fileobj):
+ blob_stream.close()
+
+ def _on_close(self, fileobj):
#Unlock file on close.
assert(fileobj != None and fileobj.id != None)
self._lockfileentry(fileobj.id, lock=False)
#Now remove it from openfile list.
self.open_files.remove(fileobj)
-
+
def _islocked(self, fileid):
'''
check if the file is locked.
@@ -336,7 +336,7 @@ class SqliteFS(FS):
assert(row != None)
locked = row[0]
return(locked)
-
+
def _lockfileentry(self, contentid, lock=True):
'''
lock the file entry in the database.
@@ -345,18 +345,18 @@ class SqliteFS(FS):
last_accessed=datetime.datetime.now().isoformat()
self._updatecur.execute('UPDATE FsFileTable SET locked=?, last_accessed=? where rowid=?',
(lock, last_accessed, contentid))
-
- def _makedir(self, parent_id, dname):
+
+ def _makedir(self, parent_id, dname):
self._querycur.execute("SELECT fullpath from FsDirMetaData where rowid=?",(parent_id,))
row = fetchone(self._querycur)
assert(row != None)
- parentpath = row[0]
+ parentpath = row[0]
fullpath= pathjoin(parentpath, dname)
- fullpath= remove_end_slash(fullpath)
+ fullpath= remove_end_slash(fullpath)
created = datetime.datetime.now().isoformat()
self._updatecur.execute('INSERT INTO FsDirMetaData(name, fullpath, parentid,created) \
VALUES(?,?,?,?)', (dname, fullpath, parent_id,created))
-
+
def _rename_file(self, src, dst):
'''
rename source file 'src' to destination file 'dst'
@@ -374,8 +374,8 @@ class SqliteFS(FS):
if( dstfile_id != None):
raise DestinationExistsError(dst)
#All checks are done. Delete the entry for the source file.
- #Create an entry for the destination file.
-
+ #Create an entry for the destination file.
+
srcdir_id = self._get_dir_id(srcdir)
assert(srcdir_id != None)
srcfile_id = self._get_file_id(srcdir_id, srcfname)
@@ -384,7 +384,7 @@ class SqliteFS(FS):
self._updatecur.execute('DELETE FROM FsFileMetaData where ROWID=?',(srcfile_id,))
self._updatecur.execute("INSERT INTO FsFileMetaData(name, parent, fileid) \
VALUES(?,?,?)",(dstfname, dstdirid, srccontent_id))
-
+
def _rename_dir(self, src, dst):
src = remove_end_slash(src)
dst = remove_end_slash(dst)
@@ -397,27 +397,27 @@ class SqliteFS(FS):
raise ParentDirectoryMissingError(dst)
srcdirid = self._get_dir_id(src)
assert(srcdirid != None)
- dstdname = basename(dst)
+ dstdname = basename(dst)
self._updatecur.execute('UPDATE FsDirMetaData SET name=?, fullpath=?, \
parentid=? where ROWID=?',(dstdname, dst, dstparentid, srcdirid,))
-
+
def _get_dir_list(self, dirid, path, full):
assert(dirid != None)
assert(path != None)
if( full==True):
dirsearchpath = path + r'%'
self._querycur.execute('SELECT fullpath FROM FsDirMetaData where fullpath LIKE ?',
- (dirsearchpath,))
+ (dirsearchpath,))
else:
#search inside this directory only
self._querycur.execute('SELECT fullpath FROM FsDirMetaData where parentid=?',
(dirid,))
- dirlist = [row[0] for row in self._querycur]
+ dirlist = [row[0] for row in self._querycur]
return dirlist
-
+
def _get_file_list(self, dirpath, full):
assert(dirpath != None)
-
+
if( full==True):
searchpath = dirpath + r"%"
self._querycur.execute('SELECT FsFileMetaData.name, FsDirMetaData.fullpath \
@@ -429,10 +429,10 @@ class SqliteFS(FS):
self._querycur.execute('SELECT FsFileMetaData.name, FsDirMetaData.fullpath \
FROM FsFileMetaData, FsDirMetaData where FsFileMetaData.parent=FsDirMetaData.ROWID \
and FsFileMetaData.parent =?',(parentid,))
-
- filelist = [pathjoin(row[1],row[0]) for row in self._querycur]
+
+ filelist = [pathjoin(row[1],row[0]) for row in self._querycur]
return(filelist)
-
+
def _get_dir_info(self, path):
'''
get the directory information dictionary.
@@ -440,7 +440,7 @@ class SqliteFS(FS):
info = dict()
info['st_mode'] = 0755
return info
-
+
def _get_file_info(self, path):
filedir = dirname(path)
filename = basename(path)
@@ -462,36 +462,36 @@ class SqliteFS(FS):
info['last_accessed'] = row[4]
info['st_mode'] = 0666
return(info)
-
+
def _isfile(self,path):
path = normpath(path)
filedir = dirname(path)
filename = basename(path)
- dirid = self._get_dir_id(filedir)
+ dirid = self._get_dir_id(filedir)
return(dirid is not None and self._get_file_id(dirid, filename) is not None)
-
+
def _isdir(self,path):
- path = normpath(path)
+ path = normpath(path)
return(self._get_dir_id(path) is not None)
-
+
def _isexist(self,path):
return self._isfile(path) or self._isdir(path)
-
+
@synchronize
def open(self, path, mode='r', **kwargs):
self._initdb()
path = normpath(path)
filedir = dirname(path)
filename = basename(path)
-
+
dir_id = self._get_dir_id(filedir)
if( dir_id == None):
raise ResourceNotFoundError(filedir)
-
- file_id = self._get_file_id(dir_id, filename)
+
+ file_id = self._get_file_id(dir_id, filename)
if( self._islocked(file_id)):
- raise ResourceLockedError(path)
-
+ raise ResourceLockedError(path)
+
sqfsfile=None
if 'r' in mode:
if file_id is None:
@@ -500,74 +500,74 @@ class SqliteFS(FS):
#make sure lock status is updated before the blob is opened
self._lockfileentry(content_id, lock=True)
blob_stream=self.dbcon.blobopen("main", "FsFileTable", "contents", file_id, False) # 1 is for read/write
- sqfsfile = SqliteReadableFile(self, path, content_id, blob_stream)
-
+ sqfsfile = SqliteReadableFile(self, path, content_id, blob_stream)
+
elif 'w' in mode or 'a' in mode:
if( file_id is None):
file_id= self._create_file_entry(dir_id, filename)
assert(file_id != None)
-
+
content_id = self._get_file_contentid(file_id)
- #file_dir_entry.accessed_time = datetime.datetime.now()
+ #file_dir_entry.accessed_time = datetime.datetime.now()
self._lockfileentry(content_id, lock=True)
- sqfsfile = SqliteWritableFile(self, path, content_id)
-
+ sqfsfile = SqliteWritableFile(self, path, content_id)
+
if( sqfsfile):
self.open_files.append(sqfsfile)
return sqfsfile
-
- raise ResourceNotFoundError(path)
-
+
+ raise ResourceNotFoundError(path)
+
@synchronize
def isfile(self, path):
self._initdb()
return self._isfile(path)
-
+
@synchronize
def isdir(self, path):
self._initdb()
return self._isdir(path)
-
+
@synchronize
def listdir(self, path='/', wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False):
- path = normpath(path)
+ path = normpath(path)
dirid = self._get_dir_id(path)
if( dirid == None):
raise ResourceInvalidError(path)
-
+
dirlist = self._get_dir_list(dirid, path,full)
if( dirs_only):
pathlist = dirlist
- else:
+ else:
filelist = self._get_file_list(path, full)
-
+
if( files_only == True):
pathlist = filelist
else:
pathlist = filelist + dirlist
-
-
+
+
if( wildcard and dirs_only == False):
pass
-
+
if( absolute == False):
pathlist = map(lambda dpath:frombase(path,dpath), pathlist)
-
+
return(pathlist)
-
-
+
+
@synchronize
def makedir(self, path, recursive=False, allow_recreate=False):
self._initdb()
path = remove_end_slash(normpath(path))
-
+
if(self._isexist(path)==False):
parentdir = dirname(path)
dname = basename(path)
-
+
parent_id = self._get_dir_id(parentdir)
if( parent_id ==None):
- if( recursive == False):
+ if( recursive == False):
raise ParentDirectoryMissingError(path)
else:
self.makedir(parentdir, recursive,allow_recreate)
@@ -575,7 +575,7 @@ class SqliteFS(FS):
self._makedir(parent_id,dname)
else:
raise DestinationExistsError(path)
-
+
@synchronize
def remove(self, path):
self._initdb()
@@ -583,16 +583,16 @@ class SqliteFS(FS):
if( self.isdir(path)==True):
#path is actually a directory
raise ResourceInvalidError(path)
-
+
filedir = dirname(path)
filename = basename(path)
dirid = self._get_dir_id(filedir)
fileid = self._get_file_id(dirid, filename)
if( fileid == None):
raise ResourceNotFoundError(path)
-
+
content_id = self._get_file_contentid(fileid)
-
+
self._updatecur.execute("DELETE FROM FsFileMetaData where ROWID=?",(fileid,))
#check there is any other file pointing to same location. If not
#delete the content as well.
@@ -600,8 +600,8 @@ class SqliteFS(FS):
(content_id,))
row = fetchone(self._querycur)
if( row == None or row[0] == 0):
- self._updatecur.execute("DELETE FROM FsFileTable where ROWID=?",(content_id,))
-
+ self._updatecur.execute("DELETE FROM FsFileTable where ROWID=?",(content_id,))
+
@synchronize
def removedir(self,path, recursive=False, force=False):
self._initdb()
@@ -617,36 +617,36 @@ class SqliteFS(FS):
row = fetchone(self._qurycur)
if( row[0] > 0):
raise DirectoryNotEmptyError(path)
- self._updatecur.execute("DELETE FROM FsDirMetaData where ROWID=?",(dirid,))
-
+ self._updatecur.execute("DELETE FROM FsDirMetaData where ROWID=?",(dirid,))
+
@synchronize
def rename(self,src, dst):
self._initdb()
src = normpath(src)
dst = normpath(dst)
if self._isexist(dst)== False:
- #first check if this is a directory rename or a file rename
+ #first check if this is a directory rename or a file rename
if( self.isfile(src)):
self._rename_file(src, dst)
elif self.isdir(src):
self._rename_dir(src, dst)
else:
raise ResourceNotFoundError(path)
- else:
+ else:
raise DestinationExistsError(dst)
-
+
@synchronize
def getinfo(self, path):
- self._initdb()
+ self._initdb()
path = normpath(path)
isfile = False
isdir = self.isdir(path)
if( isdir == False):
isfile=self.isfile(path)
-
+
if( not isfile and not isdir):
raise ResourceNotFoundError(path)
-
+
if isdir:
info= self._get_dir_info(path)
else:
@@ -664,7 +664,7 @@ class SqliteFS(FS):
# #mp = dokan.mount(sqfs,driveletter,foreground=True)
# #mp.unmount()
# sqfs.close()
-#
+#
#def run_tests(sqlfilename):
# fs = SqliteFS(sqlfilename)
# fs.makedir('/test')
@@ -691,15 +691,15 @@ class SqliteFS(FS):
# flist = fs.listdir('/', full=True,absolute=True,files_only=True)
# print flist
# fs.close()
-#
+#
#if __name__ == '__main__':
# run_tests("sqfs.sqlite")
# mount_windows("sqfs.sqlite", 'm')
-#
+#
# #fs.remove('/test1/test1.txt')
# #try:
# # f = fs.open('/test1/test1.txt', "r")
# #except ResourceNotFoundError:
# # print "Success : file doesnot exist"
# #fs.browse()
-# \ No newline at end of file
+#
diff --git a/fs/contrib/tahoelafs/__init__.py b/fs/contrib/tahoelafs/__init__.py
index c01dd5e..91858bd 100644
--- a/fs/contrib/tahoelafs/__init__.py
+++ b/fs/contrib/tahoelafs/__init__.py
@@ -1,415 +1,415 @@
-'''
-fs.contrib.tahoelafs
-====================
-
-This modules provides a PyFilesystem interface to the Tahoe Least Authority
-File System. Tahoe-LAFS is a distributed, encrypted, fault-tolerant storage
-system:
-
- http://tahoe-lafs.org/
-
-You will need access to a Tahoe-LAFS "web api" service.
-
-Example (it will use publicly available (but slow) Tahoe-LAFS cloud)::
-
- from fs.contrib.tahoelafs import TahoeLAFS, Connection
- dircap = TahoeLAFS.createdircap(webapi='http://insecure.tahoe-lafs.org')
- print "Your dircap (unique key to your storage directory) is", dircap
- print "Keep it safe!"
- fs = TahoeLAFS(dircap, autorun=False, webapi='http://insecure.tahoe-lafs.org')
- f = fs.open("foo.txt", "a")
- f.write('bar!')
- f.close()
- print "Now visit %s and enjoy :-)" % fs.getpathurl('foo.txt')
-
-When any problem occurred, you can turn on internal debugging messages::
-
- import logging
- l = logging.getLogger()
- l.setLevel(logging.DEBUG)
- l.addHandler(logging.StreamHandler(sys.stdout))
-
- ... your Python code using TahoeLAFS ...
-
-TODO:
-
- * unicode support
- * try network errors / bad happiness
- * exceptions
- * tests
- * sanitize all path types (., /)
- * support for extra large file uploads (poster module)
- * Possibility to block write until upload done (Tahoe mailing list)
- * Report something sane when Tahoe crashed/unavailable
- * solve failed unit tests (makedir_winner, ...)
- * file times
- * docs & author
- * python3 support
- * remove creating blank files (depends on FileUploadManager)
-
-TODO (Not TahoeLAFS specific tasks):
- * RemoteFileBuffer on the fly buffering support
- * RemoteFileBuffer unit tests
- * RemoteFileBuffer submit to trunk
- * Implement FileUploadManager + faking isfile/exists of just processing file
- * pyfilesystem docs is outdated (rename, movedir, ...)
-
-'''
-
-
-import stat as statinfo
-
-import logging
-from logging import DEBUG, INFO, ERROR, CRITICAL
-
-import fs
-import fs.errors as errors
-from fs.path import abspath, relpath, normpath, dirname, pathjoin
-from fs.base import FS, NullFile
-from fs import _thread_synchronize_default, SEEK_END
-from fs.remote import CacheFSMixin, RemoteFileBuffer
-from fs.base import fnmatch, NoDefaultMeta
-
-from util import TahoeUtil
-from connection import Connection
-
-from six import b
-
-logger = fs.getLogger('fs.tahoelafs')
-
-def _fix_path(func):
- """Method decorator for automatically normalising paths."""
- def wrapper(self, *args, **kwds):
- if len(args):
- args = list(args)
- args[0] = _fixpath(args[0])
- return func(self, *args, **kwds)
- return wrapper
-
-
-def _fixpath(path):
- """Normalize the given path."""
- return abspath(normpath(path))
-
-
-
-class _TahoeLAFS(FS):
- """FS providing raw access to a Tahoe-LAFS Filesystem.
-
- This class implements all the details of interacting with a Tahoe-backed
- filesystem, but you probably don't want to use it in practice. Use the
- TahoeLAFS class instead, which has some internal caching to improve
- performance.
- """
-
- _meta = { 'virtual' : False,
- 'read_only' : False,
- 'unicode_paths' : True,
- 'case_insensitive_paths' : False,
- 'network' : True
- }
-
-
- def __init__(self, dircap, largefilesize=10*1024*1024, webapi='http://127.0.0.1:3456'):
- '''Creates instance of TahoeLAFS.
-
- :param dircap: special hash allowing user to work with TahoeLAFS directory.
- :param largefilesize: - Create placeholder file for files larger than this treshold.
- Uploading and processing of large files can last extremely long (many hours),
- so placing this placeholder can help you to remember that upload is processing.
- Setting this to None will skip creating placeholder files for any uploads.
- '''
- self.dircap = dircap if not dircap.endswith('/') else dircap[:-1]
- self.largefilesize = largefilesize
- self.connection = Connection(webapi)
- self.tahoeutil = TahoeUtil(webapi)
- super(_TahoeLAFS, self).__init__(thread_synchronize=_thread_synchronize_default)
-
- def __str__(self):
- return "<TahoeLAFS: %s>" % self.dircap
-
- @classmethod
- def createdircap(cls, webapi='http://127.0.0.1:3456'):
- return TahoeUtil(webapi).createdircap()
-
- def getmeta(self,meta_name,default=NoDefaultMeta):
- if meta_name == "read_only":
- return self.dircap.startswith('URI:DIR2-RO')
- return super(_TahoeLAFS,self).getmeta(meta_name,default)
-
- @_fix_path
- def open(self, path, mode='r', **kwargs):
- self._log(INFO, 'Opening file %s in mode %s' % (path, mode))
- newfile = False
- if not self.exists(path):
- if 'w' in mode or 'a' in mode:
- newfile = True
- else:
- self._log(DEBUG, "File %s not found while opening for reads" % path)
- raise errors.ResourceNotFoundError(path)
- elif self.isdir(path):
- self._log(DEBUG, "Path %s is directory, not a file" % path)
- raise errors.ResourceInvalidError(path)
- elif 'w' in mode:
- newfile = True
-
- if newfile:
- self._log(DEBUG, 'Creating empty file %s' % path)
- if self.getmeta("read_only"):
- raise errors.UnsupportedError('read only filesystem')
- self.setcontents(path, b(''))
- handler = NullFile()
- else:
- self._log(DEBUG, 'Opening existing file %s for reading' % path)
- handler = self.getrange(path,0)
-
- return RemoteFileBuffer(self, path, mode, handler,
- write_on_flush=False)
-
- @_fix_path
- def desc(self, path):
- try:
- return self.getinfo(path)
- except:
- return ''
-
- @_fix_path
- def exists(self, path):
- try:
- self.getinfo(path)
- self._log(DEBUG, "Path %s exists" % path)
- return True
- except errors.ResourceNotFoundError:
- self._log(DEBUG, "Path %s does not exists" % path)
- return False
- except errors.ResourceInvalidError:
- self._log(DEBUG, "Path %s does not exists, probably misspelled URI" % path)
- return False
-
- @_fix_path
- def getsize(self, path):
- try:
- size = self.getinfo(path)['size']
- self._log(DEBUG, "Size of %s is %d" % (path, size))
- return size
- except errors.ResourceNotFoundError:
- return 0
-
- @_fix_path
- def isfile(self, path):
- try:
- isfile = (self.getinfo(path)['type'] == 'filenode')
- except errors.ResourceNotFoundError:
- #isfile = not path.endswith('/')
- isfile = False
- self._log(DEBUG, "Path %s is file: %d" % (path, isfile))
- return isfile
-
- @_fix_path
- def isdir(self, path):
- try:
- isdir = (self.getinfo(path)['type'] == 'dirnode')
- except errors.ResourceNotFoundError:
- isdir = False
- self._log(DEBUG, "Path %s is directory: %d" % (path, isdir))
- return isdir
-
-
- def listdir(self, *args, **kwargs):
- return [ item[0] for item in self.listdirinfo(*args, **kwargs) ]
-
- def listdirinfo(self, *args, **kwds):
- return list(self.ilistdirinfo(*args,**kwds))
-
- def ilistdir(self, *args, **kwds):
- for item in self.ilistdirinfo(*args,**kwds):
- yield item[0]
-
- @_fix_path
- def ilistdirinfo(self, path="/", wildcard=None, full=False, absolute=False,
- dirs_only=False, files_only=False):
- self._log(DEBUG, "Listing directory (listdirinfo) %s" % path)
-
- if dirs_only and files_only:
- raise ValueError("dirs_only and files_only can not both be True")
-
- for item in self.tahoeutil.list(self.dircap, path):
- if dirs_only and item['type'] == 'filenode':
- continue
- elif files_only and item['type'] == 'dirnode':
- continue
-
- if wildcard is not None:
- if isinstance(wildcard,basestring):
- if not fnmatch.fnmatch(item['name'], wildcard):
- continue
- else:
- if not wildcard(item['name']):
- continue
-
- if full:
- item_path = relpath(pathjoin(path, item['name']))
- elif absolute:
- item_path = abspath(pathjoin(path, item['name']))
- else:
- item_path = item['name']
-
- yield (item_path, item)
-
- @_fix_path
- def remove(self, path):
- self._log(INFO, 'Removing file %s' % path)
- if self.getmeta("read_only"):
- raise errors.UnsupportedError('read only filesystem')
-
- if not self.isfile(path):
- if not self.isdir(path):
- raise errors.ResourceNotFoundError(path)
- raise errors.ResourceInvalidError(path)
-
- try:
- self.tahoeutil.unlink(self.dircap, path)
- except Exception, e:
- raise errors.ResourceInvalidError(path)
-
- @_fix_path
- def removedir(self, path, recursive=False, force=False):
- self._log(INFO, "Removing directory %s" % path)
- if self.getmeta("read_only"):
- raise errors.UnsupportedError('read only filesystem')
- if not self.isdir(path):
- if not self.isfile(path):
- raise errors.ResourceNotFoundError(path)
- raise errors.ResourceInvalidError(path)
- if not force and self.listdir(path):
- raise errors.DirectoryNotEmptyError(path)
-
- self.tahoeutil.unlink(self.dircap, path)
-
- if recursive and path != '/':
- try:
- self.removedir(dirname(path), recursive=True)
- except errors.DirectoryNotEmptyError:
- pass
-
- @_fix_path
- def makedir(self, path, recursive=False, allow_recreate=False):
- self._log(INFO, "Creating directory %s" % path)
- if self.getmeta("read_only"):
- raise errors.UnsupportedError('read only filesystem')
- if self.exists(path):
- if not self.isdir(path):
- raise errors.ResourceInvalidError(path)
- if not allow_recreate:
- raise errors.DestinationExistsError(path)
- if not recursive and not self.exists(dirname(path)):
- raise errors.ParentDirectoryMissingError(path)
- self.tahoeutil.mkdir(self.dircap, path)
-
- def movedir(self, src, dst, overwrite=False):
- self.move(src, dst, overwrite=overwrite)
-
- def move(self, src, dst, overwrite=False):
- self._log(INFO, "Moving file from %s to %s" % (src, dst))
- if self.getmeta("read_only"):
- raise errors.UnsupportedError('read only filesystem')
- src = _fixpath(src)
- dst = _fixpath(dst)
- if not self.exists(dirname(dst)):
- raise errors.ParentDirectoryMissingError(dst)
- if not overwrite and self.exists(dst):
- raise errors.DestinationExistsError(dst)
- self.tahoeutil.move(self.dircap, src, dst)
-
- def rename(self, src, dst):
- self.move(src, dst)
-
- def copy(self, src, dst, overwrite=False, chunk_size=16384):
- if self.getmeta("read_only"):
- raise errors.UnsupportedError('read only filesystem')
- # FIXME: this is out of date; how to do native tahoe copy?
- # FIXME: Workaround because isfile() not exists on _TahoeLAFS
- FS.copy(self, src, dst, overwrite, chunk_size)
-
- def copydir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=16384):
- if self.getmeta("read_only"):
- raise errors.UnsupportedError('read only filesystem')
- # FIXME: this is out of date; how to do native tahoe copy?
- # FIXME: Workaround because isfile() not exists on _TahoeLAFS
- FS.copydir(self, src, dst, overwrite, ignore_errors, chunk_size)
-
-
- def _log(self, level, message):
- if not logger.isEnabledFor(level): return
- logger.log(level, u'(%d) %s' % (id(self),
- unicode(message).encode('ASCII', 'replace')))
-
- @_fix_path
- def getpathurl(self, path, allow_none=False, webapi=None):
- '''
- Retrieve URL where the file/directory is stored
- '''
- if webapi == None:
- webapi = self.connection.webapi
- self._log(DEBUG, "Retrieving URL for %s over %s" % (path, webapi))
- path = self.tahoeutil.fixwinpath(path, False)
- return u"%s/uri/%s%s" % (webapi, self.dircap, path)
-
- @_fix_path
- def getrange(self, path, offset, length=None):
- return self.connection.get(u'/uri/%s%s' % (self.dircap, path),
- offset=offset, length=length)
-
- @_fix_path
- def setcontents(self, path, file, chunk_size=64*1024):
- self._log(INFO, 'Uploading file %s' % path)
- size=None
-
- if self.getmeta("read_only"):
- raise errors.UnsupportedError('read only filesystem')
-
- # Workaround for large files:
- # First create zero file placeholder, then
- # upload final content.
- if self.largefilesize != None and getattr(file, 'read', None):
- # As 'file' can be also a string, need to check,
- # if 'file' looks like duck. Sorry, file.
- file.seek(0, SEEK_END)
- size = file.tell()
- file.seek(0)
-
- if size > self.largefilesize:
- self.connection.put(u'/uri/%s%s' % (self.dircap, path),
- "PyFilesystem.TahoeLAFS: Upload started, final size %d" % size)
-
- self.connection.put(u'/uri/%s%s' % (self.dircap, path), file, size=size)
-
- @_fix_path
- def getinfo(self, path):
- self._log(INFO, 'Reading meta for %s' % path)
- info = self.tahoeutil.info(self.dircap, path)
- #import datetime
- #info['created_time'] = datetime.datetime.now()
- #info['modified_time'] = datetime.datetime.now()
- #info['accessed_time'] = datetime.datetime.now()
- if info['type'] == 'filenode':
- info["st_mode"] = 0x700 | statinfo.S_IFREG
- elif info['type'] == 'dirnode':
- info["st_mode"] = 0x700 | statinfo.S_IFDIR
- return info
-
-
-
-class TahoeLAFS(CacheFSMixin,_TahoeLAFS):
- """FS providing cached access to a Tahoe Filesystem.
-
- This class is the preferred means to access a Tahoe filesystem. It
- maintains an internal cache of recently-accessed metadata to speed
- up operations.
- """
-
- def __init__(self, *args, **kwds):
- kwds.setdefault("cache_timeout",60)
- super(TahoeLAFS,self).__init__(*args,**kwds)
-
-
+'''
+fs.contrib.tahoelafs
+====================
+
+This modules provides a PyFilesystem interface to the Tahoe Least Authority
+File System. Tahoe-LAFS is a distributed, encrypted, fault-tolerant storage
+system:
+
+ http://tahoe-lafs.org/
+
+You will need access to a Tahoe-LAFS "web api" service.
+
+Example (it will use publicly available (but slow) Tahoe-LAFS cloud)::
+
+ from fs.contrib.tahoelafs import TahoeLAFS, Connection
+ dircap = TahoeLAFS.createdircap(webapi='http://insecure.tahoe-lafs.org')
+ print "Your dircap (unique key to your storage directory) is", dircap
+ print "Keep it safe!"
+ fs = TahoeLAFS(dircap, autorun=False, webapi='http://insecure.tahoe-lafs.org')
+ f = fs.open("foo.txt", "a")
+ f.write('bar!')
+ f.close()
+ print "Now visit %s and enjoy :-)" % fs.getpathurl('foo.txt')
+
+When any problem occurred, you can turn on internal debugging messages::
+
+ import logging
+ l = logging.getLogger()
+ l.setLevel(logging.DEBUG)
+ l.addHandler(logging.StreamHandler(sys.stdout))
+
+ ... your Python code using TahoeLAFS ...
+
+TODO:
+
+ * unicode support
+ * try network errors / bad happiness
+ * exceptions
+ * tests
+ * sanitize all path types (., /)
+ * support for extra large file uploads (poster module)
+ * Possibility to block write until upload done (Tahoe mailing list)
+ * Report something sane when Tahoe crashed/unavailable
+ * solve failed unit tests (makedir_winner, ...)
+ * file times
+ * docs & author
+ * python3 support
+ * remove creating blank files (depends on FileUploadManager)
+
+TODO (Not TahoeLAFS specific tasks):
+ * RemoteFileBuffer on the fly buffering support
+ * RemoteFileBuffer unit tests
+ * RemoteFileBuffer submit to trunk
+ * Implement FileUploadManager + faking isfile/exists of just processing file
+ * pyfilesystem docs is outdated (rename, movedir, ...)
+
+'''
+
+
+import stat as statinfo
+
+import logging
+from logging import DEBUG, INFO, ERROR, CRITICAL
+
+import fs
+import fs.errors as errors
+from fs.path import abspath, relpath, normpath, dirname, pathjoin
+from fs.base import FS, NullFile
+from fs import _thread_synchronize_default, SEEK_END
+from fs.remote import CacheFSMixin, RemoteFileBuffer
+from fs.base import fnmatch, NoDefaultMeta
+
+from util import TahoeUtil
+from connection import Connection
+
+from six import b
+
+logger = fs.getLogger('fs.tahoelafs')
+
+def _fix_path(func):
+ """Method decorator for automatically normalising paths."""
+ def wrapper(self, *args, **kwds):
+ if len(args):
+ args = list(args)
+ args[0] = _fixpath(args[0])
+ return func(self, *args, **kwds)
+ return wrapper
+
+
+def _fixpath(path):
+ """Normalize the given path."""
+ return abspath(normpath(path))
+
+
+
+class _TahoeLAFS(FS):
+ """FS providing raw access to a Tahoe-LAFS Filesystem.
+
+ This class implements all the details of interacting with a Tahoe-backed
+ filesystem, but you probably don't want to use it in practice. Use the
+ TahoeLAFS class instead, which has some internal caching to improve
+ performance.
+ """
+
+ _meta = { 'virtual' : False,
+ 'read_only' : False,
+ 'unicode_paths' : True,
+ 'case_insensitive_paths' : False,
+ 'network' : True
+ }
+
+
+ def __init__(self, dircap, largefilesize=10*1024*1024, webapi='http://127.0.0.1:3456'):
+ '''Creates instance of TahoeLAFS.
+
+ :param dircap: special hash allowing user to work with TahoeLAFS directory.
+ :param largefilesize: - Create placeholder file for files larger than this treshold.
+ Uploading and processing of large files can last extremely long (many hours),
+ so placing this placeholder can help you to remember that upload is processing.
+ Setting this to None will skip creating placeholder files for any uploads.
+ '''
+ self.dircap = dircap if not dircap.endswith('/') else dircap[:-1]
+ self.largefilesize = largefilesize
+ self.connection = Connection(webapi)
+ self.tahoeutil = TahoeUtil(webapi)
+ super(_TahoeLAFS, self).__init__(thread_synchronize=_thread_synchronize_default)
+
+ def __str__(self):
+ return "<TahoeLAFS: %s>" % self.dircap
+
+ @classmethod
+ def createdircap(cls, webapi='http://127.0.0.1:3456'):
+ return TahoeUtil(webapi).createdircap()
+
+ def getmeta(self,meta_name,default=NoDefaultMeta):
+ if meta_name == "read_only":
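+            # Read-only Tahoe directory caps are prefixed 'URI:DIR2-RO'.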
+ return self.dircap.startswith('URI:DIR2-RO')
+ return super(_TahoeLAFS,self).getmeta(meta_name,default)
+
+ @_fix_path
+ def open(self, path, mode='r', **kwargs):
+ self._log(INFO, 'Opening file %s in mode %s' % (path, mode))
+ newfile = False
+ if not self.exists(path):
+ if 'w' in mode or 'a' in mode:
+ newfile = True
+ else:
+ self._log(DEBUG, "File %s not found while opening for reads" % path)
+ raise errors.ResourceNotFoundError(path)
+ elif self.isdir(path):
+ self._log(DEBUG, "Path %s is directory, not a file" % path)
+ raise errors.ResourceInvalidError(path)
+ elif 'w' in mode:
+ newfile = True
+
+ if newfile:
+ self._log(DEBUG, 'Creating empty file %s' % path)
+ if self.getmeta("read_only"):
+ raise errors.UnsupportedError('read only filesystem')
+ self.setcontents(path, b(''))
+ handler = NullFile()
+ else:
+ self._log(DEBUG, 'Opening existing file %s for reading' % path)
+ handler = self.getrange(path,0)
+
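+        # Writes are buffered by the RemoteFileBuffer and only uploaded when
+        # the file is closed (write_on_flush=False), so for example
+        #   f = fs.open('foo.txt', 'w'); f.write(b('bar')); f.close()
+        # results in a single PUT to the web API on close().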
+ return RemoteFileBuffer(self, path, mode, handler,
+ write_on_flush=False)
+
+ @_fix_path
+ def desc(self, path):
+ try:
+ return self.getinfo(path)
+ except:
+ return ''
+
+ @_fix_path
+ def exists(self, path):
+ try:
+ self.getinfo(path)
+ self._log(DEBUG, "Path %s exists" % path)
+ return True
+ except errors.ResourceNotFoundError:
+ self._log(DEBUG, "Path %s does not exists" % path)
+ return False
+ except errors.ResourceInvalidError:
+ self._log(DEBUG, "Path %s does not exists, probably misspelled URI" % path)
+ return False
+
+ @_fix_path
+ def getsize(self, path):
+ try:
+ size = self.getinfo(path)['size']
+ self._log(DEBUG, "Size of %s is %d" % (path, size))
+ return size
+ except errors.ResourceNotFoundError:
+ return 0
+
+ @_fix_path
+ def isfile(self, path):
+ try:
+ isfile = (self.getinfo(path)['type'] == 'filenode')
+ except errors.ResourceNotFoundError:
+ #isfile = not path.endswith('/')
+ isfile = False
+ self._log(DEBUG, "Path %s is file: %d" % (path, isfile))
+ return isfile
+
+ @_fix_path
+ def isdir(self, path):
+ try:
+ isdir = (self.getinfo(path)['type'] == 'dirnode')
+ except errors.ResourceNotFoundError:
+ isdir = False
+ self._log(DEBUG, "Path %s is directory: %d" % (path, isdir))
+ return isdir
+
+
+ def listdir(self, *args, **kwargs):
+ return [ item[0] for item in self.listdirinfo(*args, **kwargs) ]
+
+ def listdirinfo(self, *args, **kwds):
+ return list(self.ilistdirinfo(*args,**kwds))
+
+ def ilistdir(self, *args, **kwds):
+ for item in self.ilistdirinfo(*args,**kwds):
+ yield item[0]
+
+ @_fix_path
+ def ilistdirinfo(self, path="/", wildcard=None, full=False, absolute=False,
+ dirs_only=False, files_only=False):
+ self._log(DEBUG, "Listing directory (listdirinfo) %s" % path)
+
+ if dirs_only and files_only:
+ raise ValueError("dirs_only and files_only can not both be True")
+
+ for item in self.tahoeutil.list(self.dircap, path):
+ if dirs_only and item['type'] == 'filenode':
+ continue
+ elif files_only and item['type'] == 'dirnode':
+ continue
+
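+            # 'wildcard' may be a glob string (matched with fnmatch) or any
+            # callable predicate on the entry name, e.g.
+            #   ilistdirinfo('/', wildcard='*.txt')
+            #   ilistdirinfo('/', wildcard=lambda name: not name.startswith('.'))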
+ if wildcard is not None:
+ if isinstance(wildcard,basestring):
+ if not fnmatch.fnmatch(item['name'], wildcard):
+ continue
+ else:
+ if not wildcard(item['name']):
+ continue
+
+ if full:
+ item_path = relpath(pathjoin(path, item['name']))
+ elif absolute:
+ item_path = abspath(pathjoin(path, item['name']))
+ else:
+ item_path = item['name']
+
+ yield (item_path, item)
+
+ @_fix_path
+ def remove(self, path):
+ self._log(INFO, 'Removing file %s' % path)
+ if self.getmeta("read_only"):
+ raise errors.UnsupportedError('read only filesystem')
+
+ if not self.isfile(path):
+ if not self.isdir(path):
+ raise errors.ResourceNotFoundError(path)
+ raise errors.ResourceInvalidError(path)
+
+ try:
+ self.tahoeutil.unlink(self.dircap, path)
+ except Exception, e:
+ raise errors.ResourceInvalidError(path)
+
+ @_fix_path
+ def removedir(self, path, recursive=False, force=False):
+ self._log(INFO, "Removing directory %s" % path)
+ if self.getmeta("read_only"):
+ raise errors.UnsupportedError('read only filesystem')
+ if not self.isdir(path):
+ if not self.isfile(path):
+ raise errors.ResourceNotFoundError(path)
+ raise errors.ResourceInvalidError(path)
+ if not force and self.listdir(path):
+ raise errors.DirectoryNotEmptyError(path)
+
+ self.tahoeutil.unlink(self.dircap, path)
+
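+        # With recursive=True, empty parent directories are pruned upwards
+        # until a non-empty one (or the root) is reached.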
+ if recursive and path != '/':
+ try:
+ self.removedir(dirname(path), recursive=True)
+ except errors.DirectoryNotEmptyError:
+ pass
+
+ @_fix_path
+ def makedir(self, path, recursive=False, allow_recreate=False):
+ self._log(INFO, "Creating directory %s" % path)
+ if self.getmeta("read_only"):
+ raise errors.UnsupportedError('read only filesystem')
+ if self.exists(path):
+ if not self.isdir(path):
+ raise errors.ResourceInvalidError(path)
+ if not allow_recreate:
+ raise errors.DestinationExistsError(path)
+ if not recursive and not self.exists(dirname(path)):
+ raise errors.ParentDirectoryMissingError(path)
+ self.tahoeutil.mkdir(self.dircap, path)
+
+ def movedir(self, src, dst, overwrite=False):
+ self.move(src, dst, overwrite=overwrite)
+
+ def move(self, src, dst, overwrite=False):
+ self._log(INFO, "Moving file from %s to %s" % (src, dst))
+ if self.getmeta("read_only"):
+ raise errors.UnsupportedError('read only filesystem')
+ src = _fixpath(src)
+ dst = _fixpath(dst)
+ if not self.exists(dirname(dst)):
+ raise errors.ParentDirectoryMissingError(dst)
+ if not overwrite and self.exists(dst):
+ raise errors.DestinationExistsError(dst)
+ self.tahoeutil.move(self.dircap, src, dst)
+
+ def rename(self, src, dst):
+ self.move(src, dst)
+
+ def copy(self, src, dst, overwrite=False, chunk_size=16384):
+ if self.getmeta("read_only"):
+ raise errors.UnsupportedError('read only filesystem')
+ # FIXME: this is out of date; how to do native tahoe copy?
+        # FIXME: workaround because isfile() does not exist on _TahoeLAFS
+ FS.copy(self, src, dst, overwrite, chunk_size)
+
+ def copydir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=16384):
+ if self.getmeta("read_only"):
+ raise errors.UnsupportedError('read only filesystem')
+ # FIXME: this is out of date; how to do native tahoe copy?
+        # FIXME: workaround because isfile() does not exist on _TahoeLAFS
+ FS.copydir(self, src, dst, overwrite, ignore_errors, chunk_size)
+
+
+ def _log(self, level, message):
+ if not logger.isEnabledFor(level): return
+ logger.log(level, u'(%d) %s' % (id(self),
+ unicode(message).encode('ASCII', 'replace')))
+
+ @_fix_path
+ def getpathurl(self, path, allow_none=False, webapi=None):
+ '''
+        Retrieve the URL where the file/directory is stored.
+ '''
+ if webapi == None:
+ webapi = self.connection.webapi
+ self._log(DEBUG, "Retrieving URL for %s over %s" % (path, webapi))
+ path = self.tahoeutil.fixwinpath(path, False)
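+        # The result has the form '<webapi>/uri/<dircap><path>', e.g.
+        # (illustrative) 'http://127.0.0.1:3456/uri/URI:DIR2:.../foo.txt'.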
+ return u"%s/uri/%s%s" % (webapi, self.dircap, path)
+
+ @_fix_path
+ def getrange(self, path, offset, length=None):
+ return self.connection.get(u'/uri/%s%s' % (self.dircap, path),
+ offset=offset, length=length)
+
+ @_fix_path
+ def setcontents(self, path, file, chunk_size=64*1024):
+ self._log(INFO, 'Uploading file %s' % path)
+ size=None
+
+ if self.getmeta("read_only"):
+ raise errors.UnsupportedError('read only filesystem')
+
+ # Workaround for large files:
+ # First create zero file placeholder, then
+ # upload final content.
+ if self.largefilesize != None and getattr(file, 'read', None):
+            # As 'file' can also be a string, we need to check
+            # whether it looks like a duck (has a read method). Sorry, file.
+ file.seek(0, SEEK_END)
+ size = file.tell()
+ file.seek(0)
+
+ if size > self.largefilesize:
+ self.connection.put(u'/uri/%s%s' % (self.dircap, path),
+ "PyFilesystem.TahoeLAFS: Upload started, final size %d" % size)
+
+ self.connection.put(u'/uri/%s%s' % (self.dircap, path), file, size=size)
+
+ @_fix_path
+ def getinfo(self, path):
+ self._log(INFO, 'Reading meta for %s' % path)
+ info = self.tahoeutil.info(self.dircap, path)
+ #import datetime
+ #info['created_time'] = datetime.datetime.now()
+ #info['modified_time'] = datetime.datetime.now()
+ #info['accessed_time'] = datetime.datetime.now()
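+        # Tahoe keeps no POSIX permissions, so a synthetic owner-rwx mode is
+        # reported together with the node-type bit.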
+ if info['type'] == 'filenode':
+ info["st_mode"] = 0x700 | statinfo.S_IFREG
+ elif info['type'] == 'dirnode':
+ info["st_mode"] = 0x700 | statinfo.S_IFDIR
+ return info
+
+
+
+class TahoeLAFS(CacheFSMixin,_TahoeLAFS):
+ """FS providing cached access to a Tahoe Filesystem.
+
+ This class is the preferred means to access a Tahoe filesystem. It
+ maintains an internal cache of recently-accessed metadata to speed
+ up operations.
+ """
+
+ def __init__(self, *args, **kwds):
+ kwds.setdefault("cache_timeout",60)
+ super(TahoeLAFS,self).__init__(*args,**kwds)
+
+
diff --git a/fs/contrib/tahoelafs/connection.py b/fs/contrib/tahoelafs/connection.py
index e3df28a..448919b 100644
--- a/fs/contrib/tahoelafs/connection.py
+++ b/fs/contrib/tahoelafs/connection.py
@@ -17,12 +17,12 @@ class PutRequest(Request):
def __init__(self, *args, **kwargs):
self.get_method = lambda: u'PUT'
Request.__init__(self, *args, **kwargs)
-
+
class DeleteRequest(Request):
def __init__(self, *args, **kwargs):
self.get_method = lambda: u'DELETE'
Request.__init__(self, *args, **kwargs)
-
+
class Connection:
def __init__(self, webapi):
self.webapi = webapi
@@ -37,13 +37,13 @@ class Connection:
size = len(f)
elif getattr(f, 'read', None):
if size == None:
- # When size is already known, skip this
+ # When size is already known, skip this
f.seek(0, SEEK_END)
size = f.tell()
f.seek(0)
else:
raise errors.UnsupportedError("Cannot handle type %s" % type(f))
-
+
headers = {'Content-Length': size}
headers.update(self.headers)
return headers
@@ -59,7 +59,7 @@ class Connection:
if params:
return u"%s?%s" % (q, self._urlencode(params))
return q
-
+
def _urlopen(self, req):
try:
return urlopen(req)
@@ -74,17 +74,17 @@ class Connection:
# Standard not found
raise errors.ResourceNotFoundError(e.fp.read())
raise errors.ResourceInvalidError(e.fp.read())
-
+
def post(self, path, data={}, params={}):
data = self._urlencode(data)
path = self._quotepath(path, params)
req = Request(''.join([self.webapi, path]), data, headers=self.headers)
return self._urlopen(req)
-
+
def get(self, path, data={}, offset=None, length=None):
data = self._urlencode(data)
path = self._quotepath(path)
- if data:
+ if data:
path = u'?'.join([path, data])
headers = {}
@@ -95,17 +95,17 @@ class Connection:
(int(offset), int(offset+length))
else:
headers['Range'] = 'bytes=%d-' % int(offset)
-
+
req = Request(''.join([self.webapi, path]), headers=headers)
return self._urlopen(req)
def put(self, path, data, size=None, params={}):
path = self._quotepath(path, params)
headers = self._get_headers(data, size=size)
- req = PutRequest(''.join([self.webapi, path]), data, headers=headers)
+ req = PutRequest(''.join([self.webapi, path]), data, headers=headers)
return self._urlopen(req)
-
- def delete(self, path, data={}):
+
+ def delete(self, path, data={}):
path = self._quotepath(path)
req = DeleteRequest(''.join([self.webapi, path]), data, headers=self.headers)
return self._urlopen(req)
diff --git a/fs/contrib/tahoelafs/test_tahoelafs.py b/fs/contrib/tahoelafs/test_tahoelafs.py
index 61a28e5..5cb6ca7 100644
--- a/fs/contrib/tahoelafs/test_tahoelafs.py
+++ b/fs/contrib/tahoelafs/test_tahoelafs.py
@@ -1,52 +1,52 @@
-#!/usr/bin/python
-"""
- Test the TahoeLAFS
-
- @author: Marek Palatinus <marek@palatinus.cz>
-"""
-
-import sys
-import logging
-import unittest
-
-from fs.base import FS
-import fs.errors as errors
-from fs.tests import FSTestCases, ThreadingTestCases
-from fs.contrib.tahoelafs import TahoeLAFS, Connection
-
-logging.getLogger().setLevel(logging.DEBUG)
-logging.getLogger('fs.tahoelafs').addHandler(logging.StreamHandler(sys.stdout))
-
-WEBAPI = 'http://insecure.tahoe-lafs.org'
-
-
-# The public grid is too slow for threading testcases, disabling for now...
-class TestTahoeLAFS(unittest.TestCase,FSTestCases):#,ThreadingTestCases):
-
- # Disabled by default because it takes a *really* long time.
- __test__ = False
-
- def setUp(self):
- self.dircap = TahoeLAFS.createdircap(WEBAPI)
- self.fs = TahoeLAFS(self.dircap, cache_timeout=0, webapi=WEBAPI)
-
- def tearDown(self):
- self.fs.close()
-
- def test_dircap(self):
- # Is dircap in correct format?
- self.assert_(self.dircap.startswith('URI:DIR2:') and len(self.dircap) > 50)
-
- def test_concurrent_copydir(self):
- # makedir() on TahoeLAFS is currently not atomic
- pass
-
- def test_makedir_winner(self):
- # makedir() on TahoeLAFS is currently not atomic
- pass
-
- def test_big_file(self):
- pass
-
-if __name__ == '__main__':
- unittest.main()
+#!/usr/bin/python
+"""
+ Test the TahoeLAFS
+
+ @author: Marek Palatinus <marek@palatinus.cz>
+"""
+
+import sys
+import logging
+import unittest
+
+from fs.base import FS
+import fs.errors as errors
+from fs.tests import FSTestCases, ThreadingTestCases
+from fs.contrib.tahoelafs import TahoeLAFS, Connection
+
+logging.getLogger().setLevel(logging.DEBUG)
+logging.getLogger('fs.tahoelafs').addHandler(logging.StreamHandler(sys.stdout))
+
+WEBAPI = 'http://insecure.tahoe-lafs.org'
+
+
+# The public grid is too slow for threading testcases, disabling for now...
+class TestTahoeLAFS(unittest.TestCase,FSTestCases):#,ThreadingTestCases):
+
+ # Disabled by default because it takes a *really* long time.
+ __test__ = False
+
+ def setUp(self):
+ self.dircap = TahoeLAFS.createdircap(WEBAPI)
+ self.fs = TahoeLAFS(self.dircap, cache_timeout=0, webapi=WEBAPI)
+
+ def tearDown(self):
+ self.fs.close()
+
+ def test_dircap(self):
+ # Is dircap in correct format?
+ self.assert_(self.dircap.startswith('URI:DIR2:') and len(self.dircap) > 50)
+
+ def test_concurrent_copydir(self):
+ # makedir() on TahoeLAFS is currently not atomic
+ pass
+
+ def test_makedir_winner(self):
+ # makedir() on TahoeLAFS is currently not atomic
+ pass
+
+ def test_big_file(self):
+ pass
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/fs/contrib/tahoelafs/util.py b/fs/contrib/tahoelafs/util.py
index fda8a2b..9525f6d 100644
--- a/fs/contrib/tahoelafs/util.py
+++ b/fs/contrib/tahoelafs/util.py
@@ -1,140 +1,140 @@
-'''
-Created on 25.9.2010
-
-@author: marekp
-'''
-
-import sys
-import platform
-import stat as statinfo
-
-import fs.errors as errors
-from fs.path import pathsplit
-try:
- # For non-CPython or older CPython versions.
- # Simplejson also comes with C speedup module which
- # is not in standard CPython >=2.6 library.
- import simplejson as json
-except ImportError:
- try:
- import json
- except ImportError:
- print "simplejson (http://pypi.python.org/pypi/simplejson/) required"
- raise
-
-from .connection import Connection
-
-python3 = int(platform.python_version_tuple()[0]) > 2
-
-if python3:
- from urllib.error import HTTPError
-else:
- from urllib2 import HTTPError
-
-class TahoeUtil:
- def __init__(self, webapi):
- self.connection = Connection(webapi)
-
- def createdircap(self):
- return self.connection.post(u'/uri', params={u't': u'mkdir'}).read()
-
- def unlink(self, dircap, path=None):
- path = self.fixwinpath(path, False)
- self.connection.delete(u'/uri/%s%s' % (dircap, path))
-
- def info(self, dircap, path):
- path = self.fixwinpath(path, False)
- meta = json.load(self.connection.get(u'/uri/%s%s' % (dircap, path), {u't': u'json'}))
- return self._info(path, meta)
-
- def fixwinpath(self, path, direction=True):
- '''
- No, Tahoe really does not support file streams...
- This is ugly hack, because it is not Tahoe-specific.
- Should be move to middleware if will be any.
- '''
- if platform.system() != 'Windows':
- return path
-
- if direction and ':' in path:
- path = path.replace(':', '__colon__')
- elif not direction and '__colon__' in path:
- path = path.replace('__colon__', ':')
- return path
-
- def _info(self, path, data):
- if isinstance(data, list):
- type = data[0]
- data = data[1]
- elif isinstance(data, dict):
- type = data['type']
- else:
- raise errors.ResourceInvalidError('Metadata in unknown format!')
-
- if type == 'unknown':
- raise errors.ResourceNotFoundError(path)
-
- info = {'name': unicode(self.fixwinpath(path, True)),
- 'type': type,
- 'size': data.get('size', 0),
- 'ctime': None,
- 'uri': data.get('rw_uri', data.get('ro_uri'))}
- if 'metadata' in data:
- info['ctime'] = data['metadata'].get('ctime')
-
- if info['type'] == 'dirnode':
- info['st_mode'] = 0777 | statinfo.S_IFDIR
- else:
- info['st_mode'] = 0644
-
- return info
-
- def list(self, dircap, path=None):
- path = self.fixwinpath(path, False)
-
- data = json.load(self.connection.get(u'/uri/%s%s' % (dircap, path), {u't': u'json'}))
-
- if len(data) < 2 or data[0] != 'dirnode':
- raise errors.ResourceInvalidError('Metadata in unknown format!')
-
- data = data[1]['children']
- for i in data.keys():
- x = self._info(i, data[i])
- yield x
-
- def mkdir(self, dircap, path):
- path = self.fixwinpath(path, False)
- path = pathsplit(path)
-
- self.connection.post(u"/uri/%s%s" % (dircap, path[0]), data={u't': u'mkdir', u'name': path[1]})
-
- def move(self, dircap, src, dst):
- if src == '/' or dst == '/':
- raise errors.UnsupportedError("Too dangerous operation, aborting")
-
- src = self.fixwinpath(src, False)
- dst = self.fixwinpath(dst, False)
-
- src_tuple = pathsplit(src)
- dst_tuple = pathsplit(dst)
-
- if src_tuple[0] == dst_tuple[0]:
- # Move inside one directory
- self.connection.post(u"/uri/%s%s" % (dircap, src_tuple[0]), data={u't': u'rename',
- u'from_name': src_tuple[1], u'to_name': dst_tuple[1]})
- return
-
- # Move to different directory. Firstly create link on dst, then remove from src
- try:
- self.info(dircap, dst)
- except errors.ResourceNotFoundError:
- pass
- else:
- self.unlink(dircap, dst)
-
- uri = self.info(dircap, src)['uri']
- self.connection.put(u"/uri/%s%s" % (dircap, dst), data=uri, params={u't': u'uri'})
- if uri != self.info(dircap, dst)['uri']:
- raise errors.OperationFailedError('Move failed')
-
- self.unlink(dircap, src)
+'''
+Created on 25.9.2010
+
+@author: marekp
+'''
+
+import sys
+import platform
+import stat as statinfo
+
+import fs.errors as errors
+from fs.path import pathsplit
+try:
+ # For non-CPython or older CPython versions.
+    # simplejson also comes with a C speedup module which
+    # is not in the standard CPython >=2.6 library.
+ import simplejson as json
+except ImportError:
+ try:
+ import json
+ except ImportError:
+ print "simplejson (http://pypi.python.org/pypi/simplejson/) required"
+ raise
+
+from .connection import Connection
+
+python3 = int(platform.python_version_tuple()[0]) > 2
+
+if python3:
+ from urllib.error import HTTPError
+else:
+ from urllib2 import HTTPError
+
+class TahoeUtil:
+ def __init__(self, webapi):
+ self.connection = Connection(webapi)
+
+ def createdircap(self):
+ return self.connection.post(u'/uri', params={u't': u'mkdir'}).read()
+
+ def unlink(self, dircap, path=None):
+ path = self.fixwinpath(path, False)
+ self.connection.delete(u'/uri/%s%s' % (dircap, path))
+
+ def info(self, dircap, path):
+ path = self.fixwinpath(path, False)
+ meta = json.load(self.connection.get(u'/uri/%s%s' % (dircap, path), {u't': u'json'}))
+ return self._info(path, meta)
+
+ def fixwinpath(self, path, direction=True):
+        '''
+        No, Tahoe really does not support file streams...
+        This is an ugly hack and, since it is not Tahoe-specific,
+        it should be moved into middleware if any is ever added.
+        '''
+ if platform.system() != 'Windows':
+ return path
+
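+        # e.g. 'backup:stream.txt' <-> 'backup__colon__stream.txt'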
+ if direction and ':' in path:
+ path = path.replace(':', '__colon__')
+ elif not direction and '__colon__' in path:
+ path = path.replace('__colon__', ':')
+ return path
+
+ def _info(self, path, data):
+ if isinstance(data, list):
+ type = data[0]
+ data = data[1]
+ elif isinstance(data, dict):
+ type = data['type']
+ else:
+ raise errors.ResourceInvalidError('Metadata in unknown format!')
+
+ if type == 'unknown':
+ raise errors.ResourceNotFoundError(path)
+
+ info = {'name': unicode(self.fixwinpath(path, True)),
+ 'type': type,
+ 'size': data.get('size', 0),
+ 'ctime': None,
+ 'uri': data.get('rw_uri', data.get('ro_uri'))}
+ if 'metadata' in data:
+ info['ctime'] = data['metadata'].get('ctime')
+
+ if info['type'] == 'dirnode':
+ info['st_mode'] = 0777 | statinfo.S_IFDIR
+ else:
+ info['st_mode'] = 0644
+
+ return info
+
+ def list(self, dircap, path=None):
+ path = self.fixwinpath(path, False)
+
+ data = json.load(self.connection.get(u'/uri/%s%s' % (dircap, path), {u't': u'json'}))
+
+ if len(data) < 2 or data[0] != 'dirnode':
+ raise errors.ResourceInvalidError('Metadata in unknown format!')
+
+ data = data[1]['children']
+ for i in data.keys():
+ x = self._info(i, data[i])
+ yield x
+
+ def mkdir(self, dircap, path):
+ path = self.fixwinpath(path, False)
+ path = pathsplit(path)
+
+ self.connection.post(u"/uri/%s%s" % (dircap, path[0]), data={u't': u'mkdir', u'name': path[1]})
+
+ def move(self, dircap, src, dst):
+ if src == '/' or dst == '/':
+ raise errors.UnsupportedError("Too dangerous operation, aborting")
+
+ src = self.fixwinpath(src, False)
+ dst = self.fixwinpath(dst, False)
+
+ src_tuple = pathsplit(src)
+ dst_tuple = pathsplit(dst)
+
+ if src_tuple[0] == dst_tuple[0]:
+ # Move inside one directory
+ self.connection.post(u"/uri/%s%s" % (dircap, src_tuple[0]), data={u't': u'rename',
+ u'from_name': src_tuple[1], u'to_name': dst_tuple[1]})
+ return
+
+        # Moving to a different directory: first create a link at dst, then remove src.
+ try:
+ self.info(dircap, dst)
+ except errors.ResourceNotFoundError:
+ pass
+ else:
+ self.unlink(dircap, dst)
+
+ uri = self.info(dircap, src)['uri']
+ self.connection.put(u"/uri/%s%s" % (dircap, dst), data=uri, params={u't': u'uri'})
+ if uri != self.info(dircap, dst)['uri']:
+ raise errors.OperationFailedError('Move failed')
+
+ self.unlink(dircap, src)
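+        # Note: a cross-directory move is link-then-unlink and therefore not
+        # atomic; a crash between the PUT and the final unlink can leave the
+        # file linked in both directories.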
diff --git a/fs/expose/django_storage.py b/fs/expose/django_storage.py
index 9a84b2a..636c3d5 100644
--- a/fs/expose/django_storage.py
+++ b/fs/expose/django_storage.py
@@ -28,7 +28,7 @@ class FSStorage(Storage):
"""
:param fs: an FS object
:param base_url: The url to prepend to the path
-
+
"""
if fs is None:
fs = settings.DEFAULT_FILE_STORAGE_FS
diff --git a/fs/expose/fuse/fuse.py b/fs/expose/fuse/fuse.py
index e15a5d5..ed1fbc4 100644
--- a/fs/expose/fuse/fuse.py
+++ b/fs/expose/fuse/fuse.py
@@ -113,7 +113,7 @@ elif _system == 'Linux':
c_uid_t = c_uint
setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_int)
getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t)
-
+
if _machine == 'x86_64':
c_stat._fields_ = [
('st_dev', c_dev_t),
@@ -294,12 +294,12 @@ class FUSE(object):
"""This class is the lower level interface and should not be subclassed
under normal use. Its methods are called by fuse.
Assumes API version 2.6 or later."""
-
+
def __init__(self, operations, mountpoint, raw_fi=False, **kwargs):
"""Setting raw_fi to True will cause FUSE to pass the fuse_file_info
class as is to Operations, instead of just the fh field.
This gives you access to direct_io, keep_cache, etc."""
-
+
self.operations = operations
self.raw_fi = raw_fi
args = ['fuse']
@@ -315,7 +315,7 @@ class FUSE(object):
for key, val in kwargs.items()))
args.append(mountpoint)
argv = (c_char_p * len(args))(*args)
-
+
fuse_ops = fuse_operations()
for name, prototype in fuse_operations._fields_:
if prototype != c_voidp and getattr(operations, name, None):
@@ -326,7 +326,7 @@ class FUSE(object):
del self.operations # Invoke the destructor
if err:
raise RuntimeError(err)
-
+
def _wrapper_(self, func, *args, **kwargs):
"""Decorator for the methods that follow"""
try:
@@ -336,40 +336,40 @@ class FUSE(object):
except:
print_exc()
return -EFAULT
-
+
def getattr(self, path, buf):
return self.fgetattr(path, buf, None)
-
+
def readlink(self, path, buf, bufsize):
ret = self.operations('readlink', path)
data = create_string_buffer(ret[:bufsize - 1])
memmove(buf, data, len(data))
return 0
-
+
def mknod(self, path, mode, dev):
return self.operations('mknod', path, mode, dev)
-
+
def mkdir(self, path, mode):
return self.operations('mkdir', path, mode)
-
+
def unlink(self, path):
return self.operations('unlink', path)
-
+
def rmdir(self, path):
return self.operations('rmdir', path)
-
+
def symlink(self, source, target):
return self.operations('symlink', target, source)
-
+
def rename(self, old, new):
return self.operations('rename', old, new)
-
+
def link(self, source, target):
return self.operations('link', target, source)
-
+
def chmod(self, path, mode):
return self.operations('chmod', path, mode)
-
+
def chown(self, path, uid, gid):
# Check if any of the arguments is a -1 that has overflowed
if c_uid_t(uid + 1).value == 0:
@@ -377,10 +377,10 @@ class FUSE(object):
if c_gid_t(gid + 1).value == 0:
gid = -1
return self.operations('chown', path, uid, gid)
-
+
def truncate(self, path, length):
return self.operations('truncate', path, length)
-
+
def open(self, path, fip):
fi = fip.contents
if self.raw_fi:
@@ -388,7 +388,7 @@ class FUSE(object):
else:
fi.fh = self.operations('open', path, fi.flags)
return 0
-
+
def read(self, path, buf, size, offset, fip):
fh = fip.contents if self.raw_fi else fip.contents.fh
ret = self.operations('read', path, size, offset, fh)
@@ -397,12 +397,12 @@ class FUSE(object):
data = create_string_buffer(ret[:size], size)
memmove(buf, data, size)
return size
-
+
def write(self, path, buf, size, offset, fip):
data = string_at(buf, size)
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('write', path, data, offset, fh)
-
+
def statfs(self, path, buf):
stv = buf.contents
attrs = self.operations('statfs', path)
@@ -410,23 +410,23 @@ class FUSE(object):
if hasattr(stv, key):
setattr(stv, key, val)
return 0
-
+
def flush(self, path, fip):
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('flush', path, fh)
-
+
def release(self, path, fip):
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('release', path, fh)
-
+
def fsync(self, path, datasync, fip):
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('fsync', path, datasync, fh)
-
+
def setxattr(self, path, name, value, size, options, *args):
data = string_at(value, size)
return self.operations('setxattr', path, name, data, options, *args)
-
+
def getxattr(self, path, name, value, size, *args):
ret = self.operations('getxattr', path, name, *args)
retsize = len(ret)
@@ -436,7 +436,7 @@ class FUSE(object):
return -ERANGE
memmove(value, buf, retsize)
return retsize
-
+
def listxattr(self, path, namebuf, size):
ret = self.operations('listxattr', path)
buf = create_string_buffer('\x00'.join(ret)) if ret else ''
@@ -446,15 +446,15 @@ class FUSE(object):
return -ERANGE
memmove(namebuf, buf, bufsize)
return bufsize
-
+
def removexattr(self, path, name):
return self.operations('removexattr', path, name)
-
+
def opendir(self, path, fip):
# Ignore raw_fi
fip.contents.fh = self.operations('opendir', path)
return 0
-
+
def readdir(self, path, buf, filler, offset, fip):
# Ignore raw_fi
for item in self.operations('readdir', path, fip.contents.fh):
@@ -470,24 +470,24 @@ class FUSE(object):
if filler(buf, name, st, offset) != 0:
break
return 0
-
+
def releasedir(self, path, fip):
# Ignore raw_fi
return self.operations('releasedir', path, fip.contents.fh)
-
+
def fsyncdir(self, path, datasync, fip):
# Ignore raw_fi
return self.operations('fsyncdir', path, datasync, fip.contents.fh)
-
+
def init(self, conn):
return self.operations('init', '/')
-
+
def destroy(self, private_data):
return self.operations('destroy', '/')
-
+
def access(self, path, amode):
return self.operations('access', path, amode)
-
+
def create(self, path, mode, fip):
fi = fip.contents
if self.raw_fi:
@@ -495,11 +495,11 @@ class FUSE(object):
else:
fi.fh = self.operations('create', path, mode)
return 0
-
+
def ftruncate(self, path, length, fip):
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('truncate', path, length, fh)
-
+
def fgetattr(self, path, buf, fip):
memset(buf, 0, sizeof(c_stat))
st = buf.contents
@@ -507,11 +507,11 @@ class FUSE(object):
attrs = self.operations('getattr', path, fh)
set_st_attrs(st, attrs)
return 0
-
+
def lock(self, path, fip, cmd, lock):
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('lock', path, fh, cmd, lock)
-
+
def utimens(self, path, buf):
if buf:
atime = time_of_timespec(buf.contents.actime)
@@ -520,7 +520,7 @@ class FUSE(object):
else:
times = None
return self.operations('utimens', path, times)
-
+
def bmap(self, path, blocksize, idx):
return self.operations('bmap', path, blocksize, idx)
@@ -529,46 +529,46 @@ class Operations(object):
"""This class should be subclassed and passed as an argument to FUSE on
initialization. All operations should raise a FuseOSError exception
on error.
-
+
When in doubt of what an operation should do, check the FUSE header
file or the corresponding system call man page."""
-
+
def __call__(self, op, *args):
if not hasattr(self, op):
raise FuseOSError(EFAULT)
return getattr(self, op)(*args)
-
+
def access(self, path, amode):
return 0
-
+
bmap = None
-
+
def chmod(self, path, mode):
raise FuseOSError(EROFS)
-
+
def chown(self, path, uid, gid):
raise FuseOSError(EROFS)
-
+
def create(self, path, mode, fi=None):
"""When raw_fi is False (default case), fi is None and create should
return a numerical file handle.
When raw_fi is True the file handle should be set directly by create
and return 0."""
raise FuseOSError(EROFS)
-
+
def destroy(self, path):
"""Called on filesystem destruction. Path is always /"""
pass
-
+
def flush(self, path, fh):
return 0
-
+
def fsync(self, path, datasync, fh):
return 0
-
+
def fsyncdir(self, path, datasync, fh):
return 0
-
+
def getattr(self, path, fh=None):
"""Returns a dictionary with keys identical to the stat C structure
of stat(2).
@@ -576,33 +576,33 @@ class Operations(object):
-    NOTE: There is an incombatibility between Linux and Mac OS X concerning
+    NOTE: There is an incompatibility between Linux and Mac OS X concerning
st_nlink of directories. Mac OS X counts all files inside the directory,
while Linux counts only the subdirectories."""
-
+
if path != '/':
raise FuseOSError(ENOENT)
return dict(st_mode=(S_IFDIR | 0755), st_nlink=2)
-
+
def getxattr(self, path, name, position=0):
raise FuseOSError(ENOTSUP)
-
+
def init(self, path):
"""Called on filesystem initialization. Path is always /
Use it instead of __init__ if you start threads on initialization."""
pass
-
+
def link(self, target, source):
raise FuseOSError(EROFS)
-
+
def listxattr(self, path):
return []
-
+
lock = None
-
+
def mkdir(self, path, mode):
raise FuseOSError(EROFS)
-
+
def mknod(self, path, mode, dev):
raise FuseOSError(EROFS)
-
+
def open(self, path, flags):
"""When raw_fi is False (default case), open should return a numerical
file handle.
@@ -610,60 +610,60 @@ class Operations(object):
open(self, path, fi)
and the file handle should be set directly."""
return 0
-
+
def opendir(self, path):
"""Returns a numerical file handle."""
return 0
-
+
def read(self, path, size, offset, fh):
"""Returns a string containing the data requested."""
raise FuseOSError(EIO)
-
+
def readdir(self, path, fh):
"""Can return either a list of names, or a list of (name, attrs, offset)
tuples. attrs is a dict as in getattr."""
return ['.', '..']
-
+
def readlink(self, path):
raise FuseOSError(ENOENT)
-
+
def release(self, path, fh):
return 0
-
+
def releasedir(self, path, fh):
return 0
-
+
def removexattr(self, path, name):
raise FuseOSError(ENOTSUP)
-
+
def rename(self, old, new):
raise FuseOSError(EROFS)
-
+
def rmdir(self, path):
raise FuseOSError(EROFS)
-
+
def setxattr(self, path, name, value, options, position=0):
raise FuseOSError(ENOTSUP)
-
+
def statfs(self, path):
"""Returns a dictionary with keys identical to the statvfs C structure
of statvfs(3).
On Mac OS X f_bsize and f_frsize must be a power of 2 (minimum 512)."""
return {}
-
+
def symlink(self, target, source):
raise FuseOSError(EROFS)
-
+
def truncate(self, path, length, fh=None):
raise FuseOSError(EROFS)
-
+
def unlink(self, path):
raise FuseOSError(EROFS)
-
+
def utimens(self, path, times=None):
"""Times is a (atime, mtime) tuple. If None use current time."""
return 0
-
+
def write(self, path, data, offset, fh):
raise FuseOSError(EROFS)
diff --git a/fs/expose/fuse/fuse_ctypes.py b/fs/expose/fuse/fuse_ctypes.py
index 375a819..4e247f0 100644
--- a/fs/expose/fuse/fuse_ctypes.py
+++ b/fs/expose/fuse/fuse_ctypes.py
@@ -1,9 +1,9 @@
# Copyright (c) 2008 Giorgos Verigakis <verigak@gmail.com>
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
@@ -104,7 +104,7 @@ if _system in ('Darwin', 'Darwin-MacFuse', 'FreeBSD'):
('st_size', c_off_t),
('st_blocks', c_int64),
('st_blksize', c_int32)]
-
+
elif _system == 'Linux':
ENOTSUP = 95
c_dev_t = c_ulonglong
@@ -117,7 +117,7 @@ elif _system == 'Linux':
c_uid_t = c_uint
setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_int)
getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t)
-
+
_machine = machine()
if _machine == 'x86_64':
c_stat._fields_ = [
@@ -296,12 +296,12 @@ class FUSE(object):
"""This class is the lower level interface and should not be subclassed
under normal use. Its methods are called by fuse.
Assumes API version 2.6 or later."""
-
+
def __init__(self, operations, mountpoint, raw_fi=False, **kwargs):
"""Setting raw_fi to True will cause FUSE to pass the fuse_file_info
class as is to Operations, instead of just the fh field.
This gives you access to direct_io, keep_cache, etc."""
-
+
self.operations = operations
self.raw_fi = raw_fi
args = ['fuse']
@@ -317,7 +317,7 @@ class FUSE(object):
for key, val in kwargs.items()))
args.append(mountpoint)
argv = (c_char_p * len(args))(*args)
-
+
fuse_ops = fuse_operations()
for name, prototype in fuse_operations._fields_:
if prototype != c_voidp and getattr(operations, name, None):
@@ -326,7 +326,7 @@ class FUSE(object):
_libfuse.fuse_main_real(len(args), argv, pointer(fuse_ops),
sizeof(fuse_ops), None)
del self.operations # Invoke the destructor
-
+
def _wrapper_(self, func, *args, **kwargs):
"""Decorator for the methods that follow"""
try:
@@ -336,46 +336,46 @@ class FUSE(object):
except:
print_exc()
return -EFAULT
-
+
def getattr(self, path, buf):
return self.fgetattr(path, buf, None)
-
+
def readlink(self, path, buf, bufsize):
ret = self.operations('readlink', path)
data = create_string_buffer(ret[:bufsize - 1])
memmove(buf, data, len(data))
return 0
-
+
def mknod(self, path, mode, dev):
return self.operations('mknod', path, mode, dev)
-
+
def mkdir(self, path, mode):
return self.operations('mkdir', path, mode)
-
+
def unlink(self, path):
return self.operations('unlink', path)
-
+
def rmdir(self, path):
return self.operations('rmdir', path)
-
+
def symlink(self, source, target):
return self.operations('symlink', target, source)
-
+
def rename(self, old, new):
return self.operations('rename', old, new)
-
+
def link(self, source, target):
return self.operations('link', target, source)
-
+
def chmod(self, path, mode):
return self.operations('chmod', path, mode)
-
+
def chown(self, path, uid, gid):
return self.operations('chown', path, uid, gid)
-
+
def truncate(self, path, length):
return self.operations('truncate', path, length)
-
+
def open(self, path, fip):
fi = fip.contents
if self.raw_fi:
@@ -383,7 +383,7 @@ class FUSE(object):
else:
fi.fh = self.operations('open', path, fi.flags)
return 0
-
+
def read(self, path, buf, size, offset, fip):
fh = fip.contents if self.raw_fi else fip.contents.fh
ret = self.operations('read', path, size, offset, fh)
@@ -391,12 +391,12 @@ class FUSE(object):
strbuf = create_string_buffer(ret)
memmove(buf, strbuf, len(strbuf))
return len(ret)
-
+
def write(self, path, buf, size, offset, fip):
data = string_at(buf, size)
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('write', path, data, offset, fh)
-
+
def statfs(self, path, buf):
stv = buf.contents
attrs = self.operations('statfs', path)
@@ -404,23 +404,23 @@ class FUSE(object):
if hasattr(stv, key):
setattr(stv, key, val)
return 0
-
+
def flush(self, path, fip):
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('flush', path, fh)
-
+
def release(self, path, fip):
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('release', path, fh)
-
+
def fsync(self, path, datasync, fip):
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('fsync', path, datasync, fh)
-
+
def setxattr(self, path, name, value, size, options, *args):
data = string_at(value, size)
return self.operations('setxattr', path, name, data, options, *args)
-
+
def getxattr(self, path, name, value, size, *args):
ret = self.operations('getxattr', path, name, *args)
retsize = len(ret)
@@ -430,7 +430,7 @@ class FUSE(object):
return -ERANGE
memmove(value, buf, retsize)
return retsize
-
+
def listxattr(self, path, namebuf, size):
ret = self.operations('listxattr', path)
if ret:
@@ -443,15 +443,15 @@ class FUSE(object):
return -ERANGE
memmove(namebuf, buf, bufsize)
return bufsize
-
+
def removexattr(self, path, name):
return self.operations('removexattr', path, name)
-
+
def opendir(self, path, fip):
# Ignore raw_fi
fip.contents.fh = self.operations('opendir', path)
return 0
-
+
def readdir(self, path, buf, filler, offset, fip):
# Ignore raw_fi
for item in self.operations('readdir', path, fip.contents.fh):
@@ -467,24 +467,24 @@ class FUSE(object):
if filler(buf, name, st, offset) != 0:
break
return 0
-
+
def releasedir(self, path, fip):
# Ignore raw_fi
return self.operations('releasedir', path, fip.contents.fh)
-
+
def fsyncdir(self, path, datasync, fip):
# Ignore raw_fi
return self.operations('fsyncdir', path, datasync, fip.contents.fh)
-
+
def init(self, conn):
return self.operations('init', '/')
-
+
def destroy(self, private_data):
return self.operations('destroy', '/')
-
+
def access(self, path, amode):
return self.operations('access', path, amode)
-
+
def create(self, path, mode, fip):
fi = fip.contents
if self.raw_fi:
@@ -492,11 +492,11 @@ class FUSE(object):
else:
fi.fh = self.operations('create', path, mode)
return 0
-
+
def ftruncate(self, path, length, fip):
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('truncate', path, length, fh)
-
+
def fgetattr(self, path, buf, fip):
memset(buf, 0, sizeof(c_stat))
st = buf.contents
@@ -504,11 +504,11 @@ class FUSE(object):
attrs = self.operations('getattr', path, fh)
set_st_attrs(st, attrs)
return 0
-
+
def lock(self, path, fip, cmd, lock):
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('lock', path, fh, cmd, lock)
-
+
def utimens(self, path, buf):
if buf:
atime = time_of_timespec(buf.contents.actime)
@@ -517,7 +517,7 @@ class FUSE(object):
else:
times = None
return self.operations('utimens', path, times)
-
+
def bmap(self, path, blocksize, idx):
return self.operations('bmap', path, blocksize, idx)
@@ -526,46 +526,46 @@ class Operations(object):
"""This class should be subclassed and passed as an argument to FUSE on
initialization. All operations should raise an OSError exception on
error.
-
+
When in doubt of what an operation should do, check the FUSE header
file or the corresponding system call man page."""
-
+
def __call__(self, op, *args):
if not hasattr(self, op):
raise OSError(EFAULT, '')
return getattr(self, op)(*args)
-
+
def access(self, path, amode):
return 0
-
+
bmap = None
-
+
def chmod(self, path, mode):
raise OSError(EROFS, '')
-
+
def chown(self, path, uid, gid):
raise OSError(EROFS, '')
-
+
def create(self, path, mode, fi=None):
"""When raw_fi is False (default case), fi is None and create should
return a numerical file handle.
When raw_fi is True the file handle should be set directly by create
and return 0."""
raise OSError(EROFS, '')
-
+
def destroy(self, path):
"""Called on filesystem destruction. Path is always /"""
pass
-
+
def flush(self, path, fh):
return 0
-
+
def fsync(self, path, datasync, fh):
return 0
-
+
def fsyncdir(self, path, datasync, fh):
return 0
-
+
def getattr(self, path, fh=None):
"""Returns a dictionary with keys identical to the stat C structure
of stat(2).
@@ -573,33 +573,33 @@ class Operations(object):
-    NOTE: There is an incombatibility between Linux and Mac OS X concerning
+    NOTE: There is an incompatibility between Linux and Mac OS X concerning
st_nlink of directories. Mac OS X counts all files inside the directory,
while Linux counts only the subdirectories."""
-
+
if path != '/':
raise OSError(ENOENT, '')
return dict(st_mode=(S_IFDIR | 0755), st_nlink=2)
-
+
def getxattr(self, path, name, position=0):
raise OSError(ENOTSUP, '')
-
+
def init(self, path):
"""Called on filesystem initialization. Path is always /
Use it instead of __init__ if you start threads on initialization."""
pass
-
+
def link(self, target, source):
raise OSError(EROFS, '')
-
+
def listxattr(self, path):
return []
-
+
lock = None
-
+
def mkdir(self, path, mode):
raise OSError(EROFS, '')
-
+
def mknod(self, path, mode, dev):
raise OSError(EROFS, '')
-
+
def open(self, path, flags):
"""When raw_fi is False (default case), open should return a numerical
file handle.
@@ -607,60 +607,60 @@ class Operations(object):
open(self, path, fi)
and the file handle should be set directly."""
return 0
-
+
def opendir(self, path):
"""Returns a numerical file handle."""
return 0
-
+
def read(self, path, size, offset, fh):
"""Returns a string containing the data requested."""
raise OSError(ENOENT, '')
-
+
def readdir(self, path, fh):
"""Can return either a list of names, or a list of (name, attrs, offset)
tuples. attrs is a dict as in getattr."""
return ['.', '..']
-
+
def readlink(self, path):
raise OSError(ENOENT, '')
-
+
def release(self, path, fh):
return 0
-
+
def releasedir(self, path, fh):
return 0
-
+
def removexattr(self, path, name):
raise OSError(ENOTSUP, '')
-
+
def rename(self, old, new):
raise OSError(EROFS, '')
-
+
def rmdir(self, path):
raise OSError(EROFS, '')
-
+
def setxattr(self, path, name, value, options, position=0):
raise OSError(ENOTSUP, '')
-
+
def statfs(self, path):
"""Returns a dictionary with keys identical to the statvfs C structure
of statvfs(3).
On Mac OS X f_bsize and f_frsize must be a power of 2 (minimum 512)."""
return {}
-
+
def symlink(self, target, source):
raise OSError(EROFS, '')
-
+
def truncate(self, path, length, fh=None):
raise OSError(EROFS, '')
-
+
def unlink(self, path):
raise OSError(EROFS, '')
-
+
def utimens(self, path, times=None):
"""Times is a (atime, mtime) tuple. If None use current time."""
return 0
-
+
def write(self, path, data, offset, fh):
raise OSError(EROFS, '')
diff --git a/fs/expose/importhook.py b/fs/expose/importhook.py
index e3d2d8e..cc8837f 100644
--- a/fs/expose/importhook.py
+++ b/fs/expose/importhook.py
@@ -240,4 +240,4 @@ class FSImportHook(object):
info = self._get_module_info(fullname)
(path,type,ispkg) = info
return path
-
+
diff --git a/fs/expose/serve/packetstream.py b/fs/expose/serve/packetstream.py
index 6f24fd9..0449937 100644
--- a/fs/expose/serve/packetstream.py
+++ b/fs/expose/serve/packetstream.py
@@ -12,21 +12,21 @@ def encode(header='', payload=''):
def textsize(s):
if s:
return str(len(s))
- return ''
+ return ''
-    return '%i,%i:%s%s' % (textsize(header), textsize(payload), header, payload)
+    return '%s,%s:%s%s' % (textsize(header), textsize(payload), header, payload)
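+# Wire format (netstring-like): '<header_len>,<payload_len>:<header><payload>',
+# where an empty length field means that part is absent, e.g.
+#   encode('hdr', 'data') -> '3,4:hdrdata'
+#   encode()              -> ',:'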
class FileEncoder(object):
-
+
def __init__(self, f):
self.f = f
-
+
def write(self, header='', payload=''):
fwrite = self.f.write
def textsize(s):
if s:
return str(len(s))
- return ''
+ return ''
fwrite('%s,%s:' % (textsize(header), textsize(payload)))
if header:
fwrite(header)
@@ -35,11 +35,11 @@ class FileEncoder(object):
class JSONFileEncoder(FileEncoder):
-
+
def write(self, header=None, payload=''):
if header is None:
super(JSONFileEncoder, self).write('', payload)
- else:
+ else:
header_json = dumps(header, separators=(',', ':'))
super(JSONFileEncoder, self).write(header_json, payload)
@@ -51,12 +51,12 @@ class PreludeError(DecoderError):
pass
class Decoder(object):
-
+
STAGE_PRELUDE, STAGE_SIZE, STAGE_HEADER, STAGE_PAYLOAD = range(4)
MAX_PRELUDE = 255
-
+
def __init__(self, no_prelude=False, prelude_callback=None):
-
+
self.prelude_callback = prelude_callback
self.stream_broken = False
self.expecting_bytes = None
@@ -64,49 +64,49 @@ class Decoder(object):
self._prelude = []
self._size = []
self._expecting_bytes = None
-
+
self.header_size = None
self.payload_size = None
-
+
self._header_bytes = None
self._payload_bytes = None
-
+
self._header_data = []
self._payload_data = []
-
+
self.header = None
self.payload = None
-
+
if no_prelude:
self.stage = self.STAGE_SIZE
-
-
+
+
def feed(self, data):
-
+
if self.stream_broken:
raise DecoderError('Stream is broken')
-
+
STAGE_PRELUDE, STAGE_SIZE, STAGE_HEADER, STAGE_PAYLOAD = range(4)
-
+
size_append = self._size.append
header_append = self._header_data.append
payload_append = self._payload_data.append
datafind = data.find
-
+
def reset_packet():
self.expecting_bytes = None
del self._header_data[:]
del self._payload_data[:]
self.header = None
self.payload = None
-
+
data_len = len(data)
data_pos = 0
- expecting_bytes = self.expecting_bytes
+ expecting_bytes = self.expecting_bytes
stage = self.stage
-
+
if stage == STAGE_PRELUDE:
- max_find = min(len(data), data_pos + self.MAX_PRELUDE)
+ max_find = min(len(data), data_pos + self.MAX_PRELUDE)
cr_pos = datafind('\n', data_pos, max_find)
if cr_pos == -1:
self._prelude.append(data[data_pos:])
@@ -119,53 +119,53 @@ class Decoder(object):
if sum(len(s) for s in self._prelude) > self.MAX_PRELUDE:
self.stream_broken = True
raise PreludeError('Prelude not found')
- data_pos = cr_pos + 1
+ data_pos = cr_pos + 1
prelude = ''.join(self._prelude)
del self._prelude[:]
reset_packet()
if not self.on_prelude(prelude):
self.broken = True
return
- stage = STAGE_SIZE
-
+ stage = STAGE_SIZE
+
while data_pos < data_len:
-
+
if stage == STAGE_HEADER:
bytes_to_read = min(data_len - data_pos, expecting_bytes)
header_append(data[data_pos:data_pos + bytes_to_read])
data_pos += bytes_to_read
- expecting_bytes -= bytes_to_read
- if not expecting_bytes:
- self.header = ''.join(self._header_data)
+ expecting_bytes -= bytes_to_read
+ if not expecting_bytes:
+ self.header = ''.join(self._header_data)
if not self.payload_size:
yield self.header, ''
reset_packet()
expecting_bytes = None
stage = STAGE_SIZE
- else:
+ else:
stage = STAGE_PAYLOAD
expecting_bytes = self.payload_size
-
+
elif stage == STAGE_PAYLOAD:
- bytes_to_read = min(data_len - data_pos, expecting_bytes)
+ bytes_to_read = min(data_len - data_pos, expecting_bytes)
payload_append(data[data_pos:data_pos + bytes_to_read])
data_pos += bytes_to_read
- expecting_bytes -= bytes_to_read
- if not expecting_bytes:
+ expecting_bytes -= bytes_to_read
+ if not expecting_bytes:
self.payload = ''.join(self._payload_data)
yield self.header, self.payload
reset_packet()
stage = STAGE_SIZE
expecting_bytes = None
-
+
elif stage == STAGE_SIZE:
term_pos = datafind(':', data_pos)
if term_pos == -1:
- size_append(data[data_pos:])
+ size_append(data[data_pos:])
break
else:
size_append(data[data_pos:term_pos])
- data_pos = term_pos + 1
+ data_pos = term_pos + 1
size = ''.join(self._size)
del self._size[:]
@@ -173,30 +173,30 @@ class Decoder(object):
header_size, payload_size = size.split(',', 1)
else:
header_size = size
- payload_size = ''
+ payload_size = ''
try:
self.header_size = int(header_size or '0')
self.payload_size = int(payload_size or '0')
except ValueError:
self.stream_broken = False
raise DecoderError('Invalid size in packet (%s)' % size)
-
+
if self.header_size:
- expecting_bytes = self.header_size
+ expecting_bytes = self.header_size
stage = STAGE_HEADER
elif self.payload_size:
- expecting_bytes = self.payload_size
+ expecting_bytes = self.payload_size
stage = STAGE_PAYLOAD
else:
# A completely empty packet, permitted, if a little odd
yield '', ''
- reset_packet()
+ reset_packet()
expecting_bytes = None
- self.expecting_bytes = expecting_bytes
+ self.expecting_bytes = expecting_bytes
self.stage = stage
-
-
+
+
def on_prelude(self, prelude):
if self.prelude_callback and not self.prelude_callback(self, prelude):
return False
@@ -206,7 +206,7 @@ class Decoder(object):
class JSONDecoder(Decoder):
-
+
def feed(self, data):
for header, payload in Decoder.feed(self, data):
if header:
@@ -215,9 +215,9 @@ class JSONDecoder(Decoder):
header = {}
yield header, payload
-
+
if __name__ == "__main__":
-
+
f = StringIO()
encoder = JSONFileEncoder(f)
encoder.write(dict(a=1, b=2), 'Payload')
@@ -225,29 +225,29 @@ if __name__ == "__main__":
encoder.write(None, 'Payload')
encoder.write(dict(a=1))
encoder.write()
-
+
stream = 'prelude\n' + f.getvalue()
-
+
#print stream
-
+
# packets = ['Prelude string\n',
# encode('header', 'payload'),
# encode('header number 2', 'second payload'),
# encode('', '')]
-#
+#
# stream = ''.join(packets)
-
+
decoder = JSONDecoder()
-
+
stream = 'pyfs/0.1\n59,13:{"type":"rpc","method":"ping","client_ref":"-1221142848:1"}Hello, World!'
-
+
fdata = StringIO(stream)
-
+
while 1:
data = fdata.read(3)
if not data:
break
for header, payload in decoder.feed(data):
print "Header:", repr(header)
- print "Payload:", repr(payload)
- \ No newline at end of file
+ print "Payload:", repr(payload)
+
diff --git a/fs/expose/serve/server.py b/fs/expose/serve/server.py
index 7ac8c1f..d870175 100644
--- a/fs/expose/serve/server.py
+++ b/fs/expose/serve/server.py
@@ -9,24 +9,24 @@ from packetstream import JSONDecoder, JSONFileEncoder
class _SocketFile(object):
def __init__(self, socket):
self.socket = socket
-
+
def read(self, size):
try:
return self.socket.recv(size)
except socket.error:
return ''
-
+
def write(self, data):
self.socket.sendall(data)
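+# _SocketFile adapts a raw socket to the file-like read()/write() interface
+# expected by JSONFileEncoder and the decoder loop; a recv() error is
+# reported as end-of-stream (empty string).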
def remote_call(method_name=None):
- method = method_name
+ method = method_name
def deco(f):
if not hasattr(f, '_remote_call_names'):
- f._remote_call_names = []
+ f._remote_call_names = []
f._remote_call_names.append(method or f.__name__)
- return f
+ return f
return deco
@@ -36,34 +36,34 @@ class RemoteResponse(Exception):
self.payload = payload
class ConnectionHandlerBase(threading.Thread):
-
+
_methods = {}
-
+
def __init__(self, server, connection_id, socket, address):
super(ConnectionHandlerBase, self).__init__()
self.server = server
- self.connection_id = connection_id
+ self.connection_id = connection_id
self.socket = socket
self.transport = _SocketFile(socket)
- self.address = address
+ self.address = address
self.encoder = JSONFileEncoder(self.transport)
- self.decoder = JSONDecoder(prelude_callback=self.on_stream_prelude)
-
+ self.decoder = JSONDecoder(prelude_callback=self.on_stream_prelude)
+
self._lock = threading.RLock()
self.socket_error = None
-
- if not self._methods:
+
+ if not self._methods:
for method_name in dir(self):
method = getattr(self, method_name)
if callable(method) and hasattr(method, '_remote_call_names'):
for name in method._remote_call_names:
-
+
self._methods[name] = method
-
+
print self._methods
-
- self.fs = None
-
+
+ self.fs = None
+
def run(self):
self.transport.write('pyfs/1.0\n')
while True:
@@ -74,27 +74,27 @@ class ConnectionHandlerBase(threading.Thread):
self.socket_error = socket_error
break
print "data", repr(data)
- if data:
- for packet in self.decoder.feed(data):
+ if data:
+ for packet in self.decoder.feed(data):
print repr(packet)
self.on_packet(*packet)
else:
- break
+ break
self.on_connection_close()
-
+
def close(self):
with self._lock:
self.socket.close()
-
+
def on_connection_close(self):
self.socket.shutdown(socket.SHUT_RDWR)
self.socket.close()
self.server.on_connection_close(self.connection_id)
-
+
def on_stream_prelude(self, packet_stream, prelude):
print "prelude", prelude
return True
-
+
def on_packet(self, header, payload):
print '-' * 30
print repr(header)
@@ -111,7 +111,7 @@ class ConnectionHandlerBase(threading.Thread):
remote['response'] = response
self.encoder.write(remote, '')
except RemoteResponse, response:
- self.encoder.write(response.header, response.payload)
+ self.encoder.write(response.header, response.payload)
class RemoteFSConnection(ConnectionHandlerBase):
@@ -122,9 +122,9 @@ class RemoteFSConnection(ConnectionHandlerBase):
self.resource = resource
from fs.memoryfs import MemoryFS
self.fs = MemoryFS()
-
+
class Server(object):
-
+
def __init__(self, addr='', port=3000, connection_factory=RemoteFSConnection):
self.addr = addr
self.port = port
@@ -133,41 +133,41 @@ class Server(object):
self.connection_id = 0
self.threads = {}
self._lock = threading.RLock()
-
+
def serve_forever(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- sock.bind((self.addr, self.port))
-
+ sock.bind((self.addr, self.port))
+
sock.listen(5)
-
+
try:
while True:
clientsocket, address = sock.accept()
self.on_connect(clientsocket, address)
except KeyboardInterrupt:
pass
-
+
try:
self._close_graceful()
except KeyboardInterrupt:
self._close_harsh()
-
+
def _close_graceful(self):
"""Tell all threads to exit and wait for them"""
with self._lock:
for connection in self.threads.itervalues():
- connection.close()
+ connection.close()
for connection in self.threads.itervalues():
connection.join()
self.threads.clear()
-
+
def _close_harsh(self):
with self._lock:
for connection in self.threads.itervalues():
connection.close()
self.threads.clear()
-
+
def on_connect(self, clientsocket, address):
print "Connection from", address
with self._lock:
@@ -175,17 +175,17 @@ class Server(object):
thread = self.connection_factory(self,
self.connection_id,
clientsocket,
- address)
- self.threads[self.connection_id] = thread
- thread.start()
-
+ address)
+ self.threads[self.connection_id] = thread
+ thread.start()
+
def on_connection_close(self, connection_id):
pass
#with self._lock:
# self.threads[connection_id].join()
# del self.threads[connection_id]
-
+
if __name__ == "__main__":
server = Server()
server.serve_forever()
- \ No newline at end of file
+
diff --git a/fs/expose/serve/threadpool.py b/fs/expose/serve/threadpool.py
index f448a12..5c2ecd5 100644
--- a/fs/expose/serve/threadpool.py
+++ b/fs/expose/serve/threadpool.py
@@ -10,48 +10,48 @@ def make_job(job_callable, *args, **kwargs):
class _PoolThread(threading.Thread):
""" Internal thread class that runs jobs. """
-
+
def __init__(self, queue, name):
super(_PoolThread, self).__init__()
self.queue = queue
self.name = name
-
+
def __str__(self):
return self.name
-
+
def run(self):
-
+
while True:
try:
- _priority, job = self.queue.get()
- except queue.Empty:
+ _priority, job = self.queue.get()
+ except queue.Empty:
break
-
- if job is None:
+
+ if job is None:
break
-
+
if callable(job):
try:
- job()
+ job()
except Exception, e:
- print e
+ print e
self.queue.task_done()
-
+
class ThreadPool(object):
-
+
def __init__(self, num_threads, size=None, name=''):
-
+
self.num_threads = num_threads
self.name = name
self.queue = queue.PriorityQueue(size)
self.job_no = 0
-
+
self.threads = [_PoolThread(self.queue, '%s #%i' % (name, i)) for i in xrange(num_threads)]
-
+
for thread in self.threads:
thread.start()
-
+
def _make_priority_key(self, i):
no = self.job_no
self.job_no += 1
@@ -62,38 +62,38 @@ class ThreadPool(object):
def job():
return job_callable(*args, **kwargs)
self.queue.put( (self._make_priority_key(1), job), True )
- return self.job_no
-
+ return self.job_no
+
def flush_quit(self):
- """ Quit after all tasks on the queue have been processed. """
+ """ Quit after all tasks on the queue have been processed. """
for thread in self.threads:
- self.queue.put( (self._make_priority_key(1), None) )
+ self.queue.put( (self._make_priority_key(1), None) )
for thread in self.threads:
thread.join()
-
+
def quit(self):
""" Quit as soon as possible, potentially leaving tasks on the queue. """
for thread in self.threads:
- self.queue.put( (self._make_priority_key(0), None) )
+ self.queue.put( (self._make_priority_key(0), None) )
for thread in self.threads:
thread.join()
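+    # A None job is a shutdown sentinel: quit() enqueues it at priority 0 so
+    # it jumps ahead of pending priority-1 jobs, while flush_quit() uses
+    # priority 1 so the sentinels drain behind already-queued work.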
if __name__ == "__main__":
import time
-
+
def job(n):
print "Starting #%i" % n
time.sleep(1)
print "Ending #%i" % n
-
+
pool = ThreadPool(5, 'test thread')
-
+
for n in range(20):
pool.job(job, n)
-
+
pool.flush_quit()
-
- \ No newline at end of file
+
+
diff --git a/fs/memoryfs.py b/fs/memoryfs.py
index cde0096..5688c40 100644
--- a/fs/memoryfs.py
+++ b/fs/memoryfs.py
@@ -1,694 +1,694 @@
-#!/usr/bin/env python
-"""
-fs.memoryfs
-===========
-
-A Filesystem that exists in memory only. Which makes them extremely fast, but non-permanent.
-
-If you open a file from a `memoryfs` you will get back a StringIO object from the standard library.
-
-
-"""
-
-import datetime
-import stat
-from fs.path import iteratepath, pathsplit, normpath
-from fs.base import *
-from fs.errors import *
-from fs import _thread_synchronize_default
-from fs.filelike import StringIO
-from fs import iotools
-from os import SEEK_END
-import threading
-
-import six
-from six import b
-
-
-def _check_mode(mode, mode_chars):
- for c in mode_chars:
- if c not in mode:
- return False
- return True
-
-
-class MemoryFile(object):
-
- def seek_and_lock(f):
- def deco(self, *args, **kwargs):
- try:
- self._lock.acquire()
- self.mem_file.seek(self.pos)
- ret = f(self, *args, **kwargs)
- self.pos = self.mem_file.tell()
- return ret
- finally:
- self._lock.release()
- return deco
-
- def __init__(self, path, memory_fs, mem_file, mode, lock):
- self.closed = False
- self.path = path
- self.memory_fs = memory_fs
- self.mem_file = mem_file
- self.mode = mode
- self._lock = lock
-
- self.pos = 0
-
- if _check_mode(mode, 'a'):
- lock.acquire()
- try:
- self.mem_file.seek(0, SEEK_END)
- self.pos = self.mem_file.tell()
- finally:
- lock.release()
-
- elif _check_mode(mode, 'w'):
- lock.acquire()
- try:
- self.mem_file.seek(0)
- self.mem_file.truncate()
- finally:
- lock.release()
-
- assert self.mem_file is not None, "self.mem_file should have a value"
-
- def __str__(self):
- return "<MemoryFile in %s %s>" % (self.memory_fs, self.path)
-
- def __repr__(self):
- return u"<MemoryFile in %s %s>" % (self.memory_fs, self.path)
-
- def __unicode__(self):
- return u"<MemoryFile in %s %s>" % (self.memory_fs, self.path)
-
- def __del__(self):
- if not self.closed:
- self.close()
-
- def flush(self):
- pass
-
- def __iter__(self):
- if 'r' not in self.mode and '+' not in self.mode:
- raise IOError("File not open for reading")
- self.mem_file.seek(self.pos)
- for line in self.mem_file:
- yield line
-
- @seek_and_lock
- def next(self):
- if 'r' not in self.mode and '+' not in self.mode:
- raise IOError("File not open for reading")
- return self.mem_file.next()
-
- @seek_and_lock
- def readline(self, *args, **kwargs):
- if 'r' not in self.mode and '+' not in self.mode:
- raise IOError("File not open for reading")
- return self.mem_file.readline(*args, **kwargs)
-
- def close(self):
- do_close = False
- self._lock.acquire()
- try:
- do_close = not self.closed and self.mem_file is not None
- if do_close:
- self.closed = True
- finally:
- self._lock.release()
- if do_close:
- self.memory_fs._on_close_memory_file(self, self.path)
-
- @seek_and_lock
- def read(self, size=None):
- if 'r' not in self.mode and '+' not in self.mode:
- raise IOError("File not open for reading")
- if size is None:
- size = -1
- return self.mem_file.read(size)
-
- @seek_and_lock
- def seek(self, *args, **kwargs):
- return self.mem_file.seek(*args, **kwargs)
-
- @seek_and_lock
- def tell(self):
- return self.pos
-
- @seek_and_lock
- def truncate(self, *args, **kwargs):
- if 'r' in self.mode and '+' not in self.mode:
- raise IOError("File not open for writing")
- return self.mem_file.truncate(*args, **kwargs)
-
- #@seek_and_lock
- def write(self, data):
- if 'r' in self.mode and '+' not in self.mode:
- raise IOError("File not open for writing")
- self.memory_fs._on_modify_memory_file(self.path)
- self._lock.acquire()
- try:
- self.mem_file.seek(self.pos)
- self.mem_file.write(data)
- self.pos = self.mem_file.tell()
- finally:
- self._lock.release()
-
- @seek_and_lock
- def writelines(self, *args, **kwargs):
- return self.mem_file.writelines(*args, **kwargs)
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_value, traceback):
- self.close()
- return False
-
-
-class DirEntry(object):
-
- def sync(f):
- def deco(self, *args, **kwargs):
- if self.lock is not None:
- try:
- self.lock.acquire()
- return f(self, *args, **kwargs)
- finally:
- self.lock.release()
- else:
- return f(self, *args, **kwargs)
- return deco
-
- def __init__(self, type, name, contents=None):
-
- assert type in ("dir", "file"), "Type must be dir or file!"
-
- self.type = type
- self.name = name
-
- if contents is None and type == "dir":
- contents = {}
-
- self.open_files = []
- self.contents = contents
- self.mem_file = None
- self.created_time = datetime.datetime.now()
- self.modified_time = self.created_time
- self.accessed_time = self.created_time
-
- self.xattrs = {}
-
- self.lock = None
- if self.type == 'file':
- self.mem_file = StringIO()
- self.lock = threading.RLock()
-
- def get_value(self):
- self.lock.acquire()
- try:
- return self.mem_file.getvalue()
- finally:
- self.lock.release()
- data = property(get_value)
-
- def desc_contents(self):
- if self.isfile():
- return "<file %s>" % self.name
- elif self.isdir():
- return "<dir %s>" % "".join("%s: %s" % (k, v.desc_contents()) for k, v in self.contents.iteritems())
-
- def isdir(self):
- return self.type == "dir"
-
- def isfile(self):
- return self.type == "file"
-
- def __str__(self):
- return "%s: %s" % (self.name, self.desc_contents())
-
- @sync
- def __getstate__(self):
- state = self.__dict__.copy()
- state.pop('lock')
- if self.mem_file is not None:
- state['mem_file'] = self.data
- return state
-
- def __setstate__(self, state):
- self.__dict__.update(state)
- if self.type == 'file':
- self.lock = threading.RLock()
- else:
- self.lock = None
- if self.mem_file is not None:
- data = self.mem_file
- self.mem_file = StringIO()
- self.mem_file.write(data)
-
-
-class MemoryFS(FS):
- """An in-memory filesystem.
-
- """
-
- _meta = {'thread_safe': True,
- 'network': False,
- 'virtual': False,
- 'read_only': False,
- 'unicode_paths': True,
- 'case_insensitive_paths': False,
- 'atomic.move': False,
- 'atomic.copy': False,
- 'atomic.makedir': True,
- 'atomic.rename': True,
- 'atomic.setcontents': False}
-
- def _make_dir_entry(self, *args, **kwargs):
- return self.dir_entry_factory(*args, **kwargs)
-
- def __init__(self, file_factory=None):
- super(MemoryFS, self).__init__(thread_synchronize=_thread_synchronize_default)
-
- self.dir_entry_factory = DirEntry
- self.file_factory = file_factory or MemoryFile
- if not callable(self.file_factory):
- raise ValueError("file_factory should be callable")
-
- self.root = self._make_dir_entry('dir', 'root')
-
- def __str__(self):
- return "<MemoryFS>"
-
- def __repr__(self):
- return "MemoryFS()"
-
- def __unicode__(self):
- return "<MemoryFS>"
-
- @synchronize
- def _get_dir_entry(self, dirpath):
- dirpath = normpath(dirpath)
- current_dir = self.root
- for path_component in iteratepath(dirpath):
- if current_dir.contents is None:
- return None
- dir_entry = current_dir.contents.get(path_component, None)
- if dir_entry is None:
- return None
- current_dir = dir_entry
- return current_dir
-
- @synchronize
- def _dir_entry(self, path):
- dir_entry = self._get_dir_entry(path)
- if dir_entry is None:
- raise ResourceNotFoundError(path)
- return dir_entry
-
- @synchronize
- def desc(self, path):
- if self.isdir(path):
- return "Memory dir"
- elif self.isfile(path):
- return "Memory file object"
- else:
- return "No description available"
-
- @synchronize
- def isdir(self, path):
- path = normpath(path)
- if path in ('', '/'):
- return True
- dir_item = self._get_dir_entry(path)
- if dir_item is None:
- return False
- return dir_item.isdir()
-
- @synchronize
- def isfile(self, path):
- path = normpath(path)
- if path in ('', '/'):
- return False
- dir_item = self._get_dir_entry(path)
- if dir_item is None:
- return False
- return dir_item.isfile()
-
- @synchronize
- def exists(self, path):
- path = normpath(path)
- if path in ('', '/'):
- return True
- return self._get_dir_entry(path) is not None
-
- @synchronize
- def makedir(self, dirname, recursive=False, allow_recreate=False):
- if not dirname and not allow_recreate:
- raise PathError(dirname)
- fullpath = normpath(dirname)
- if fullpath in ('', '/'):
- if allow_recreate:
- return
- raise DestinationExistsError(dirname)
- dirpath, dirname = pathsplit(dirname.rstrip('/'))
-
- if recursive:
- parent_dir = self._get_dir_entry(dirpath)
- if parent_dir is not None:
- if parent_dir.isfile():
- raise ResourceInvalidError(dirname, msg="Can not create a directory, because path references a file: %(path)s")
- else:
- if not allow_recreate:
- if dirname in parent_dir.contents:
- raise DestinationExistsError(dirname, msg="Can not create a directory that already exists (try allow_recreate=True): %(path)s")
-
- current_dir = self.root
- for path_component in iteratepath(dirpath)[:-1]:
- dir_item = current_dir.contents.get(path_component, None)
- if dir_item is None:
- break
- if not dir_item.isdir():
- raise ResourceInvalidError(dirname, msg="Can not create a directory, because path references a file: %(path)s")
- current_dir = dir_item
-
- current_dir = self.root
- for path_component in iteratepath(dirpath):
- dir_item = current_dir.contents.get(path_component, None)
- if dir_item is None:
- new_dir = self._make_dir_entry("dir", path_component)
- current_dir.contents[path_component] = new_dir
- current_dir = new_dir
- else:
- current_dir = dir_item
-
- parent_dir = current_dir
-
- else:
- parent_dir = self._get_dir_entry(dirpath)
- if parent_dir is None:
- raise ParentDirectoryMissingError(dirname, msg="Could not make dir, as parent dir does not exist: %(path)s")
-
- dir_item = parent_dir.contents.get(dirname, None)
- if dir_item is not None:
- if dir_item.isdir():
- if not allow_recreate:
- raise DestinationExistsError(dirname)
- else:
- raise ResourceInvalidError(dirname, msg="Can not create a directory, because path references a file: %(path)s")
-
- if dir_item is None:
- parent_dir.contents[dirname] = self._make_dir_entry("dir", dirname)
-
-
- #@synchronize
- #def _orphan_files(self, file_dir_entry):
- # for f in file_dir_entry.open_files[:]:
- # f.close()
-
-
- @synchronize
- @iotools.filelike_to_stream
- def open(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs):
- path = normpath(path)
- filepath, filename = pathsplit(path)
- parent_dir_entry = self._get_dir_entry(filepath)
-
- if parent_dir_entry is None or not parent_dir_entry.isdir():
- raise ResourceNotFoundError(path)
-
- if 'r' in mode or 'a' in mode:
- if filename not in parent_dir_entry.contents:
- raise ResourceNotFoundError(path)
-
- file_dir_entry = parent_dir_entry.contents[filename]
- if file_dir_entry.isdir():
- raise ResourceInvalidError(path)
-
- file_dir_entry.accessed_time = datetime.datetime.now()
-
- mem_file = self.file_factory(path, self, file_dir_entry.mem_file, mode, file_dir_entry.lock)
- file_dir_entry.open_files.append(mem_file)
- return mem_file
-
- elif 'w' in mode:
- if filename not in parent_dir_entry.contents:
- file_dir_entry = self._make_dir_entry("file", filename)
- parent_dir_entry.contents[filename] = file_dir_entry
- else:
- file_dir_entry = parent_dir_entry.contents[filename]
-
- file_dir_entry.accessed_time = datetime.datetime.now()
-
- mem_file = self.file_factory(path, self, file_dir_entry.mem_file, mode, file_dir_entry.lock)
- file_dir_entry.open_files.append(mem_file)
- return mem_file
-
- if parent_dir_entry is None:
- raise ResourceNotFoundError(path)
-
- @synchronize
- def remove(self, path):
- dir_entry = self._get_dir_entry(path)
-
- if dir_entry is None:
- raise ResourceNotFoundError(path)
-
- if dir_entry.isdir():
- raise ResourceInvalidError(path, msg="That's a directory, not a file: %(path)s")
-
- pathname, dirname = pathsplit(path)
- parent_dir = self._get_dir_entry(pathname)
- del parent_dir.contents[dirname]
-
- @synchronize
- def removedir(self, path, recursive=False, force=False):
- path = normpath(path)
- if path in ('', '/'):
- raise RemoveRootError(path)
- dir_entry = self._get_dir_entry(path)
-
- if dir_entry is None:
- raise ResourceNotFoundError(path)
- if not dir_entry.isdir():
- raise ResourceInvalidError(path, msg="Can't remove resource, its not a directory: %(path)s" )
-
- if dir_entry.contents and not force:
- raise DirectoryNotEmptyError(path)
-
- if recursive:
- rpathname = path
- while rpathname:
- rpathname, dirname = pathsplit(rpathname)
- parent_dir = self._get_dir_entry(rpathname)
- if not dirname:
- raise RemoveRootError(path)
- del parent_dir.contents[dirname]
- # stop recursing if the directory has other contents
- if parent_dir.contents:
- break
- else:
- pathname, dirname = pathsplit(path)
- parent_dir = self._get_dir_entry(pathname)
- if not dirname:
- raise RemoveRootError(path)
- del parent_dir.contents[dirname]
-
- @synchronize
- def rename(self, src, dst):
- src = normpath(src)
- dst = normpath(dst)
- src_dir, src_name = pathsplit(src)
- src_entry = self._get_dir_entry(src)
- if src_entry is None:
- raise ResourceNotFoundError(src)
- open_files = src_entry.open_files[:]
- for f in open_files:
- f.flush()
- f.path = dst
-
- dst_dir,dst_name = pathsplit(dst)
- dst_entry = self._get_dir_entry(dst)
- if dst_entry is not None:
- raise DestinationExistsError(dst)
-
- src_dir_entry = self._get_dir_entry(src_dir)
- src_xattrs = src_dir_entry.xattrs.copy()
- dst_dir_entry = self._get_dir_entry(dst_dir)
- if dst_dir_entry is None:
- raise ParentDirectoryMissingError(dst)
- dst_dir_entry.contents[dst_name] = src_dir_entry.contents[src_name]
- dst_dir_entry.contents[dst_name].name = dst_name
- dst_dir_entry.xattrs.update(src_xattrs)
- del src_dir_entry.contents[src_name]
-
- @synchronize
- def settimes(self, path, accessed_time=None, modified_time=None):
- now = datetime.datetime.now()
- if accessed_time is None:
- accessed_time = now
- if modified_time is None:
- modified_time = now
-
- dir_entry = self._get_dir_entry(path)
- if dir_entry is not None:
- dir_entry.accessed_time = accessed_time
- dir_entry.modified_time = modified_time
- return True
- return False
-
- @synchronize
- def _on_close_memory_file(self, open_file, path):
- dir_entry = self._get_dir_entry(path)
- if dir_entry is not None and open_file in dir_entry.open_files:
- dir_entry.open_files.remove(open_file)
-
-
- @synchronize
- def _on_modify_memory_file(self, path):
- dir_entry = self._get_dir_entry(path)
- if dir_entry is not None:
- dir_entry.modified_time = datetime.datetime.now()
-
- @synchronize
- def listdir(self, path="/", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False):
- dir_entry = self._get_dir_entry(path)
- if dir_entry is None:
- raise ResourceNotFoundError(path)
- if dir_entry.isfile():
- raise ResourceInvalidError(path, msg="not a directory: %(path)s")
- paths = dir_entry.contents.keys()
- for (i,p) in enumerate(paths):
- if not isinstance(p,unicode):
- paths[i] = unicode(p)
- return self._listdir_helper(path, paths, wildcard, full, absolute, dirs_only, files_only)
-
- @synchronize
- def getinfo(self, path):
- dir_entry = self._get_dir_entry(path)
-
- if dir_entry is None:
- raise ResourceNotFoundError(path)
-
- info = {}
- info['created_time'] = dir_entry.created_time
- info['modified_time'] = dir_entry.modified_time
- info['accessed_time'] = dir_entry.accessed_time
-
- if dir_entry.isdir():
- info['st_mode'] = 0755 | stat.S_IFDIR
- else:
- info['size'] = len(dir_entry.data or b(''))
- info['st_mode'] = 0666 | stat.S_IFREG
-
- return info
-
- @synchronize
- def copydir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=1024*64):
- src_dir_entry = self._get_dir_entry(src)
- if src_dir_entry is None:
- raise ResourceNotFoundError(src)
- src_xattrs = src_dir_entry.xattrs.copy()
- super(MemoryFS, self).copydir(src, dst, overwrite, ignore_errors=ignore_errors, chunk_size=chunk_size)
- dst_dir_entry = self._get_dir_entry(dst)
- if dst_dir_entry is not None:
- dst_dir_entry.xattrs.update(src_xattrs)
-
- @synchronize
- def movedir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=1024*64):
- src_dir_entry = self._get_dir_entry(src)
- if src_dir_entry is None:
- raise ResourceNotFoundError(src)
- src_xattrs = src_dir_entry.xattrs.copy()
- super(MemoryFS, self).movedir(src, dst, overwrite, ignore_errors=ignore_errors, chunk_size=chunk_size)
- dst_dir_entry = self._get_dir_entry(dst)
- if dst_dir_entry is not None:
- dst_dir_entry.xattrs.update(src_xattrs)
-
- @synchronize
- def copy(self, src, dst, overwrite=False, chunk_size=1024*64):
- src_dir_entry = self._get_dir_entry(src)
- if src_dir_entry is None:
- raise ResourceNotFoundError(src)
- src_xattrs = src_dir_entry.xattrs.copy()
- super(MemoryFS, self).copy(src, dst, overwrite, chunk_size)
- dst_dir_entry = self._get_dir_entry(dst)
- if dst_dir_entry is not None:
- dst_dir_entry.xattrs.update(src_xattrs)
-
- @synchronize
- def move(self, src, dst, overwrite=False, chunk_size=1024*64):
- src_dir_entry = self._get_dir_entry(src)
- if src_dir_entry is None:
- raise ResourceNotFoundError(src)
- src_xattrs = src_dir_entry.xattrs.copy()
- super(MemoryFS, self).move(src, dst, overwrite, chunk_size)
- dst_dir_entry = self._get_dir_entry(dst)
- if dst_dir_entry is not None:
- dst_dir_entry.xattrs.update(src_xattrs)
-
- @synchronize
- def getcontents(self, path, mode="rb", encoding=None, errors=None, newline=None):
- dir_entry = self._get_dir_entry(path)
- if dir_entry is None:
- raise ResourceNotFoundError(path)
- if not dir_entry.isfile():
- raise ResourceInvalidError(path, msg="not a file: %(path)s")
- data = dir_entry.data or b('')
- if 'b' not in mode:
- return iotools.decode_binary(data, encoding=encoding, errors=errors, newline=newline)
- return data
-
- @synchronize
- def setcontents(self, path, data=b'', encoding=None, errors=None, chunk_size=1024*64):
- if isinstance(data, six.binary_type):
- if not self.exists(path):
- self.open(path, 'wb').close()
- dir_entry = self._get_dir_entry(path)
- if not dir_entry.isfile():
-            raise ResourceInvalidError(path, msg="not a file: %(path)s")
- new_mem_file = StringIO()
- new_mem_file.write(data)
- dir_entry.mem_file = new_mem_file
- return len(data)
-
- return super(MemoryFS, self).setcontents(path, data=data, encoding=encoding, errors=errors, chunk_size=chunk_size)
-
- # if isinstance(data, six.text_type):
- # return super(MemoryFS, self).setcontents(path, data, encoding=encoding, errors=errors, chunk_size=chunk_size)
- # if not self.exists(path):
- # self.open(path, 'wb').close()
-
- # dir_entry = self._get_dir_entry(path)
- # if not dir_entry.isfile():
- # raise ResourceInvalidError('Not a directory %(path)s', path)
- # new_mem_file = StringIO()
- # new_mem_file.write(data)
- # dir_entry.mem_file = new_mem_file
-
- @synchronize
- def setxattr(self, path, key, value):
- dir_entry = self._dir_entry(path)
- key = unicode(key)
- dir_entry.xattrs[key] = value
-
- @synchronize
- def getxattr(self, path, key, default=None):
- key = unicode(key)
- dir_entry = self._dir_entry(path)
- return dir_entry.xattrs.get(key, default)
-
- @synchronize
- def delxattr(self, path, key):
- dir_entry = self._dir_entry(path)
- try:
- del dir_entry.xattrs[key]
- except KeyError:
- pass
-
- @synchronize
- def listxattrs(self, path):
- dir_entry = self._dir_entry(path)
- return dir_entry.xattrs.keys()
+#!/usr/bin/env python
+"""
+fs.memoryfs
+===========
+
+A filesystem that exists in memory only, which makes it extremely fast but non-permanent.
+
+If you open a file from a `memoryfs`, you will get back a StringIO object from the standard library.
+
+
+"""
+
+import datetime
+import stat
+from fs.path import iteratepath, pathsplit, normpath
+from fs.base import *
+from fs.errors import *
+from fs import _thread_synchronize_default
+from fs.filelike import StringIO
+from fs import iotools
+from os import SEEK_END
+import threading
+
+import six
+from six import b
+
+
+def _check_mode(mode, mode_chars):
+ for c in mode_chars:
+ if c not in mode:
+ return False
+ return True
+
+
+class MemoryFile(object):
+
+ def seek_and_lock(f):
+ def deco(self, *args, **kwargs):
+ try:
+ self._lock.acquire()
+ self.mem_file.seek(self.pos)
+ ret = f(self, *args, **kwargs)
+ self.pos = self.mem_file.tell()
+ return ret
+ finally:
+ self._lock.release()
+ return deco
+
+ def __init__(self, path, memory_fs, mem_file, mode, lock):
+ self.closed = False
+ self.path = path
+ self.memory_fs = memory_fs
+ self.mem_file = mem_file
+ self.mode = mode
+ self._lock = lock
+
+ self.pos = 0
+
+ if _check_mode(mode, 'a'):
+ lock.acquire()
+ try:
+ self.mem_file.seek(0, SEEK_END)
+ self.pos = self.mem_file.tell()
+ finally:
+ lock.release()
+
+ elif _check_mode(mode, 'w'):
+ lock.acquire()
+ try:
+ self.mem_file.seek(0)
+ self.mem_file.truncate()
+ finally:
+ lock.release()
+
+ assert self.mem_file is not None, "self.mem_file should have a value"
+
+ def __str__(self):
+ return "<MemoryFile in %s %s>" % (self.memory_fs, self.path)
+
+ def __repr__(self):
+ return u"<MemoryFile in %s %s>" % (self.memory_fs, self.path)
+
+ def __unicode__(self):
+ return u"<MemoryFile in %s %s>" % (self.memory_fs, self.path)
+
+ def __del__(self):
+ if not self.closed:
+ self.close()
+
+ def flush(self):
+ pass
+
+ def __iter__(self):
+ if 'r' not in self.mode and '+' not in self.mode:
+ raise IOError("File not open for reading")
+ self.mem_file.seek(self.pos)
+ for line in self.mem_file:
+ yield line
+
+ @seek_and_lock
+ def next(self):
+ if 'r' not in self.mode and '+' not in self.mode:
+ raise IOError("File not open for reading")
+ return self.mem_file.next()
+
+ @seek_and_lock
+ def readline(self, *args, **kwargs):
+ if 'r' not in self.mode and '+' not in self.mode:
+ raise IOError("File not open for reading")
+ return self.mem_file.readline(*args, **kwargs)
+
+ def close(self):
+ do_close = False
+ self._lock.acquire()
+ try:
+ do_close = not self.closed and self.mem_file is not None
+ if do_close:
+ self.closed = True
+ finally:
+ self._lock.release()
+ if do_close:
+ self.memory_fs._on_close_memory_file(self, self.path)
+
+ @seek_and_lock
+ def read(self, size=None):
+ if 'r' not in self.mode and '+' not in self.mode:
+ raise IOError("File not open for reading")
+ if size is None:
+ size = -1
+ return self.mem_file.read(size)
+
+ @seek_and_lock
+ def seek(self, *args, **kwargs):
+ return self.mem_file.seek(*args, **kwargs)
+
+ @seek_and_lock
+ def tell(self):
+ return self.pos
+
+ @seek_and_lock
+ def truncate(self, *args, **kwargs):
+ if 'r' in self.mode and '+' not in self.mode:
+ raise IOError("File not open for writing")
+ return self.mem_file.truncate(*args, **kwargs)
+
+ #@seek_and_lock
+ def write(self, data):
+ if 'r' in self.mode and '+' not in self.mode:
+ raise IOError("File not open for writing")
+ self.memory_fs._on_modify_memory_file(self.path)
+ self._lock.acquire()
+ try:
+ self.mem_file.seek(self.pos)
+ self.mem_file.write(data)
+ self.pos = self.mem_file.tell()
+ finally:
+ self._lock.release()
+
+ @seek_and_lock
+ def writelines(self, *args, **kwargs):
+ return self.mem_file.writelines(*args, **kwargs)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.close()
+ return False
+
+
+class DirEntry(object):
+
+ def sync(f):
+ def deco(self, *args, **kwargs):
+ if self.lock is not None:
+ try:
+ self.lock.acquire()
+ return f(self, *args, **kwargs)
+ finally:
+ self.lock.release()
+ else:
+ return f(self, *args, **kwargs)
+ return deco
+
+ def __init__(self, type, name, contents=None):
+
+ assert type in ("dir", "file"), "Type must be dir or file!"
+
+ self.type = type
+ self.name = name
+
+ if contents is None and type == "dir":
+ contents = {}
+
+ self.open_files = []
+ self.contents = contents
+ self.mem_file = None
+ self.created_time = datetime.datetime.now()
+ self.modified_time = self.created_time
+ self.accessed_time = self.created_time
+
+ self.xattrs = {}
+
+ self.lock = None
+ if self.type == 'file':
+ self.mem_file = StringIO()
+ self.lock = threading.RLock()
+
+ def get_value(self):
+ self.lock.acquire()
+ try:
+ return self.mem_file.getvalue()
+ finally:
+ self.lock.release()
+ data = property(get_value)
+
+ def desc_contents(self):
+ if self.isfile():
+ return "<file %s>" % self.name
+ elif self.isdir():
+ return "<dir %s>" % "".join("%s: %s" % (k, v.desc_contents()) for k, v in self.contents.iteritems())
+
+ def isdir(self):
+ return self.type == "dir"
+
+ def isfile(self):
+ return self.type == "file"
+
+ def __str__(self):
+ return "%s: %s" % (self.name, self.desc_contents())
+
+ @sync
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ state.pop('lock')
+ if self.mem_file is not None:
+ state['mem_file'] = self.data
+ return state
+
+ def __setstate__(self, state):
+ self.__dict__.update(state)
+ if self.type == 'file':
+ self.lock = threading.RLock()
+ else:
+ self.lock = None
+ if self.mem_file is not None:
+ data = self.mem_file
+ self.mem_file = StringIO()
+ self.mem_file.write(data)
+
+
+class MemoryFS(FS):
+ """An in-memory filesystem.
+
+ """
+
+ _meta = {'thread_safe': True,
+ 'network': False,
+ 'virtual': False,
+ 'read_only': False,
+ 'unicode_paths': True,
+ 'case_insensitive_paths': False,
+ 'atomic.move': False,
+ 'atomic.copy': False,
+ 'atomic.makedir': True,
+ 'atomic.rename': True,
+ 'atomic.setcontents': False}
+
+ def _make_dir_entry(self, *args, **kwargs):
+ return self.dir_entry_factory(*args, **kwargs)
+
+ def __init__(self, file_factory=None):
+ super(MemoryFS, self).__init__(thread_synchronize=_thread_synchronize_default)
+
+ self.dir_entry_factory = DirEntry
+ self.file_factory = file_factory or MemoryFile
+ if not callable(self.file_factory):
+ raise ValueError("file_factory should be callable")
+
+ self.root = self._make_dir_entry('dir', 'root')
+
+ def __str__(self):
+ return "<MemoryFS>"
+
+ def __repr__(self):
+ return "MemoryFS()"
+
+ def __unicode__(self):
+ return "<MemoryFS>"
+
+ @synchronize
+ def _get_dir_entry(self, dirpath):
+ dirpath = normpath(dirpath)
+ current_dir = self.root
+ for path_component in iteratepath(dirpath):
+ if current_dir.contents is None:
+ return None
+ dir_entry = current_dir.contents.get(path_component, None)
+ if dir_entry is None:
+ return None
+ current_dir = dir_entry
+ return current_dir
+
+ @synchronize
+ def _dir_entry(self, path):
+ dir_entry = self._get_dir_entry(path)
+ if dir_entry is None:
+ raise ResourceNotFoundError(path)
+ return dir_entry
+
+ @synchronize
+ def desc(self, path):
+ if self.isdir(path):
+ return "Memory dir"
+ elif self.isfile(path):
+ return "Memory file object"
+ else:
+ return "No description available"
+
+ @synchronize
+ def isdir(self, path):
+ path = normpath(path)
+ if path in ('', '/'):
+ return True
+ dir_item = self._get_dir_entry(path)
+ if dir_item is None:
+ return False
+ return dir_item.isdir()
+
+ @synchronize
+ def isfile(self, path):
+ path = normpath(path)
+ if path in ('', '/'):
+ return False
+ dir_item = self._get_dir_entry(path)
+ if dir_item is None:
+ return False
+ return dir_item.isfile()
+
+ @synchronize
+ def exists(self, path):
+ path = normpath(path)
+ if path in ('', '/'):
+ return True
+ return self._get_dir_entry(path) is not None
+
+ @synchronize
+ def makedir(self, dirname, recursive=False, allow_recreate=False):
+ if not dirname and not allow_recreate:
+ raise PathError(dirname)
+ fullpath = normpath(dirname)
+ if fullpath in ('', '/'):
+ if allow_recreate:
+ return
+ raise DestinationExistsError(dirname)
+ dirpath, dirname = pathsplit(dirname.rstrip('/'))
+
+ if recursive:
+ parent_dir = self._get_dir_entry(dirpath)
+ if parent_dir is not None:
+ if parent_dir.isfile():
+ raise ResourceInvalidError(dirname, msg="Can not create a directory, because path references a file: %(path)s")
+ else:
+ if not allow_recreate:
+ if dirname in parent_dir.contents:
+ raise DestinationExistsError(dirname, msg="Can not create a directory that already exists (try allow_recreate=True): %(path)s")
+
+ current_dir = self.root
+ for path_component in iteratepath(dirpath)[:-1]:
+ dir_item = current_dir.contents.get(path_component, None)
+ if dir_item is None:
+ break
+ if not dir_item.isdir():
+ raise ResourceInvalidError(dirname, msg="Can not create a directory, because path references a file: %(path)s")
+ current_dir = dir_item
+
+ current_dir = self.root
+ for path_component in iteratepath(dirpath):
+ dir_item = current_dir.contents.get(path_component, None)
+ if dir_item is None:
+ new_dir = self._make_dir_entry("dir", path_component)
+ current_dir.contents[path_component] = new_dir
+ current_dir = new_dir
+ else:
+ current_dir = dir_item
+
+ parent_dir = current_dir
+
+ else:
+ parent_dir = self._get_dir_entry(dirpath)
+ if parent_dir is None:
+ raise ParentDirectoryMissingError(dirname, msg="Could not make dir, as parent dir does not exist: %(path)s")
+
+ dir_item = parent_dir.contents.get(dirname, None)
+ if dir_item is not None:
+ if dir_item.isdir():
+ if not allow_recreate:
+ raise DestinationExistsError(dirname)
+ else:
+ raise ResourceInvalidError(dirname, msg="Can not create a directory, because path references a file: %(path)s")
+
+ if dir_item is None:
+ parent_dir.contents[dirname] = self._make_dir_entry("dir", dirname)
+
+
+ #@synchronize
+ #def _orphan_files(self, file_dir_entry):
+ # for f in file_dir_entry.open_files[:]:
+ # f.close()
+
+
+ @synchronize
+ @iotools.filelike_to_stream
+ def open(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs):
+ path = normpath(path)
+ filepath, filename = pathsplit(path)
+ parent_dir_entry = self._get_dir_entry(filepath)
+
+ if parent_dir_entry is None or not parent_dir_entry.isdir():
+ raise ResourceNotFoundError(path)
+
+ if 'r' in mode or 'a' in mode:
+ if filename not in parent_dir_entry.contents:
+ raise ResourceNotFoundError(path)
+
+ file_dir_entry = parent_dir_entry.contents[filename]
+ if file_dir_entry.isdir():
+ raise ResourceInvalidError(path)
+
+ file_dir_entry.accessed_time = datetime.datetime.now()
+
+ mem_file = self.file_factory(path, self, file_dir_entry.mem_file, mode, file_dir_entry.lock)
+ file_dir_entry.open_files.append(mem_file)
+ return mem_file
+
+ elif 'w' in mode:
+ if filename not in parent_dir_entry.contents:
+ file_dir_entry = self._make_dir_entry("file", filename)
+ parent_dir_entry.contents[filename] = file_dir_entry
+ else:
+ file_dir_entry = parent_dir_entry.contents[filename]
+
+ file_dir_entry.accessed_time = datetime.datetime.now()
+
+ mem_file = self.file_factory(path, self, file_dir_entry.mem_file, mode, file_dir_entry.lock)
+ file_dir_entry.open_files.append(mem_file)
+ return mem_file
+
+ if parent_dir_entry is None:
+ raise ResourceNotFoundError(path)
+
+ @synchronize
+ def remove(self, path):
+ dir_entry = self._get_dir_entry(path)
+
+ if dir_entry is None:
+ raise ResourceNotFoundError(path)
+
+ if dir_entry.isdir():
+ raise ResourceInvalidError(path, msg="That's a directory, not a file: %(path)s")
+
+ pathname, dirname = pathsplit(path)
+ parent_dir = self._get_dir_entry(pathname)
+ del parent_dir.contents[dirname]
+
+ @synchronize
+ def removedir(self, path, recursive=False, force=False):
+ path = normpath(path)
+ if path in ('', '/'):
+ raise RemoveRootError(path)
+ dir_entry = self._get_dir_entry(path)
+
+ if dir_entry is None:
+ raise ResourceNotFoundError(path)
+ if not dir_entry.isdir():
+ raise ResourceInvalidError(path, msg="Can't remove resource, its not a directory: %(path)s" )
+
+ if dir_entry.contents and not force:
+ raise DirectoryNotEmptyError(path)
+
+ if recursive:
+ rpathname = path
+ while rpathname:
+ rpathname, dirname = pathsplit(rpathname)
+ parent_dir = self._get_dir_entry(rpathname)
+ if not dirname:
+ raise RemoveRootError(path)
+ del parent_dir.contents[dirname]
+ # stop recursing if the directory has other contents
+ if parent_dir.contents:
+ break
+ else:
+ pathname, dirname = pathsplit(path)
+ parent_dir = self._get_dir_entry(pathname)
+ if not dirname:
+ raise RemoveRootError(path)
+ del parent_dir.contents[dirname]
+
+ @synchronize
+ def rename(self, src, dst):
+ src = normpath(src)
+ dst = normpath(dst)
+ src_dir, src_name = pathsplit(src)
+ src_entry = self._get_dir_entry(src)
+ if src_entry is None:
+ raise ResourceNotFoundError(src)
+ open_files = src_entry.open_files[:]
+ for f in open_files:
+ f.flush()
+ f.path = dst
+
+ dst_dir,dst_name = pathsplit(dst)
+ dst_entry = self._get_dir_entry(dst)
+ if dst_entry is not None:
+ raise DestinationExistsError(dst)
+
+ src_dir_entry = self._get_dir_entry(src_dir)
+ src_xattrs = src_dir_entry.xattrs.copy()
+ dst_dir_entry = self._get_dir_entry(dst_dir)
+ if dst_dir_entry is None:
+ raise ParentDirectoryMissingError(dst)
+ dst_dir_entry.contents[dst_name] = src_dir_entry.contents[src_name]
+ dst_dir_entry.contents[dst_name].name = dst_name
+ dst_dir_entry.xattrs.update(src_xattrs)
+ del src_dir_entry.contents[src_name]
+
+ @synchronize
+ def settimes(self, path, accessed_time=None, modified_time=None):
+ now = datetime.datetime.now()
+ if accessed_time is None:
+ accessed_time = now
+ if modified_time is None:
+ modified_time = now
+
+ dir_entry = self._get_dir_entry(path)
+ if dir_entry is not None:
+ dir_entry.accessed_time = accessed_time
+ dir_entry.modified_time = modified_time
+ return True
+ return False
+
+ @synchronize
+ def _on_close_memory_file(self, open_file, path):
+ dir_entry = self._get_dir_entry(path)
+ if dir_entry is not None and open_file in dir_entry.open_files:
+ dir_entry.open_files.remove(open_file)
+
+
+ @synchronize
+ def _on_modify_memory_file(self, path):
+ dir_entry = self._get_dir_entry(path)
+ if dir_entry is not None:
+ dir_entry.modified_time = datetime.datetime.now()
+
+ @synchronize
+ def listdir(self, path="/", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False):
+ dir_entry = self._get_dir_entry(path)
+ if dir_entry is None:
+ raise ResourceNotFoundError(path)
+ if dir_entry.isfile():
+ raise ResourceInvalidError(path, msg="not a directory: %(path)s")
+ paths = dir_entry.contents.keys()
+ for (i,p) in enumerate(paths):
+ if not isinstance(p,unicode):
+ paths[i] = unicode(p)
+ return self._listdir_helper(path, paths, wildcard, full, absolute, dirs_only, files_only)
+
+ @synchronize
+ def getinfo(self, path):
+ dir_entry = self._get_dir_entry(path)
+
+ if dir_entry is None:
+ raise ResourceNotFoundError(path)
+
+ info = {}
+ info['created_time'] = dir_entry.created_time
+ info['modified_time'] = dir_entry.modified_time
+ info['accessed_time'] = dir_entry.accessed_time
+
+ if dir_entry.isdir():
+ info['st_mode'] = 0755 | stat.S_IFDIR
+ else:
+ info['size'] = len(dir_entry.data or b(''))
+ info['st_mode'] = 0666 | stat.S_IFREG
+
+ return info
+
+ @synchronize
+ def copydir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=1024*64):
+ src_dir_entry = self._get_dir_entry(src)
+ if src_dir_entry is None:
+ raise ResourceNotFoundError(src)
+ src_xattrs = src_dir_entry.xattrs.copy()
+ super(MemoryFS, self).copydir(src, dst, overwrite, ignore_errors=ignore_errors, chunk_size=chunk_size)
+ dst_dir_entry = self._get_dir_entry(dst)
+ if dst_dir_entry is not None:
+ dst_dir_entry.xattrs.update(src_xattrs)
+
+ @synchronize
+ def movedir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=1024*64):
+ src_dir_entry = self._get_dir_entry(src)
+ if src_dir_entry is None:
+ raise ResourceNotFoundError(src)
+ src_xattrs = src_dir_entry.xattrs.copy()
+ super(MemoryFS, self).movedir(src, dst, overwrite, ignore_errors=ignore_errors, chunk_size=chunk_size)
+ dst_dir_entry = self._get_dir_entry(dst)
+ if dst_dir_entry is not None:
+ dst_dir_entry.xattrs.update(src_xattrs)
+
+ @synchronize
+ def copy(self, src, dst, overwrite=False, chunk_size=1024*64):
+ src_dir_entry = self._get_dir_entry(src)
+ if src_dir_entry is None:
+ raise ResourceNotFoundError(src)
+ src_xattrs = src_dir_entry.xattrs.copy()
+ super(MemoryFS, self).copy(src, dst, overwrite, chunk_size)
+ dst_dir_entry = self._get_dir_entry(dst)
+ if dst_dir_entry is not None:
+ dst_dir_entry.xattrs.update(src_xattrs)
+
+ @synchronize
+ def move(self, src, dst, overwrite=False, chunk_size=1024*64):
+ src_dir_entry = self._get_dir_entry(src)
+ if src_dir_entry is None:
+ raise ResourceNotFoundError(src)
+ src_xattrs = src_dir_entry.xattrs.copy()
+ super(MemoryFS, self).move(src, dst, overwrite, chunk_size)
+ dst_dir_entry = self._get_dir_entry(dst)
+ if dst_dir_entry is not None:
+ dst_dir_entry.xattrs.update(src_xattrs)
+
+ @synchronize
+ def getcontents(self, path, mode="rb", encoding=None, errors=None, newline=None):
+ dir_entry = self._get_dir_entry(path)
+ if dir_entry is None:
+ raise ResourceNotFoundError(path)
+ if not dir_entry.isfile():
+ raise ResourceInvalidError(path, msg="not a file: %(path)s")
+ data = dir_entry.data or b('')
+ if 'b' not in mode:
+ return iotools.decode_binary(data, encoding=encoding, errors=errors, newline=newline)
+ return data
+
+ @synchronize
+ def setcontents(self, path, data=b'', encoding=None, errors=None, chunk_size=1024*64):
+ if isinstance(data, six.binary_type):
+ if not self.exists(path):
+ self.open(path, 'wb').close()
+ dir_entry = self._get_dir_entry(path)
+ if not dir_entry.isfile():
+            raise ResourceInvalidError(path, msg="not a file: %(path)s")
+ new_mem_file = StringIO()
+ new_mem_file.write(data)
+ dir_entry.mem_file = new_mem_file
+ return len(data)
+
+ return super(MemoryFS, self).setcontents(path, data=data, encoding=encoding, errors=errors, chunk_size=chunk_size)
+
+ # if isinstance(data, six.text_type):
+ # return super(MemoryFS, self).setcontents(path, data, encoding=encoding, errors=errors, chunk_size=chunk_size)
+ # if not self.exists(path):
+ # self.open(path, 'wb').close()
+
+ # dir_entry = self._get_dir_entry(path)
+ # if not dir_entry.isfile():
+ # raise ResourceInvalidError('Not a directory %(path)s', path)
+ # new_mem_file = StringIO()
+ # new_mem_file.write(data)
+ # dir_entry.mem_file = new_mem_file
+
+ @synchronize
+ def setxattr(self, path, key, value):
+ dir_entry = self._dir_entry(path)
+ key = unicode(key)
+ dir_entry.xattrs[key] = value
+
+ @synchronize
+ def getxattr(self, path, key, default=None):
+ key = unicode(key)
+ dir_entry = self._dir_entry(path)
+ return dir_entry.xattrs.get(key, default)
+
+ @synchronize
+ def delxattr(self, path, key):
+ dir_entry = self._dir_entry(path)
+ try:
+ del dir_entry.xattrs[key]
+ except KeyError:
+ pass
+
+ @synchronize
+ def listxattrs(self, path):
+ dir_entry = self._dir_entry(path)
+ return dir_entry.xattrs.keys()
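
A brief example of the MemoryFS API restored above (a sketch; the paths and
contents are arbitrary):

    from fs.memoryfs import MemoryFS
    from six import b

    mem = MemoryFS()
    mem.makedir("docs")
    mem.setcontents("docs/hello.txt", b("hello world"))
    print mem.listdir("docs")                      # [u'hello.txt']
    print mem.getcontents("docs/hello.txt", "rb")  # 'hello world'
    with mem.open("docs/hello.txt", "a") as f:     # append via MemoryFile
        f.write(b("!"))
    mem.setxattr("docs/hello.txt", u"author", u"me")
    print mem.getxattr("docs/hello.txt", u"author")
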
diff --git a/fs/remotefs.py b/fs/remotefs.py
index 77fdfc2..529a8f9 100644
--- a/fs/remotefs.py
+++ b/fs/remotefs.py
@@ -14,20 +14,20 @@ from six import b
class PacketHandler(threading.Thread):
-
+
def __init__(self, transport, prelude_callback=None):
super(PacketHandler, self).__init__()
self.transport = transport
self.encoder = packetstream.JSONFileEncoder(transport)
self.decoder = packetstream.JSONDecoder(prelude_callback=None)
-
- self.queues = defaultdict(queue.Queue)
+
+ self.queues = defaultdict(queue.Queue)
self._encoder_lock = threading.Lock()
self._queues_lock = threading.Lock()
self._call_id_lock = threading.Lock()
-
+
self.call_id = 0
-
+
def run(self):
decoder = self.decoder
read = self.transport.read
@@ -37,96 +37,96 @@ class PacketHandler(threading.Thread):
if not data:
print "No data"
break
- print "data", repr(data)
+ print "data", repr(data)
for header, payload in decoder.feed(data):
print repr(header)
print repr(payload)
on_packet(header, payload)
-
+
def _new_call_id(self):
with self._call_id_lock:
self.call_id += 1
return self.call_id
-
+
def get_thread_queue(self, queue_id=None):
if queue_id is None:
queue_id = threading.current_thread().ident
with self._queues_lock:
return self.queues[queue_id]
-
+
def send_packet(self, header, payload=''):
- call_id = self._new_call_id()
- queue_id = threading.current_thread().ident
- client_ref = "%i:%i" % (queue_id, call_id)
+ call_id = self._new_call_id()
+ queue_id = threading.current_thread().ident
+ client_ref = "%i:%i" % (queue_id, call_id)
header['client_ref'] = client_ref
-
+
with self._encoder_lock:
self.encoder.write(header, payload)
-
+
return call_id
-
+
def get_packet(self, call_id):
-
+
if call_id is not None:
- queue_id = threading.current_thread().ident
+ queue_id = threading.current_thread().ident
client_ref = "%i:%i" % (queue_id, call_id)
else:
client_ref = None
-
+
queue = self.get_thread_queue()
-
- while True:
+
+ while True:
header, payload = queue.get()
print repr(header)
print repr(payload)
if client_ref is not None and header.get('client_ref') != client_ref:
continue
- break
-
+ break
+
return header, payload
-
+
def on_packet(self, header, payload):
client_ref = header.get('client_ref', '')
queue_id, call_id = client_ref.split(':', 1)
queue_id = int(queue_id)
#queue_id = header.get('queue_id', '')
- queue = self.get_thread_queue(queue_id)
+ queue = self.get_thread_queue(queue_id)
queue.put((header, payload))
-
-
+
+
class _SocketFile(object):
def __init__(self, socket):
self.socket = socket
-
+
def read(self, size):
try:
return self.socket.recv(size)
- except:
- return b('')
-
+ except:
+ return b('')
+
def write(self, data):
self.socket.sendall(data)
-
+
def close(self):
self.socket.shutdown(socket.SHUT_RDWR)
self.socket.close()
-
+
class _RemoteFile(object):
-
+
def __init__(self, path, connection):
self.path = path
- self.connection = connection
+ self.connection = connection
class RemoteFS(FS):
-
+
     _meta = { 'thread_safe' : True,
'network' : True,
'virtual' : False,
'read_only' : False,
'unicode_paths' : True,
}
-
+
def __init__(self, addr='', port=3000, username=None, password=None, resource=None, transport=None):
self.addr = addr
self.port = port
@@ -136,56 +136,56 @@ class RemoteFS(FS):
self.transport = transport
if self.transport is None:
self.transport = self._open_connection()
- self.packet_handler = PacketHandler(self.transport)
- self.packet_handler.start()
-
+ self.packet_handler = PacketHandler(self.transport)
+ self.packet_handler.start()
+
self._remote_call('auth',
username=username,
password=password,
resource=resource)
-
+
def _open_connection(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.addr, self.port))
socket_file = _SocketFile(sock)
socket_file.write(b('pyfs/0.1\n'))
return socket_file
-
+
def _make_call(self, method_name, *args, **kwargs):
call = dict(type='rpc',
method=method_name,
args=args,
kwargs=kwargs)
- return call
-
+ return call
+
def _remote_call(self, method_name, *args, **kwargs):
call = self._make_call(method_name, *args, **kwargs)
call_id = self.packet_handler.send_packet(call)
header, payload = self.packet_handler.get_packet(call_id)
return header, payload
-
+
def ping(self, msg):
call_id = self.packet_handler.send_packet({'type':'rpc', 'method':'ping'}, msg)
header, payload = self.packet_handler.get_packet(call_id)
print "PING"
print header
print payload
-
+
def close(self):
self.transport.close()
self.packet_handler.join()
-
+
def open(self, path, mode="r", **kwargs):
pass
-
+
def exists(self, path):
         header, payload = self._remote_call('exists', path)
         return header.get('response')
-
-
+
+
if __name__ == "__main__":
-
- rfs = RemoteFS()
+
+ rfs = RemoteFS()
rfs.close()
-
+
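
The request/response pairing in PacketHandler above hinges on the client_ref
header: each outgoing call is tagged "queue_id:call_id" so that replies can be
routed back to the calling thread's queue. A hypothetical round trip, assuming
a pyfs server is listening on the default port:

    rfs = RemoteFS(addr='127.0.0.1', port=3000,
                   username='user', password='secret')
    rfs.ping('hello')      # prints the reply header and payload
    print rfs.exists('/')  # sends {'type': 'rpc', 'method': 'exists', ...}
    rfs.close()            # closes the transport and joins the packet handler
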
diff --git a/fs/tests/test_multifs.py b/fs/tests/test_multifs.py
index 59b272a..2d70f82 100644
--- a/fs/tests/test_multifs.py
+++ b/fs/tests/test_multifs.py
@@ -5,9 +5,9 @@ import unittest
from six import b
class TestMultiFS(unittest.TestCase):
-
+
def test_auto_close(self):
- """Test MultiFS auto close is working"""
+ """Test MultiFS auto close is working"""
multi_fs = MultiFS()
m1 = MemoryFS()
m2 = MemoryFS()
@@ -18,7 +18,7 @@ class TestMultiFS(unittest.TestCase):
multi_fs.close()
self.assert_(m1.closed)
self.assert_(m2.closed)
-
+
def test_no_auto_close(self):
"""Test MultiFS auto close can be disables"""
multi_fs = MultiFS(auto_close=False)
@@ -31,8 +31,8 @@ class TestMultiFS(unittest.TestCase):
multi_fs.close()
self.assert_(not m1.closed)
self.assert_(not m2.closed)
-
-
+
+
def test_priority(self):
"""Test priority order is working"""
m1 = MemoryFS()
@@ -46,7 +46,7 @@ class TestMultiFS(unittest.TestCase):
multi_fs.addfs("m2", m2)
multi_fs.addfs("m3", m3)
self.assert_(multi_fs.getcontents("name") == b("m3"))
-
+
m1 = MemoryFS()
m2 = MemoryFS()
m3 = MemoryFS()
@@ -57,8 +57,8 @@ class TestMultiFS(unittest.TestCase):
multi_fs.addfs("m1", m1)
multi_fs.addfs("m2", m2, priority=10)
multi_fs.addfs("m3", m3)
- self.assert_(multi_fs.getcontents("name") == b("m2"))
-
+ self.assert_(multi_fs.getcontents("name") == b("m2"))
+
m1 = MemoryFS()
m2 = MemoryFS()
m3 = MemoryFS()
@@ -70,7 +70,7 @@ class TestMultiFS(unittest.TestCase):
multi_fs.addfs("m2", m2, priority=10)
multi_fs.addfs("m3", m3, priority=10)
self.assert_(multi_fs.getcontents("name") == b("m3"))
-
+
m1 = MemoryFS()
m2 = MemoryFS()
m3 = MemoryFS()
@@ -82,4 +82,4 @@ class TestMultiFS(unittest.TestCase):
multi_fs.addfs("m2", m2, priority=10)
multi_fs.addfs("m3", m3, priority=10)
self.assert_(multi_fs.getcontents("name") == b("m1"))
-
+
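
The priority tests above pin down MultiFS lookup order: a filesystem added
with a higher priority wins, and among equal priorities the most recently
added filesystem wins. Condensed:

    multi_fs = MultiFS()
    multi_fs.addfs("m1", m1)               # default priority
    multi_fs.addfs("m2", m2, priority=10)  # outranks m1 and m3
    multi_fs.addfs("m3", m3)
    multi_fs.getcontents("name")           # resolved from m2
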
diff --git a/fs/tests/test_s3fs.py b/fs/tests/test_s3fs.py
index cbc5f92..7b0e51c 100644
--- a/fs/tests/test_s3fs.py
+++ b/fs/tests/test_s3fs.py
@@ -17,8 +17,8 @@ from six import PY3
try:
from fs import s3fs
except ImportError:
- raise unittest.SkipTest("s3fs wasn't importable")
-
+ raise unittest.SkipTest("s3fs wasn't importable")
+
class TestS3FS(unittest.TestCase,FSTestCases,ThreadingTestCases):
@@ -27,7 +27,7 @@ class TestS3FS(unittest.TestCase,FSTestCases,ThreadingTestCases):
bucket = "test-s3fs.rfk.id.au"
- def setUp(self):
+ def setUp(self):
self.fs = s3fs.S3FS(self.bucket)
for k in self.fs._s3bukt.list():
self.fs._s3bukt.delete_key(k)
diff --git a/fs/tests/test_sqlitefs.py b/fs/tests/test_sqlitefs.py
index 14f5a55..f0a8ca9 100644
--- a/fs/tests/test_sqlitefs.py
+++ b/fs/tests/test_sqlitefs.py
@@ -10,8 +10,8 @@ import os
if SqliteFS:
class TestSqliteFS(unittest.TestCase, FSTestCases):
def setUp(self):
- self.fs = SqliteFS("sqlitefs.db")
+ self.fs = SqliteFS("sqlitefs.db")
def tearDown(self):
os.remove('sqlitefs.db')
-
- \ No newline at end of file
+
+
diff --git a/fs/tests/test_utils.py b/fs/tests/test_utils.py
index d2e435f..f0d336c 100644
--- a/fs/tests/test_utils.py
+++ b/fs/tests/test_utils.py
@@ -7,14 +7,14 @@ from fs import utils
from six import b
class TestUtils(unittest.TestCase):
-
+
def _make_fs(self, fs):
fs.setcontents("f1", b("file 1"))
fs.setcontents("f2", b("file 2"))
fs.setcontents("f3", b("file 3"))
fs.makedir("foo/bar", recursive=True)
fs.setcontents("foo/bar/fruit", b("apple"))
-
+
def _check_fs(self, fs):
self.assert_(fs.isfile("f1"))
self.assert_(fs.isfile("f2"))
@@ -24,72 +24,72 @@ class TestUtils(unittest.TestCase):
self.assertEqual(fs.getcontents("f1", "rb"), b("file 1"))
self.assertEqual(fs.getcontents("f2", "rb"), b("file 2"))
self.assertEqual(fs.getcontents("f3", "rb"), b("file 3"))
- self.assertEqual(fs.getcontents("foo/bar/fruit", "rb"), b("apple"))
-
+ self.assertEqual(fs.getcontents("foo/bar/fruit", "rb"), b("apple"))
+
def test_copydir_root(self):
"""Test copydir from root"""
fs1 = MemoryFS()
- self._make_fs(fs1)
+ self._make_fs(fs1)
fs2 = MemoryFS()
- utils.copydir(fs1, fs2)
+ utils.copydir(fs1, fs2)
self._check_fs(fs2)
-
+
fs1 = TempFS()
- self._make_fs(fs1)
+ self._make_fs(fs1)
fs2 = TempFS()
- utils.copydir(fs1, fs2)
+ utils.copydir(fs1, fs2)
self._check_fs(fs2)
-
+
def test_copydir_indir(self):
- """Test copydir in a directory"""
+ """Test copydir in a directory"""
fs1 = MemoryFS()
fs2 = MemoryFS()
- self._make_fs(fs1)
- utils.copydir(fs1, (fs2, "copy"))
+ self._make_fs(fs1)
+ utils.copydir(fs1, (fs2, "copy"))
self._check_fs(fs2.opendir("copy"))
fs1 = TempFS()
fs2 = TempFS()
- self._make_fs(fs1)
- utils.copydir(fs1, (fs2, "copy"))
+ self._make_fs(fs1)
+ utils.copydir(fs1, (fs2, "copy"))
self._check_fs(fs2.opendir("copy"))
-
+
def test_movedir_indir(self):
- """Test movedir in a directory"""
+ """Test movedir in a directory"""
fs1 = MemoryFS()
fs2 = MemoryFS()
fs1sub = fs1.makeopendir("from")
- self._make_fs(fs1sub)
- utils.movedir((fs1, "from"), (fs2, "copy"))
- self.assert_(not fs1.exists("from"))
+ self._make_fs(fs1sub)
+ utils.movedir((fs1, "from"), (fs2, "copy"))
+ self.assert_(not fs1.exists("from"))
self._check_fs(fs2.opendir("copy"))
fs1 = TempFS()
fs2 = TempFS()
fs1sub = fs1.makeopendir("from")
- self._make_fs(fs1sub)
+ self._make_fs(fs1sub)
utils.movedir((fs1, "from"), (fs2, "copy"))
- self.assert_(not fs1.exists("from"))
+ self.assert_(not fs1.exists("from"))
self._check_fs(fs2.opendir("copy"))
-
+
def test_movedir_root(self):
- """Test movedir to root dir"""
+ """Test movedir to root dir"""
fs1 = MemoryFS()
fs2 = MemoryFS()
fs1sub = fs1.makeopendir("from")
- self._make_fs(fs1sub)
+ self._make_fs(fs1sub)
utils.movedir((fs1, "from"), fs2)
- self.assert_(not fs1.exists("from"))
+ self.assert_(not fs1.exists("from"))
self._check_fs(fs2)
fs1 = TempFS()
fs2 = TempFS()
fs1sub = fs1.makeopendir("from")
- self._make_fs(fs1sub)
+ self._make_fs(fs1sub)
utils.movedir((fs1, "from"), fs2)
- self.assert_(not fs1.exists("from"))
+ self.assert_(not fs1.exists("from"))
self._check_fs(fs2)
-
+
def test_remove_all(self):
"""Test remove_all function"""
fs = TempFS()
@@ -97,9 +97,9 @@ class TestUtils(unittest.TestCase):
fs.setcontents("f2", b("file 2"))
fs.setcontents("f3", b("file 3"))
fs.makedir("foo/bar", recursive=True)
- fs.setcontents("foo/bar/fruit", b("apple"))
+ fs.setcontents("foo/bar/fruit", b("apple"))
fs.setcontents("foo/baz", b("baz"))
-
+
utils.remove_all(fs, "foo/bar")
self.assert_(not fs.exists("foo/bar/fruit"))
self.assert_(fs.exists("foo/bar"))
@@ -111,5 +111,5 @@ class TestUtils(unittest.TestCase):
self.assert_(not fs.exists("foo"))
self.assert_(not fs.exists("f1"))
self.assert_(fs.isdirempty('/'))
-
-
+
+
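
As the tests above show, utils.copydir and utils.movedir accept either a
filesystem or an (fs, path) pair on each side, and the destination directory
is created as needed:

    utils.copydir(fs1, (fs2, "copy"))    # copy fs1's root into fs2/copy
    utils.movedir((fs1, "from"), fs2)    # move fs1/from into fs2's root
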
diff --git a/fs/tests/test_wrapfs.py b/fs/tests/test_wrapfs.py
index 153026a..ab674eb 100644
--- a/fs/tests/test_wrapfs.py
+++ b/fs/tests/test_wrapfs.py
@@ -13,7 +13,7 @@ import shutil
import tempfile
from fs import osfs
-from fs.errors import *
+from fs.errors import *
from fs.path import *
from fs.utils import remove_all
from fs import wrapfs
@@ -22,9 +22,9 @@ import six
from six import PY3, b
class TestWrapFS(unittest.TestCase, FSTestCases, ThreadingTestCases):
-
+
#__test__ = False
-
+
def setUp(self):
self.temp_dir = tempfile.mkdtemp(u"fstest")
self.fs = wrapfs.WrapFS(osfs.OSFS(self.temp_dir))
@@ -39,7 +39,7 @@ class TestWrapFS(unittest.TestCase, FSTestCases, ThreadingTestCases):
from fs.wrapfs.lazyfs import LazyFS
class TestLazyFS(unittest.TestCase, FSTestCases, ThreadingTestCases):
-
+
def setUp(self):
self.temp_dir = tempfile.mkdtemp(u"fstest")
self.fs = LazyFS((osfs.OSFS,(self.temp_dir,)))
@@ -56,7 +56,7 @@ from fs.wrapfs.limitsizefs import LimitSizeFS
class TestLimitSizeFS(TestWrapFS):
_dont_retest = TestWrapFS._dont_retest + ("test_big_file",)
-
+
def setUp(self):
super(TestLimitSizeFS,self).setUp()
self.fs = LimitSizeFS(self.fs,1024*1024*2) # 2MB limit
@@ -78,7 +78,7 @@ class TestLimitSizeFS(TestWrapFS):
self.assertTrue(total_written < 1024*1024*2 + 1030)
break
else:
- self.assertTrue(False,"StorageSpaceError not raised")
+ self.assertTrue(False,"StorageSpaceError not raised")
from fs.wrapfs.hidedotfilesfs import HideDotFilesFS
diff --git a/fs/tests/test_xattr.py b/fs/tests/test_xattr.py
index 7be5560..142ddd8 100644
--- a/fs/tests/test_xattr.py
+++ b/fs/tests/test_xattr.py
@@ -11,7 +11,7 @@ from fs.path import *
from fs.errors import *
from fs.tests import FSTestCases
-from six import b
+from six import b
class XAttrTestCases:
"""Testcases for filesystems providing extended attribute support.
@@ -108,7 +108,7 @@ class XAttrTestCases:
# Check that removing a file without xattrs still works
self.fs.createfile("myfile2")
self.fs.remove("myfile2")
-
+
def test_remove_dir(self):
def listxattrs(path):
return list(self.fs.listxattrs(path))
diff --git a/fs/tests/zipfs_binary_test.py b/fs/tests/zipfs_binary_test.py
index 2a1e8bc..716cc4d 100644
--- a/fs/tests/zipfs_binary_test.py
+++ b/fs/tests/zipfs_binary_test.py
@@ -1,45 +1,45 @@
-"""
-Test case for ZipFS binary file reading/writing
-Passes ok on Linux, fails on Windows (tested: Win7, 64-bit):
-
-AssertionError: ' \r\n' != ' \n'
-"""
-
-import unittest
-from fs.zipfs import ZipFS
-import os
-
-from six import b
-
-class ZipFsBinaryWriteRead(unittest.TestCase):
- test_content = b(chr(32) + chr(10))
-
- def setUp(self):
- self.z = ZipFS('test.zip', 'w')
-
- def tearDown(self):
- try:
- os.remove('test.zip')
- except:
- pass
-
- def test_binary_write_read(self):
- # GIVEN zipfs
- z = self.z
-
- # WHEN binary data is written to a test file in zipfs
- f = z.open('test.data', 'wb')
- f.write(self.test_content)
- f.close()
- z.close()
-
- # THEN the same binary data is retrieved when opened again
- z = ZipFS('test.zip', 'r')
- f = z.open('test.data', 'rb')
- content = f.read()
- f.close()
- z.close()
- self.assertEqual(content, self.test_content)
-
-if __name__ == '__main__':
- unittest.main()
+"""
+Test case for ZipFS binary file reading/writing
+Passes ok on Linux, fails on Windows (tested: Win7, 64-bit):
+
+AssertionError: ' \r\n' != ' \n'
+"""
+
+import unittest
+from fs.zipfs import ZipFS
+import os
+
+from six import b
+
+class ZipFsBinaryWriteRead(unittest.TestCase):
+ test_content = b(chr(32) + chr(10))
+
+ def setUp(self):
+ self.z = ZipFS('test.zip', 'w')
+
+ def tearDown(self):
+ try:
+ os.remove('test.zip')
+ except:
+ pass
+
+ def test_binary_write_read(self):
+ # GIVEN zipfs
+ z = self.z
+
+ # WHEN binary data is written to a test file in zipfs
+ f = z.open('test.data', 'wb')
+ f.write(self.test_content)
+ f.close()
+ z.close()
+
+ # THEN the same binary data is retrieved when opened again
+ z = ZipFS('test.zip', 'r')
+ f = z.open('test.data', 'rb')
+ content = f.read()
+ f.close()
+ z.close()
+ self.assertEqual(content, self.test_content)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/fs/wrapfs/debugfs.py b/fs/wrapfs/debugfs.py
index 5810097..6e4d7d5 100644
--- a/fs/wrapfs/debugfs.py
+++ b/fs/wrapfs/debugfs.py
@@ -1,143 +1,143 @@
-'''
- @author: Marek Palatinus <marek@palatinus.cz>
- @license: Public domain
-
- DebugFS is a wrapper around filesystems to help developers
- debug their work. I wrote this class mainly for debugging
-    TahoeLAFS and for fine-tuning TahoeLAFS over Dokan with higher-level
-    applications like Total Commander, Winamp etc. Did you know
-    that Total Commander needs to open files before it deletes them? :-)
-
- I hope DebugFS can be helpful also for other filesystem developers,
- especially for those who are trying to implement their first one (like me).
-
-    DebugFS prints to stdout (by default) all calls to the
-    filesystem interface, along with their parameters and results.
-
- Basic usage:
- fs = DebugFS(OSFS('~'), identifier='OSFS@home', \
- skip=('_lock', 'listdir', 'listdirinfo'))
- print fs.listdir('.')
- print fs.unsupportedfunction()
-
- Error levels:
-        DEBUG: Print everything (method lookups, calls, responses, exceptions)
-        INFO: Print calls, responses, exceptions
- ERROR: Print only exceptions
- CRITICAL: Print only exceptions not derived from fs.errors.FSError
-
- How to change error level:
- import logging
- logger = logging.getLogger('fs.debugfs')
- logger.setLevel(logging.CRITICAL)
-        fs = DebugFS(OSFS('~'))
- print fs.listdir('.')
-
-'''
-import logging
-from logging import DEBUG, INFO, ERROR, CRITICAL
-import sys
-
-import fs
-from fs.errors import FSError
-
-logger = fs.getLogger('fs.debugfs')
-logger.setLevel(logging.DEBUG)
-logger.addHandler(logging.StreamHandler())
-
-class DebugFS(object):
- def __init__(self, fs, identifier=None, skip=(), verbose=True):
- '''
- fs - Reference to object to debug
- identifier - Custom string-like object will be added
- to each log line as identifier.
- skip - list of method names which DebugFS should not log
- '''
- self.__wrapped_fs = fs
- self.__identifier = identifier
- self.__skip = skip
- self.__verbose = verbose
- super(DebugFS, self).__init__()
-
- def __log(self, level, message):
- if self.__identifier:
- logger.log(level, '(%s) %s' % (self.__identifier, message))
- else:
- logger.log(level, message)
-
- def __parse_param(self, value):
- if isinstance(value, basestring):
- if len(value) > 60:
- value = "%s ... (length %d)" % (repr(value[:60]), len(value))
- else:
- value = repr(value)
- elif isinstance(value, list):
- value = "%s (%d items)" % (repr(value[:3]), len(value))
- elif isinstance(value, dict):
- items = {}
- for k, v in value.items()[:3]:
- items[k] = v
- value = "%s (%d items)" % (repr(items), len(value))
- else:
- value = repr(value)
- return value
-
- def __parse_args(self, *arguments, **kwargs):
- args = [self.__parse_param(a) for a in arguments]
- for k, v in kwargs.items():
- args.append("%s=%s" % (k, self.__parse_param(v)))
-
- args = ','.join(args)
- if args: args = "(%s)" % args
- return args
-
- def __report(self, msg, key, value, *arguments, **kwargs):
- if key in self.__skip: return
- args = self.__parse_args(*arguments, **kwargs)
- value = self.__parse_param(value)
- self.__log(INFO, "%s %s%s -> %s" % (msg, str(key), args, value))
-
- def __getattr__(self, key):
-
- if key.startswith('__'):
- # Internal calls, nothing interesting
- return object.__getattribute__(self, key)
-
- try:
- attr = getattr(self.__wrapped_fs, key)
- except AttributeError, e:
- self.__log(DEBUG, "Asking for not implemented method %s" % key)
- raise e
- except Exception, e:
- self.__log(CRITICAL, "Exception %s: %s" % \
- (e.__class__.__name__, str(e)))
- raise e
-
- if not callable(attr):
- if key not in self.__skip:
- self.__report("Get attribute", key, attr)
- return attr
-
- def _method(*args, **kwargs):
- try:
- value = attr(*args, **kwargs)
- self.__report("Call method", key, value, *args, **kwargs)
- except FSError, e:
- self.__log(ERROR, "Call method %s%s -> Exception %s: %s" % \
- (key, self.__parse_args(*args, **kwargs), \
- e.__class__.__name__, str(e)))
- (exc_type,exc_inst,tb) = sys.exc_info()
- raise e, None, tb
- except Exception, e:
- self.__log(CRITICAL,
- "Call method %s%s -> Non-FS exception %s: %s" %\
- (key, self.__parse_args(*args, **kwargs), \
- e.__class__.__name__, str(e)))
- (exc_type,exc_inst,tb) = sys.exc_info()
- raise e, None, tb
- return value
-
- if self.__verbose:
- if key not in self.__skip:
- self.__log(DEBUG, "Asking for method %s" % key)
- return _method
+'''
+ @author: Marek Palatinus <marek@palatinus.cz>
+ @license: Public domain
+
+ DebugFS is a wrapper around filesystems to help developers
+ debug their work. I wrote this class mainly for debugging
+ TahoeLAFS and for fine-tuning TahoeLAFS over Dokan with higher-level
+ applications like Total Commander, Winamp etc. Did you know
+ that Total Commander needs to open files before it deletes them? :-)
+
+ I hope DebugFS can also be helpful to other filesystem developers,
+ especially those who are trying to implement their first one (like me).
+
+ DebugFS prints (to stdout by default) every access to the
+ filesystem interface, along with parameters and results.
+
+ Basic usage:
+ fs = DebugFS(OSFS('~'), identifier='OSFS@home', \
+ skip=('_lock', 'listdir', 'listdirinfo'))
+ print fs.listdir('.')
+ print fs.unsupportedfunction()
+
+ Error levels:
+ DEBUG: Print everything (method lookups, calls, responses, exceptions)
+ INFO: Print calls, responses, exceptions
+ ERROR: Print only exceptions
+ CRITICAL: Print only exceptions not derived from fs.errors.FSError
+
+ How to change error level:
+ import logging
+ logger = logging.getLogger('fs.debugfs')
+ logger.setLevel(logging.CRITICAL)
+ fs = DebugFS(OSFS('~'))
+ print fs.listdir('.')
+
+'''
+import logging
+from logging import DEBUG, INFO, ERROR, CRITICAL
+import sys
+
+import fs
+from fs.errors import FSError
+
+logger = fs.getLogger('fs.debugfs')
+logger.setLevel(logging.DEBUG)
+logger.addHandler(logging.StreamHandler())
+
+class DebugFS(object):
+ def __init__(self, fs, identifier=None, skip=(), verbose=True):
+ '''
+ fs - Reference to object to debug
+ identifier - Custom string-like object that will be added
+ to each log line as an identifier.
+ skip - list of method names which DebugFS should not log
+ '''
+ self.__wrapped_fs = fs
+ self.__identifier = identifier
+ self.__skip = skip
+ self.__verbose = verbose
+ super(DebugFS, self).__init__()
+
+ def __log(self, level, message):
+ if self.__identifier:
+ logger.log(level, '(%s) %s' % (self.__identifier, message))
+ else:
+ logger.log(level, message)
+
+ def __parse_param(self, value):
+ if isinstance(value, basestring):
+ if len(value) > 60:
+ value = "%s ... (length %d)" % (repr(value[:60]), len(value))
+ else:
+ value = repr(value)
+ elif isinstance(value, list):
+ value = "%s (%d items)" % (repr(value[:3]), len(value))
+ elif isinstance(value, dict):
+ items = {}
+ for k, v in value.items()[:3]:
+ items[k] = v
+ value = "%s (%d items)" % (repr(items), len(value))
+ else:
+ value = repr(value)
+ return value
+
+ def __parse_args(self, *arguments, **kwargs):
+ args = [self.__parse_param(a) for a in arguments]
+ for k, v in kwargs.items():
+ args.append("%s=%s" % (k, self.__parse_param(v)))
+
+ args = ','.join(args)
+ if args: args = "(%s)" % args
+ return args
+
+ def __report(self, msg, key, value, *arguments, **kwargs):
+ if key in self.__skip: return
+ args = self.__parse_args(*arguments, **kwargs)
+ value = self.__parse_param(value)
+ self.__log(INFO, "%s %s%s -> %s" % (msg, str(key), args, value))
+
+ def __getattr__(self, key):
+
+ if key.startswith('__'):
+ # Internal calls, nothing interesting
+ return object.__getattribute__(self, key)
+
+ try:
+ attr = getattr(self.__wrapped_fs, key)
+ except AttributeError, e:
+ self.__log(DEBUG, "Asking for unimplemented method %s" % key)
+ raise e
+ except Exception, e:
+ self.__log(CRITICAL, "Exception %s: %s" % \
+ (e.__class__.__name__, str(e)))
+ raise e
+
+ if not callable(attr):
+ if key not in self.__skip:
+ self.__report("Get attribute", key, attr)
+ return attr
+
+ def _method(*args, **kwargs):
+ try:
+ value = attr(*args, **kwargs)
+ self.__report("Call method", key, value, *args, **kwargs)
+ except FSError, e:
+ self.__log(ERROR, "Call method %s%s -> Exception %s: %s" % \
+ (key, self.__parse_args(*args, **kwargs), \
+ e.__class__.__name__, str(e)))
+ (exc_type,exc_inst,tb) = sys.exc_info()
+ raise e, None, tb
+ except Exception, e:
+ self.__log(CRITICAL,
+ "Call method %s%s -> Non-FS exception %s: %s" %\
+ (key, self.__parse_args(*args, **kwargs), \
+ e.__class__.__name__, str(e)))
+ (exc_type,exc_inst,tb) = sys.exc_info()
+ raise e, None, tb
+ return value
+
+ if self.__verbose:
+ if key not in self.__skip:
+ self.__log(DEBUG, "Asking for method %s" % key)
+ return _method
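
To illustrate the wrapper in use, here is a minimal sketch building on the class
above (Python 2, like the rest of the codebase; the paths are hypothetical):

    import logging
    from fs.osfs import OSFS
    from fs.errors import FSError
    from fs.wrapfs.debugfs import DebugFS

    # Raise the level so that only failed calls are reported.
    logging.getLogger('fs.debugfs').setLevel(logging.ERROR)

    home = DebugFS(OSFS('~'), identifier='OSFS@home', skip=('listdir',))
    home.listdir('.')                 # proxied, but excluded from logging via skip
    try:
        home.getinfo('no-such-file')  # FSError is logged at ERROR, then re-raised
    except FSError:
        pass
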
diff --git a/fs/xattrs.py b/fs/xattrs.py
index 1089455..8250ded 100644
--- a/fs/xattrs.py
+++ b/fs/xattrs.py
@@ -39,7 +39,7 @@ def ensure_xattrs(fs):
Given an FS object, this function returns an equivalent FS that has support
for extended attributes. This may be the original object if they are
supported natively, or a wrapper class if they must be simulated.
-
+
:param fs: An FS object that must have xattrs
"""
try:
@@ -59,7 +59,7 @@ class SimulateXAttr(WrapFS):
* setxattr: set an xattr of a path by name
* delxattr: delete an xattr of a path by name
- For each file in the underlying FS, this class maintains a corresponding
+ For each file in the underlying FS, this class maintains a corresponding
'.xattrs.FILENAME' file containing its extended attributes. Extended
attributes of a directory are stored in the file '.xattrs' within the
directory itself.
@@ -202,4 +202,4 @@ class SimulateXAttr(WrapFS):
except ResourceNotFoundError:
pass
-
+
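
The simulated-xattr machinery above can be exercised directly. A minimal sketch,
assuming the pyfilesystem 0.x API (MemoryFS has no native xattr support, so
ensure_xattrs returns a SimulateXAttr wrapper around it):

    from fs.memoryfs import MemoryFS
    from fs.xattrs import ensure_xattrs

    m = ensure_xattrs(MemoryFS())
    m.setcontents('hello.txt', 'data')
    m.setxattr('hello.txt', 'user.origin', 'example')  # kept in '.xattrs.hello.txt'
    print m.getxattr('hello.txt', 'user.origin')       # -> 'example'
    m.delxattr('hello.txt', 'user.origin')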