author     Thomas O'Dowd <tpodowd@geminimobile.com>    2012-02-10 18:25:10 +0900
committer  Thomas O'Dowd <tpodowd@geminimobile.com>    2012-02-10 18:25:10 +0900
commit     42efefad7c7b6cd333ad75e2df69096a26d042aa (patch)
tree       0db6bab02f8b2055064e8775064db5bf4f2bc6e4
parent     96fa697ed0411266a288e00f1081bf0ddc6810e6 (diff)
download   boto-42efefad7c7b6cd333ad75e2df69096a26d042aa.tar.gz
Google Storage key related fixes for file pointer
- Add a size parameter to gs/key/set_contents_from_file(). This makes the method work like s3/key/set_contents_from_file().
- Add a size parameter to s3/key/set_contents_from_stream(). The size parameter is useful here as well.
- NOTE: did not add size to */key/set_contents_from_(string|filename), where it makes little sense: a caller can simply truncate the string before passing it (and would otherwise run into multi-byte issues), and passing a filename normally means uploading the whole file anyway.
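To show roughly how the new parameter is meant to be used from the caller's side, here is a minimal sketch (not part of the patch) that uploads a local file in fixed-size ranges, one Google Storage object per range. The bucket name, object names and the 5 MB chunk size are illustrative assumptions.

import os
import boto

CHUNK = 5 * 1024 * 1024                   # illustrative 5 MB range size
conn = boto.connect_gs()                  # GS credentials from the boto config
bucket = conn.get_bucket('my-bucket')     # assumed existing bucket

with open('data.bin', 'rb') as fp:
    total = os.fstat(fp.fileno()).st_size
    part = 0
    while fp.tell() < total:
        key = bucket.new_key('data.bin.part%04d' % part)
        # Reads at most CHUNK bytes from the current file position; without
        # size= the rest of the file would be uploaded in one go.
        key.set_contents_from_file(fp, size=CHUNK)
        part += 1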
-rw-r--r--    boto/gs/key.py    38
-rw-r--r--    boto/s3/key.py    15
2 files changed, 40 insertions, 13 deletions
diff --git a/boto/gs/key.py b/boto/gs/key.py
index de6e6f44..27edabab 100644
--- a/boto/gs/key.py
+++ b/boto/gs/key.py
@@ -110,7 +110,7 @@ class Key(S3Key):
def set_contents_from_file(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
- res_upload_handler=None):
+ res_upload_handler=None, size=None):
"""
Store an object in GS using the name of the Key object as the
key in GS and the contents of the file pointed to by 'fp' as the
@@ -158,12 +158,23 @@ class Key(S3Key):
:param res_upload_handler: If provided, this handler will perform the
upload.
+ :type size: int
+ :param size: (optional) The maximum number of bytes to read from
+ the file pointer (fp). This is useful when uploading
+ a file in multiple parts where you are splitting the
+ file up into different ranges to be uploaded. If not
+ specified, the default behaviour is to read all bytes
+ from the file pointer. Fewer bytes may be available.
+
TODO: At some point we should refactor the Bucket and Key classes,
to move functionality common to all providers into a parent class,
and provider-specific functionality into subclasses (rather than
just overriding/sharing code the way it currently works).
"""
provider = self.bucket.connection.provider
+ if res_upload_handler and size:
+ # could use size instead of file_length if provided but...
+ raise BotoClientError('Resumable Uploads with size not supported.')
headers = headers or {}
if policy:
headers[provider.acl_header] = policy
@@ -171,25 +182,34 @@ class Key(S3Key):
self.path = fp.name
if self.bucket != None:
if not md5:
- md5 = self.compute_md5(fp)
+ # compute_md5() also sets self.size to the actual
+ # number of bytes read while computing the md5.
+ md5 = self.compute_md5(fp, size)
+ # adjust size if required
+ size = self.size
+ elif size:
+ self.size = size
else:
- # Even if md5 is provided, still need to set size of content.
- fp.seek(0, 2)
- self.size = fp.tell()
- fp.seek(0)
+ # If md5 is provided, we still need the size, so
+ # calculate it based on the bytes to end of content.
+ spos = fp.tell()
+ fp.seek(0, os.SEEK_END)
+ self.size = fp.tell() - spos
+ fp.seek(spos)
+ size = self.size
self.md5 = md5[0]
self.base64md5 = md5[1]
+
if self.name == None:
self.name = self.md5
if not replace:
- k = self.bucket.lookup(self.name)
- if k:
+ if self.bucket.lookup(self.name):
return
if res_upload_handler:
res_upload_handler.send_file(self, fp, headers, cb, num_cb)
else:
# Not a resumable transfer so use basic send_file mechanism.
- self.send_file(fp, headers, cb, num_cb)
+ self.send_file(fp, headers, cb, num_cb, size=size)
def set_contents_from_filename(self, filename, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
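The size bookkeeping in the gs/key.py hunk above can be illustrated in isolation. This is only a sketch of the seek-based technique, not code from the patch: when an md5 is supplied, the remaining length is measured from the current file position to the end of the file and the position is then restored, rather than assuming the upload starts at offset 0.

import io
import os

fp = io.BytesIO(b'0123456789')
fp.seek(4)              # pretend the caller already consumed 4 bytes

spos = fp.tell()        # remember where the upload will start
fp.seek(0, os.SEEK_END)
size = fp.tell() - spos # bytes remaining from spos to EOF -> 6
fp.seek(spos)           # leave fp where we found it

print(size)             # prints 6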
diff --git a/boto/s3/key.py b/boto/s3/key.py
index 5ed1a8d2..206bcb6e 100644
--- a/boto/s3/key.py
+++ b/boto/s3/key.py
@@ -692,7 +692,8 @@ class Key(object):
def set_contents_from_stream(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None,
- reduced_redundancy=False, query_args=None):
+ reduced_redundancy=False, query_args=None,
+ size=None):
"""
Store an object using the name of the Key object as the key in
cloud and the contents of the data stream pointed to by 'fp' as
@@ -738,6 +739,13 @@ class Key(object):
REDUCED_REDUNDANCY. The Reduced Redundancy
Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
+ :type size: int
+ :param size: (optional) The maximum number of bytes to read from
+ the file pointer (fp). This is useful when uploading
+ a file in multiple parts where you are splitting the
+ file up into different ranges to be uploaded. If not
+ specified, the default behaviour is to read all bytes
+ from the file pointer. Fewer bytes may be available.
"""
provider = self.bucket.connection.provider
@@ -765,7 +773,7 @@ class Key(object):
if self.bucket.lookup(self.name):
return
self.send_file(fp, headers, cb, num_cb, query_args,
- chunked_transfer=True)
+ chunked_transfer=True, size=size)
def set_contents_from_file(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
@@ -841,8 +849,7 @@ class Key(object):
from the file pointer. Fewer bytes may be available.
"""
provider = self.bucket.connection.provider
- if headers is None:
- headers = {}
+ headers = headers or {}
if policy:
headers[provider.acl_header] = policy
if encrypt_key:
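For the stream side of the change, a hypothetical usage sketch: cap how much is read from a stream that has no length of its own. The bucket and key names are assumptions, and set_contents_from_stream() only works against a provider that supports chunked transfer, such as Google Storage (whose Key class inherits this method).

import sys
import boto

conn = boto.connect_gs()
bucket = conn.get_bucket('my-bucket')
key = bucket.new_key('stdin-capture')
# Read at most 64 MB from stdin; without size= the upload runs until EOF.
key.set_contents_from_stream(sys.stdin, size=64 * 1024 * 1024)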