author     Daniel Lindsley <daniel@toastdriven.com>   2013-06-18 14:46:38 -0700
committer  Daniel Lindsley <daniel@toastdriven.com>   2013-06-18 14:46:38 -0700
commit     25be2b7de7065a32439fa13d0e88fc30b4788381 (patch)
tree       81a323865a2215383828fd4c6776416fc688efb6
parent     02815ba84359cfa48f07583086f3777b154d1424 (diff)
parent     8e6c3263aac3dac23ebeb8fa3b90797e2c485783 (diff)
download   boto-2.9.6.tar.gz
Merge branch 'release-2.9.6' (tag: 2.9.6)

* release-2.9.6: (33 commits)
  Bumping version to 2.9.6
  Added release notes for v2.9.6.
  Added SigV4 support to SNS.
  Fix annoying typo in basic DynamoDB example
  Safely re-introduced the change from SHA: dec541.
  Revert "Preserve trailing / when canonicalizing URI path for signature V4"
  Preserve trailing / when canonicalizing URI path for signature V4
  Added a failing testcase for trailing path slashes in SigV4.
  Add new CloudSearch regions. Closes #1465.
  Pass generation query param to boto.gs.Key.open_read()
  Factored out how query args are constructed when fetching all keys.
  Added an exception to signal a retry should occur.
  Added the ``ap-northeast-1`` region to Redshift.
  Update connection.py: get_path when suppress_consec_slashes is False
  Added a failing test for overeager ``suppress_consec_slashes``.
  Added ``retrieve_inventory_job`` to ``Vault``.
  Fixed a bug where 400s w/ timeouts were not honored.
  RangeKey was missing from the import and there was two hashkeys in Table.create
  Deal with empty facets queries on cloudsearch (re #1366)
  Fix AttributeErrors thrown when LoadBalancerZones is used by adding endElement stub
  ...
-rw-r--r--  README.rst                                         |  4
-rw-r--r--  boto/__init__.py                                   |  2
-rw-r--r--  boto/auth.py                                       |  5
-rw-r--r--  boto/cloudsearch/__init__.py                       | 10
-rw-r--r--  boto/cloudsearch/search.py                         |  3
-rw-r--r--  boto/connection.py                                 | 18
-rw-r--r--  boto/dynamodb2/items.py                            |  3
-rw-r--r--  boto/ec2/autoscale/__init__.py                     | 28
-rw-r--r--  boto/ec2/autoscale/scheduled.py                    | 18
-rw-r--r--  boto/ec2/elb/loadbalancer.py                       |  2
-rw-r--r--  boto/ec2/instancestatus.py                         |  2
-rw-r--r--  boto/ec2/networkinterface.py                       | 12
-rw-r--r--  boto/exception.py                                  | 15
-rw-r--r--  boto/glacier/vault.py                              | 23
-rw-r--r--  boto/glacier/writer.py                             | 20
-rw-r--r--  boto/gs/key.py                                     | 31
-rw-r--r--  boto/redshift/__init__.py                          |  3
-rw-r--r--  boto/s3/bucket.py                                  | 39
-rw-r--r--  boto/s3/connection.py                              | 12
-rw-r--r--  boto/s3/key.py                                     | 71
-rw-r--r--  boto/sns/connection.py                             |  2
-rw-r--r--  boto/utils.py                                      |  2
-rw-r--r--  docs/source/migrations/dynamodb_v1_to_v2.rst       |  3
-rw-r--r--  docs/source/releasenotes/v2.9.5.rst                |  2
-rw-r--r--  docs/source/releasenotes/v2.9.6.rst                | 56
-rw-r--r--  tests/integration/s3/mock_storage_service.py       |  6
-rw-r--r--  tests/integration/s3/test_key.py                   | 12
-rw-r--r--  tests/integration/storage_uri/__init__.py          |  0
-rw-r--r--  tests/integration/storage_uri/test_storage_uri.py  | 63
-rw-r--r--  tests/unit/auth/test_sigv4.py                      | 18
-rw-r--r--  tests/unit/cloudsearch/test_search.py              | 64
-rw-r--r--  tests/unit/dynamodb2/test_table.py                 | 10
-rw-r--r--  tests/unit/ec2/autoscale/test_group.py             | 38
-rw-r--r--  tests/unit/ec2/elb/__init__.py                     |  0
-rw-r--r--  tests/unit/ec2/elb/test_loadbalancer.py            | 33
-rw-r--r--  tests/unit/ec2/test_instancestatus.py              | 32
-rw-r--r--  tests/unit/glacier/test_vault.py                   | 51
-rw-r--r--  tests/unit/glacier/test_writer.py                  | 45
-rw-r--r--  tests/unit/s3/test_bucket.py                       | 52
-rw-r--r--  tests/unit/s3/test_key.py                          | 51
-rw-r--r--  tests/unit/sns/test_connection.py                  |  8
-rw-r--r--  tests/unit/test_connection.py                      | 23
42 files changed, 806 insertions, 86 deletions
diff --git a/README.rst b/README.rst
index 77c64aa5..98e92ccf 100644
--- a/README.rst
+++ b/README.rst
@@ -1,9 +1,9 @@
####
boto
####
-boto 2.9.5
+boto 2.9.6
-Released: 28-May-2013
+Released: 18-June-2013
.. image:: https://travis-ci.org/boto/boto.png?branch=develop
:target: https://travis-ci.org/boto/boto
diff --git a/boto/__init__.py b/boto/__init__.py
index 21666707..c32e4968 100644
--- a/boto/__init__.py
+++ b/boto/__init__.py
@@ -36,7 +36,7 @@ import logging.config
import urlparse
from boto.exception import InvalidUriError
-__version__ = '2.9.5'
+__version__ = '2.9.6'
Version = __version__ # for backward compatibility
UserAgent = 'Boto/%s (%s)' % (__version__, sys.platform)
diff --git a/boto/auth.py b/boto/auth.py
index cd7ac68f..0aa299f9 100644
--- a/boto/auth.py
+++ b/boto/auth.py
@@ -384,10 +384,13 @@ class HmacAuthV4Handler(AuthHandler, HmacKeys):
return ';'.join(l)
def canonical_uri(self, http_request):
+ path = http_request.auth_path
# Normalize the path.
- normalized = posixpath.normpath(http_request.auth_path)
+ normalized = posixpath.normpath(path)
# Then urlencode whatever's left.
encoded = urllib.quote(normalized)
+ if len(path) > 1 and path.endswith('/'):
+ encoded += '/'
return encoded
def payload(self, http_request):
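The fix above is small enough to check in isolation. A minimal sketch of the
new canonicalization, using only the standard library and mirroring the SigV4
tests later in this diff::

    import posixpath
    import urllib

    def canonical_uri(path):
        # posixpath.normpath() strips a trailing slash, but SigV4 signs the
        # literal URI, so the slash is re-appended after normalization.
        normalized = posixpath.normpath(path)
        encoded = urllib.quote(normalized)
        if len(path) > 1 and path.endswith('/'):
            encoded += '/'
        return encoded

    assert canonical_uri('x/./././x/html/') == 'x/x/html/'
    assert canonical_uri('/') == '/'  # no doubled slash for the root path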
diff --git a/boto/cloudsearch/__init__.py b/boto/cloudsearch/__init__.py
index 5ba1060e..01fa41df 100644
--- a/boto/cloudsearch/__init__.py
+++ b/boto/cloudsearch/__init__.py
@@ -38,6 +38,16 @@ def regions():
RegionInfo(name='eu-west-1',
endpoint='cloudsearch.eu-west-1.amazonaws.com',
connection_cls=boto.cloudsearch.layer1.Layer1),
+ RegionInfo(name='us-west-1',
+ endpoint='cloudsearch.us-west-1.amazonaws.com',
+ connection_cls=boto.cloudsearch.layer1.Layer1),
+ RegionInfo(name='us-west-2',
+ endpoint='cloudsearch.us-west-2.amazonaws.com',
+ connection_cls=boto.cloudsearch.layer1.Layer1),
+ RegionInfo(name='ap-southeast-1',
+ endpoint='cloudsearch.ap-southeast-1.amazonaws.com',
+ connection_cls=boto.cloudsearch.layer1.Layer1),
+
]
diff --git a/boto/cloudsearch/search.py b/boto/cloudsearch/search.py
index 813f4a40..69a1981e 100644
--- a/boto/cloudsearch/search.py
+++ b/boto/cloudsearch/search.py
@@ -54,7 +54,8 @@ class SearchResults(object):
self.facets = {}
if 'facets' in attrs:
for (facet, values) in attrs['facets'].iteritems():
- self.facets[facet] = dict((k, v) for (k, v) in map(lambda x: (x['value'], x['count']), values['constraints']))
+ if 'constraints' in values:
+ self.facets[facet] = dict((k, v) for (k, v) in map(lambda x: (x['value'], x['count']), values['constraints']))
self.num_pages_needed = ceil(self.hits / self.query.real_size)
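The failure mode being fixed: a facet with no matches comes back as an empty
dict, with no 'constraints' key to iterate. A standalone sketch of the guarded
parse, using the same payload as the new CloudSearchSearchFacetTest below::

    facets = {
        'tags': {},  # empty facet: no 'constraints' key at all
        'animals': {'constraints': [{'count': '2', 'value': 'fish'},
                                    {'count': '1', 'value': 'lions'}]},
    }

    parsed = {}
    for facet, values in facets.iteritems():
        if 'constraints' in values:  # the guard added above
            parsed[facet] = dict((x['value'], x['count'])
                                 for x in values['constraints'])

    assert 'tags' not in parsed
    assert parsed['animals'] == {'fish': '2', 'lions': '1'}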
diff --git a/boto/connection.py b/boto/connection.py
index 97e9c980..1f7392c1 100644
--- a/boto/connection.py
+++ b/boto/connection.py
@@ -67,8 +67,10 @@ import boto.handler
import boto.cacerts
from boto import config, UserAgent
-from boto.exception import AWSConnectionError, BotoClientError
+from boto.exception import AWSConnectionError
+from boto.exception import BotoClientError
from boto.exception import BotoServerError
+from boto.exception import PleaseRetryException
from boto.provider import Provider
from boto.resultset import ResultSet
@@ -598,7 +600,7 @@ class AWSAuthConnection(object):
# https://groups.google.com/forum/#!topic/boto-dev/-ft0XPUy0y8
# You can override that behavior with the suppress_consec_slashes param.
if not self.suppress_consec_slashes:
- return self.path + re.sub('^/*', "", path)
+ return self.path + re.sub('^(/*)/', "\\1", path)
pos = path.find('?')
if pos >= 0:
params = path[pos:]
@@ -878,6 +880,11 @@ class AWSAuthConnection(object):
scheme == 'https')
response = None
continue
+ except PleaseRetryException, e:
+ boto.log.debug('encountered a retry exception: %s' % e)
+ connection = self.new_http_connection(request.host,
+ self.is_secure)
+ response = e.response
except self.http_exceptions, e:
for unretryable in self.http_unretryable_exceptions:
if isinstance(e, unretryable):
@@ -894,7 +901,7 @@ class AWSAuthConnection(object):
# If we made it here, it's because we have exhausted our retries
# and still haven't succeeded. So, if we have a response object,
# use it to raise an exception.
- # Otherwise, raise the exception that must have already h#appened.
+ # Otherwise, raise the exception that must have already happened.
if response:
raise BotoServerError(response.status, response.reason, body)
elif e:
@@ -930,13 +937,14 @@ class AWSAuthConnection(object):
def make_request(self, method, path, headers=None, data='', host=None,
auth_path=None, sender=None, override_num_retries=None,
- params=None):
+ params=None, retry_handler=None):
"""Makes a request to the server, with stock multiple-retry logic."""
if params is None:
params = {}
http_request = self.build_base_http_request(method, path, auth_path,
params, headers, data, host)
- return self._mexe(http_request, sender, override_num_retries)
+ return self._mexe(http_request, sender, override_num_retries,
+ retry_handler=retry_handler)
def close(self):
"""(Optional) Close any open HTTP connections. This is non-destructive,
diff --git a/boto/dynamodb2/items.py b/boto/dynamodb2/items.py
index b3013150..8df51026 100644
--- a/boto/dynamodb2/items.py
+++ b/boto/dynamodb2/items.py
@@ -102,6 +102,9 @@ class Item(object):
def items(self):
return self._data.items()
+ def get(self, key, default=None):
+ return self._data.get(key, default)
+
def __iter__(self):
for key in self._data:
yield self._data[key]
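Usage mirrors the new unit test later in this diff. A sketch, assuming the
``Item(table, data=...)`` constructor the tests use (``None`` stands in for a
real ``Table``, which ``get`` never touches)::

    from boto.dynamodb2.items import Item

    johndoe = Item(None, data={'username': 'johndoe', 'date_joined': 12345})

    assert johndoe.get('username') == 'johndoe'
    assert johndoe.get('last_name') is None          # missing key -> None
    assert johndoe.get('last_name', 'n/a') == 'n/a'  # missing key, default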
diff --git a/boto/ec2/autoscale/__init__.py b/boto/ec2/autoscale/__init__.py
index 277c26db..17a89e11 100644
--- a/boto/ec2/autoscale/__init__.py
+++ b/boto/ec2/autoscale/__init__.py
@@ -552,9 +552,11 @@ class AutoScaleConnection(AWSQueryConnection):
'ScalingProcesses')
return self.get_status('ResumeProcesses', params)
- def create_scheduled_group_action(self, as_group, name, time,
+ def create_scheduled_group_action(self, as_group, name, time=None,
desired_capacity=None,
- min_size=None, max_size=None):
+ min_size=None, max_size=None,
+ start_time=None, end_time=None,
+ recurrence=None):
"""
Creates a scheduled scaling action for a Auto Scaling group. If you
leave a parameter unspecified, the corresponding value remains
@@ -567,7 +569,7 @@ class AutoScaleConnection(AWSQueryConnection):
:param name: Scheduled action name.
:type time: datetime.datetime
- :param time: The time for this action to start.
+ :param time: The time for this action to start. (Deprecated)
:type desired_capacity: int
:param desired_capacity: The number of EC2 instances that should
@@ -578,10 +580,26 @@ class AutoScaleConnection(AWSQueryConnection):
:type max_size: int
:param max_size: The maximum size for the new auto scaling group.
+
+ :type start_time: datetime.datetime
+ :param start_time: The time for this action to start. When StartTime and EndTime are specified with Recurrence, they form the boundaries of when the recurring action will start and stop.
+
+ :type end_time: datetime.datetime
+ :param end_time: The time for this action to end. When StartTime and EndTime are specified with Recurrence, they form the boundaries of when the recurring action will start and stop.
+
+ :type recurrence: string
+ :param recurrence: The time when recurring future actions will start. Start time is specified by the user following the Unix cron syntax format. EXAMPLE: '0 10 * * *'
"""
params = {'AutoScalingGroupName': as_group,
- 'ScheduledActionName': name,
- 'Time': time.isoformat()}
+ 'ScheduledActionName': name}
+ if start_time is not None:
+ params['StartTime'] = start_time.isoformat()
+ if end_time is not None:
+ params['EndTime'] = end_time.isoformat()
+ if recurrence is not None:
+ params['Recurrence'] = recurrence
+ if time:
+ params['Time'] = time.isoformat()
if desired_capacity is not None:
params['DesiredCapacity'] = desired_capacity
if min_size is not None:
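A hedged usage sketch of the new recurrence support, mirroring the
TestScheduledGroup unit test later in this diff; the group and action names
are illustrative::

    from datetime import datetime
    from boto.ec2.autoscale import AutoScaleConnection

    conn = AutoScaleConnection()  # credentials from the usual boto config

    # Recur daily at 10:00 between Jan 1 and Feb 1. The old ``time``
    # parameter is deprecated in favor of start/end/recurrence.
    conn.create_scheduled_group_action(
        'my-group', 'scale-up-daily',
        desired_capacity=2,
        min_size=1,
        max_size=4,
        start_time=datetime(2013, 1, 1, 10, 0, 0),
        end_time=datetime(2013, 2, 1, 10, 0, 0),
        recurrence='0 10 * * *')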
diff --git a/boto/ec2/autoscale/scheduled.py b/boto/ec2/autoscale/scheduled.py
index d8f051c1..8e307c20 100644
--- a/boto/ec2/autoscale/scheduled.py
+++ b/boto/ec2/autoscale/scheduled.py
@@ -28,7 +28,11 @@ class ScheduledUpdateGroupAction(object):
self.connection = connection
self.name = None
self.action_arn = None
+ self.as_group = None
self.time = None
+ self.start_time = None
+ self.end_time = None
+ self.recurrence = None
self.desired_capacity = None
self.max_size = None
self.min_size = None
@@ -44,17 +48,31 @@ class ScheduledUpdateGroupAction(object):
self.desired_capacity = value
elif name == 'ScheduledActionName':
self.name = value
+ elif name == 'AutoScalingGroupName':
+ self.as_group = value
elif name == 'MaxSize':
self.max_size = int(value)
elif name == 'MinSize':
self.min_size = int(value)
elif name == 'ScheduledActionARN':
self.action_arn = value
+ elif name == 'Recurrence':
+ self.recurrence = value
elif name == 'Time':
try:
self.time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
except ValueError:
self.time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
+ elif name == 'StartTime':
+ try:
+ self.start_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
+ except ValueError:
+ self.start_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
+ elif name == 'EndTime':
+ try:
+ self.end_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
+ except ValueError:
+ self.end_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
else:
setattr(self, name, value)
diff --git a/boto/ec2/elb/loadbalancer.py b/boto/ec2/elb/loadbalancer.py
index 7b80d79f..7b6afc7d 100644
--- a/boto/ec2/elb/loadbalancer.py
+++ b/boto/ec2/elb/loadbalancer.py
@@ -42,6 +42,8 @@ class LoadBalancerZones(object):
if name == 'AvailabilityZones':
return self.zones
+ def endElement(self, name, value, connection):
+ pass
class LoadBalancer(object):
"""
diff --git a/boto/ec2/instancestatus.py b/boto/ec2/instancestatus.py
index 166732eb..b09b55ee 100644
--- a/boto/ec2/instancestatus.py
+++ b/boto/ec2/instancestatus.py
@@ -207,6 +207,6 @@ class InstanceStatusSet(list):
return None
def endElement(self, name, value, connection):
- if name == 'NextToken':
+ if name == 'nextToken':
self.next_token = value
setattr(self, name, value)
diff --git a/boto/ec2/networkinterface.py b/boto/ec2/networkinterface.py
index f044add3..5c6088f4 100644
--- a/boto/ec2/networkinterface.py
+++ b/boto/ec2/networkinterface.py
@@ -193,8 +193,8 @@ class NetworkInterfaceCollection(list):
self.extend(interfaces)
def build_list_params(self, params, prefix=''):
- for i, spec in enumerate(self, 1):
- full_prefix = '%sNetworkInterface.%s.' % (prefix, i)
+ for i, spec in enumerate(self):
+ full_prefix = '%sNetworkInterface.%s.' % (prefix, i+1)
if spec.network_interface_id is not None:
params[full_prefix + 'NetworkInterfaceId'] = \
str(spec.network_interface_id)
@@ -215,13 +215,13 @@ class NetworkInterfaceCollection(list):
params[full_prefix + 'PrivateIpAddress'] = \
str(spec.private_ip_address)
if spec.groups is not None:
- for j, group_id in enumerate(spec.groups, 1):
- query_param_key = '%sSecurityGroupId.%s' % (full_prefix, j)
+ for j, group_id in enumerate(spec.groups):
+ query_param_key = '%sSecurityGroupId.%s' % (full_prefix, j+1)
params[query_param_key] = str(group_id)
if spec.private_ip_addresses is not None:
- for k, ip_addr in enumerate(spec.private_ip_addresses, 1):
+ for k, ip_addr in enumerate(spec.private_ip_addresses):
query_param_key_prefix = (
- '%sPrivateIpAddresses.%s' % (full_prefix, k))
+ '%sPrivateIpAddresses.%s' % (full_prefix, k+1))
params[query_param_key_prefix + '.PrivateIpAddress'] = \
str(ip_addr.private_ip_address)
if ip_addr.primary is not None:
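The motivation for the change above: two-argument ``enumerate(seq, 1)``
requires Python 2.6+, and boto still supports Python 2.5/Jython (see the
release notes later in this diff). A minimal sketch of the 2.5-safe form; the
key name is illustrative::

    names = ['eth0', 'eth1']

    # enumerate(names, 1) raises TypeError on Python 2.5 and older Jython,
    # so the index now starts at 0 and 1 is added when building the key.
    params = {}
    for i, name in enumerate(names):
        params['NetworkInterface.%s.Id' % (i + 1)] = name

    assert sorted(params) == ['NetworkInterface.1.Id', 'NetworkInterface.2.Id']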
diff --git a/boto/exception.py b/boto/exception.py
index 9beee960..64618368 100644
--- a/boto/exception.py
+++ b/boto/exception.py
@@ -474,3 +474,18 @@ class TooManyRecordsException(Exception):
def __init__(self, message):
Exception.__init__(self, message)
self.message = message
+
+
+class PleaseRetryException(Exception):
+ """
+ Indicates a request should be retried.
+ """
+ def __init__(self, message, response=None):
+ self.message = message
+ self.response = response
+
+ def __repr__(self):
+ return 'PleaseRetryException("%s", %s)' % (
+ self.message,
+ self.response
+ )
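How the new exception threads through: code that inspects a response (e.g.
``Key.should_retry`` later in this diff) raises it, and ``_mexe`` catches it,
opens a fresh connection, and retries with ``e.response`` preserved. A minimal
sketch of the raising side::

    from boto.exception import PleaseRetryException

    def check_response(response):
        # Modeled on Key.should_retry below: a 400 whose body names
        # RequestTimeout is retryable, not fatal.
        if response.status == 400 and 'RequestTimeout' in response.read():
            raise PleaseRetryException(
                'Saw RequestTimeout, retrying', response=response)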
diff --git a/boto/glacier/vault.py b/boto/glacier/vault.py
index ac019ac9..0186dbd3 100644
--- a/boto/glacier/vault.py
+++ b/boto/glacier/vault.py
@@ -315,8 +315,8 @@ class Vault(object):
sends notification when the job is completed and the output
is ready for you to download.
- :rtype: :class:`boto.glacier.job.Job`
- :return: A Job object representing the retrieval job.
+ :rtype: str
+ :return: The ID of the job
"""
job_data = {'Type': 'inventory-retrieval'}
if sns_topic is not None:
@@ -327,6 +327,25 @@ class Vault(object):
response = self.layer1.initiate_job(self.name, job_data)
return response['JobId']
+ def retrieve_inventory_job(self, **kwargs):
+ """
+ Identical to ``retrieve_inventory``, but returns a ``Job`` instance
+ instead of just the job ID.
+
+ :type description: str
+ :param description: An optional description for the job.
+
+ :type sns_topic: str
+ :param sns_topic: The Amazon SNS topic ARN where Amazon Glacier
+ sends notification when the job is completed and the output
+ is ready for you to download.
+
+ :rtype: :class:`boto.glacier.job.Job`
+ :return: A Job object representing the retrieval job.
+ """
+ job_id = self.retrieve_inventory(**kwargs)
+ return self.get_job(job_id)
+
def delete_archive(self, archive_id):
"""
This operation deletes an archive from the vault.
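Usage of the new method, with the older ID-returning call for contrast; the
region and vault name are illustrative::

    import boto.glacier

    layer2 = boto.glacier.connect_to_region('us-east-1')
    vault = layer2.get_vault('examplevault')

    job_id = vault.retrieve_inventory()   # back-compat: just the job ID
    job = vault.retrieve_inventory_job()  # new: a full ``Job`` instance
    # job.id holds the same identifier retrieve_inventory() returns.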
diff --git a/boto/glacier/writer.py b/boto/glacier/writer.py
index df978e2c..ad0ab265 100644
--- a/boto/glacier/writer.py
+++ b/boto/glacier/writer.py
@@ -234,6 +234,26 @@ class Writer(object):
return self.uploader.archive_id
@property
+ def current_tree_hash(self):
+ """
+ Returns the current tree hash for the data that's been written
+ **so far**.
+
+ Only once the writing is complete is the final tree hash returned.
+ """
+ return tree_hash(self.uploader._tree_hashes)
+
+ @property
+ def current_uploaded_size(self):
+ """
+ Returns the current uploaded size for the data that's been written
+ **so far**.
+
+ Only once the writing is complete is the final uploaded size returned.
+ """
+ return self.uploader._uploaded_size
+
+ @property
def upload_id(self):
return self.uploader.upload_id
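The two new properties can be polled mid-upload, e.g. for progress reporting.
A sketch assuming a ``vault`` and a writer from ``Vault.create_archive_writer``,
with some ``chunk`` of bytes already in hand::

    writer = vault.create_archive_writer(description='backup',
                                         part_size=4 * 1024 * 1024)
    writer.write(chunk)

    bytes_so_far = writer.current_uploaded_size  # bytes flushed so far
    running_hash = writer.current_tree_hash      # tree hash of those bytes

    writer.close()
    # After close(), both properties report the final archive values.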
diff --git a/boto/gs/key.py b/boto/gs/key.py
index 7261b49f..1ced4ce9 100644
--- a/boto/gs/key.py
+++ b/boto/gs/key.py
@@ -118,6 +118,37 @@ class Key(S3Key):
elif key == 'x-goog-component-count':
self.component_count = int(value)
+ def open_read(self, headers=None, query_args='',
+ override_num_retries=None, response_headers=None):
+ """
+ Open this key for reading
+
+ :type headers: dict
+ :param headers: Headers to pass in the web request
+
+ :type query_args: string
+ :param query_args: Arguments to pass in the query string
+ (ie, 'torrent')
+
+ :type override_num_retries: int
+ :param override_num_retries: If not None will override configured
+ num_retries parameter for underlying GET.
+
+ :type response_headers: dict
+ :param response_headers: A dictionary containing HTTP
+ headers/values that will override any headers associated
+ with the stored object in the response. See
+ http://goo.gl/EWOPb for details.
+ """
+ # For GCS we need to include the object generation in the query args.
+ # The rest of the processing is handled in the parent class.
+ if self.generation:
+ if query_args:
+ query_args += '&'
+ query_args += 'generation=%s' % self.generation
+ super(Key, self).open_read(headers=headers, query_args=query_args,
+ override_num_retries=override_num_retries,
+ response_headers=response_headers)
def get_file(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
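The only GCS-specific part is the query-arg handling, which is easy to verify
standalone; the generation numbers here are made up::

    def add_generation(query_args, generation):
        # Mirrors the logic above: append the generation, joining with
        # '&' when other query args are already present.
        if generation:
            if query_args:
                query_args += '&'
            query_args += 'generation=%s' % generation
        return query_args

    assert add_generation('', 1360887697105000) == 'generation=1360887697105000'
    assert add_generation('torrent', 42) == 'torrent&generation=42'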
diff --git a/boto/redshift/__init__.py b/boto/redshift/__init__.py
index 68b7275a..fca2a790 100644
--- a/boto/redshift/__init__.py
+++ b/boto/redshift/__init__.py
@@ -42,6 +42,9 @@ def regions():
RegionInfo(name='eu-west-1',
endpoint='redshift.eu-west-1.amazonaws.com',
connection_cls=cls),
+ RegionInfo(name='ap-northeast-1',
+ endpoint='redshift.ap-northeast-1.amazonaws.com',
+ connection_cls=cls),
]
diff --git a/boto/s3/bucket.py b/boto/s3/bucket.py
index 89f0fcb3..335e9faa 100644
--- a/boto/s3/bucket.py
+++ b/boto/s3/bucket.py
@@ -301,24 +301,35 @@ class Bucket(object):
upload_id_marker,
headers)
+ def _get_all_query_args(self, params, initial_query_string=''):
+ pairs = []
+
+ if initial_query_string:
+ pairs.append(initial_query_string)
+
+ for key, value in params.items():
+ key = key.replace('_', '-')
+ if key == 'maxkeys':
+ key = 'max-keys'
+ if isinstance(value, unicode):
+ value = value.encode('utf-8')
+ if value is not None and value != '':
+ pairs.append('%s=%s' % (
+ urllib.quote(key),
+ urllib.quote(str(value)
+ )))
+
+ return '&'.join(pairs)
+
def _get_all(self, element_map, initial_query_string='',
headers=None, **params):
- l = []
- for k, v in params.items():
- k = k.replace('_', '-')
- if k == 'maxkeys':
- k = 'max-keys'
- if isinstance(v, unicode):
- v = v.encode('utf-8')
- if v is not None and v != '':
- l.append('%s=%s' % (urllib.quote(k), urllib.quote(str(v))))
- if len(l):
- s = initial_query_string + '&' + '&'.join(l)
- else:
- s = initial_query_string
+ query_args = self._get_all_query_args(
+ params,
+ initial_query_string=initial_query_string
+ )
response = self.connection.make_request('GET', self.name,
headers=headers,
- query_args=s)
+ query_args=query_args)
body = response.read()
boto.log.debug(body)
if response.status == 200:
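The factored-out helper is private, so the call below is illustrative only; it
mirrors test__get_all_query_args later in this diff. Note the exact pair
ordering depends on dict iteration order::

    from boto.s3.bucket import Bucket

    bucket = Bucket()
    qa = bucket._get_all_query_args(
        {
            'maxkeys': 0,            # renamed to max-keys
            'some_other': 'thing',   # underscores become dashes
            'skipped': None,         # None values are dropped
            'alsoskipped': '',       # ...as are empty strings
        },
        initial_query_string='initial=1')
    # e.g. 'initial=1&max-keys=0&some-other=thing'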
diff --git a/boto/s3/connection.py b/boto/s3/connection.py
index 74b4fdb8..583fa168 100644
--- a/boto/s3/connection.py
+++ b/boto/s3/connection.py
@@ -524,7 +524,8 @@ class S3Connection(AWSAuthConnection):
response.status, response.reason, body)
def make_request(self, method, bucket='', key='', headers=None, data='',
- query_args=None, sender=None, override_num_retries=None):
+ query_args=None, sender=None, override_num_retries=None,
+ retry_handler=None):
if isinstance(bucket, self.bucket_class):
bucket = bucket.name
if isinstance(key, Key):
@@ -539,6 +540,9 @@ class S3Connection(AWSAuthConnection):
boto.log.debug('path=%s' % path)
auth_path += '?' + query_args
boto.log.debug('auth_path=%s' % auth_path)
- return AWSAuthConnection.make_request(self, method, path, headers,
- data, host, auth_path, sender,
- override_num_retries=override_num_retries)
+ return AWSAuthConnection.make_request(
+ self, method, path, headers,
+ data, host, auth_path, sender,
+ override_num_retries=override_num_retries,
+ retry_handler=retry_handler
+ )
diff --git a/boto/s3/key.py b/boto/s3/key.py
index 327feae3..7fead3a5 100644
--- a/boto/s3/key.py
+++ b/boto/s3/key.py
@@ -34,6 +34,7 @@ import urllib
import boto.utils
from boto.exception import BotoClientError
from boto.exception import StorageDataError
+from boto.exception import PleaseRetryException
from boto.provider import Provider
from boto.s3.keyfile import KeyFile
from boto.s3.user import User
@@ -824,20 +825,13 @@ class Key(object):
self.bucket.connection.debug = save_debug
response = http_conn.getresponse()
body = response.read()
- if ((response.status == 500 or response.status == 503 or
- response.getheader('location')) and not chunked_transfer):
- # we'll try again.
- return response
- elif response.status >= 200 and response.status <= 299:
- self.etag = response.getheader('etag')
- if self.etag != '"%s"' % self.md5:
- raise provider.storage_data_error(
- 'ETag from S3 did not match computed MD5')
- return response
- else:
+
+ if not self.should_retry(response, chunked_transfer):
raise provider.storage_response_error(
response.status, response.reason, body)
+ return response
+
if not headers:
headers = {}
else:
@@ -876,13 +870,58 @@ class Key(object):
headers['Content-Length'] = str(self.size)
headers['Expect'] = '100-Continue'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
- resp = self.bucket.connection.make_request('PUT', self.bucket.name,
- self.name, headers,
- sender=sender,
- query_args=query_args)
+ resp = self.bucket.connection.make_request(
+ 'PUT',
+ self.bucket.name,
+ self.name,
+ headers,
+ sender=sender,
+ query_args=query_args
+ )
self.handle_version_headers(resp, force=True)
self.handle_addl_headers(resp.getheaders())
+ def should_retry(self, response, chunked_transfer=False):
+ provider = self.bucket.connection.provider
+
+ if not chunked_transfer:
+ if response.status in [500, 503]:
+ # 500 & 503 can be plain retries.
+ return True
+
+ if response.getheader('location'):
+ # If there's a redirect, plain retry.
+ return True
+
+ if 200 <= response.status <= 299:
+ self.etag = response.getheader('etag')
+
+ if self.etag != '"%s"' % self.md5:
+ raise provider.storage_data_error(
+ 'ETag from S3 did not match computed MD5')
+
+ return True
+
+ if response.status == 400:
+ # The 400 must be trapped so the retry handler can check to
+ # see if it was a timeout.
+ # If ``RequestTimeout`` is present, we'll retry. Otherwise, bomb
+ # out.
+ body = response.read()
+ err = provider.storage_response_error(
+ response.status,
+ response.reason,
+ body
+ )
+
+ if err.error_code in ['RequestTimeout']:
+ raise PleaseRetryException(
+ "Saw %s, retrying" % err.error_code,
+ response=response
+ )
+
+ return False
+
def compute_md5(self, fp, size=None):
"""
:type fp: file
@@ -1026,7 +1065,7 @@ class Key(object):
the second representing the size of the to be transmitted
object.
- :type cb: int
+ :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
diff --git a/boto/sns/connection.py b/boto/sns/connection.py
index 1f29c195..0cdc628d 100644
--- a/boto/sns/connection.py
+++ b/boto/sns/connection.py
@@ -55,7 +55,7 @@ class SNSConnection(AWSQueryConnection):
validate_certs=validate_certs)
def _required_auth_capability(self):
- return ['sns']
+ return ['hmac-v4']
def get_all_topics(self, next_token=None):
"""
diff --git a/boto/utils.py b/boto/utils.py
index e5c083f3..97fdd2df 100644
--- a/boto/utils.py
+++ b/boto/utils.py
@@ -172,7 +172,7 @@ def merge_meta(headers, metadata, provider=None):
for k in metadata.keys():
if k.lower() in ['cache-control', 'content-md5', 'content-type',
'content-encoding', 'content-disposition',
- 'date', 'expires']:
+ 'expires']:
final_headers[k] = metadata[k]
else:
final_headers[metadata_prefix + k] = metadata[k]
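With 'date' dropped from the passthrough list, user metadata named ``date``
now travels as ``x-amz-meta-date`` instead of clobbering the HTTP ``Date``
header. Usage as in the new integration test below (``bucket`` assumed from
context)::

    key = bucket.new_key('test_date')
    key.set_metadata('date', '20130524T155935Z')
    key.set_contents_from_string('Some text here.')

    check = bucket.get_key('test_date')
    assert check.get_metadata('date') == u'20130524T155935Z'
    assert 'x-amz-meta-date' in check._get_remote_metadata()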
diff --git a/docs/source/migrations/dynamodb_v1_to_v2.rst b/docs/source/migrations/dynamodb_v1_to_v2.rst
index 422d8d13..d945e17b 100644
--- a/docs/source/migrations/dynamodb_v1_to_v2.rst
+++ b/docs/source/migrations/dynamodb_v1_to_v2.rst
@@ -35,11 +35,12 @@ DynamoDB v1::
DynamoDB v2::
>>> from boto.dynamodb2.fields import HashKey
+ >>> from boto.dynamodb2.fields import RangeKey
>>> from boto.dynamodb2.table import Table
>>> table = Table.create('messages', schema=[
... HashKey('forum_name'),
- ... HashKey('subject'),
+ ... RangeKey('subject'),
... ], throughput={
... 'read': 10,
... 'write': 10,
diff --git a/docs/source/releasenotes/v2.9.5.rst b/docs/source/releasenotes/v2.9.5.rst
index 84d6d63b..5df46bd8 100644
--- a/docs/source/releasenotes/v2.9.5.rst
+++ b/docs/source/releasenotes/v2.9.5.rst
@@ -29,4 +29,4 @@ Bugfixes
* Fixed an issue with ``EbsOptimized`` in EC2 Autoscale. (:issue:`1513`,
:sha:`424c41`)
* Fixed a missing instance variable bug in DynamoDB v2. (:issue:`1516`,
- :sha: `6fa8bf`)
+ :sha:`6fa8bf`)
diff --git a/docs/source/releasenotes/v2.9.6.rst b/docs/source/releasenotes/v2.9.6.rst
new file mode 100644
index 00000000..9e163fba
--- /dev/null
+++ b/docs/source/releasenotes/v2.9.6.rst
@@ -0,0 +1,56 @@
+boto v2.9.6
+===========
+
+:date: 2013/06/18
+
+This release adds large payload support to Amazon SNS/SQS (from 32k to 256k
+bodies), several minor API additions, new regions for Redshift/Cloudsearch &
+a host of bugfixes.
+
+
+Features
+--------
+
+* Added large body support to SNS/SQS. There's nothing to change in your
+ application code, but you can now send payloads of up to 256k in size.
+ (:sha:`b64947`)
+* Added ``Vault.retrieve_inventory_job`` to Glacier. (:issue:`1532`, :sha:`33de29`)
+* Added ``Item.get(...)`` support to DynamoDB v2. (:sha:`938cb6`)
+* Added the ``ap-northeast-1`` region to Redshift. (:sha:`d3eb61`)
+* Added all the current regions to Cloudsearch. (:issue:`1465`, :sha:`22b3b7`)
+
+
+Bugfixes
+--------
+
+* Fixed a bug where ``date`` metadata couldn't be set on an S3 key.
+ (:issue:`1519`, :sha:`1efde8`)
+* Fixed Python 2.5/Jython support in ``NetworkInterfaceCollection``.
+ (:issue:`1518`, :sha:`0d6af2`)
+* Fixed a XML parsing error with ``InstanceStatusSet``. (:issue:`1493`,
+ :sha:`55d4f6`)
+* Added a test case to try to demonstrate :issue:`443`. (:sha:`084dd5`)
+* Exposed the current tree-hash & upload size on Glacier's ``Writer``.
+ (:issue:`1520`, :sha:`ade462`)
+* Updated EC2 Autoscale to incorporate new cron-like parameters. (:issue:`1433`,
+ :sha:`266e25`, :sha:`871588` & :sha:`473e42`)
+* Fixed ``AttributeError`` being thrown from ``LoadBalancerZones``.
+ (:issue:`1524`, :sha:`215ffa`)
+* Fixed a bug with empty facets in Cloudsearch. (:issue:`1366`, :sha:`7a108e`)
+* Fixed an S3 timeout/retry bug where HTTP 400s weren't being honored.
+ (:issue:`1528`, :sha:`efd9af` & :sha:`16ae74`)
+* Fixed ``get_path`` when ``suppress_consec_slashes=False``. (:issue:`1522`,
+ :sha:`c5dffc`)
+* Factored out how some of S3's ``query_args`` are constructed. (:sha:`9f73de`)
+* Added the ``generation`` query param to ``gs.Key.open_read``. (:sha:`cb4427`)
+* Fixed a bug with the canonicalization of URLs with trailing slashes in
+ the SigV4 signer. (:issue:`1541`, :sha:`dec541`, :sha:`3f2b33`)
+* Several documentation improvements/fixes:
+
+ * Updated the release notes slightly. (:sha:`7b6079`)
+ * Corrected the ``num_cb`` param on ``set_contents_from_filename``.
+ (:issue:`1523`, :sha:`44be69`)
+ * Fixed some example code in the DDB migration guide. (:issue:`1525`,
+ :sha:`6210ca`)
+ * Fixed a typo in one of the DynamoDB v2 examples. (:issue:`1551`,
+ :sha:`b0df3e`)
diff --git a/tests/integration/s3/mock_storage_service.py b/tests/integration/s3/mock_storage_service.py
index 4b95e505..507695bf 100644
--- a/tests/integration/s3/mock_storage_service.py
+++ b/tests/integration/s3/mock_storage_service.py
@@ -253,6 +253,9 @@ class MockBucket(object):
def get_logging_config(self):
return {"Logging": {}}
+ def get_versioning_status(self, headers=NOT_IMPL):
+ return False
+
def get_acl(self, key_name='', headers=NOT_IMPL, version_id=NOT_IMPL):
if key_name:
# Return ACL for the key.
@@ -457,6 +460,9 @@ class MockBucketStorageUri(object):
def delete_bucket(self, headers=NOT_IMPL):
return self.connect().delete_bucket(self.bucket_name)
+ def get_versioning_config(self, headers=NOT_IMPL):
+ self.get_bucket().get_versioning_status(headers)
+
def has_version(self):
return (issubclass(type(self), MockBucketStorageUri)
and ((self.version_id is not None)
diff --git a/tests/integration/s3/test_key.py b/tests/integration/s3/test_key.py
index 8f7dd666..f329e06b 100644
--- a/tests/integration/s3/test_key.py
+++ b/tests/integration/s3/test_key.py
@@ -32,7 +32,7 @@ from boto.s3.key import Key
from boto.exception import S3ResponseError
-class S3KeyTest (unittest.TestCase):
+class S3KeyTest(unittest.TestCase):
s3 = True
def setUp(self):
@@ -373,3 +373,13 @@ class S3KeyTest (unittest.TestCase):
with self.assertRaises(key.provider.storage_response_error):
# Must start with a / or http
key.set_redirect('')
+
+ def test_setting_date(self):
+ key = self.bucket.new_key('test_date')
+ # This should actually set x-amz-meta-date & not fail miserably.
+ key.set_metadata('date', '20130524T155935Z')
+ key.set_contents_from_string('Some text here.')
+
+ check = self.bucket.get_key('test_date')
+ self.assertEqual(check.get_metadata('date'), u'20130524T155935Z')
+ self.assertTrue('x-amz-meta-date' in check._get_remote_metadata())
diff --git a/tests/integration/storage_uri/__init__.py b/tests/integration/storage_uri/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/integration/storage_uri/__init__.py
diff --git a/tests/integration/storage_uri/test_storage_uri.py b/tests/integration/storage_uri/test_storage_uri.py
new file mode 100644
index 00000000..55dac1ad
--- /dev/null
+++ b/tests/integration/storage_uri/test_storage_uri.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Some unit tests for StorageUri
+"""
+
+from tests.unit import unittest
+import time
+import boto
+from boto.s3.connection import S3Connection, Location
+
+
+class StorageUriTest(unittest.TestCase):
+ s3 = True
+
+ def nuke_bucket(self, bucket):
+ for key in bucket:
+ key.delete()
+
+ bucket.delete()
+
+ def test_storage_uri_regionless(self):
+ # First, create a bucket in a different region.
+ conn = S3Connection(
+ host='s3-us-west-2.amazonaws.com'
+ )
+ bucket_name = 'keytest-%d' % int(time.time())
+ bucket = conn.create_bucket(bucket_name, location=Location.USWest2)
+ self.addCleanup(self.nuke_bucket, bucket)
+
+ # Now use ``storage_uri`` to try to make a new key.
+ # This would throw a 301 exception.
+ suri = boto.storage_uri('s3://%s/test' % bucket_name)
+ the_key = suri.new_key()
+ the_key.key = 'Test301'
+ the_key.set_contents_from_string(
+ 'This should store in a different region.'
+ )
+
+ # Check it a different way.
+ alt_conn = boto.connect_s3(host='s3-us-west-2.amazonaws.com')
+ alt_bucket = alt_conn.get_bucket(bucket_name)
+ alt_key = alt_bucket.get_key('Test301')
diff --git a/tests/unit/auth/test_sigv4.py b/tests/unit/auth/test_sigv4.py
index b11b1450..2de6d724 100644
--- a/tests/unit/auth/test_sigv4.py
+++ b/tests/unit/auth/test_sigv4.py
@@ -73,6 +73,24 @@ class TestSigV4Handler(unittest.TestCase):
# This should be both normalized & urlencoded.
self.assertEqual(canonical_uri, 'x/x%20.html')
+ auth = HmacAuthV4Handler('glacier.us-east-1.amazonaws.com',
+ Mock(), self.provider)
+ request = HTTPRequest(
+ 'GET', 'https', 'glacier.us-east-1.amazonaws.com', 443,
+ 'x/./././x/html/', None, {},
+ {'x-amz-glacier-version': '2012-06-01'}, '')
+ canonical_uri = auth.canonical_uri(request)
+ # Trailing slashes should be preserved.
+ self.assertEqual(canonical_uri, 'x/x/html/')
+
+ request = HTTPRequest(
+ 'GET', 'https', 'glacier.us-east-1.amazonaws.com', 443,
+ '/', None, {},
+ {'x-amz-glacier-version': '2012-06-01'}, '')
+ canonical_uri = auth.canonical_uri(request)
+ # There should not be two slashes.
+ self.assertEqual(canonical_uri, '/')
+
def test_headers_to_sign(self):
auth = HmacAuthV4Handler('glacier.us-east-1.amazonaws.com',
Mock(), self.provider)
diff --git a/tests/unit/cloudsearch/test_search.py b/tests/unit/cloudsearch/test_search.py
index b6c23dd4..7cadf659 100644
--- a/tests/unit/cloudsearch/test_search.py
+++ b/tests/unit/cloudsearch/test_search.py
@@ -12,7 +12,7 @@ HOSTNAME = "search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com"
FULL_URL = 'http://%s/2011-02-01/search' % HOSTNAME
-class CloudSearchSearchTest(unittest.TestCase):
+class CloudSearchSearchBaseTest(unittest.TestCase):
hits = [
{
@@ -45,21 +45,6 @@ class CloudSearchSearchTest(unittest.TestCase):
},
]
- response = {
- 'rank': '-text_relevance',
- 'match-expr':"Test",
- 'hits': {
- 'found': 30,
- 'start': 0,
- 'hit':hits
- },
- 'info': {
- 'rid':'b7c167f6c2da6d93531b9a7b314ad030b3a74803b4b7797edb905ba5a6a08',
- 'time-ms': 2,
- 'cpu-time-ms': 0
- }
-
- }
def get_args(self, requestline):
(_, request, _) = requestline.split(" ")
@@ -76,6 +61,23 @@ class CloudSearchSearchTest(unittest.TestCase):
def tearDown(self):
HTTPretty.disable()
+class CloudSearchSearchTest(CloudSearchSearchBaseTest):
+ response = {
+ 'rank': '-text_relevance',
+ 'match-expr':"Test",
+ 'hits': {
+ 'found': 30,
+ 'start': 0,
+ 'hit':CloudSearchSearchBaseTest.hits
+ },
+ 'info': {
+ 'rid':'b7c167f6c2da6d93531b9a7b314ad030b3a74803b4b7797edb905ba5a6a08',
+ 'time-ms': 2,
+ 'cpu-time-ms': 0
+ }
+
+ }
+
def test_cloudsearch_qsearch(self):
search = SearchConnection(endpoint=HOSTNAME)
@@ -323,3 +325,33 @@ class CloudSearchSearchTest(unittest.TestCase):
self.assertEqual(results.next_page().query.start,
query1.start + query1.size)
self.assertEqual(query1.q, query2.q)
+
+class CloudSearchSearchFacetTest(CloudSearchSearchBaseTest):
+ response = {
+ 'rank': '-text_relevance',
+ 'match-expr':"Test",
+ 'hits': {
+ 'found': 30,
+ 'start': 0,
+ 'hit':CloudSearchSearchBaseTest.hits
+ },
+ 'info': {
+ 'rid':'b7c167f6c2da6d93531b9a7b314ad030b3a74803b4b7797edb905ba5a6a08',
+ 'time-ms': 2,
+ 'cpu-time-ms': 0
+ },
+ 'facets': {
+ 'tags': {},
+ 'animals': {'constraints': [{'count': '2', 'value': 'fish'}, {'count': '1', 'value':'lions'}]},
+ }
+ }
+
+ def test_cloudsearch_search_facets(self):
+ #self.response['facets'] = {'tags': {}}
+
+ search = SearchConnection(endpoint=HOSTNAME)
+
+ results = search.search(q='Test', facet=['tags'])
+
+ self.assertTrue('tags' not in results.facets)
+ self.assertEqual(results.facets['animals'], {u'lions': u'1', u'fish': u'2'})
diff --git a/tests/unit/dynamodb2/test_table.py b/tests/unit/dynamodb2/test_table.py
index 705d14f0..fe7e5b95 100644
--- a/tests/unit/dynamodb2/test_table.py
+++ b/tests/unit/dynamodb2/test_table.py
@@ -228,6 +228,16 @@ class ItemTestCase(unittest.TestCase):
12345,
])
+ def test_get(self):
+ self.assertEqual(self.johndoe.get('username'), 'johndoe')
+ self.assertEqual(self.johndoe.get('first_name'), 'John')
+ self.assertEqual(self.johndoe.get('date_joined'), 12345)
+
+ # Test a missing key. No default yields ``None``.
+ self.assertEqual(self.johndoe.get('last_name'), None)
+ # This time with a default.
+ self.assertEqual(self.johndoe.get('last_name', True), True)
+
def test_items(self):
self.assertEqual(sorted(self.johndoe.items()), [
('date_joined', 12345),
diff --git a/tests/unit/ec2/autoscale/test_group.py b/tests/unit/ec2/autoscale/test_group.py
index 8c7baa83..28941545 100644
--- a/tests/unit/ec2/autoscale/test_group.py
+++ b/tests/unit/ec2/autoscale/test_group.py
@@ -21,6 +21,8 @@
# IN THE SOFTWARE.
#
+from datetime import datetime
+
from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
@@ -60,6 +62,42 @@ class TestAutoScaleGroup(AWSMockServiceTestCase):
'TerminationPolicies.member.2': 'OldestLaunchConfiguration',
}, ignore_params_values=['Version'])
+class TestScheduledGroup(AWSMockServiceTestCase):
+ connection_class = AutoScaleConnection
+
+ def setUp(self):
+ super(TestScheduledGroup, self).setUp()
+
+ def default_body(self):
+ return """
+ <PutScheduledUpdateGroupActionResponse>
+ <ResponseMetadata>
+ <RequestId>requestid</RequestId>
+ </ResponseMetadata>
+ </PutScheduledUpdateGroupActionResponse>
+ """
+
+ def test_scheduled_group_creation(self):
+ self.set_http_response(status_code=200)
+ self.service_connection.create_scheduled_group_action('foo',
+ 'scheduled-foo',
+ desired_capacity=1,
+ start_time=datetime(2013, 1, 1, 22, 55, 31),
+ end_time=datetime(2013, 2, 1, 22, 55, 31),
+ min_size=1,
+ max_size=2,
+ recurrence='0 10 * * *')
+ self.assert_request_parameters({
+ 'Action': 'PutScheduledUpdateGroupAction',
+ 'AutoScalingGroupName': 'foo',
+ 'ScheduledActionName': 'scheduled-foo',
+ 'MaxSize': 2,
+ 'MinSize': 1,
+ 'DesiredCapacity': 1,
+ 'EndTime': '2013-02-01T22:55:31',
+ 'StartTime': '2013-01-01T22:55:31',
+ 'Recurrence': '0 10 * * *',
+ }, ignore_params_values=['Version'])
class TestParseAutoScaleGroupResponse(AWSMockServiceTestCase):
connection_class = AutoScaleConnection
diff --git a/tests/unit/ec2/elb/__init__.py b/tests/unit/ec2/elb/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/unit/ec2/elb/__init__.py
diff --git a/tests/unit/ec2/elb/test_loadbalancer.py b/tests/unit/ec2/elb/test_loadbalancer.py
new file mode 100644
index 00000000..d5e126c2
--- /dev/null
+++ b/tests/unit/ec2/elb/test_loadbalancer.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+
+from tests.unit import unittest
+from tests.unit import AWSMockServiceTestCase
+
+import mock
+
+from boto.ec2.elb import ELBConnection
+
+DISABLE_RESPONSE = r"""<?xml version="1.0" encoding="UTF-8"?>
+<DisableAvailabilityZonesForLoadBalancerResult xmlns="http://ec2.amazonaws.com/doc/2013-02-01/">
+ <requestId>3be1508e-c444-4fef-89cc-0b1223c4f02fEXAMPLE</requestId>
+ <AvailabilityZones>
+ <member>sample-zone</member>
+ </AvailabilityZones>
+</DisableAvailabilityZonesForLoadBalancerResult>
+"""
+
+
+class TestInstanceStatusResponseParsing(unittest.TestCase):
+ def test_next_token(self):
+ elb = ELBConnection(aws_access_key_id='aws_access_key_id',
+ aws_secret_access_key='aws_secret_access_key')
+ mock_response = mock.Mock()
+ mock_response.read.return_value = DISABLE_RESPONSE
+ mock_response.status = 200
+ elb.make_request = mock.Mock(return_value=mock_response)
+ disabled = elb.disable_availability_zones('mine', ['sample-zone'])
+ self.assertEqual(disabled, ['sample-zone'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tests/unit/ec2/test_instancestatus.py b/tests/unit/ec2/test_instancestatus.py
new file mode 100644
index 00000000..67433b8a
--- /dev/null
+++ b/tests/unit/ec2/test_instancestatus.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+
+from tests.unit import unittest
+from tests.unit import AWSMockServiceTestCase
+
+import mock
+
+from boto.ec2.connection import EC2Connection
+
+INSTANCE_STATUS_RESPONSE = r"""<?xml version="1.0" encoding="UTF-8"?>
+<DescribeInstanceStatusResponse xmlns="http://ec2.amazonaws.com/doc/2013-02-01/">
+ <requestId>3be1508e-c444-4fef-89cc-0b1223c4f02fEXAMPLE</requestId>
+ <nextToken>page-2</nextToken>
+ <instanceStatusSet />
+</DescribeInstanceStatusResponse>
+"""
+
+
+class TestInstanceStatusResponseParsing(unittest.TestCase):
+ def test_next_token(self):
+ ec2 = EC2Connection(aws_access_key_id='aws_access_key_id',
+ aws_secret_access_key='aws_secret_access_key')
+ mock_response = mock.Mock()
+ mock_response.read.return_value = INSTANCE_STATUS_RESPONSE
+ mock_response.status = 200
+ ec2.make_request = mock.Mock(return_value=mock_response)
+ all_statuses = ec2.get_all_instance_status()
+ self.assertEqual(all_statuses.next_token, 'page-2')
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tests/unit/glacier/test_vault.py b/tests/unit/glacier/test_vault.py
index a4ef008b..7861b63a 100644
--- a/tests/unit/glacier/test_vault.py
+++ b/tests/unit/glacier/test_vault.py
@@ -27,6 +27,8 @@ import mock
from mock import ANY
from boto.glacier import vault
+from boto.glacier.job import Job
+from boto.glacier.response import GlacierResponse
class TestVault(unittest.TestCase):
@@ -96,6 +98,55 @@ class TestVault(unittest.TestCase):
self.vault.create_archive_writer.assert_called_with(
description=mock.ANY, part_size=expected_part_size)
+ def test_retrieve_inventory(self):
+ class FakeResponse(object):
+ status = 202
+
+ def getheader(self, key, default=None):
+ if key == 'x-amz-job-id':
+ return 'HkF9p6'
+ elif key == 'Content-Type':
+ return 'application/json'
+
+ return 'something'
+
+ def read(self, amt=None):
+ return """{
+ "Action": "ArchiveRetrieval",
+ "ArchiveId": "NkbByEejwEggmBz2fTHgJrg0XBoDfjP4q6iu87-EXAMPLEArchiveId",
+ "ArchiveSizeInBytes": 16777216,
+ "ArchiveSHA256TreeHash": "beb0fe31a1c7ca8c6c04d574ea906e3f97",
+ "Completed": false,
+ "CreationDate": "2012-05-15T17:21:39.339Z",
+ "CompletionDate": "2012-05-15T17:21:43.561Z",
+ "InventorySizeInBytes": null,
+ "JobDescription": "My ArchiveRetrieval Job",
+ "JobId": "HkF9p6",
+ "RetrievalByteRange": "0-16777215",
+ "SHA256TreeHash": "beb0fe31a1c7ca8c6c04d574ea906e3f97b31fd",
+ "SNSTopic": "arn:aws:sns:us-east-1:012345678901:mytopic",
+ "StatusCode": "InProgress",
+ "StatusMessage": "Operation in progress.",
+ "VaultARN": "arn:aws:glacier:us-east-1:012345678901:vaults/examplevault"
+}"""
+
+ raw_resp = FakeResponse()
+ init_resp = GlacierResponse(raw_resp, [('x-amz-job-id', 'JobId')])
+ raw_resp_2 = FakeResponse()
+ desc_resp = GlacierResponse(raw_resp_2, [])
+
+ with mock.patch.object(self.vault.layer1, 'initiate_job',
+ return_value=init_resp):
+ with mock.patch.object(self.vault.layer1, 'describe_job',
+ return_value=desc_resp):
+ # The old/back-compat variant of the call.
+ self.assertEqual(self.vault.retrieve_inventory(), 'HkF9p6')
+
+ # The variant that returns a full ``Job`` object.
+ job = self.vault.retrieve_inventory_job()
+ self.assertTrue(isinstance(job, Job))
+ self.assertEqual(job.id, 'HkF9p6')
+
class TestConcurrentUploads(unittest.TestCase):
diff --git a/tests/unit/glacier/test_writer.py b/tests/unit/glacier/test_writer.py
index 41ddfb09..43757ebb 100644
--- a/tests/unit/glacier/test_writer.py
+++ b/tests/unit/glacier/test_writer.py
@@ -133,6 +133,51 @@ class TestWriter(unittest.TestCase):
self.writer.close()
self.assertEquals(sentinel.archive_id, self.writer.get_archive_id())
+ def test_current_tree_hash(self):
+ self.writer.write('1234')
+ self.writer.write('567')
+ hash_1 = self.writer.current_tree_hash
+ self.assertEqual(hash_1,
+ '\x0e\xb0\x11Z\x1d\x1f\n\x10|\xf76\xa6\xf5' +
+ '\x83\xd1\xd5"bU\x0c\x95\xa8<\xf5\x81\xef\x0e\x0f\x95\n\xb7k'
+ )
+
+ # This hash will be different, since the content has changed.
+ self.writer.write('22i3uy')
+ hash_2 = self.writer.current_tree_hash
+ self.assertEqual(hash_2,
+ '\x7f\xf4\x97\x82U]\x81R\x05#^\xe8\x1c\xd19' +
+ '\xe8\x1f\x9e\xe0\x1aO\xaad\xe5\x06"\xa5\xc0\xa8AdL'
+ )
+ self.writer.close()
+
+ # Check the final tree hash, post-close.
+ final_hash = self.writer.current_tree_hash
+ self.assertEqual(final_hash,
+ ';\x1a\xb8!=\xf0\x14#\x83\x11\xd5\x0b\x0f' +
+ '\xc7D\xe4\x8e\xd1W\x99z\x14\x06\xb9D\xd0\xf0*\x93\xa2\x8e\xf9'
+ )
+ # Then assert we don't get a different one on a subsequent call.
+ self.assertEqual(final_hash, self.writer.current_tree_hash)
+
+ def test_current_uploaded_size(self):
+ self.writer.write('1234')
+ self.writer.write('567')
+ size_1 = self.writer.current_uploaded_size
+ self.assertEqual(size_1, 4)
+
+ # This size will be different, since the content has changed.
+ self.writer.write('22i3uy')
+ size_2 = self.writer.current_uploaded_size
+ self.assertEqual(size_2, 12)
+ self.writer.close()
+
+ # Get the final size, post-close.
+ final_size = self.writer.current_uploaded_size
+ self.assertEqual(final_size, 13)
+ # Then assert we don't get a different one on a subsequent call.
+ self.assertEqual(final_size, self.writer.current_uploaded_size)
+
def test_upload_id(self):
self.assertEquals(sentinel.upload_id, self.writer.upload_id)
diff --git a/tests/unit/s3/test_bucket.py b/tests/unit/s3/test_bucket.py
index de7e27cc..ac2d82bf 100644
--- a/tests/unit/s3/test_bucket.py
+++ b/tests/unit/s3/test_bucket.py
@@ -1,9 +1,11 @@
+# -*- coding: utf-8 -*-
from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
from boto.s3.connection import S3Connection
from boto.s3.bucket import Bucket
+
class TestS3Bucket(AWSMockServiceTestCase):
connection_class = S3Connection
@@ -46,3 +48,53 @@ class TestS3Bucket(AWSMockServiceTestCase):
with self.assertRaises(ValueError):
key = bucket.delete_key('')
+
+ def test__get_all_query_args(self):
+ bukket = Bucket()
+
+ # Default.
+ qa = bukket._get_all_query_args({})
+ self.assertEqual(qa, '')
+
+ # Default with initial.
+ qa = bukket._get_all_query_args({}, 'initial=1')
+ self.assertEqual(qa, 'initial=1')
+
+ # Single param.
+ qa = bukket._get_all_query_args({
+ 'foo': 'true'
+ })
+ self.assertEqual(qa, 'foo=true')
+
+ # Single param with initial.
+ qa = bukket._get_all_query_args({
+ 'foo': 'true'
+ }, 'initial=1')
+ self.assertEqual(qa, 'initial=1&foo=true')
+
+ # Multiple params with all the weird cases.
+ multiple_params = {
+ 'foo': 'true',
+ # Ensure Unicode chars get encoded.
+ 'bar': '☃',
+ # Underscores are bad, m'kay?
+ 'some_other': 'thing',
+ # Change the variant of ``max-keys``.
+ 'maxkeys': 0,
+ # ``None`` values get excluded.
+ 'notthere': None,
+ # Empty values also get excluded.
+ 'notpresenteither': '',
+ }
+ qa = bukket._get_all_query_args(multiple_params)
+ self.assertEqual(
+ qa,
+ 'bar=%E2%98%83&max-keys=0&foo=true&some-other=thing'
+ )
+
+ # Multiple params with initial.
+ qa = bukket._get_all_query_args(multiple_params, 'initial=1')
+ self.assertEqual(
+ qa,
+ 'initial=1&bar=%E2%98%83&max-keys=0&foo=true&some-other=thing'
+ )
diff --git a/tests/unit/s3/test_key.py b/tests/unit/s3/test_key.py
index ce8f0845..5e249c17 100644
--- a/tests/unit/s3/test_key.py
+++ b/tests/unit/s3/test_key.py
@@ -20,10 +20,15 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
+from boto.exception import BotoServerError
from boto.s3.connection import S3Connection
from boto.s3.bucket import Bucket
@@ -71,5 +76,51 @@ class TestS3Key(AWSMockServiceTestCase):
self.assertIsNotNone(key)
+def counter(fn):
+ def _wrapper(*args, **kwargs):
+ _wrapper.count += 1
+ return fn(*args, **kwargs)
+ _wrapper.count = 0
+ return _wrapper
+
+
+class TestS3KeyRetries(AWSMockServiceTestCase):
+ connection_class = S3Connection
+
+ def setUp(self):
+ super(TestS3KeyRetries, self).setUp()
+
+ def test_500_retry(self):
+ self.set_http_response(status_code=500)
+ b = Bucket(self.service_connection, 'mybucket')
+ k = b.new_key('test_failure')
+ fail_file = StringIO('This will attempt to retry.')
+
+ try:
+ k.send_file(fail_file)
+ self.fail("This shouldn't ever succeed.")
+ except BotoServerError:
+ pass
+
+ def test_400_timeout(self):
+ weird_timeout_body = "<Error><Code>RequestTimeout</Code></Error>"
+ self.set_http_response(status_code=400, body=weird_timeout_body)
+ b = Bucket(self.service_connection, 'mybucket')
+ k = b.new_key('test_failure')
+ fail_file = StringIO('This will pretend to be chunk-able.')
+
+ # Decorate.
+ k.should_retry = counter(k.should_retry)
+ self.assertEqual(k.should_retry.count, 0)
+
+ try:
+ k.send_file(fail_file)
+ self.fail("This shouldn't ever succeed.")
+ except BotoServerError:
+ pass
+
+ self.assertEqual(k.should_retry.count, 1)
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/tests/unit/sns/test_connection.py b/tests/unit/sns/test_connection.py
index 9eedf048..8cc064e5 100644
--- a/tests/unit/sns/test_connection.py
+++ b/tests/unit/sns/test_connection.py
@@ -58,11 +58,9 @@ class TestSNSConnection(AWSMockServiceTestCase):
'ContentType': 'JSON',
'Endpoint': 'arn:aws:sqs:us-east-1:idnum:queuename',
'Protocol': 'sqs',
- 'SignatureMethod': 'HmacSHA256',
- 'SignatureVersion': 2,
'TopicArn': 'topic_arn',
'Version': '2010-03-31',
- }, ignore_params_values=['AWSAccessKeyId', 'Timestamp'])
+ }, ignore_params_values=[])
# Verify that the queue policy was properly updated.
actual_policy = json.loads(queue.set_attribute.call_args[0][1])
@@ -85,11 +83,9 @@ class TestSNSConnection(AWSMockServiceTestCase):
'ContentType': 'JSON',
'Endpoint': 'arn:aws:sqs:us-east-1:idnum:queuename',
'Protocol': 'sqs',
- 'SignatureMethod': 'HmacSHA256',
- 'SignatureVersion': 2,
'TopicArn': 'topic_arn',
'Version': '2010-03-31',
- }, ignore_params_values=['AWSAccessKeyId', 'Timestamp'])
+ }, ignore_params_values=[])
actual_policy = json.loads(queue.set_attribute.call_args[0][1])
# Only a single statement should be part of the policy.
self.assertEqual(len(actual_policy['Statement']), 1)
diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py
index d2c3e2aa..d71587fc 100644
--- a/tests/unit/test_connection.py
+++ b/tests/unit/test_connection.py
@@ -23,7 +23,7 @@ import urlparse
from tests.unit import unittest
from httpretty import HTTPretty
-from boto.connection import AWSQueryConnection
+from boto.connection import AWSQueryConnection, AWSAuthConnection
from boto.exception import BotoServerError
from boto.regioninfo import RegionInfo
from boto.compat import json
@@ -90,6 +90,27 @@ class MockAWSService(AWSQueryConnection):
security_token,
validate_certs=validate_certs)
+class TestAWSAuthConnection(unittest.TestCase):
+ def test_get_path(self):
+ conn = AWSAuthConnection(
+ 'mockservice.cc-zone-1.amazonaws.com',
+ aws_access_key_id='access_key',
+ aws_secret_access_key='secret',
+ suppress_consec_slashes=False
+ )
+ # Test some sample paths for mangling.
+ self.assertEqual(conn.get_path('/'), '/')
+ self.assertEqual(conn.get_path('image.jpg'), '/image.jpg')
+ self.assertEqual(conn.get_path('folder/image.jpg'), '/folder/image.jpg')
+ self.assertEqual(conn.get_path('folder//image.jpg'), '/folder//image.jpg')
+
+ # Ensure leading slashes aren't removed.
+ # See https://github.com/boto/boto/issues/1387
+ self.assertEqual(conn.get_path('/folder//image.jpg'), '/folder//image.jpg')
+ self.assertEqual(conn.get_path('/folder////image.jpg'), '/folder////image.jpg')
+ self.assertEqual(conn.get_path('///folder////image.jpg'), '///folder////image.jpg')
+
+
class TestAWSQueryConnection(unittest.TestCase):
def setUp(self):
self.region = RegionInfo(name='cc-zone-1',