author     JordonPhillips <phjordon@amazon.com>  2016-04-28 10:27:42 -0700
committer  JordonPhillips <phjordon@amazon.com>  2016-04-28 10:27:42 -0700
commit     b915894a66996a265f47ab10285bb8b5695dfe9f (patch)
tree       b616f3aa87c971f9626c264aafbd6c0813262fac
parent     3f8f02caf92917eafeac830ca705694405f90307 (diff)
parent     a7dd8db385567d9da72d4ee3144249be5d49ff06 (diff)
download   boto-2.40.0.tar.gz
Merge branch 'release-2.40.0' (tag: 2.40.0)
-rw-r--r--  .travis.yml                                   |   4
-rw-r--r--  README.rst                                    |   2
-rw-r--r--  boto/__init__.py                              |   2
-rw-r--r--  boto/auth.py                                  |   6
-rwxr-xr-x  boto/cloudformation/stack.py                  |  10
-rw-r--r--  boto/compat.py                                |  25
-rw-r--r--  boto/connection.py                            |   4
-rw-r--r--  boto/dynamodb2/items.py                       |   4
-rw-r--r--  boto/ec2/connection.py                        |  16
-rw-r--r--  boto/emr/connection.py                        |  15
-rw-r--r--  boto/endpoints.json                           | 211
-rw-r--r--  boto/exception.py                             |  12
-rw-r--r--  boto/glacier/layer1.py                        |   2
-rw-r--r--  boto/kinesis/layer1.py                        |   4
-rw-r--r--  boto/provider.py                              |  44
-rw-r--r--  boto/pyami/config.py                          |  52
-rw-r--r--  boto/route53/connection.py                    |  31
-rw-r--r--  boto/s3/bucketlistresultset.py                |   4
-rw-r--r--  boto/s3/connection.py                         |   5
-rw-r--r--  boto/s3/key.py                                |   2
-rw-r--r--  boto/s3/lifecycle.py                          | 107
-rw-r--r--  boto/sqs/connection.py                        |  10
-rw-r--r--  boto/utils.py                                 |   6
-rw-r--r--  docs/source/_templates/page.html              |   1
-rw-r--r--  docs/source/index.rst                         |   1
-rw-r--r--  docs/source/releasenotes/v2.40.0.rst          |  15
-rw-r--r--  docs/source/s3_tut.rst                        |  45
-rw-r--r--  scripts/rebuild-endpoints.py                  | 283
-rw-r--r--  scripts/rebuild_endpoints.py                  |  53
-rw-r--r--  tests/integration/s3/test_key.py              |  13
-rw-r--r--  tests/integration/sqs/test_connection.py      |  11
-rw-r--r--  tests/unit/auth/test_sigv4.py                 |  25
-rw-r--r--  tests/unit/cloudformation/test_connection.py  |   5
-rwxr-xr-x  tests/unit/ec2/test_connection.py             | 106
-rw-r--r--  tests/unit/emr/test_connection.py             | 118
-rw-r--r--  tests/unit/glacier/test_layer1.py             |  29
-rw-r--r--  tests/unit/kinesis/test_kinesis.py            |   6
-rw-r--r--  tests/unit/provider/test_provider.py          |  27
-rw-r--r--  tests/unit/pyami/__init__.py                  |   0
-rw-r--r--  tests/unit/pyami/test_config.py               |  63
-rw-r--r--  tests/unit/route53/test_connection.py         |  12
-rw-r--r--  tests/unit/s3/test_bucketlistresultset.py     |  61
-rw-r--r--  tests/unit/s3/test_connection.py              |  79
-rw-r--r--  tests/unit/s3/test_lifecycle.py               | 103
-rw-r--r--  tests/unit/sqs/test_connection.py             |   8
-rw-r--r--  tests/unit/test_connection.py                 |  22
46 files changed, 1326 insertions(+), 338 deletions(-)
diff --git a/.travis.yml b/.travis.yml
index e7eee360..64a5397a 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -9,6 +9,10 @@ python:
env:
- BOTO_CONFIG=/tmp/nowhere
before_install:
+ - if [ "$TRAVIS_PULL_REQUEST" != "false" ] && [ "$TRAVIS_BRANCH" == "master" ]; then
+ echo "No pull requests can be sent to the master branch" 1>&2;
+ exit 1;
+ fi
- sudo apt-get update
- sudo apt-get --reinstall install -qq language-pack-en language-pack-de
- sudo apt-get install swig
diff --git a/README.rst b/README.rst
index b819374c..89110734 100644
--- a/README.rst
+++ b/README.rst
@@ -1,7 +1,7 @@
####
boto
####
-boto 2.39.0
+boto 2.40.0
Released: 9-Apr-2015
diff --git a/boto/__init__.py b/boto/__init__.py
index 989fb4b7..a39b50df 100644
--- a/boto/__init__.py
+++ b/boto/__init__.py
@@ -38,7 +38,7 @@ import logging.config
from boto.compat import urlparse
from boto.exception import InvalidUriError
-__version__ = '2.39.0'
+__version__ = '2.40.0'
Version = __version__ # for backward compatibility
# http://bugs.python.org/issue7980
diff --git a/boto/auth.py b/boto/auth.py
index a3ce891c..36df7227 100644
--- a/boto/auth.py
+++ b/boto/auth.py
@@ -39,7 +39,7 @@ import hmac
import os
import posixpath
-from boto.compat import urllib, encodebytes
+from boto.compat import urllib, encodebytes, parse_qs_safe
from boto.auth_handler import AuthHandler
from boto.exception import BotoClientError
@@ -575,7 +575,7 @@ class S3HmacAuthV4Handler(HmacAuthV4Handler, AuthHandler):
# Because some quoting may have already been applied, let's back it out.
unquoted = urllib.parse.unquote(path.path)
# Requote, this time addressing all characters.
- encoded = urllib.parse.quote(unquoted)
+ encoded = urllib.parse.quote(unquoted, safe='/~')
return encoded
def canonical_query_string(self, http_request):
@@ -690,7 +690,7 @@ class S3HmacAuthV4Handler(HmacAuthV4Handler, AuthHandler):
modified_req.params = copy_params
raw_qs = parsed_path.query
- existing_qs = urllib.parse.parse_qs(
+ existing_qs = parse_qs_safe(
raw_qs,
keep_blank_values=True
)
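
The quoting change above matters because Python 2's ``quote()`` percent-encodes ``~`` by default, which breaks SigV4 canonical URIs (RFC 3986 treats ``~`` as unreserved). A minimal sketch of the difference (the path below is illustrative)::

    from boto.compat import urllib

    unquoted = urllib.parse.unquote('/bucket/~user/file name.txt')
    # Under Python 2, quote() escapes '~' by default, so the canonical
    # URI would contain %7E and the computed SigV4 signature would not
    # match what S3 computes.
    print(urllib.parse.quote(unquoted))             # /bucket/%7Euser/file%20name.txt
    print(urllib.parse.quote(unquoted, safe='/~'))  # /bucket/~user/file%20name.txt
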
diff --git a/boto/cloudformation/stack.py b/boto/cloudformation/stack.py
index 5dac0dd7..c00fefcf 100755
--- a/boto/cloudformation/stack.py
+++ b/boto/cloudformation/stack.py
@@ -16,10 +16,18 @@ class Stack(object):
self.tags = []
self.stack_id = None
self.stack_status = None
+ self.stack_status_reason = None
self.stack_name = None
- self.stack_name_reason = None
self.timeout_in_minutes = None
+ @property
+ def stack_name_reason(self):
+ return self.stack_status_reason
+
+ @stack_name_reason.setter
+ def stack_name_reason(self, value):
+ self.stack_status_reason = value
+
def startElement(self, name, attrs, connection):
if name == "Parameters":
self.parameters = ResultSet([('member', Parameter)])
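
The hunk above fixes the misspelled ``stack_name_reason`` attribute while keeping it as a read/write alias for the corrected ``stack_status_reason``. A self-contained sketch of the property-alias pattern (simplified, not the full boto class)::

    class Stack(object):
        # Simplified sketch of the alias used in the hunk above.
        def __init__(self):
            self.stack_status_reason = None

        @property
        def stack_name_reason(self):
            return self.stack_status_reason

        @stack_name_reason.setter
        def stack_name_reason(self, value):
            self.stack_status_reason = value

    stack = Stack()
    stack.stack_name_reason = 'ROLLBACK: resource limit exceeded'
    print(stack.stack_status_reason)  # ROLLBACK: resource limit exceeded
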
diff --git a/boto/compat.py b/boto/compat.py
index 43e2d2b4..d2571803 100644
--- a/boto/compat.py
+++ b/boto/compat.py
@@ -61,12 +61,14 @@ if six.PY3:
# StandardError was removed, so use the base exception type instead
StandardError = Exception
long_type = int
- from configparser import ConfigParser
+ from configparser import ConfigParser, NoOptionError, NoSectionError
unquote_str = unquote_plus
+ parse_qs_safe = parse_qs
else:
StandardError = StandardError
long_type = long
from ConfigParser import SafeConfigParser as ConfigParser
+ from ConfigParser import NoOptionError, NoSectionError
def unquote_str(value, encoding='utf-8'):
# In python2, unquote() gives us a string back that has the urldecoded
@@ -77,3 +79,24 @@ else:
# unquote it.
byte_string = value.encode(encoding)
return unquote_plus(byte_string).decode(encoding)
+
+ # These are the same default arguments for python3's
+ # urllib.parse.parse_qs.
+ def parse_qs_safe(qs, keep_blank_values=False, strict_parsing=False,
+ encoding='utf-8', errors='replace'):
+ """Parse a query handling unicode arguments properly in Python 2."""
+ is_text_type = isinstance(qs, six.text_type)
+ if is_text_type:
+ # URL encoding uses ASCII code points only.
+ qs = qs.encode('ascii')
+ qs_dict = parse_qs(qs, keep_blank_values, strict_parsing)
+ if is_text_type:
+ # Decode the parsed dictionary back to unicode.
+ result = {}
+ for (name, value) in qs_dict.items():
+ decoded_name = name.decode(encoding, errors)
+ decoded_value = [item.decode(encoding, errors)
+ for item in value]
+ result[decoded_name] = decoded_value
+ return result
+ return qs_dict
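
On Python 2, ``parse_qs`` mishandles ``unicode`` input containing percent-escaped UTF-8; the shim above round-trips through bytes instead. A sketch of the intended Python 2 behavior (values are illustrative)::

    from boto.compat import parse_qs_safe

    qs = u'prefix=caf%C3%A9&marker='
    parsed = parse_qs_safe(qs, keep_blank_values=True)
    # The escaped UTF-8 decodes to real text instead of mojibake.
    print(parsed[u'prefix'][0])  # u'caf\xe9', i.e. "cafe" with an acute accent
    print(parsed[u'marker'])     # [u''] because blank values are kept
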
diff --git a/boto/connection.py b/boto/connection.py
index 32fecd68..2fef4487 100644
--- a/boto/connection.py
+++ b/boto/connection.py
@@ -368,7 +368,7 @@ class HTTPRequest(object):
for key in self.headers:
val = self.headers[key]
if isinstance(val, six.text_type):
- safe = '!"#$%&\'()*+,/:;<=>?@[\\]^`{|}~'
+ safe = '!"#$%&\'()*+,/:;<=>?@[\\]^`{|}~ '
self.headers[key] = quote(val.encode('utf-8'), safe)
setattr(self, '_headers_quoted', True)
@@ -1048,7 +1048,7 @@ class AWSAuthConnection(object):
if self.host_header and not boto.utils.find_matching_headers('host', headers):
headers['host'] = self.host_header
host = host or self.host
- if self.use_proxy:
+ if self.use_proxy and not self.skip_proxy(host):
if not auth_path:
auth_path = path
path = self.prefix_proxy_to_path(path, host)
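
With the ``skip_proxy`` check above, hosts excluded from proxying (boto's existing no_proxy handling) are no longer forced through the configured proxy. Roughly, the decision now behaves like this simplified sketch (not boto's exact code)::

    def choose_paths(conn, host, path, auth_path):
        # Only requests that actually go through the proxy need the
        # absolute-URI path form.
        if conn.use_proxy and not conn.skip_proxy(host):
            auth_path = auth_path or path
            path = conn.prefix_proxy_to_path(path, host)
        return path, auth_path
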
diff --git a/boto/dynamodb2/items.py b/boto/dynamodb2/items.py
index b1b535f6..89dd6681 100644
--- a/boto/dynamodb2/items.py
+++ b/boto/dynamodb2/items.py
@@ -318,7 +318,7 @@ class Item(object):
Largely internal.
"""
- # This doesn't save on it's own. Rather, we prepare the datastructure
+ # This doesn't save on its own. Rather, we prepare the datastructure
# and hand-off to the table to handle creation/update.
final_data = {}
@@ -338,7 +338,7 @@ class Item(object):
Largely internal.
"""
- # This doesn't save on it's own. Rather, we prepare the datastructure
+ # This doesn't save on its own. Rather, we prepare the datastructure
# and hand-off to the table to handle creation/update.
final_data = {}
fields = set()
diff --git a/boto/ec2/connection.py b/boto/ec2/connection.py
index f1a49285..6fae668b 100644
--- a/boto/ec2/connection.py
+++ b/boto/ec2/connection.py
@@ -772,8 +772,7 @@ class EC2Connection(AWSQueryConnection):
with which to associate instances
:type user_data: string
- :param user_data: The Base64-encoded MIME user data to be made
- available to the instance(s) in this reservation.
+ :param user_data: The user data passed to the launched instances
:type instance_type: string
:param instance_type: The type of instance to run:
@@ -3825,9 +3824,9 @@ class EC2Connection(AWSQueryConnection):
:rtype: string
:return: The unique ID for the submitted modification request.
"""
- params = {
- 'ClientToken': client_token,
- }
+ params = {}
+ if client_token is not None:
+ params['ClientToken'] = client_token
if reserved_instance_ids is not None:
self.build_list_params(params, reserved_instance_ids,
'ReservedInstancesId')
@@ -4406,7 +4405,8 @@ class EC2Connection(AWSQueryConnection):
return self.get_list('DescribeInstanceTypes', params, [('item', InstanceType)], verb='POST')
def copy_image(self, source_region, source_image_id, name=None,
- description=None, client_token=None, dry_run=False):
+ description=None, client_token=None, dry_run=False,
+ encrypted=None, kms_key_id=None):
"""
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
@@ -4423,6 +4423,10 @@ class EC2Connection(AWSQueryConnection):
params['Description'] = description
if client_token is not None:
params['ClientToken'] = client_token
+ if encrypted is not None:
+ params['Encrypted'] = 'true' if encrypted else 'false'
+ if kms_key_id is not None:
+ params['KmsKeyId'] = kms_key_id
if dry_run:
params['DryRun'] = 'true'
return self.get_object('CopyImage', params, CopyImage,
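
The new ``encrypted``/``kms_key_id`` parameters are tri-state: ``None`` omits them from the request, while booleans serialize to the lowercase strings EC2 expects. A hedged usage sketch (the region, AMI id, and key ARN below are placeholders)::

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-west-2')
    copy = conn.copy_image(
        source_region='us-east-1',
        source_image_id='ami-12345678',  # placeholder AMI
        name='encrypted-copy',
        encrypted=True,                  # serialized as Encrypted=true
        kms_key_id='arn:aws:kms:us-west-2:111122223333:key/example',
    )
    print(copy.image_id)
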
diff --git a/boto/emr/connection.py b/boto/emr/connection.py
index 87eafbde..7afc4e07 100644
--- a/boto/emr/connection.py
+++ b/boto/emr/connection.py
@@ -48,9 +48,11 @@ class EmrConnection(AWSQueryConnection):
'elasticmapreduce.us-east-1.amazonaws.com')
ResponseError = EmrResponseError
- # Constants for AWS Console debugging
- DebuggingJar = 's3n://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar'
- DebuggingArgs = 's3n://us-east-1.elasticmapreduce/libs/state-pusher/0.1/fetch'
+
+
+ # Constants for AWS Console debugging
+ DebuggingJar = 's3://{region_name}.elasticmapreduce/libs/script-runner/script-runner.jar'
+ DebuggingArgs = 's3://{region_name}.elasticmapreduce/libs/state-pusher/0.1/fetch'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
@@ -411,7 +413,7 @@ class EmrConnection(AWSQueryConnection):
action_on_failure='TERMINATE_JOB_FLOW', keep_alive=False,
enable_debugging=False,
hadoop_version=None,
- steps=[],
+ steps=None,
bootstrap_actions=[],
instance_groups=None,
additional_info=None,
@@ -508,6 +510,7 @@ class EmrConnection(AWSQueryConnection):
:rtype: str
:return: The jobflow id
"""
+ steps = steps or []
params = {}
if action_on_failure:
params['ActionOnFailure'] = action_on_failure
@@ -547,8 +550,8 @@ class EmrConnection(AWSQueryConnection):
debugging_step = JarStep(name='Setup Hadoop Debugging',
action_on_failure='TERMINATE_JOB_FLOW',
main_class=None,
- jar=self.DebuggingJar,
- step_args=self.DebuggingArgs)
+ jar=self.DebuggingJar.format(region_name=self.region.name),
+ step_args=self.DebuggingArgs.format(region_name=self.region.name))
steps.insert(0, debugging_step)
# Step args
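
The debugging constants are now templates filled in with the connection's own region rather than hard-coded to us-east-1. For a connection in eu-west-1, the formatted value comes out as::

    jar = 's3://{region_name}.elasticmapreduce/libs/script-runner/script-runner.jar'
    print(jar.format(region_name='eu-west-1'))
    # s3://eu-west-1.elasticmapreduce/libs/script-runner/script-runner.jar
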
diff --git a/boto/endpoints.json b/boto/endpoints.json
index ef23f0c1..3d23611b 100644
--- a/boto/endpoints.json
+++ b/boto/endpoints.json
@@ -5,19 +5,19 @@
"ap-southeast-1": "autoscaling.ap-southeast-1.amazonaws.com",
"ap-southeast-2": "autoscaling.ap-southeast-2.amazonaws.com",
"cn-north-1": "autoscaling.cn-north-1.amazonaws.com.cn",
+ "eu-central-1": "autoscaling.eu-central-1.amazonaws.com",
"eu-west-1": "autoscaling.eu-west-1.amazonaws.com",
"sa-east-1": "autoscaling.sa-east-1.amazonaws.com",
"us-east-1": "autoscaling.us-east-1.amazonaws.com",
"us-gov-west-1": "autoscaling.us-gov-west-1.amazonaws.com",
"us-west-1": "autoscaling.us-west-1.amazonaws.com",
- "us-west-2": "autoscaling.us-west-2.amazonaws.com",
- "eu-central-1": "autoscaling.eu-central-1.amazonaws.com"
+ "us-west-2": "autoscaling.us-west-2.amazonaws.com"
},
"awslambda": {
- "us-east-1": "lambda.us-east-1.amazonaws.com",
- "us-west-2": "lambda.us-west-2.amazonaws.com",
+ "ap-northeast-1": "lambda.ap-northeast-1.amazonaws.com",
"eu-west-1": "lambda.eu-west-1.amazonaws.com",
- "ap-northeast-1": "lambda.ap-northeast-1.amazonaws.com"
+ "us-east-1": "lambda.us-east-1.amazonaws.com",
+ "us-west-2": "lambda.us-west-2.amazonaws.com"
},
"cloudformation": {
"ap-northeast-1": "cloudformation.ap-northeast-1.amazonaws.com",
@@ -25,68 +25,71 @@
"ap-southeast-1": "cloudformation.ap-southeast-1.amazonaws.com",
"ap-southeast-2": "cloudformation.ap-southeast-2.amazonaws.com",
"cn-north-1": "cloudformation.cn-north-1.amazonaws.com.cn",
+ "eu-central-1": "cloudformation.eu-central-1.amazonaws.com",
"eu-west-1": "cloudformation.eu-west-1.amazonaws.com",
"sa-east-1": "cloudformation.sa-east-1.amazonaws.com",
"us-east-1": "cloudformation.us-east-1.amazonaws.com",
"us-gov-west-1": "cloudformation.us-gov-west-1.amazonaws.com",
"us-west-1": "cloudformation.us-west-1.amazonaws.com",
- "us-west-2": "cloudformation.us-west-2.amazonaws.com",
- "eu-central-1": "cloudformation.eu-central-1.amazonaws.com"
+ "us-west-2": "cloudformation.us-west-2.amazonaws.com"
},
"cloudfront": {
"ap-northeast-1": "cloudfront.amazonaws.com",
"ap-northeast-2": "cloudfront.amazonaws.com",
"ap-southeast-1": "cloudfront.amazonaws.com",
"ap-southeast-2": "cloudfront.amazonaws.com",
+ "eu-central-1": "cloudfront.amazonaws.com",
"eu-west-1": "cloudfront.amazonaws.com",
"sa-east-1": "cloudfront.amazonaws.com",
"us-east-1": "cloudfront.amazonaws.com",
"us-west-1": "cloudfront.amazonaws.com",
- "us-west-2": "cloudfront.amazonaws.com",
- "eu-central-1": "cloudfront.amazonaws.com"
+ "us-west-2": "cloudfront.amazonaws.com"
},
"cloudhsm": {
+ "ap-northeast-1": "cloudhsm.ap-northeast-1.amazonaws.com",
+ "ap-southeast-1": "cloudhsm.ap-southeast-1.amazonaws.com",
+ "ap-southeast-2": "cloudhsm.ap-southeast-2.amazonaws.com",
+ "eu-central-1": "cloudhsm.eu-central-1.amazonaws.com",
+ "eu-west-1": "cloudhsm.eu-west-1.amazonaws.com",
"us-east-1": "cloudhsm.us-east-1.amazonaws.com",
"us-gov-west-1": "cloudhsm.us-gov-west-1.amazonaws.com",
- "us-west-2": "cloudhsm.us-west-2.amazonaws.com",
- "eu-west-1": "cloudhsm.eu-west-1.amazonaws.com",
- "eu-central-1": "cloudhsm.eu-central-1.amazonaws.com",
- "ap-southeast-2": "cloudhsm.ap-southeast-2.amazonaws.com"
+ "us-west-2": "cloudhsm.us-west-2.amazonaws.com"
},
"cloudsearch": {
+ "ap-northeast-1": "cloudsearch.ap-northeast-1.amazonaws.com",
"ap-southeast-1": "cloudsearch.ap-southeast-1.amazonaws.com",
"ap-southeast-2": "cloudsearch.ap-southeast-2.amazonaws.com",
- "ap-northeast-1": "cloudsearch.ap-northeast-1.amazonaws.com",
- "sa-east-1": "cloudsearch.sa-east-1.amazonaws.com",
+ "eu-central-1": "cloudsearch.eu-central-1.amazonaws.com",
"eu-west-1": "cloudsearch.eu-west-1.amazonaws.com",
+ "sa-east-1": "cloudsearch.sa-east-1.amazonaws.com",
"us-east-1": "cloudsearch.us-east-1.amazonaws.com",
"us-west-1": "cloudsearch.us-west-1.amazonaws.com",
- "us-west-2": "cloudsearch.us-west-2.amazonaws.com",
- "eu-central-1": "cloudsearch.eu-central-1.amazonaws.com"
+ "us-west-2": "cloudsearch.us-west-2.amazonaws.com"
},
"cloudsearchdomain": {
+ "ap-northeast-1": "cloudsearch.ap-northeast-1.amazonaws.com",
"ap-southeast-1": "cloudsearch.ap-southeast-1.amazonaws.com",
"ap-southeast-2": "cloudsearch.ap-southeast-2.amazonaws.com",
- "ap-northeast-1": "cloudsearch.ap-northeast-1.amazonaws.com",
- "sa-east-1": "cloudsearch.sa-east-1.amazonaws.com",
+ "eu-central-1": "cloudsearch.eu-central-1.amazonaws.com",
"eu-west-1": "cloudsearch.eu-west-1.amazonaws.com",
+ "sa-east-1": "cloudsearch.sa-east-1.amazonaws.com",
"us-east-1": "cloudsearch.us-east-1.amazonaws.com",
"us-west-1": "cloudsearch.us-west-1.amazonaws.com",
- "us-west-2": "cloudsearch.us-west-2.amazonaws.com",
- "eu-central-1": "cloudsearch.eu-central-1.amazonaws.com"
+ "us-west-2": "cloudsearch.us-west-2.amazonaws.com"
},
"cloudtrail": {
"ap-northeast-1": "cloudtrail.ap-northeast-1.amazonaws.com",
"ap-northeast-2": "cloudtrail.ap-northeast-2.amazonaws.com",
"ap-southeast-1": "cloudtrail.ap-southeast-1.amazonaws.com",
"ap-southeast-2": "cloudtrail.ap-southeast-2.amazonaws.com",
+ "cn-north-1": "cloudtrail.cn-north-1.amazonaws.com.cn",
+ "eu-central-1": "cloudtrail.eu-central-1.amazonaws.com",
"eu-west-1": "cloudtrail.eu-west-1.amazonaws.com",
"sa-east-1": "cloudtrail.sa-east-1.amazonaws.com",
"us-east-1": "cloudtrail.us-east-1.amazonaws.com",
"us-gov-west-1": "cloudtrail.us-gov-west-1.amazonaws.com",
"us-west-1": "cloudtrail.us-west-1.amazonaws.com",
- "us-west-2": "cloudtrail.us-west-2.amazonaws.com",
- "eu-central-1": "cloudtrail.eu-central-1.amazonaws.com"
+ "us-west-2": "cloudtrail.us-west-2.amazonaws.com"
},
"cloudwatch": {
"ap-northeast-1": "monitoring.ap-northeast-1.amazonaws.com",
@@ -94,27 +97,32 @@
"ap-southeast-1": "monitoring.ap-southeast-1.amazonaws.com",
"ap-southeast-2": "monitoring.ap-southeast-2.amazonaws.com",
"cn-north-1": "monitoring.cn-north-1.amazonaws.com.cn",
+ "eu-central-1": "monitoring.eu-central-1.amazonaws.com",
"eu-west-1": "monitoring.eu-west-1.amazonaws.com",
"sa-east-1": "monitoring.sa-east-1.amazonaws.com",
"us-east-1": "monitoring.us-east-1.amazonaws.com",
"us-gov-west-1": "monitoring.us-gov-west-1.amazonaws.com",
"us-west-1": "monitoring.us-west-1.amazonaws.com",
- "us-west-2": "monitoring.us-west-2.amazonaws.com",
- "eu-central-1": "monitoring.eu-central-1.amazonaws.com"
+ "us-west-2": "monitoring.us-west-2.amazonaws.com"
},
"codedeploy": {
+ "ap-northeast-1": "codedeploy.ap-northeast-1.amazonaws.com",
+ "ap-northeast-2": "codedeploy.ap-northeast-2.amazonaws.com",
+ "ap-southeast-1": "codedeploy.ap-southeast-1.amazonaws.com",
"ap-southeast-2": "codedeploy.ap-southeast-2.amazonaws.com",
+ "eu-central-1": "codedeploy.eu-central-1.amazonaws.com",
"eu-west-1": "codedeploy.eu-west-1.amazonaws.com",
"us-east-1": "codedeploy.us-east-1.amazonaws.com",
- "us-west-2": "codedeploy.us-west-2.amazonaws.com",
- "eu-west-1": "codedeploy.eu-west-1.amazonaws.com",
- "ap-southeast-2": "codedeploy.ap-southeast-2.amazonaws.com"
+ "us-west-1": "codedeploy.us-west-1.amazonaws.com",
+ "us-west-2": "codedeploy.us-west-2.amazonaws.com"
},
"cognito-identity": {
+ "ap-northeast-1": "cognito-identity.ap-northeast-1.amazonaws.com",
"eu-west-1": "cognito-identity.eu-west-1.amazonaws.com",
"us-east-1": "cognito-identity.us-east-1.amazonaws.com"
},
"cognito-sync": {
+ "ap-northeast-1": "cognito-sync.ap-northeast-1.amazonaws.com",
"eu-west-1": "cognito-sync.eu-west-1.amazonaws.com",
"us-east-1": "cognito-sync.us-east-1.amazonaws.com"
},
@@ -130,23 +138,24 @@
"us-west-2": "config.us-west-2.amazonaws.com"
},
"datapipeline": {
- "us-east-1": "datapipeline.us-east-1.amazonaws.com",
- "us-west-2": "datapipeline.us-west-2.amazonaws.com",
- "eu-west-1": "datapipeline.eu-west-1.amazonaws.com",
+ "ap-northeast-1": "datapipeline.ap-northeast-1.amazonaws.com",
"ap-southeast-2": "datapipeline.ap-southeast-2.amazonaws.com",
- "ap-northeast-1": "datapipeline.ap-northeast-1.amazonaws.com"
+ "eu-west-1": "datapipeline.eu-west-1.amazonaws.com",
+ "us-east-1": "datapipeline.us-east-1.amazonaws.com",
+ "us-west-2": "datapipeline.us-west-2.amazonaws.com"
},
"directconnect": {
"ap-northeast-1": "directconnect.ap-northeast-1.amazonaws.com",
"ap-northeast-2": "directconnect.ap-northeast-2.amazonaws.com",
"ap-southeast-1": "directconnect.ap-southeast-1.amazonaws.com",
"ap-southeast-2": "directconnect.ap-southeast-2.amazonaws.com",
+ "cn-north-1": "directconnect.cn-north-1.amazonaws.com.cn",
+ "eu-central-1": "directconnect.eu-central-1.amazonaws.com",
"eu-west-1": "directconnect.eu-west-1.amazonaws.com",
"sa-east-1": "directconnect.sa-east-1.amazonaws.com",
"us-east-1": "directconnect.us-east-1.amazonaws.com",
"us-west-1": "directconnect.us-west-1.amazonaws.com",
- "us-west-2": "directconnect.us-west-2.amazonaws.com",
- "eu-central-1": "directconnect.eu-central-1.amazonaws.com"
+ "us-west-2": "directconnect.us-west-2.amazonaws.com"
},
"dynamodb": {
"ap-northeast-1": "dynamodb.ap-northeast-1.amazonaws.com",
@@ -154,13 +163,13 @@
"ap-southeast-1": "dynamodb.ap-southeast-1.amazonaws.com",
"ap-southeast-2": "dynamodb.ap-southeast-2.amazonaws.com",
"cn-north-1": "dynamodb.cn-north-1.amazonaws.com.cn",
+ "eu-central-1": "dynamodb.eu-central-1.amazonaws.com",
"eu-west-1": "dynamodb.eu-west-1.amazonaws.com",
"sa-east-1": "dynamodb.sa-east-1.amazonaws.com",
"us-east-1": "dynamodb.us-east-1.amazonaws.com",
"us-gov-west-1": "dynamodb.us-gov-west-1.amazonaws.com",
"us-west-1": "dynamodb.us-west-1.amazonaws.com",
- "us-west-2": "dynamodb.us-west-2.amazonaws.com",
- "eu-central-1": "dynamodb.eu-central-1.amazonaws.com"
+ "us-west-2": "dynamodb.us-west-2.amazonaws.com"
},
"ec2": {
"ap-northeast-1": "ec2.ap-northeast-1.amazonaws.com",
@@ -168,20 +177,21 @@
"ap-southeast-1": "ec2.ap-southeast-1.amazonaws.com",
"ap-southeast-2": "ec2.ap-southeast-2.amazonaws.com",
"cn-north-1": "ec2.cn-north-1.amazonaws.com.cn",
+ "eu-central-1": "ec2.eu-central-1.amazonaws.com",
"eu-west-1": "ec2.eu-west-1.amazonaws.com",
"sa-east-1": "ec2.sa-east-1.amazonaws.com",
"us-east-1": "ec2.us-east-1.amazonaws.com",
"us-gov-west-1": "ec2.us-gov-west-1.amazonaws.com",
"us-west-1": "ec2.us-west-1.amazonaws.com",
- "us-west-2": "ec2.us-west-2.amazonaws.com",
- "eu-central-1": "ec2.eu-central-1.amazonaws.com"
+ "us-west-2": "ec2.us-west-2.amazonaws.com"
},
"ec2containerservice": {
- "us-east-1": "ecs.us-east-1.amazonaws.com",
- "us-west-2": "ecs.us-west-2.amazonaws.com",
- "eu-west-1": "ecs.eu-west-1.amazonaws.com",
"ap-northeast-1": "ecs.ap-northeast-1.amazonaws.com",
- "ap-southeast-2": "ecs.ap-southeast-2.amazonaws.com"
+ "ap-southeast-2": "ecs.ap-southeast-2.amazonaws.com",
+ "eu-west-1": "ecs.eu-west-1.amazonaws.com",
+ "us-east-1": "ecs.us-east-1.amazonaws.com",
+ "us-west-1": "ecs.us-west-1.amazonaws.com",
+ "us-west-2": "ecs.us-west-2.amazonaws.com"
},
"elasticache": {
"ap-northeast-1": "elasticache.ap-northeast-1.amazonaws.com",
@@ -189,25 +199,26 @@
"ap-southeast-1": "elasticache.ap-southeast-1.amazonaws.com",
"ap-southeast-2": "elasticache.ap-southeast-2.amazonaws.com",
"cn-north-1": "elasticache.cn-north-1.amazonaws.com.cn",
+ "eu-central-1": "elasticache.eu-central-1.amazonaws.com",
"eu-west-1": "elasticache.eu-west-1.amazonaws.com",
"sa-east-1": "elasticache.sa-east-1.amazonaws.com",
"us-east-1": "elasticache.us-east-1.amazonaws.com",
"us-gov-west-1": "elasticache.us-gov-west-1.amazonaws.com",
"us-west-1": "elasticache.us-west-1.amazonaws.com",
- "us-west-2": "elasticache.us-west-2.amazonaws.com",
- "eu-central-1": "elasticache.eu-central-1.amazonaws.com"
+ "us-west-2": "elasticache.us-west-2.amazonaws.com"
},
"elasticbeanstalk": {
"ap-northeast-1": "elasticbeanstalk.ap-northeast-1.amazonaws.com",
"ap-northeast-2": "elasticbeanstalk.ap-northeast-2.amazonaws.com",
"ap-southeast-1": "elasticbeanstalk.ap-southeast-1.amazonaws.com",
"ap-southeast-2": "elasticbeanstalk.ap-southeast-2.amazonaws.com",
+ "cn-north-1": "elasticbeanstalk.cn-north-1.amazonaws.com.cn",
+ "eu-central-1": "elasticbeanstalk.eu-central-1.amazonaws.com",
"eu-west-1": "elasticbeanstalk.eu-west-1.amazonaws.com",
"sa-east-1": "elasticbeanstalk.sa-east-1.amazonaws.com",
"us-east-1": "elasticbeanstalk.us-east-1.amazonaws.com",
"us-west-1": "elasticbeanstalk.us-west-1.amazonaws.com",
- "us-west-2": "elasticbeanstalk.us-west-2.amazonaws.com",
- "eu-central-1": "elasticbeanstalk.eu-central-1.amazonaws.com"
+ "us-west-2": "elasticbeanstalk.us-west-2.amazonaws.com"
},
"elasticloadbalancing": {
"ap-northeast-1": "elasticloadbalancing.ap-northeast-1.amazonaws.com",
@@ -215,13 +226,13 @@
"ap-southeast-1": "elasticloadbalancing.ap-southeast-1.amazonaws.com",
"ap-southeast-2": "elasticloadbalancing.ap-southeast-2.amazonaws.com",
"cn-north-1": "elasticloadbalancing.cn-north-1.amazonaws.com.cn",
+ "eu-central-1": "elasticloadbalancing.eu-central-1.amazonaws.com",
"eu-west-1": "elasticloadbalancing.eu-west-1.amazonaws.com",
"sa-east-1": "elasticloadbalancing.sa-east-1.amazonaws.com",
"us-east-1": "elasticloadbalancing.us-east-1.amazonaws.com",
"us-gov-west-1": "elasticloadbalancing.us-gov-west-1.amazonaws.com",
"us-west-1": "elasticloadbalancing.us-west-1.amazonaws.com",
- "us-west-2": "elasticloadbalancing.us-west-2.amazonaws.com",
- "eu-central-1": "elasticloadbalancing.eu-central-1.amazonaws.com"
+ "us-west-2": "elasticloadbalancing.us-west-2.amazonaws.com"
},
"elasticmapreduce": {
"ap-northeast-1": "ap-northeast-1.elasticmapreduce.amazonaws.com",
@@ -229,13 +240,13 @@
"ap-southeast-1": "ap-southeast-1.elasticmapreduce.amazonaws.com",
"ap-southeast-2": "ap-southeast-2.elasticmapreduce.amazonaws.com",
"cn-north-1": "elasticmapreduce.cn-north-1.amazonaws.com.cn",
- "eu-west-1": "elasticmapreduce.eu-west-1.amazonaws.com",
+ "eu-central-1": "elasticmapreduce.eu-central-1.amazonaws.com",
+ "eu-west-1": "eu-west-1.elasticmapreduce.amazonaws.com",
"sa-east-1": "sa-east-1.elasticmapreduce.amazonaws.com",
"us-east-1": "elasticmapreduce.us-east-1.amazonaws.com",
- "us-gov-west-1": "us-gov-west-1.elasticmapreduce.amazonaws.com",
+ "us-gov-west-1": "elasticmapreduce.us-gov-west-1.amazonaws.com",
"us-west-1": "us-west-1.elasticmapreduce.amazonaws.com",
- "us-west-2": "us-west-2.elasticmapreduce.amazonaws.com",
- "eu-central-1": "elasticmapreduce.eu-central-1.amazonaws.com"
+ "us-west-2": "us-west-2.elasticmapreduce.amazonaws.com"
},
"elastictranscoder": {
"ap-northeast-1": "elastictranscoder.ap-northeast-1.amazonaws.com",
@@ -250,12 +261,12 @@
"ap-northeast-2": "glacier.ap-northeast-2.amazonaws.com",
"ap-southeast-2": "glacier.ap-southeast-2.amazonaws.com",
"cn-north-1": "glacier.cn-north-1.amazonaws.com.cn",
+ "eu-central-1": "glacier.eu-central-1.amazonaws.com",
"eu-west-1": "glacier.eu-west-1.amazonaws.com",
"us-east-1": "glacier.us-east-1.amazonaws.com",
+ "us-gov-west-1": "glacier.us-gov-west-1.amazonaws.com",
"us-west-1": "glacier.us-west-1.amazonaws.com",
- "us-west-2": "glacier.us-west-2.amazonaws.com",
- "eu-central-1": "glacier.eu-central-1.amazonaws.com",
- "us-gov-west-1": "glacier.us-gov-west-1.amazonaws.com"
+ "us-west-2": "glacier.us-west-2.amazonaws.com"
},
"iam": {
"ap-northeast-1": "iam.amazonaws.com",
@@ -284,70 +295,73 @@
"us-west-2": "importexport.amazonaws.com"
},
"kinesis": {
- "us-east-1": "kinesis.us-east-1.amazonaws.com",
- "us-west-1": "kinesis.us-west-1.amazonaws.com",
- "us-west-2": "kinesis.us-west-2.amazonaws.com",
- "eu-west-1": "kinesis.eu-west-1.amazonaws.com",
- "ap-southeast-1": "kinesis.ap-southeast-1.amazonaws.com",
- "ap-southeast-2": "kinesis.ap-southeast-2.amazonaws.com",
"ap-northeast-1": "kinesis.ap-northeast-1.amazonaws.com",
"ap-northeast-2": "kinesis.ap-northeast-2.amazonaws.com",
- "eu-central-1": "kinesis.eu-central-1.amazonaws.com"
+ "ap-southeast-1": "kinesis.ap-southeast-1.amazonaws.com",
+ "ap-southeast-2": "kinesis.ap-southeast-2.amazonaws.com",
+ "cn-north-1": "kinesis.cn-north-1.amazonaws.com.cn",
+ "eu-central-1": "kinesis.eu-central-1.amazonaws.com",
+ "sa-east-1": "kinesis.sa-east-1.amazonaws.com",
+ "eu-west-1": "kinesis.eu-west-1.amazonaws.com",
+ "us-east-1": "kinesis.us-east-1.amazonaws.com",
+ "us-west-1": "kinesis.us-west-1.amazonaws.com",
+ "us-west-2": "kinesis.us-west-2.amazonaws.com"
},
"kms": {
+ "ap-northeast-1": "kms.ap-northeast-1.amazonaws.com",
+ "ap-northeast-2": "kms.ap-northeast-2.amazonaws.com",
+ "ap-southeast-1": "kms.ap-southeast-1.amazonaws.com",
+ "ap-southeast-2": "kms.ap-southeast-2.amazonaws.com",
+ "eu-central-1": "kms.eu-central-1.amazonaws.com",
+ "eu-west-1": "kms.eu-west-1.amazonaws.com",
+ "sa-east-1": "kms.sa-east-1.amazonaws.com",
"us-east-1": "kms.us-east-1.amazonaws.com",
"us-gov-west-1": "kms.us-gov-west-1.amazonaws.com",
"us-west-1": "kms.us-west-1.amazonaws.com",
- "us-west-2": "kms.us-west-2.amazonaws.com",
- "eu-west-1": "kms.eu-west-1.amazonaws.com",
- "eu-central-1": "kms.eu-central-1.amazonaws.com",
- "ap-southeast-2": "kms.ap-southeast-2.amazonaws.com",
- "ap-southeast-1": "kms.ap-southeast-1.amazonaws.com",
- "ap-northeast-1": "kms.ap-northeast-1.amazonaws.com",
- "ap-northeast-2": "kms.ap-northeast-2.amazonaws.com",
- "sa-east-1": "kms.sa-east-1.amazonaws.com"
+ "us-west-2": "kms.us-west-2.amazonaws.com"
},
"logs": {
- "us-east-1": "logs.us-east-1.amazonaws.com",
- "us-west-2": "logs.us-west-2.amazonaws.com",
- "us-west-1": "logs.us-west-1.amazonaws.com",
- "eu-west-1": "logs.eu-west-1.amazonaws.com",
- "eu-central-1": "logs.eu-central-1.amazonaws.com",
+ "ap-northeast-1": "logs.ap-northeast-1.amazonaws.com",
+ "ap-northeast-2": "logs.ap-northeast-2.amazonaws.com",
"ap-southeast-1": "logs.ap-southeast-1.amazonaws.com",
"ap-southeast-2": "logs.ap-southeast-2.amazonaws.com",
- "ap-northeast-1": "logs.ap-northeast-1.amazonaws.com",
- "ap-northeast-2": "logs.ap-northeast-2.amazonaws.com"
- },
- "opsworks": {
- "us-east-1": "opsworks.us-east-1.amazonaws.com"
+ "eu-central-1": "logs.eu-central-1.amazonaws.com",
+ "eu-west-1": "logs.eu-west-1.amazonaws.com",
+ "us-east-1": "logs.us-east-1.amazonaws.com",
+ "us-west-1": "logs.us-west-1.amazonaws.com",
+ "us-west-2": "logs.us-west-2.amazonaws.com"
},
"machinelearning": {
+ "eu-west-1": "machinelearning.eu-west-1.amazonaws.com",
"us-east-1": "machinelearning.us-east-1.amazonaws.com"
},
+ "opsworks": {
+ "us-east-1": "opsworks.us-east-1.amazonaws.com"
+ },
"rds": {
"ap-northeast-1": "rds.ap-northeast-1.amazonaws.com",
"ap-northeast-2": "rds.ap-northeast-2.amazonaws.com",
"ap-southeast-1": "rds.ap-southeast-1.amazonaws.com",
"ap-southeast-2": "rds.ap-southeast-2.amazonaws.com",
"cn-north-1": "rds.cn-north-1.amazonaws.com.cn",
+ "eu-central-1": "rds.eu-central-1.amazonaws.com",
"eu-west-1": "rds.eu-west-1.amazonaws.com",
"sa-east-1": "rds.sa-east-1.amazonaws.com",
"us-east-1": "rds.amazonaws.com",
"us-gov-west-1": "rds.us-gov-west-1.amazonaws.com",
"us-west-1": "rds.us-west-1.amazonaws.com",
- "us-west-2": "rds.us-west-2.amazonaws.com",
- "eu-central-1": "rds.eu-central-1.amazonaws.com"
+ "us-west-2": "rds.us-west-2.amazonaws.com"
},
"redshift": {
"ap-northeast-1": "redshift.ap-northeast-1.amazonaws.com",
"ap-northeast-2": "redshift.ap-northeast-2.amazonaws.com",
"ap-southeast-1": "redshift.ap-southeast-1.amazonaws.com",
"ap-southeast-2": "redshift.ap-southeast-2.amazonaws.com",
+ "eu-central-1": "redshift.eu-central-1.amazonaws.com",
"eu-west-1": "redshift.eu-west-1.amazonaws.com",
"us-east-1": "redshift.us-east-1.amazonaws.com",
- "us-west-2": "redshift.us-west-2.amazonaws.com",
- "eu-central-1": "redshift.eu-central-1.amazonaws.com",
- "us-gov-west-1": "redshift.us-gov-west-1.amazonaws.com"
+ "us-gov-west-1": "redshift.us-gov-west-1.amazonaws.com",
+ "us-west-2": "redshift.us-west-2.amazonaws.com"
},
"route53": {
"ap-northeast-1": "route53.amazonaws.com",
@@ -366,17 +380,17 @@
},
"s3": {
"ap-northeast-1": "s3-ap-northeast-1.amazonaws.com",
- "ap-northeast-2": "s3-ap-northeast-2.amazonaws.com",
+ "ap-northeast-2": "s3.ap-northeast-2.amazonaws.com",
"ap-southeast-1": "s3-ap-southeast-1.amazonaws.com",
"ap-southeast-2": "s3-ap-southeast-2.amazonaws.com",
"cn-north-1": "s3.cn-north-1.amazonaws.com.cn",
+ "eu-central-1": "s3.eu-central-1.amazonaws.com",
"eu-west-1": "s3-eu-west-1.amazonaws.com",
"sa-east-1": "s3-sa-east-1.amazonaws.com",
"us-east-1": "s3.amazonaws.com",
"us-gov-west-1": "s3-us-gov-west-1.amazonaws.com",
"us-west-1": "s3-us-west-1.amazonaws.com",
- "us-west-2": "s3-us-west-2.amazonaws.com",
- "eu-central-1": "s3.eu-central-1.amazonaws.com"
+ "us-west-2": "s3-us-west-2.amazonaws.com"
},
"sdb": {
"ap-northeast-1": "sdb.ap-northeast-1.amazonaws.com",
@@ -399,13 +413,13 @@
"ap-southeast-1": "sns.ap-southeast-1.amazonaws.com",
"ap-southeast-2": "sns.ap-southeast-2.amazonaws.com",
"cn-north-1": "sns.cn-north-1.amazonaws.com.cn",
+ "eu-central-1": "sns.eu-central-1.amazonaws.com",
"eu-west-1": "sns.eu-west-1.amazonaws.com",
"sa-east-1": "sns.sa-east-1.amazonaws.com",
"us-east-1": "sns.us-east-1.amazonaws.com",
"us-gov-west-1": "sns.us-gov-west-1.amazonaws.com",
"us-west-1": "sns.us-west-1.amazonaws.com",
- "us-west-2": "sns.us-west-2.amazonaws.com",
- "eu-central-1": "sns.eu-central-1.amazonaws.com"
+ "us-west-2": "sns.us-west-2.amazonaws.com"
},
"sqs": {
"ap-northeast-1": "ap-northeast-1.queue.amazonaws.com",
@@ -413,24 +427,25 @@
"ap-southeast-1": "ap-southeast-1.queue.amazonaws.com",
"ap-southeast-2": "ap-southeast-2.queue.amazonaws.com",
"cn-north-1": "cn-north-1.queue.amazonaws.com.cn",
+ "eu-central-1": "eu-central-1.queue.amazonaws.com",
"eu-west-1": "eu-west-1.queue.amazonaws.com",
"sa-east-1": "sa-east-1.queue.amazonaws.com",
"us-east-1": "queue.amazonaws.com",
"us-gov-west-1": "us-gov-west-1.queue.amazonaws.com",
"us-west-1": "us-west-1.queue.amazonaws.com",
- "us-west-2": "us-west-2.queue.amazonaws.com",
- "eu-central-1": "eu-central-1.queue.amazonaws.com"
+ "us-west-2": "us-west-2.queue.amazonaws.com"
},
"storagegateway": {
"ap-northeast-1": "storagegateway.ap-northeast-1.amazonaws.com",
"ap-southeast-1": "storagegateway.ap-southeast-1.amazonaws.com",
"ap-southeast-2": "storagegateway.ap-southeast-2.amazonaws.com",
+ "cn-north-1": "storagegateway.cn-north-1.amazonaws.com.cn",
+ "eu-central-1": "storagegateway.eu-central-1.amazonaws.com",
"eu-west-1": "storagegateway.eu-west-1.amazonaws.com",
"sa-east-1": "storagegateway.sa-east-1.amazonaws.com",
"us-east-1": "storagegateway.us-east-1.amazonaws.com",
"us-west-1": "storagegateway.us-west-1.amazonaws.com",
- "us-west-2": "storagegateway.us-west-2.amazonaws.com",
- "eu-central-1": "storagegateway.eu-central-1.amazonaws.com"
+ "us-west-2": "storagegateway.us-west-2.amazonaws.com"
},
"sts": {
"ap-northeast-1": "sts.amazonaws.com",
@@ -438,13 +453,13 @@
"ap-southeast-1": "sts.amazonaws.com",
"ap-southeast-2": "sts.amazonaws.com",
"cn-north-1": "sts.cn-north-1.amazonaws.com.cn",
+ "eu-central-1": "sts.amazonaws.com",
"eu-west-1": "sts.amazonaws.com",
"sa-east-1": "sts.amazonaws.com",
"us-east-1": "sts.amazonaws.com",
"us-gov-west-1": "sts.us-gov-west-1.amazonaws.com",
"us-west-1": "sts.amazonaws.com",
- "us-west-2": "sts.amazonaws.com",
- "eu-central-1": "sts.amazonaws.com"
+ "us-west-2": "sts.amazonaws.com"
},
"support": {
"us-east-1": "support.us-east-1.amazonaws.com"
@@ -455,12 +470,12 @@
"ap-southeast-1": "swf.ap-southeast-1.amazonaws.com",
"ap-southeast-2": "swf.ap-southeast-2.amazonaws.com",
"cn-north-1": "swf.cn-north-1.amazonaws.com.cn",
+ "eu-central-1": "swf.eu-central-1.amazonaws.com",
"eu-west-1": "swf.eu-west-1.amazonaws.com",
"sa-east-1": "swf.sa-east-1.amazonaws.com",
"us-east-1": "swf.us-east-1.amazonaws.com",
"us-gov-west-1": "swf.us-gov-west-1.amazonaws.com",
"us-west-1": "swf.us-west-1.amazonaws.com",
- "us-west-2": "swf.us-west-2.amazonaws.com",
- "eu-central-1": "swf.eu-central-1.amazonaws.com"
+ "us-west-2": "swf.us-west-2.amazonaws.com"
}
}
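
The endpoint map is plain service -> region -> hostname data, so a resolved host is just a nested lookup. A sketch of reading it directly (boto does this internally through its region machinery)::

    import json

    with open('boto/endpoints.json') as f:
        endpoints = json.load(f)

    # For example, the newly added eu-central-1 autoscaling endpoint:
    print(endpoints['autoscaling']['eu-central-1'])
    # autoscaling.eu-central-1.amazonaws.com
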
diff --git a/boto/exception.py b/boto/exception.py
index 36c226fa..2f175979 100644
--- a/boto/exception.py
+++ b/boto/exception.py
@@ -571,3 +571,15 @@ class PleaseRetryException(Exception):
self.message,
self.response
)
+
+
+class InvalidInstanceMetadataError(Exception):
+ MSG = (
+ "You can set the 'metadata_service_num_attempts' "
+ "in your boto config file to increase the number "
+ "of times boto will attempt to retrieve "
+ "credentials from the instance metadata service."
+ )
+ def __init__(self, msg):
+ final_msg = msg + '\n' + self.MSG
+ super(InvalidInstanceMetadataError, self).__init__(final_msg)
diff --git a/boto/glacier/layer1.py b/boto/glacier/layer1.py
index 39136cf0..056d3b35 100644
--- a/boto/glacier/layer1.py
+++ b/boto/glacier/layer1.py
@@ -1273,7 +1273,7 @@ class Layer1(AWSAuthConnection):
'x-amz-sha256-tree-hash': tree_hash,
'Content-Range': 'bytes %d-%d/*' % byte_range}
response_headers = [('x-amz-sha256-tree-hash', u'TreeHash')]
- uri = 'vaults/%s/multipart-uploads/%s' % (vault_name, upload_id)
+ uri = 'vaults/%s/multipart-uploads/%s' % (str(vault_name), upload_id)
return self.make_request('PUT', uri, headers=headers,
data=part_data, ok_responses=(204,),
response_headers=response_headers)
diff --git a/boto/kinesis/layer1.py b/boto/kinesis/layer1.py
index f1910ff4..a58048cc 100644
--- a/boto/kinesis/layer1.py
+++ b/boto/kinesis/layer1.py
@@ -410,7 +410,11 @@ class KinesisConnection(AWSQueryConnection):
:param starting_sequence_number: The sequence number of the data record
in the shard from which to start reading from.
+        :returns: A dictionary containing a `ShardIterator` key whose
+            value is the shard-iterator token to pass to subsequent
+            read calls such as get_records.
"""
+
params = {
'StreamName': stream_name,
'ShardId': shard_id,
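
A hedged usage sketch of the documented return shape (the stream and shard names are placeholders)::

    from boto import kinesis

    conn = kinesis.connect_to_region('us-east-1')
    result = conn.get_shard_iterator('my-stream', 'shardId-000000000000',
                                     'TRIM_HORIZON')
    # The response is a dict with a single ShardIterator entry.
    iterator = result['ShardIterator']
    records = conn.get_records(iterator, limit=25)
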
diff --git a/boto/provider.py b/boto/provider.py
index 349a7a6c..0eefe42e 100644
--- a/boto/provider.py
+++ b/boto/provider.py
@@ -34,6 +34,7 @@ import boto
from boto import config
from boto.compat import expanduser
from boto.pyami.config import Config
+from boto.exception import InvalidInstanceMetadataError
from boto.gs.acl import ACL
from boto.gs.acl import CannedACLStrings as CannedGSACLStrings
from boto.s3.acl import CannedACLStrings as CannedS3ACLStrings
@@ -390,16 +391,47 @@ class Provider(object):
timeout=timeout, num_retries=attempts,
data='meta-data/iam/security-credentials/')
if metadata:
+ creds = self._get_credentials_from_metadata(metadata)
+ self._access_key = creds[0]
+ self._secret_key = creds[1]
+ self._security_token = creds[2]
+ expires_at = creds[3]
# I'm assuming there's only one role on the instance profile.
- security = list(metadata.values())[0]
- self._access_key = security['AccessKeyId']
- self._secret_key = self._convert_key_to_str(security['SecretAccessKey'])
- self._security_token = security['Token']
- expires_at = security['Expiration']
self._credential_expiry_time = datetime.strptime(
expires_at, "%Y-%m-%dT%H:%M:%SZ")
boto.log.debug("Retrieved credentials will expire in %s at: %s",
- self._credential_expiry_time - datetime.now(), expires_at)
+ self._credential_expiry_time - datetime.now(),
+ expires_at)
+
+ def _get_credentials_from_metadata(self, metadata):
+ # Given metadata, return a tuple of (access, secret, token, expiration)
+ # On errors, an InvalidInstanceMetadataError will be raised.
+ # The "metadata" is a lazy loaded dictionary means that it's possible
+ # to still encounter errors as we traverse through the metadata dict.
+ # We try to be careful and raise helpful error messages when this
+ # happens.
+ creds = list(metadata.values())[0]
+ if not isinstance(creds, dict):
+ # We want to special case a specific error condition which is
+ # where get_instance_metadata() returns an empty string on
+ # error conditions.
+ if creds == '':
+ msg = 'an empty string'
+ else:
+ msg = 'type: %s' % creds
+ raise InvalidInstanceMetadataError("Expected a dict type of "
+ "credentials instead received "
+ "%s" % (msg))
+ try:
+ access_key = creds['AccessKeyId']
+ secret_key = self._convert_key_to_str(creds['SecretAccessKey'])
+ security_token = creds['Token']
+ expires_at = creds['Expiration']
+ except KeyError as e:
+ raise InvalidInstanceMetadataError(
+ "Credentials from instance metadata missing "
+ "required key: %s" % e)
+ return access_key, secret_key, security_token, expires_at
def _convert_key_to_str(self, key):
if isinstance(key, six.text_type):
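
The refactor isolates the failure modes: a non-dict payload (the metadata helper returns an empty string on failure) or a missing key now raises ``InvalidInstanceMetadataError`` with guidance, instead of an opaque ``TypeError`` or ``KeyError``. A sketch of both paths, using an illustrative payload and assuming a ``Provider`` can be constructed in your environment::

    from boto.provider import Provider
    from boto.exception import InvalidInstanceMetadataError

    provider = Provider('aws')
    good = {'myrole': {'AccessKeyId': 'AKID...',
                       'SecretAccessKey': 'secret...',
                       'Token': 'token...',
                       'Expiration': '2016-04-28T10:00:00Z'}}
    print(provider._get_credentials_from_metadata(good))  # 4-tuple of creds

    try:
        # An empty string is what a failed metadata read looks like.
        provider._get_credentials_from_metadata({'myrole': ''})
    except InvalidInstanceMetadataError as e:
        print(e)  # names the problem and the metadata_service_num_attempts option
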
diff --git a/boto/pyami/config.py b/boto/pyami/config.py
index a2194898..e0b6e2f3 100644
--- a/boto/pyami/config.py
+++ b/boto/pyami/config.py
@@ -26,7 +26,7 @@ import warnings
import boto
-from boto.compat import expanduser, ConfigParser, StringIO
+from boto.compat import expanduser, ConfigParser, NoOptionError, NoSectionError, StringIO
# By default we use two locations for the boto configurations,
@@ -49,13 +49,11 @@ elif 'BOTO_PATH' in os.environ:
BotoConfigLocations.append(expanduser(path))
-class Config(ConfigParser):
+class Config(object):
def __init__(self, path=None, fp=None, do_load=True):
- # We don't use ``super`` here, because ``ConfigParser`` still uses
- # old-style classes.
- ConfigParser.__init__(self, {'working_dir': '/mnt/pyami',
- 'debug': '0'})
+ self._parser = ConfigParser({'working_dir': '/mnt/pyami',
+ 'debug': '0'})
if do_load:
if path:
self.load_from_path(path)
@@ -70,6 +68,21 @@ class Config(ConfigParser):
except IOError:
warnings.warn('Unable to load AWS_CREDENTIAL_FILE (%s)' % full_path)
+ def __setstate__(self, state):
+    # There are tests that verify that (transitively) a Config
+ # object can be pickled. Now that we're storing a _parser
+ # attribute and relying on __getattr__ to proxy requests,
+ # we need to implement setstate to ensure we don't get
+ # into recursive loops when looking up _parser when
+ # this object is unpickled.
+ self._parser = state['_parser']
+
+ def __getattr__(self, name):
+ return getattr(self._parser, name)
+
+ def has_option(self, *args, **kwargs):
+ return self._parser.has_option(*args, **kwargs)
+
def load_credential_file(self, path):
"""Load a credential file as is setup like the Java utilities"""
c_data = StringIO()
@@ -116,21 +129,21 @@ class Config(ConfigParser):
def get_instance(self, name, default=None):
try:
val = self.get('Instance', name)
- except:
+ except (NoOptionError, NoSectionError):
val = default
return val
def get_user(self, name, default=None):
try:
val = self.get('User', name)
- except:
+ except (NoOptionError, NoSectionError):
val = default
return val
def getint_user(self, name, default=0):
try:
val = self.getint('User', name)
- except:
+ except (NoOptionError, NoSectionError):
val = default
return val
@@ -139,24 +152,21 @@ class Config(ConfigParser):
def get(self, section, name, default=None):
try:
- val = ConfigParser.get(self, section, name)
- except:
- val = default
- return val
+ return self._parser.get(section, name)
+ except (NoOptionError, NoSectionError):
+ return default
def getint(self, section, name, default=0):
try:
- val = ConfigParser.getint(self, section, name)
- except:
- val = int(default)
- return val
+ return self._parser.getint(section, name)
+ except (NoOptionError, NoSectionError):
+ return int(default)
def getfloat(self, section, name, default=0.0):
try:
- val = ConfigParser.getfloat(self, section, name)
- except:
- val = float(default)
- return val
+ return self._parser.getfloat(section, name)
+ except (NoOptionError, NoSectionError):
+ return float(default)
def getbool(self, section, name, default=False):
if self.has_option(section, name):
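
``Config`` now wraps a ``ConfigParser`` instead of subclassing it; the typed getters fall back to their defaults only on the two expected lookup errors, and ``__getattr__``/``__setstate__`` keep attribute proxying and pickling working. A small usage sketch::

    import pickle
    from boto.compat import StringIO
    from boto.pyami.config import Config

    cfg = Config(fp=StringIO('[Boto]\nnum_retries = 3\n'))
    print(cfg.getint('Boto', 'num_retries'))      # 3
    print(cfg.getint('Boto', 'missing', 10))      # 10 (NoOptionError swallowed)
    print(cfg.get('NoSuchSection', 'x', 'dflt'))  # dflt (NoSectionError swallowed)

    # Pickling round-trips thanks to __setstate__.
    clone = pickle.loads(pickle.dumps(cfg))
    print(clone.getint('Boto', 'num_retries'))    # 3
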
diff --git a/boto/route53/connection.py b/boto/route53/connection.py
index 23e05ea5..a7e73779 100644
--- a/boto/route53/connection.py
+++ b/boto/route53/connection.py
@@ -211,8 +211,8 @@ class Route53Connection(AWSAuthConnection):
associate to is required.
:type vpc_region: str
- :param vpc_id: When creating a private hosted zone, the region of
- the associated VPC is required.
+ :param vpc_region: When creating a private hosted zone, the region
+ of the associated VPC is required.
"""
if caller_ref is None:
@@ -527,8 +527,8 @@ class Route53Connection(AWSAuthConnection):
associate to is required.
:type vpc_region: str
- :param vpc_id: When creating a private hosted zone, the region of
- the associated VPC is required.
+ :param vpc_region: When creating a private hosted zone, the region
+ of the associated VPC is required.
"""
zone = self.create_hosted_zone(name, private_zone=private_zone,
vpc_id=vpc_id, vpc_region=vpc_region)
@@ -584,20 +584,25 @@ class Route53Connection(AWSAuthConnection):
boto.log.debug("Saw HTTP status: %s" % response.status)
if response.status == 400:
- code = response.getheader('Code')
-
- if code:
+ body = response.read()
+
+ # We need to parse the error first
+ err = exception.DNSServerError(
+ response.status,
+ response.reason,
+ body)
+ if err.error_code:
# This is a case where we need to ignore a 400 error, as
# Route53 returns this. See
# http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html
- if 'PriorRequestNotComplete' in code:
- error = 'PriorRequestNotComplete'
- elif 'Throttling' in code:
- error = 'Throttling'
- else:
+ if not err.error_code in (
+ 'PriorRequestNotComplete',
+ 'Throttling',
+ 'ServiceUnavailable',
+ 'RequestExpired'):
return status
msg = "%s, retry attempt %s" % (
- error,
+ err.error_code,
i
)
next_sleep = min(random.random() * (2 ** i),
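
Instead of trusting a ``Code`` header, the retry loop now parses the 400 body into a ``DNSServerError`` and retries only the four retryable codes, with jittered, capped exponential backoff. A simplified sketch of the classification (not boto's exact code; the cap below is illustrative)::

    import random

    RETRYABLE = ('PriorRequestNotComplete', 'Throttling',
                 'ServiceUnavailable', 'RequestExpired')

    def retry_sleep(err, attempt, max_delay=15):
        # err is the DNSServerError parsed from the response body.
        if err.error_code not in RETRYABLE:
            return None  # caller should give up and surface the error
        return min(random.random() * (2 ** attempt), max_delay)
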
diff --git a/boto/s3/bucketlistresultset.py b/boto/s3/bucketlistresultset.py
index e9044276..d95ab685 100644
--- a/boto/s3/bucketlistresultset.py
+++ b/boto/s3/bucketlistresultset.py
@@ -80,6 +80,8 @@ def versioned_bucket_lister(bucket, prefix='', delimiter='',
for k in rs:
yield k
key_marker = rs.next_key_marker
+ if key_marker and encoding_type == "url":
+ key_marker = unquote_str(key_marker)
version_id_marker = rs.next_version_id_marker
more_results= rs.is_truncated
@@ -126,6 +128,8 @@ def multipart_upload_lister(bucket, key_marker='',
for k in rs:
yield k
key_marker = rs.next_key_marker
+ if key_marker and encoding_type == "url":
+ key_marker = unquote_str(key_marker)
upload_id_marker = rs.next_upload_id_marker
more_results= rs.is_truncated
diff --git a/boto/s3/connection.py b/boto/s3/connection.py
index dc2aa443..f8b19791 100644
--- a/boto/s3/connection.py
+++ b/boto/s3/connection.py
@@ -367,6 +367,9 @@ class S3Connection(AWSAuthConnection):
if version_id is not None:
params['VersionId'] = version_id
+ if response_headers is not None:
+ params.update(response_headers)
+
http_request = self.build_base_http_request(method, path, auth_path,
headers=headers, host=host,
params=params)
@@ -377,7 +380,7 @@ class S3Connection(AWSAuthConnection):
def generate_url(self, expires_in, method, bucket='', key='', headers=None,
query_auth=True, force_http=False, response_headers=None,
expires_in_absolute=False, version_id=None):
- if self._auth_handler.capability[0] == 'hmac-v4-s3':
+ if self._auth_handler.capability[0] == 'hmac-v4-s3' and query_auth:
# Handle the special sigv4 case
return self.generate_url_sigv4(expires_in, method, bucket=bucket,
key=key, headers=headers, force_http=force_http,
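
Before this guard, a SigV4-capable connection always took the presigning path, so ``query_auth=False`` could not produce a plain unsigned URL. A hedged example (bucket and key are placeholders)::

    import boto

    c = boto.connect_s3()
    url = c.generate_url(0, 'GET', bucket='mybucket', key='public/file.txt',
                         query_auth=False, force_http=True)
    # With the default calling format this is roughly:
    # http://mybucket.s3.amazonaws.com/public/file.txt
    print(url)
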
diff --git a/boto/s3/key.py b/boto/s3/key.py
index de865258..b202a447 100644
--- a/boto/s3/key.py
+++ b/boto/s3/key.py
@@ -1552,7 +1552,7 @@ class Key(object):
if cb and (cb_count <= 1 or i > 0) and data_len > 0:
cb(data_len, cb_size)
for alg in digesters:
- self.local_hashes[alg] = digesters[alg].digest()
+ self.local_hashes[alg] = digesters[alg].digest()
if self.size is None and not torrent and "Range" not in headers:
self.size = data_len
self.close()
diff --git a/boto/s3/lifecycle.py b/boto/s3/lifecycle.py
index 8ceb8795..bd864596 100644
--- a/boto/s3/lifecycle.py
+++ b/boto/s3/lifecycle.py
@@ -54,14 +54,21 @@ class Rule(object):
else:
# None or object
self.expiration = expiration
- self.transition = transition
+
+ # retain backwards compatibility
+ if isinstance(transition, Transition):
+ self.transition = Transitions()
+ self.transition.append(transition)
+ elif transition:
+ self.transition = transition
+ else:
+ self.transition = Transitions()
def __repr__(self):
return '<Rule: %s>' % self.id
def startElement(self, name, attrs, connection):
if name == 'Transition':
- self.transition = Transition()
return self.transition
elif name == 'Expiration':
self.expiration = Expiration()
@@ -139,25 +146,13 @@ class Transition(object):
in ISO 8601 format.
:ivar storage_class: The storage class to transition to. Valid
- values are GLACIER.
-
+ values are GLACIER, STANDARD_IA.
"""
def __init__(self, days=None, date=None, storage_class=None):
self.days = days
self.date = date
self.storage_class = storage_class
- def startElement(self, name, attrs, connection):
- return None
-
- def endElement(self, name, value, connection):
- if name == 'Days':
- self.days = int(value)
- elif name == 'Date':
- self.date = value
- elif name == 'StorageClass':
- self.storage_class = value
-
def __repr__(self):
if self.days is None:
how_long = "on: %s" % self.date
@@ -175,6 +170,86 @@ class Transition(object):
s += '</Transition>'
return s
+class Transitions(list):
+ """
+ A container for the transitions associated with a Lifecycle's Rule configuration.
+ """
+ def __init__(self):
+ self.transition_properties = 3
+ self.current_transition_property = 1
+ self.temp_days = None
+ self.temp_date = None
+ self.temp_storage_class = None
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'Days':
+ self.temp_days = int(value)
+ elif name == 'Date':
+ self.temp_date = value
+ elif name == 'StorageClass':
+ self.temp_storage_class = value
+
+        # The XML does not contain a <Transitions> tag;
+        # instead there are N sibling <Transition> tags with
+        # no surrounding hierarchy.
+ if self.current_transition_property == self.transition_properties:
+ self.append(Transition(self.temp_days, self.temp_date, self.temp_storage_class))
+ self.temp_days = self.temp_date = self.temp_storage_class = None
+ self.current_transition_property = 1
+ else:
+ self.current_transition_property += 1
+
+ def to_xml(self):
+ """
+ Returns a string containing the XML version of the Lifecycle
+ configuration as defined by S3.
+ """
+ s = ''
+ for transition in self:
+ s += transition.to_xml()
+ return s
+
+ def add_transition(self, days=None, date=None, storage_class=None):
+ """
+ Add a transition to this Lifecycle configuration. This only adds
+ the rule to the local copy. To install the new rule(s) on
+ the bucket, you need to pass this Lifecycle config object
+ to the configure_lifecycle method of the Bucket object.
+
+ :ivar days: The number of days until the object should be moved.
+
+ :ivar date: The date when the object should be moved. Should be
+ in ISO 8601 format.
+
+ :ivar storage_class: The storage class to transition to. Valid
+ values are GLACIER, STANDARD_IA.
+ """
+ transition = Transition(days, date, storage_class)
+ self.append(transition)
+
+ def __first_or_default(self, prop):
+ for transition in self:
+ return getattr(transition, prop)
+ return None
+
+ # maintain backwards compatibility so that we can continue utilizing
+ # 'rule.transition.days' syntax
+ @property
+ def days(self):
+ return self.__first_or_default('days')
+
+ @property
+ def date(self):
+ return self.__first_or_default('date')
+
+ @property
+ def storage_class(self):
+ return self.__first_or_default('storage_class')
+
+
class Lifecycle(list):
"""
A container for the rules associated with a Lifecycle configuration.
@@ -228,7 +303,7 @@ class Lifecycle(list):
that are subject to the rule. The value must be a non-zero
positive integer. An Expiration object instance is also accepted.
- :type transition: Transition
+ :type transition: Transitions
:param transition: Indicates when an object transitions to a
different storage class.
"""
diff --git a/boto/sqs/connection.py b/boto/sqs/connection.py
index bd340d15..6a5adf64 100644
--- a/boto/sqs/connection.py
+++ b/boto/sqs/connection.py
@@ -425,19 +425,19 @@ class SQSConnection(AWSQueryConnection):
params[p_name] = name
if 'data_type' in attribute:
- p_name = '%s.%i.DataType' % (base, j + 1)
+ p_name = '%s.%i.Value.DataType' % (base, j + 1)
params[p_name] = attribute['data_type']
if 'string_value' in attribute:
- p_name = '%s.%i.StringValue' % (base, j + 1)
+ p_name = '%s.%i.Value.StringValue' % (base, j + 1)
params[p_name] = attribute['string_value']
if 'binary_value' in attribute:
- p_name = '%s.%i.BinaryValue' % (base, j + 1)
+ p_name = '%s.%i.Value.BinaryValue' % (base, j + 1)
params[p_name] = attribute['binary_value']
if 'string_list_value' in attribute:
- p_name = '%s.%i.StringListValue' % (base, j + 1)
+ p_name = '%s.%i.Value.StringListValue' % (base, j + 1)
params[p_name] = attribute['string_list_value']
if 'binary_list_value' in attribute:
- p_name = '%s.%i.BinaryListValue' % (base, j + 1)
+ p_name = '%s.%i.Value.BinaryListValue' % (base, j + 1)
params[p_name] = attribute['binary_list_value']
return self.get_object('SendMessageBatch', params, BatchResults,
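
The corrected parameter names nest each attribute's value fields under ``Value.`` (e.g. ``...MessageAttribute.1.Value.StringValue``), which is the wire format SQS expects. A hedged sketch of a batch entry carrying message attributes (queue name is a placeholder)::

    import boto.sqs

    conn = boto.sqs.connect_to_region('us-east-1')
    queue = conn.get_queue('my-queue')

    # Each entry: (id, body, delay_seconds, message_attributes)
    entries = [('m1', 'hello', 0, {
        'source': {'data_type': 'String', 'string_value': 'boto-test'},
    })]
    conn.send_message_batch(queue, entries)
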
diff --git a/boto/utils.py b/boto/utils.py
index 852aa5ab..39a8cf77 100644
--- a/boto/utils.py
+++ b/boto/utils.py
@@ -220,10 +220,10 @@ def retry_url(url, retry_on_404=True, num_retries=10, timeout=None):
if code == 404 and not retry_on_404:
return ''
except Exception as e:
- pass
- boto.log.exception('Caught exception reading instance data')
+ boto.log.exception('Caught exception reading instance data')
# If not on the last iteration of the loop then sleep.
if i + 1 != num_retries:
+ boto.log.debug('Sleeping before retrying')
time.sleep(min(2 ** i,
boto.config.get('Boto', 'max_retry_delay', 60)))
boto.log.error('Unable to read instance data, giving up')
@@ -393,6 +393,8 @@ def get_instance_metadata(version='latest', url='http://169.254.169.254',
metadata_url = _build_instance_metadata_url(url, version, data)
return _get_instance_metadata(metadata_url, num_retries=num_retries, timeout=timeout)
except urllib.error.URLError:
+ boto.log.exception("Exception caught when trying to retrieve "
+ "instance metadata for: %s", data)
return None
diff --git a/docs/source/_templates/page.html b/docs/source/_templates/page.html
index 8862f15a..72cf9b82 100644
--- a/docs/source/_templates/page.html
+++ b/docs/source/_templates/page.html
@@ -1,5 +1,6 @@
{% extends '!page.html' %}
{% block body %}
+<!--REGION_DISCLAIMER_DO_NOT_REMOVE-->
<div class="admonition note">
<p class="first admonition-title">Note</p>
<p>You are viewing the documentation for an older version of boto (boto2).</p>
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 6be34845..2b180ab1 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -152,6 +152,7 @@ Release Notes
.. toctree::
:titlesonly:
+ releasenotes/v2.40.0
releasenotes/v2.39.0
releasenotes/v2.38.0
releasenotes/v2.37.0
diff --git a/docs/source/releasenotes/v2.40.0.rst b/docs/source/releasenotes/v2.40.0.rst
new file mode 100644
index 00000000..727e3925
--- /dev/null
+++ b/docs/source/releasenotes/v2.40.0.rst
@@ -0,0 +1,15 @@
+boto v2.40.0
+============
+
+:date: 2016/04/28
+
+Fixes several bugs.
+
+Changes
+-------
+* ryansydnor-s3: Allow s3 bucket lifecycle policies with multiple transitions (:sha:`c6d5af3`)
+* Fixes upload parts for glacier (:issue:`3524`, :sha:`d1973a4`)
+* pslawski-unicode-parse-qs: Parse unicode query strings properly in Python 2; move utility functions over to compat; fix quoting of tilde in S3 canonical_uri for sigv4; add S3 integ test for non-ascii keys with sigv4 (:issue:`2844`, :sha:`5092c6d`)
+* ninchat-config-fix: Don't inherit Config from ConfigParser; catch specific exceptions when wrapping ConfigParser methods; don't access the parser through __dict__; add __setstate__ to fix a pickling test failure; add unit tests for config parsing (:issue:`3474`, :sha:`c21aa54`)
+
+
diff --git a/docs/source/s3_tut.rst b/docs/source/s3_tut.rst
index 9d253d1c..13ca0856 100644
--- a/docs/source/s3_tut.rst
+++ b/docs/source/s3_tut.rst
@@ -442,33 +442,38 @@ And, finally, to delete all CORS configurations from a bucket::
>>> bucket.delete_cors()
-Transitioning Objects to Glacier
+Transitioning Objects
--------------------------------
-You can configure objects in S3 to transition to Glacier after a period of
-time. This is done using lifecycle policies. A lifecycle policy can also
-specify that an object should be deleted after a period of time. Lifecycle
-configurations are assigned to buckets and require these parameters:
+S3 buckets support transitioning objects to various storage classes. This is
+done using lifecycle policies. You can currently transition objects to
+Infrequent Access or Glacier, or simply expire them. Each of these actions
+can be applied after a number of days or on a given date.
+Lifecycle configurations are assigned to buckets and require these parameters:
-* The object prefix that identifies the objects you are targeting.
+* The object prefix (which may be empty) that identifies the objects you are targeting.
* The action you want S3 to perform on the identified objects.
-* The date (or time period) when you want S3 to perform these actions.
+* The date or number of days when you want S3 to perform these actions.
-For example, given a bucket ``s3-glacier-boto-demo``, we can first retrieve the
+For example, given a bucket ``s3-lifecycle-boto-demo``, we can first retrieve the
bucket::
>>> import boto
>>> c = boto.connect_s3()
- >>> bucket = c.get_bucket('s3-glacier-boto-demo')
+ >>> bucket = c.get_bucket('s3-lifecycle-boto-demo')
Then we can create a lifecycle object. In our example, we want all objects
-under ``logs/*`` to transition to Glacier 30 days after the object is created.
+under ``logs/*`` to transition to Standard IA 30 days after the object is
+created, to Glacier 90 days after creation, and to be deleted 120 days after
+creation.
::
- >>> from boto.s3.lifecycle import Lifecycle, Transition, Rule
- >>> to_glacier = Transition(days=30, storage_class='GLACIER')
- >>> rule = Rule('ruleid', 'logs/', 'Enabled', transition=to_glacier)
+    >>> from boto.s3.lifecycle import Lifecycle, Transitions, Rule, Expiration
+ >>> transitions = Transitions()
+ >>> transitions.add_transition(days=30, storage_class='STANDARD_IA')
+ >>> transitions.add_transition(days=90, storage_class='GLACIER')
+ >>> expiration = Expiration(days=120)
+ >>> rule = Rule(id='ruleid', prefix='logs/', status='Enabled', expiration=expiration, transition=transitions)
>>> lifecycle = Lifecycle()
>>> lifecycle.append(rule)
@@ -485,19 +490,27 @@ You can also retrieve the current lifecycle policy for the bucket::
>>> current = bucket.get_lifecycle_config()
>>> print current[0].transition
-    <Transition: in: 30 days, GLACIER>
+    [<Transition: in: 90 days, GLACIER>, <Transition: in: 30 days, STANDARD_IA>]
+    >>> print current[0].expiration
+    <Expiration: in: 120 days>
+
+Note: Directly accessing transition properties on a rule is deprecated now
+that a rule may hold several transitions. Index into the transition list
+first (e.g. ``rule.transition[0]``).
-When an object transitions to Glacier, the storage class will be
+When an object transitions, the storage class will be
updated. This can be seen when you **list** the objects in a bucket::
>>> for key in bucket.list():
... print key, key.storage_class
...
- <Key: s3-glacier-boto-demo,logs/testlog1.log> GLACIER
+ <Key: s3-lifecycle-boto-demo,logs/testlog1.log> STANDARD_IA
+ <Key: s3-lifecycle-boto-demo,logs/testlog2.log> GLACIER
You can also use the prefix argument to the ``bucket.list`` method::
>>> print list(b.list(prefix='logs/testlog1.log'))[0].storage_class
+    u'STANDARD_IA'
+    >>> print list(b.list(prefix='logs/testlog2.log'))[0].storage_class
u'GLACIER'
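
Putting the revised tutorial together, here is a minimal end-to-end sketch
(assumptions: a bucket named ``s3-lifecycle-boto-demo`` already exists and
your credentials are configured; it uses only the classes shown above)::

    import boto
    from boto.s3.lifecycle import Lifecycle, Transitions, Rule, Expiration

    conn = boto.connect_s3()
    bucket = conn.get_bucket('s3-lifecycle-boto-demo')

    # One rule with two transitions plus an expiration.
    transitions = Transitions()
    transitions.add_transition(days=30, storage_class='STANDARD_IA')
    transitions.add_transition(days=90, storage_class='GLACIER')
    rule = Rule(id='ruleid', prefix='logs/', status='Enabled',
                expiration=Expiration(days=120), transition=transitions)

    lifecycle = Lifecycle()
    lifecycle.append(rule)
    bucket.configure_lifecycle(lifecycle)

    # Read it back: index into the transition list, since direct
    # attribute access on a multi-transition rule is deprecated.
    current = bucket.get_lifecycle_config()
    first = current[0].transition[0]
    print('%s days -> %s' % (first.days, first.storage_class))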
diff --git a/scripts/rebuild-endpoints.py b/scripts/rebuild-endpoints.py
new file mode 100644
index 00000000..48f903a9
--- /dev/null
+++ b/scripts/rebuild-endpoints.py
@@ -0,0 +1,283 @@
+"""Rebuild endpoint config.
+
+Final format looks like this::
+{
+ "autoscaling": {
+ "ap-northeast-1": "autoscaling.ap-northeast-1.amazonaws.com",
+ "ap-northeast-2": "autoscaling.ap-northeast-2.amazonaws.com",
+ "ap-southeast-1": "autoscaling.ap-southeast-1.amazonaws.com",
+ ...
+ },
+ "service-name": {
+ "region": "hostname"
+ }
+}
+
+This will use the EndpointResolver from botocore to regenerate
+endpoints. To regen the latest static endpoints, ensure you have
+the latest version of botocore installed before running this script.
+
+Usage
+=====
+
+To print the newly generated endpoints to stdout::
+
+ python rebuild-endpoints.py
+
+To overwrite the existing endpoints.json file in boto::
+
+ python rebuild-endpoints.py --overwrite
+
+If you have a custom upstream endpoints.json file you'd like
+to use, you can provide the ``--endpoints-file`` option::
+
+    python rebuild-endpoints.py --endpoints-file custom-endpoints.json
+
+"""
+import sys
+import os
+import json
+import argparse
+
+
+try:
+ import botocore.session
+ from botocore.regions import EndpointResolver
+except ImportError:
+    print("Couldn't import botocore; make sure it's installed in order "
+          "to regen endpoint data.")
+ sys.exit(1)
+
+
+EXISTING_ENDPOINTS_FILE = os.path.join(
+ os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
+ 'boto', 'endpoints.json')
+
+
+def _load_endpoint_services(filename):
+ with open(filename) as f:
+ return list(json.load(f))
+
+
+class StrictEndpointResolver(object):
+ """Endpoint Resolver that verifies services in a partition."""
+
+ # It's worth seeing if any of the stuff in this class makes sense
+ # to move back into botocore. This might be too specific to boto2's
+ # usage. The intent was to try to make the StaticEndpointBuilder
+ # as easy to follow as possible, so this class wraps an existing
+ # botocore endpoint and provides some extension methods. The main
+ # extension points are:
+ #
+ # * Introspection about known services in a partition.
+ # * Chaining partition iteration (for boto2 we just need to create
+ # a list of region->endpoints across all known partitions so this
+ # class provides iterators that allow you to iterate over all known
+ # regions for all known partitions).
+ # * Helper method for static hostname lookup by abstracting the
+ # sslCommonName checks into a "get_hostname" method.
+ # * Allowing you to use "service names" specific to boto2 when
+ # generating endpoints. Internally this has a mapping of which endpoint
+ # prefixes to use.
+
+ SERVICE_RENAMES = {
+ # The botocore resolver is based on endpoint prefix.
+ # These don't always sync up to the name that boto2 uses.
+ # A mapping can be provided that handles the mapping between
+ # "service names" and endpoint prefixes.
+ 'awslambda': 'lambda',
+ 'cloudwatch': 'monitoring',
+ 'ses': 'email',
+ 'ec2containerservice': 'ecs',
+ 'configservice': 'config',
+ }
+
+ def __init__(self, resolver, endpoint_data,
+ service_name_map=None):
+ #: An instance of botocore.regions.EndpointResolver.
+ self._resolver = resolver
+ self._endpoint_data = endpoint_data
+ if service_name_map is None:
+ service_name_map = self.SERVICE_RENAMES
+ self._service_map = service_name_map
+
+ def regions_for_service(self, service_name):
+ # "What are all the regions EC2 is in across all known partitions?"
+ endpoint_prefix = self._endpoint_prefix(service_name)
+ for partition_name in self.get_available_partitions():
+ if self.is_service_in_partition(service_name, partition_name):
+ for region_name in self._resolver.get_available_endpoints(
+ endpoint_prefix, partition_name):
+ yield region_name
+
+ def regions_for_partition(self, partition_name):
+ # "What are all the known regions in a given partition?"
+ # This is used in boto to create entries for "cloudfront"
+ # for every region:
+ # us-east-1: cloudfront.amazonaws.com
+ # us-west-2: cloudfront.amazonaws.com
+ # ...
+ partition_data = self._get_partition_data(partition_name)
+ return [r for r in list(partition_data['regions'])
+ if 'global' not in r]
+
+ def partitions_for_service(self, service_name):
+ # "In which partitions is 'cloudfront' available?"
+ # This is used because we should *not* generate entries
+ # for cn-north-1 for cloudfront, it's not available in China.
+ # This can be accomplished by using this method and
+ # regions_for_partition. See the _special_case_global_service
+ # method in StaticEndpointBuilder.
+ for partition_name in self.get_available_partitions():
+ if self.is_service_in_partition(service_name, partition_name):
+ yield partition_name
+
+ def get_available_partitions(self):
+ return self._resolver.get_available_partitions()
+
+ def get_hostname(self, service_name, region_name):
+ # Static hostname given a service_name/region_name
+ # We'll map the service_name to the endpoint_prefix
+ # and validate that the service is in the partition.
+ partition = self._partition_for_region(region_name)
+ if not self.is_service_in_partition(service_name, partition):
+ raise ValueError("Unknown service '%s' in partition '%s'" % (
+ service_name, partition))
+ endpoint_prefix = self._endpoint_prefix(service_name)
+ endpoint_config = self._resolver.construct_endpoint(
+ endpoint_prefix, region_name)
+ hostname = endpoint_config.get('sslCommonName',
+ endpoint_config.get('hostname'))
+ return hostname
+
+ def is_service_in_partition(self, service_name, partition_name):
+ # Is iam in aws-cn? Yes
+ # Is cloudfront in aws-cn? No
+ endpoint_prefix = self._endpoint_prefix(service_name)
+ partition_data = self._get_partition_data(partition_name)
+ return endpoint_prefix in partition_data['services']
+
+ def _partition_for_region(self, region_name):
+ # us-east-1 -> aws
+ # us-west-2 -> aws
+ # cn-north-1 -> aws-cn
+ for partition in self._endpoint_data['partitions']:
+ if region_name in partition['regions']:
+ return partition['partition']
+ raise ValueError("Unknown region name: %s" % region_name)
+
+ def _get_partition_data(self, partition_name):
+ for partition in self._endpoint_data['partitions']:
+ if partition['partition'] == partition_name:
+ return partition
+ raise ValueError("Could not find partition data for: %s"
+ % partition_name)
+
+ def _endpoint_prefix(self, service_name):
+ endpoint_prefix = self._service_map.get(
+ service_name, service_name)
+ return endpoint_prefix
+
+ def is_global_service(self, service_name):
+ # This is making the assumption that if a service is
+ # a partitionEndpoint for one partition, it will be that
+ # way for *all* partitions. Technically possible to be
+ # different, but in practice it's not.
+ # We need this because this is how we know to trigger
+ # special case behavior with services like iam, cloudfront.
+ return (
+ 'partitionEndpoint' in
+ self._endpoint_data['partitions'][0]['services'].get(
+ service_name, {}))
+
+
+class StaticEndpointBuilder(object):
+
+ def __init__(self, resolver):
+ self._resolver = resolver
+
+ def build_static_endpoints(self, service_names):
+ """Build a set of static endpoints.
+
+        :param service_names: The names of the services to build.
+            These must be the service names that boto2 uses,
+            not the boto3 names, e.g. "ec2containerservice"
+            and not "ecs".
+
+ :return: A dict consisting of::
+ {"service": {"region": "full.host.name"}}
+
+ """
+ static_endpoints = {}
+ for name in service_names:
+ endpoints_for_service = self._build_endpoints_for_service(name)
+ if endpoints_for_service:
+                # It's possible that when we try to build endpoints for a
+                # service we get an empty dict. In that case we don't bother
+                # adding it to the final set of static endpoints.
+ static_endpoints[name] = endpoints_for_service
+ self._deal_with_special_cases(static_endpoints)
+ return static_endpoints
+
+ def _build_endpoints_for_service(self, service_name):
+ # Given a service name, 'ec2', build a dict of
+ # 'region' -> 'hostname'
+ if self._resolver.is_global_service(service_name):
+ return self._special_case_global_service(service_name)
+ endpoints = {}
+ for region_name in self._resolver.regions_for_service(service_name):
+ endpoints[region_name] = self._resolver.get_hostname(service_name,
+ region_name)
+ return endpoints
+
+ def _special_case_global_service(self, service_name):
+ # In boto2, an entry for each known region is added with the same
+ # partition wide endpoint for every partition the service is available
+ # in. This method implements this special cased behavior.
+ endpoints = {}
+ for partition in self._resolver.partitions_for_service(service_name):
+ region_names = self._resolver.regions_for_partition(
+ partition)
+ for region_name in region_names:
+ endpoints[region_name] = self._resolver.get_hostname(
+ service_name, region_name)
+ return endpoints
+
+ def _deal_with_special_cases(self, static_endpoints):
+ # I'm not sure why we do this, but cloudsearchdomain endpoints
+ # use the exact same set of endpoints as cloudsearch.
+ if 'cloudsearch' in static_endpoints:
+ static_endpoints['cloudsearchdomain'] = static_endpoints['cloudsearch']
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--overwrite', action='store_true')
+ parser.add_argument('--endpoints-file',
+ help=('Path to endpoints.json. If this argument '
+ 'is not given, then the endpoints.json file '
+ 'bundled with botocore will be used.'))
+ args = parser.parse_args()
+ known_services_in_existing_endpoints = _load_endpoint_services(
+ EXISTING_ENDPOINTS_FILE)
+ session = botocore.session.get_session()
+ if args.endpoints_file:
+ with open(args.endpoints_file) as f:
+ endpoint_data = json.load(f)
+ else:
+ endpoint_data = session.get_data('endpoints')
+ resolver = EndpointResolver(endpoint_data)
+ strict_resolver = StrictEndpointResolver(resolver, endpoint_data)
+ builder = StaticEndpointBuilder(strict_resolver)
+ static_endpoints = builder.build_static_endpoints(
+ known_services_in_existing_endpoints)
+ json_data = json.dumps(static_endpoints, indent=4, sort_keys=True)
+ if args.overwrite:
+ with open(EXISTING_ENDPOINTS_FILE, 'w') as f:
+ f.write(json_data)
+ else:
+ print(json_data)
+
+
+if __name__ == '__main__':
+ main()
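
For reference, consuming the regenerated file is a plain JSON lookup. A
minimal sketch of reading it, assuming you run from the repository root and
the ``{"service": {"region": "hostname"}}`` layout documented in the script's
docstring::

    import json

    with open('boto/endpoints.json') as f:
        endpoints = json.load(f)

    # Keys are boto2 service names (e.g. "ec2containerservice",
    # not the "ecs" endpoint prefix), mapping region -> hostname.
    print(endpoints['ec2']['us-west-2'])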
diff --git a/scripts/rebuild_endpoints.py b/scripts/rebuild_endpoints.py
deleted file mode 100644
index 37ac37d8..00000000
--- a/scripts/rebuild_endpoints.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import json
-from pyquery import PyQuery as pq
-import requests
-
-
-class FetchError(Exception):
- pass
-
-
-def fetch_endpoints():
- # We utilize what the Java SDK publishes as a baseline.
- resp = requests.get('https://raw2.github.com/aws/aws-sdk-java/master/src/main/resources/etc/regions.xml')
-
- if int(resp.status_code) != 200:
- raise FetchError("Failed to fetch the endpoints. Got {0}: {1}".format(
- resp.status,
- resp.body
- ))
-
- return resp.text
-
-
-def parse_xml(raw_xml):
- return pq(raw_xml, parser='xml')
-
-
-def build_data(doc):
- data = {}
-
- # Run through all the regions. These have all the data we need.
- for region_elem in doc('Regions').find('Region'):
- region = pq(region_elem, parser='xml')
- region_name = region.find('Name').text()
-
- for endp in region.find('Endpoint'):
- service_name = endp.find('ServiceName').text
- endpoint = endp.find('Hostname').text
-
- data.setdefault(service_name, {})
- data[service_name][region_name] = endpoint
-
- return data
-
-
-def main():
- raw_xml = fetch_endpoints()
- doc = parse_xml(raw_xml)
- data = build_data(doc)
- print(json.dumps(data, indent=4, sort_keys=True))
-
-
-if __name__ == '__main__':
- main()
diff --git a/tests/integration/s3/test_key.py b/tests/integration/s3/test_key.py
index 40bc8c32..16f0220b 100644
--- a/tests/integration/s3/test_key.py
+++ b/tests/integration/s3/test_key.py
@@ -491,6 +491,19 @@ class S3KeySigV4Test(unittest.TestCase):
self.assertEqual(from_s3_key.get_contents_as_string().decode('utf-8'),
body)
+ def test_head_put_get_with_non_ascii_key(self):
+ k = Key(self.bucket)
+ k.key = u'''pt-Olá_ch-你好_ko-안녕_ru-Здравствуйте%20,.<>~`!@#$%^&()_-+='"'''
+ body = 'This is a test of S3'
+
+ k.set_contents_from_string(body)
+ from_s3_key = self.bucket.get_key(k.key, validate=True)
+ self.assertEqual(from_s3_key.get_contents_as_string().decode('utf-8'),
+ body)
+
+ keys = self.bucket.get_all_keys(prefix=k.key, max_keys=1)
+ self.assertEqual(1, len(keys))
+
class S3KeyVersionCopyTest(unittest.TestCase):
def setUp(self):
diff --git a/tests/integration/sqs/test_connection.py b/tests/integration/sqs/test_connection.py
index 5ab80924..7beaa7dc 100644
--- a/tests/integration/sqs/test_connection.py
+++ b/tests/integration/sqs/test_connection.py
@@ -129,6 +129,17 @@ class SQSConnectionTest(unittest.TestCase):
br = queue_1.delete_message_batch(msgs)
deleted += len(br.results)
+ # try a batch write with message attributes
+ num_msgs = 10
+ attrs = {
+ 'foo': {
+ 'data_type': 'String',
+ 'string_value': 'Hello, World!'
+ },
+ }
+ msgs = [(i, 'This is message %d' % i, 0, attrs) for i in range(num_msgs)]
+ queue_1.write_batch(msgs)
+
# create another queue so we can test force deletion
# we will also test MHMessage with this queue
queue_name = 'test%d' % int(time.time())
diff --git a/tests/unit/auth/test_sigv4.py b/tests/unit/auth/test_sigv4.py
index cf8d44ca..74f0ec5b 100644
--- a/tests/unit/auth/test_sigv4.py
+++ b/tests/unit/auth/test_sigv4.py
@@ -342,13 +342,13 @@ class TestS3HmacAuthV4Handler(unittest.TestCase):
def test_canonical_uri(self):
request = HTTPRequest(
'GET', 'https', 's3-us-west-2.amazonaws.com', 443,
- 'x/./././x .html', None, {},
+ 'x/./././~x .html', None, {},
{}, ''
)
canonical_uri = self.auth.canonical_uri(request)
# S3 doesn't canonicalize the way other SigV4 services do.
# This just urlencoded, no normalization of the path.
- self.assertEqual(canonical_uri, 'x/./././x%20.html')
+ self.assertEqual(canonical_uri, 'x/./././~x%20.html')
def test_determine_service_name(self):
# What we wish we got.
@@ -447,6 +447,27 @@ class TestS3HmacAuthV4Handler(unittest.TestCase):
'delete': ''
})
+ def test_unicode_query_string(self):
+ request = HTTPRequest(
+ method='HEAD',
+ protocol='https',
+ host='awesome-bucket.s3-us-west-2.amazonaws.com',
+ port=443,
+ path=u'/?max-keys=1&prefix=El%20Ni%C3%B1o',
+ auth_path=u'/awesome-bucket/?max-keys=1&prefix=El%20Ni%C3%B1o',
+ params={},
+ headers={},
+ body=''
+ )
+
+ mod_req = self.auth.mangle_path_and_params(request)
+ self.assertEqual(mod_req.path, u'/?max-keys=1&prefix=El%20Ni%C3%B1o')
+ self.assertEqual(mod_req.auth_path, u'/awesome-bucket/')
+ self.assertEqual(mod_req.params, {
+ u'max-keys': u'1',
+ u'prefix': u'El Ni\xf1o',
+ })
+
def test_canonical_request(self):
expected = """GET
/
diff --git a/tests/unit/cloudformation/test_connection.py b/tests/unit/cloudformation/test_connection.py
index 613e3d2a..0889c7e3 100644
--- a/tests/unit/cloudformation/test_connection.py
+++ b/tests/unit/cloudformation/test_connection.py
@@ -391,8 +391,8 @@ class TestCloudFormationDescribeStacks(CloudFormationConnectionBase):
<member>
<StackId>arn:aws:cfn:us-east-1:1:stack</StackId>
<StackStatus>CREATE_COMPLETE</StackStatus>
+ <StackStatusReason>REASON</StackStatusReason>
<StackName>MyStack</StackName>
- <StackStatusReason/>
<Description>My Description</Description>
<CreationTime>2012-05-16T22:55:31Z</CreationTime>
<Capabilities>
@@ -444,7 +444,8 @@ class TestCloudFormationDescribeStacks(CloudFormationConnectionBase):
self.assertEqual(stack.stack_id, 'arn:aws:cfn:us-east-1:1:stack')
self.assertEqual(stack.stack_status, 'CREATE_COMPLETE')
self.assertEqual(stack.stack_name, 'MyStack')
- self.assertEqual(stack.stack_name_reason, None)
+ self.assertEqual(stack.stack_name_reason, 'REASON')
+ self.assertEqual(stack.stack_status_reason, 'REASON')
self.assertEqual(stack.timeout_in_minutes, None)
self.assertEqual(len(stack.outputs), 1)
diff --git a/tests/unit/ec2/test_connection.py b/tests/unit/ec2/test_connection.py
index 46148f9f..7a39de1a 100755
--- a/tests/unit/ec2/test_connection.py
+++ b/tests/unit/ec2/test_connection.py
@@ -531,37 +531,85 @@ class TestCopyImage(TestEC2ConnectionBase):
</CopyImageResponse>
"""
- def test_copy_image(self):
+ def test_copy_image_required_params(self):
self.set_http_response(status_code=200)
- copied_ami = self.ec2.copy_image('us-west-2', 'ami-id',
- 'name', 'description', 'client-token')
+ copied_ami = self.ec2.copy_image('us-west-2', 'ami-id')
self.assertEqual(copied_ami.image_id, 'ami-copied-id')
+ self.assert_request_parameters({
+ 'Action': 'CopyImage',
+ 'SourceRegion': 'us-west-2',
+ 'SourceImageId': 'ami-id'
+ }, ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
+ 'SignatureVersion', 'Timestamp',
+ 'Version'])
+ def test_copy_image_name_and_description(self):
+ self.set_http_response(status_code=200)
+ copied_ami = self.ec2.copy_image('us-west-2', 'ami-id', 'name', 'description')
+ self.assertEqual(copied_ami.image_id, 'ami-copied-id')
self.assert_request_parameters({
'Action': 'CopyImage',
- 'Description': 'description',
+ 'SourceRegion': 'us-west-2',
+ 'SourceImageId': 'ami-id',
'Name': 'name',
+ 'Description': 'description'
+ }, ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
+ 'SignatureVersion', 'Timestamp',
+ 'Version'])
+
+ def test_copy_image_client_token(self):
+ self.set_http_response(status_code=200)
+ copied_ami = self.ec2.copy_image('us-west-2', 'ami-id', client_token='client-token')
+ self.assertEqual(copied_ami.image_id, 'ami-copied-id')
+ self.assert_request_parameters({
+ 'Action': 'CopyImage',
'SourceRegion': 'us-west-2',
'SourceImageId': 'ami-id',
- 'ClientToken': 'client-token'},
- ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
+ 'ClientToken': 'client-token'
+ }, ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
- def test_copy_image_without_name(self):
+ def test_copy_image_encrypted(self):
self.set_http_response(status_code=200)
- copied_ami = self.ec2.copy_image('us-west-2', 'ami-id',
- description='description',
- client_token='client-token')
+ copied_ami = self.ec2.copy_image('us-west-2', 'ami-id', encrypted=True)
self.assertEqual(copied_ami.image_id, 'ami-copied-id')
self.assert_request_parameters({
'Action': 'CopyImage',
- 'Description': 'description',
'SourceRegion': 'us-west-2',
'SourceImageId': 'ami-id',
- 'ClientToken': 'client-token'},
- ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
+ 'Encrypted': 'true'
+ }, ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
+ 'SignatureVersion', 'Timestamp',
+ 'Version'])
+
+ def test_copy_image_not_encrypted(self):
+ self.set_http_response(status_code=200)
+ copied_ami = self.ec2.copy_image('us-west-2', 'ami-id', encrypted=False)
+ self.assertEqual(copied_ami.image_id, 'ami-copied-id')
+
+ self.assert_request_parameters({
+ 'Action': 'CopyImage',
+ 'SourceRegion': 'us-west-2',
+ 'SourceImageId': 'ami-id',
+ 'Encrypted': 'false'
+ }, ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
+ 'SignatureVersion', 'Timestamp',
+ 'Version'])
+
+ def test_copy_image_encrypted_with_kms_key(self):
+ self.set_http_response(status_code=200)
+ copied_ami = self.ec2.copy_image('us-west-2', 'ami-id', encrypted=False, kms_key_id='kms-key')
+ self.assertEqual(copied_ami.image_id, 'ami-copied-id')
+
+ self.assert_request_parameters({
+ 'Action': 'CopyImage',
+ 'SourceRegion': 'us-west-2',
+ 'SourceImageId': 'ami-id',
+ 'Encrypted': 'false',
+ 'KmsKeyId': 'kms-key'
+ }, ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
@@ -1113,6 +1161,38 @@ class TestModifyReservedInstances(TestEC2ConnectionBase):
self.assertEqual(response, 'rimod-3aae219d-3d63-47a9-a7e9-e764example')
+ def test_none_token(self):
+ """Ensures that if the token is set to None, nothing is serialized."""
+ self.set_http_response(status_code=200)
+ response = self.ec2.modify_reserved_instances(
+ None,
+ reserved_instance_ids=[
+ '2567o137-8a55-48d6-82fb-7258506bb497',
+ ],
+ target_configurations=[
+ ReservedInstancesConfiguration(
+ availability_zone='us-west-2c',
+ platform='EC2-VPC',
+ instance_count=3,
+ instance_type='c3.large'
+ ),
+ ]
+ )
+ self.assert_request_parameters({
+ 'Action': 'ModifyReservedInstances',
+ 'ReservedInstancesConfigurationSetItemType.0.AvailabilityZone': 'us-west-2c',
+ 'ReservedInstancesConfigurationSetItemType.0.InstanceCount': 3,
+ 'ReservedInstancesConfigurationSetItemType.0.Platform': 'EC2-VPC',
+ 'ReservedInstancesConfigurationSetItemType.0.InstanceType': 'c3.large',
+ 'ReservedInstancesId.1': '2567o137-8a55-48d6-82fb-7258506bb497'
+ }, ignore_params_values=[
+ 'AWSAccessKeyId', 'SignatureMethod',
+ 'SignatureVersion', 'Timestamp',
+ 'Version'
+ ])
+
+ self.assertEqual(response, 'rimod-3aae219d-3d63-47a9-a7e9-e764example')
+
class TestDescribeReservedInstancesModifications(TestEC2ConnectionBase):
def default_body(self):
diff --git a/tests/unit/emr/test_connection.py b/tests/unit/emr/test_connection.py
index 5bcbefaa..ca13d1c0 100644
--- a/tests/unit/emr/test_connection.py
+++ b/tests/unit/emr/test_connection.py
@@ -25,20 +25,24 @@ from datetime import datetime
from time import time
from tests.unit import AWSMockServiceTestCase
+from boto.compat import six
from boto.emr.connection import EmrConnection
from boto.emr.emrobject import BootstrapAction, BootstrapActionList, \
- ClusterStateChangeReason, ClusterStatus, ClusterSummaryList, \
- ClusterSummary, ClusterTimeline, InstanceInfo, \
- InstanceList, InstanceGroupInfo, \
- InstanceGroup, InstanceGroupList, JobFlow, \
- JobFlowStepList, Step, StepSummaryList, \
- Cluster, RunJobFlowResponse
+ ClusterStateChangeReason, ClusterStatus, ClusterSummaryList, \
+ ClusterSummary, ClusterTimeline, InstanceInfo, \
+ InstanceList, InstanceGroupInfo, \
+ InstanceGroup, InstanceGroupList, JobFlow, \
+ JobFlowStepList, Step, StepSummaryList, \
+ Cluster, RunJobFlowResponse
+
# These tests are just checking the basic structure of
# the Elastic MapReduce code, by picking a few calls
# and verifying we get the expected results with mocked
# responses. The integration tests actually verify the
# API calls interact with the service correctly.
+
+
class TestListClusters(AWSMockServiceTestCase):
connection_class = EmrConnection
@@ -109,15 +113,22 @@ class TestListClusters(AWSMockServiceTestCase):
self.assertTrue(isinstance(response.clusters[0].status, ClusterStatus))
self.assertEqual(response.clusters[0].status.state, 'TERMINATED')
- self.assertTrue(isinstance(response.clusters[0].status.timeline, ClusterTimeline))
+ self.assertTrue(
+ isinstance(response.clusters[0].status.timeline, ClusterTimeline))
- self.assertEqual(response.clusters[0].status.timeline.creationdatetime, '2014-01-24T01:21:21Z')
- self.assertEqual(response.clusters[0].status.timeline.readydatetime, '2014-01-24T01:25:26Z')
- self.assertEqual(response.clusters[0].status.timeline.enddatetime, '2014-01-24T02:19:46Z')
+ self.assertEqual(
+ response.clusters[0].status.timeline.creationdatetime, '2014-01-24T01:21:21Z')
+ self.assertEqual(
+ response.clusters[0].status.timeline.readydatetime, '2014-01-24T01:25:26Z')
+ self.assertEqual(
+ response.clusters[0].status.timeline.enddatetime, '2014-01-24T02:19:46Z')
- self.assertTrue(isinstance(response.clusters[0].status.statechangereason, ClusterStateChangeReason))
- self.assertEqual(response.clusters[0].status.statechangereason.code, 'USER_REQUEST')
- self.assertEqual(response.clusters[0].status.statechangereason.message, 'Terminated by user request')
+ self.assertTrue(isinstance(
+ response.clusters[0].status.statechangereason, ClusterStateChangeReason))
+ self.assertEqual(
+ response.clusters[0].status.statechangereason.code, 'USER_REQUEST')
+        self.assertEqual(
+            response.clusters[0].status.statechangereason.message,
+            'Terminated by user request')
def test_list_clusters_created_before(self):
self.set_http_response(status_code=200)
@@ -223,7 +234,8 @@ class TestListInstanceGroups(AWSMockServiceTestCase):
with self.assertRaises(TypeError):
self.service_connection.list_instance_groups()
- response = self.service_connection.list_instance_groups(cluster_id='j-123')
+ response = self.service_connection.list_instance_groups(
+ cluster_id='j-123')
self.assert_request_parameters({
'Action': 'ListInstanceGroups',
@@ -233,19 +245,25 @@ class TestListInstanceGroups(AWSMockServiceTestCase):
self.assertTrue(isinstance(response, InstanceGroupList))
self.assertEqual(len(response.instancegroups), 2)
- self.assertTrue(isinstance(response.instancegroups[0], InstanceGroupInfo))
+ self.assertTrue(
+ isinstance(response.instancegroups[0], InstanceGroupInfo))
self.assertEqual(response.instancegroups[0].id, 'ig-aaaaaaaaaaaaa')
- self.assertEqual(response.instancegroups[0].instancegrouptype, "MASTER")
+ self.assertEqual(
+ response.instancegroups[0].instancegrouptype, "MASTER")
self.assertEqual(response.instancegroups[0].instancetype, "m1.large")
self.assertEqual(response.instancegroups[0].market, "ON_DEMAND")
- self.assertEqual(response.instancegroups[0].name, "Master instance group")
- self.assertEqual(response.instancegroups[0].requestedinstancecount, '1')
+ self.assertEqual(
+ response.instancegroups[0].name, "Master instance group")
+ self.assertEqual(
+ response.instancegroups[0].requestedinstancecount, '1')
self.assertEqual(response.instancegroups[0].runninginstancecount, '0')
- self.assertTrue(isinstance(response.instancegroups[0].status, ClusterStatus))
+ self.assertTrue(
+ isinstance(response.instancegroups[0].status, ClusterStatus))
self.assertEqual(response.instancegroups[0].status.state, 'TERMINATED')
# status.statechangereason is not parsed into an object
#self.assertEqual(response.instancegroups[0].status.statechangereason.code, 'CLUSTER_TERMINATED')
+
class TestListInstances(AWSMockServiceTestCase):
connection_class = EmrConnection
@@ -334,11 +352,12 @@ class TestListInstances(AWSMockServiceTestCase):
self.assertTrue(isinstance(response.instances[0], InstanceInfo))
self.assertEqual(response.instances[0].ec2instanceid, 'i-aaaaaaaa')
self.assertEqual(response.instances[0].id, 'ci-123456789abc')
- self.assertEqual(response.instances[0].privatednsname , 'ip-10-0-0-60.us-west-1.compute.internal')
- self.assertEqual(response.instances[0].privateipaddress , '10.0.0.60')
- self.assertEqual(response.instances[0].publicdnsname , 'ec2-54-0-0-1.us-west-1.compute.amazonaws.com')
- self.assertEqual(response.instances[0].publicipaddress , '54.0.0.1')
-
+ self.assertEqual(
+ response.instances[0].privatednsname, 'ip-10-0-0-60.us-west-1.compute.internal')
+ self.assertEqual(response.instances[0].privateipaddress, '10.0.0.60')
+        self.assertEqual(
+            response.instances[0].publicdnsname,
+            'ec2-54-0-0-1.us-west-1.compute.amazonaws.com')
+ self.assertEqual(response.instances[0].publicipaddress, '54.0.0.1')
self.assert_request_parameters({
'Action': 'ListInstances',
@@ -481,7 +500,7 @@ class TestListSteps(AWSMockServiceTestCase):
# Check for step config
step = response.steps[0]
self.assertEqual(step.config.jar,
- '/home/hadoop/lib/emr-s3distcp-1.0.jar')
+ '/home/hadoop/lib/emr-s3distcp-1.0.jar')
self.assertEqual(len(step.config.args), 4)
self.assertEqual(step.config.args[0].value, '--src')
self.assertEqual(step.config.args[1].value, 'hdfs:///data/test/')
@@ -507,6 +526,7 @@ class TestListSteps(AWSMockServiceTestCase):
self.assertTrue(isinstance(response, StepSummaryList))
self.assertEqual(response.steps[0].name, 'Step 1')
+
class TestListBootstrapActions(AWSMockServiceTestCase):
connection_class = EmrConnection
@@ -519,7 +539,8 @@ class TestListBootstrapActions(AWSMockServiceTestCase):
with self.assertRaises(TypeError):
self.service_connection.list_bootstrap_actions()
- response = self.service_connection.list_bootstrap_actions(cluster_id='j-123')
+ response = self.service_connection.list_bootstrap_actions(
+ cluster_id='j-123')
self.assert_request_parameters({
'Action': 'ListBootstrapActions',
@@ -593,12 +614,15 @@ class TestDescribeCluster(AWSMockServiceTestCase):
self.assertEqual(response.name, 'test analytics')
self.assertEqual(response.requestedamiversion, '2.4.2')
self.assertEqual(response.terminationprotected, 'false')
- self.assertEqual(response.ec2instanceattributes.ec2availabilityzone, "us-west-1c")
- self.assertEqual(response.ec2instanceattributes.ec2keyname, 'my_secret_key')
+ self.assertEqual(
+ response.ec2instanceattributes.ec2availabilityzone, "us-west-1c")
+ self.assertEqual(
+ response.ec2instanceattributes.ec2keyname, 'my_secret_key')
self.assertEqual(response.status.state, 'TERMINATED')
self.assertEqual(response.applications[0].name, 'hadoop')
self.assertEqual(response.applications[0].version, '1.0.3')
- self.assertEqual(response.masterpublicdnsname, 'ec2-184-0-0-1.us-west-1.compute.amazonaws.com')
+ self.assertEqual(
+ response.masterpublicdnsname, 'ec2-184-0-0-1.us-west-1.compute.amazonaws.com')
self.assertEqual(response.normalizedinstancehours, '10')
self.assertEqual(response.servicerole, 'my-service-role')
@@ -772,7 +796,8 @@ class TestRemoveTag(AWSMockServiceTestCase):
with self.assertRaises(AssertionError):
self.service_connection.add_tags('j-123', [])
- response = self.service_connection.remove_tags('j-123', ['FirstKey', 'SecondKey'])
+ response = self.service_connection.remove_tags(
+ 'j-123', ['FirstKey', 'SecondKey'])
self.assertTrue(response)
self.assert_request_parameters({
@@ -783,6 +808,7 @@ class TestRemoveTag(AWSMockServiceTestCase):
'Version': '2009-03-31'
})
+
class DescribeJobFlowsTestBase(AWSMockServiceTestCase):
connection_class = EmrConnection
@@ -888,6 +914,7 @@ class DescribeJobFlowsTestBase(AWSMockServiceTestCase):
</DescribeJobFlowsResponse>
"""
+
class TestDescribeJobFlows(DescribeJobFlowsTestBase):
def test_describe_jobflows_response(self):
@@ -910,14 +937,16 @@ class TestDescribeJobFlows(DescribeJobFlowsTestBase):
self.assertEqual(jf.masterinstanceid, 'i-aaaaaa')
self.assertEqual(jf.hadoopversion, '1.0.3')
self.assertEqual(jf.normalizedinstancehours, '12')
- self.assertEqual(jf.masterpublicdnsname, 'ec2-184-0-0-1.us-west-1.compute.amazonaws.com')
+ self.assertEqual(
+ jf.masterpublicdnsname, 'ec2-184-0-0-1.us-west-1.compute.amazonaws.com')
self.assertEqual(jf.instancecount, '3')
self.assertEqual(jf.terminationprotected, 'false')
self.assertTrue(isinstance(jf.steps, list))
step = jf.steps[0]
self.assertTrue(isinstance(step, Step))
- self.assertEqual(step.jar, 's3://us-west-1.elasticmapreduce/libs/script-runner/script-runner.jar')
+ self.assertEqual(
+ step.jar, 's3://us-west-1.elasticmapreduce/libs/script-runner/script-runner.jar')
self.assertEqual(step.name, 'Setup hive')
self.assertEqual(step.actiononfailure, 'TERMINATE_JOB_FLOW')
@@ -949,7 +978,8 @@ class TestDescribeJobFlows(DescribeJobFlowsTestBase):
now = datetime.now()
a_bit_before = datetime.fromtimestamp(time() - 1000)
- self.service_connection.describe_jobflows(states=['WAITING', 'RUNNING'], jobflow_ids=['j-aaaaaa', 'j-aaaaab'], created_after=a_bit_before, created_before=now)
+ self.service_connection.describe_jobflows(states=['WAITING', 'RUNNING'], jobflow_ids=[
+ 'j-aaaaaa', 'j-aaaaab'], created_after=a_bit_before, created_before=now)
self.assert_request_parameters({
'Action': 'DescribeJobFlows',
'JobFlowIds.member.1': 'j-aaaaaa',
@@ -960,7 +990,9 @@ class TestDescribeJobFlows(DescribeJobFlowsTestBase):
'CreatedBefore': now.strftime(boto.utils.ISO8601),
}, ignore_params_values=['Version'])
+
class TestDescribeJobFlow(DescribeJobFlowsTestBase):
+
def test_describe_jobflow(self):
self.set_http_response(200)
@@ -971,6 +1003,7 @@ class TestDescribeJobFlow(DescribeJobFlowsTestBase):
'JobFlowIds.member.1': 'j-aaaaaa',
}, ignore_params_values=['Version'])
+
class TestRunJobFlow(AWSMockServiceTestCase):
connection_class = EmrConnection
@@ -997,8 +1030,25 @@ class TestRunJobFlow(AWSMockServiceTestCase):
'Action': 'RunJobFlow',
'Version': '2009-03-31',
'ServiceRole': 'EMR_DefaultRole',
- 'Name': 'EmrCluster' },
+ 'Name': 'EmrCluster'},
ignore_params_values=['ActionOnFailure', 'Instances.InstanceCount',
'Instances.KeepJobFlowAliveWhenNoSteps',
'Instances.MasterInstanceType',
'Instances.SlaveInstanceType'])
+
+ def test_run_jobflow_enable_debugging(self):
+ self.region = 'ap-northeast-2'
+ self.set_http_response(200)
+ self.service_connection.run_jobflow(
+ 'EmrCluster', enable_debugging=True)
+
+ actual_params = set(self.actual_request.params.copy().items())
+
+ expected_params = set([
+ ('Steps.member.1.HadoopJarStep.Jar',
+ 's3://ap-northeast-2.elasticmapreduce/libs/script-runner/script-runner.jar'),
+ ('Steps.member.1.HadoopJarStep.Args.member.1',
+ 's3://ap-northeast-2.elasticmapreduce/libs/state-pusher/0.1/fetch'),
+ ])
+
+ self.assertTrue(expected_params <= actual_params)
diff --git a/tests/unit/glacier/test_layer1.py b/tests/unit/glacier/test_layer1.py
index 4c8f0cf7..fe4b7002 100644
--- a/tests/unit/glacier/test_layer1.py
+++ b/tests/unit/glacier/test_layer1.py
@@ -2,8 +2,10 @@ import json
import copy
import tempfile
+from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
from boto.glacier.layer1 import Layer1
+from boto.compat import six
class GlacierLayer1ConnectionBase(AWSMockServiceTestCase):
@@ -80,6 +82,33 @@ class GlacierJobOperations(GlacierLayer1ConnectionBase):
self.assertEqual(self.job_content, response.read())
+class TestGlacierUploadPart(GlacierLayer1ConnectionBase):
+ def test_upload_part_content_range_header(self):
+ fake_data = b'\xe2'
+ self.set_http_response(status_code=204)
+ self.service_connection.upload_part(
+ u'unicode_vault_name', 'upload_id', 'linear_hash', 'tree_hash',
+            (1, 2), fake_data)
+ self.assertEqual(
+ self.actual_request.headers['Content-Range'], 'bytes 1-2/*')
+
+ def test_upload_part_with_unicode_name(self):
+ fake_data = b'\xe2'
+ self.set_http_response(status_code=204)
+ self.service_connection.upload_part(
+ u'unicode_vault_name', 'upload_id', 'linear_hash', 'tree_hash',
+            (1, 2), fake_data)
+ self.assertEqual(
+ self.actual_request.path,
+ '/-/vaults/unicode_vault_name/multipart-uploads/upload_id')
+ # If the path is unicode in python2, it triggers the following bug
+ # noted in this PR: https://github.com/boto/boto/pull/2697
+        # httplib notices that the path is unicode and will then try to
+        # encode the body, which may be impossible if the data is binary.
+ self.assertIsInstance(self.actual_request.body, six.binary_type)
+ self.assertEqual(self.actual_request.body, fake_data)
+
+
class GlacierUploadArchiveResets(GlacierLayer1ConnectionBase):
def test_upload_archive(self):
fake_data = tempfile.NamedTemporaryFile()
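
The failure mode described in the comment above is easy to reproduce in
isolation. A Python 2 sketch (illustrative only, not boto code)::

    # Python 2: combining a unicode path with binary data forces an
    # implicit ascii decode of the bytes, which raises for non-ascii.
    path = u'/-/vaults/unicode_vault_name'
    body = b'\xe2'
    try:
        path + body
    except UnicodeDecodeError as exc:
        print(exc)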
diff --git a/tests/unit/kinesis/test_kinesis.py b/tests/unit/kinesis/test_kinesis.py
index 6ad8adf9..2a37a2a7 100644
--- a/tests/unit/kinesis/test_kinesis.py
+++ b/tests/unit/kinesis/test_kinesis.py
@@ -36,7 +36,7 @@ class TestKinesis(AWSMockServiceTestCase):
self.service_connection.put_record('stream-name',
b'\x00\x01\x02\x03\x04\x05', 'partition-key')
- body = json.loads(self.actual_request.body)
+ body = json.loads(self.actual_request.body.decode('utf-8'))
self.assertEqual(body['Data'], 'AAECAwQF')
target = self.actual_request.headers['X-Amz-Target']
@@ -47,7 +47,7 @@ class TestKinesis(AWSMockServiceTestCase):
self.service_connection.put_record('stream-name',
'data', 'partition-key')
- body = json.loads(self.actual_request.body)
+ body = json.loads(self.actual_request.body.decode('utf-8'))
self.assertEqual(body['Data'], 'ZGF0YQ==')
target = self.actual_request.headers['X-Amz-Target']
@@ -66,7 +66,7 @@ class TestKinesis(AWSMockServiceTestCase):
self.service_connection.put_records(stream_name='stream-name',
records=[record_binary, record_str])
- body = json.loads(self.actual_request.body)
+ body = json.loads(self.actual_request.body.decode('utf-8'))
self.assertEqual(body['Records'][0]['Data'], 'AAECAwQF')
self.assertEqual(body['Records'][1]['Data'], 'ZGF0YQ==')
diff --git a/tests/unit/provider/test_provider.py b/tests/unit/provider/test_provider.py
index 89092253..ecf77256 100644
--- a/tests/unit/provider/test_provider.py
+++ b/tests/unit/provider/test_provider.py
@@ -6,6 +6,7 @@ import os
from boto import provider
from boto.compat import expanduser
+from boto.exception import InvalidInstanceMetadataError
INSTANCE_CONFIG = {
@@ -343,6 +344,32 @@ class TestProvider(unittest.TestCase):
self.get_instance_metadata.call_args[1]['data'],
'meta-data/iam/security-credentials/')
+ def test_metadata_server_returns_bad_type(self):
+ self.get_instance_metadata.return_value = {
+ 'rolename': [],
+ }
+ with self.assertRaises(InvalidInstanceMetadataError):
+ p = provider.Provider('aws')
+
+ def test_metadata_server_returns_empty_string(self):
+ self.get_instance_metadata.return_value = {
+ 'rolename': ''
+ }
+ with self.assertRaises(InvalidInstanceMetadataError):
+ p = provider.Provider('aws')
+
+ def test_metadata_server_returns_missing_keys(self):
+ self.get_instance_metadata.return_value = {
+ 'allowall': {
+ u'AccessKeyId': u'iam_access_key',
+ # Missing SecretAccessKey.
+ u'Token': u'iam_token',
+ u'Expiration': u'2012-09-01T03:57:34Z',
+ }
+ }
+ with self.assertRaises(InvalidInstanceMetadataError):
+ p = provider.Provider('aws')
+
def test_refresh_credentials(self):
now = datetime.utcnow()
first_expiration = (now + timedelta(seconds=10)).strftime(
diff --git a/tests/unit/pyami/__init__.py b/tests/unit/pyami/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/unit/pyami/__init__.py
diff --git a/tests/unit/pyami/test_config.py b/tests/unit/pyami/test_config.py
new file mode 100644
index 00000000..3b7e3f44
--- /dev/null
+++ b/tests/unit/pyami/test_config.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from tests.compat import mock, unittest
+
+from boto.pyami import config
+from boto.compat import StringIO
+
+
+class TestCanLoadConfigFile(unittest.TestCase):
+ maxDiff = None
+
+ def setUp(self):
+        file_contents = StringIO(
+ '[Boto]\n'
+ 'https_validate_certificates = true\n'
+ 'other = false\n'
+ 'http_socket_timeout = 1\n'
+ '[Credentials]\n'
+ 'aws_access_key_id=foo\n'
+ 'aws_secret_access_key=bar\n'
+ )
+ self.config = config.Config(fp=file_contents)
+
+ def test_can_get_bool(self):
+ self.assertTrue(
+ self.config.getbool('Boto', 'https_validate_certificates'))
+ self.assertFalse(self.config.getbool('Boto', 'other'))
+ self.assertFalse(self.config.getbool('Boto', 'does-not-exist'))
+
+ def test_can_get_int(self):
+ self.assertEqual(self.config.getint('Boto', 'http_socket_timeout'), 1)
+ self.assertEqual(self.config.getint('Boto', 'does-not-exist'), 0)
+ self.assertEqual(
+ self.config.getint('Boto', 'does-not-exist', default=20), 20)
+
+ def test_can_get_strings(self):
+ self.assertEqual(
+ self.config.get('Credentials', 'aws_access_key_id'), 'foo')
+ self.assertIsNone(
+ self.config.get('Credentials', 'no-exist'))
+ self.assertEqual(
+ self.config.get('Credentials', 'no-exist', 'default-value'),
+ 'default-value')
diff --git a/tests/unit/route53/test_connection.py b/tests/unit/route53/test_connection.py
index d1a80152..73d77248 100644
--- a/tests/unit/route53/test_connection.py
+++ b/tests/unit/route53/test_connection.py
@@ -64,16 +64,16 @@ class TestRoute53Connection(AWSMockServiceTestCase):
def test_retryable_400_prior_request_not_complete(self):
# Test ability to retry on ``PriorRequestNotComplete``.
- self.set_http_response(status_code=400, header=[
- ['Code', 'PriorRequestNotComplete'],
- ])
+ self.set_http_response(status_code=400, body="""<?xml version="1.0"?>
+<ErrorResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/"><Error><Type>Sender</Type><Code>PriorRequestNotComplete</Code><Message>The request was rejected because Route 53 was still processing a prior request.</Message></Error><RequestId>12d222a0-f3d9-11e4-a611-c321a3a00f9c</RequestId></ErrorResponse>
+""")
self.do_retry_handler()
def test_retryable_400_throttling(self):
        # Test ability to retry on ``Throttling``.
- self.set_http_response(status_code=400, header=[
- ['Code', 'Throttling'],
- ])
+ self.set_http_response(status_code=400, body="""<?xml version="1.0"?>
+<ErrorResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/"><Error><Type>Sender</Type><Code>Throttling</Code><Message>Rate exceeded</Message></Error><RequestId>19d0a9a0-f3d9-11e4-a611-c321a3a00f9c</RequestId></ErrorResponse>
+""")
self.do_retry_handler()
@mock.patch('time.sleep')
diff --git a/tests/unit/s3/test_bucketlistresultset.py b/tests/unit/s3/test_bucketlistresultset.py
new file mode 100644
index 00000000..f3c75684
--- /dev/null
+++ b/tests/unit/s3/test_bucketlistresultset.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016 Mitch Garnaat http://garnaat.org/
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from mock import patch, Mock
+import unittest
+
+from boto.s3.bucket import ResultSet
+from boto.s3.bucketlistresultset import multipart_upload_lister
+from boto.s3.bucketlistresultset import versioned_bucket_lister
+
+
+class S3BucketListResultSetTest(unittest.TestCase):
+ def _test_patched_lister_encoding(self, inner_method, outer_method):
+ bucket = Mock()
+ call_args = []
+ first = ResultSet()
+ first.append('foo')
+ first.next_key_marker = 'a+b'
+ first.is_truncated = True
+ second = ResultSet()
+ second.append('bar')
+ second.is_truncated = False
+ pages = [first, second]
+
+ def return_pages(**kwargs):
+ call_args.append(kwargs)
+ return pages.pop(0)
+
+ setattr(bucket, inner_method, return_pages)
+ results = list(outer_method(bucket, encoding_type='url'))
+ self.assertEqual(['foo', 'bar'], results)
+ self.assertEqual('a b', call_args[1]['key_marker'])
+
+ def test_list_object_versions_with_url_encoding(self):
+ self._test_patched_lister_encoding(
+ 'get_all_versions', versioned_bucket_lister)
+
+ def test_list_multipart_upload_with_url_encoding(self):
+ self._test_patched_lister_encoding(
+ 'get_all_multipart_uploads', multipart_upload_lister)
diff --git a/tests/unit/s3/test_connection.py b/tests/unit/s3/test_connection.py
index 5839a6a2..56bcfd87 100644
--- a/tests/unit/s3/test_connection.py
+++ b/tests/unit/s3/test_connection.py
@@ -19,9 +19,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
-import time
-
-from tests.compat import mock, unittest
+from tests.compat import unittest
from tests.unit import AWSMockServiceTestCase
from tests.unit import MockServiceWithConfigTestCase
@@ -50,6 +48,31 @@ class TestSignatureAlteration(AWSMockServiceTestCase):
)
+class TestPresigned(MockServiceWithConfigTestCase):
+ connection_class = S3Connection
+
+ def test_presign_respect_query_auth(self):
+ self.config = {
+ 's3': {
+ 'use-sigv4': False,
+ }
+ }
+
+ conn = self.connection_class(
+ aws_access_key_id='less',
+ aws_secret_access_key='more',
+ host='s3.amazonaws.com'
+ )
+
+ url_enabled = conn.generate_url(86400, 'GET', bucket='examplebucket',
+ key='test.txt', query_auth=True)
+
+ url_disabled = conn.generate_url(86400, 'GET', bucket='examplebucket',
+ key='test.txt', query_auth=False)
+ self.assertIn('Signature=', url_enabled)
+ self.assertNotIn('Signature=', url_disabled)
+
+
class TestSigV4HostError(MockServiceWithConfigTestCase):
connection_class = S3Connection
@@ -111,9 +134,12 @@ class TestSigV4Presigned(MockServiceWithConfigTestCase):
# Here we force an input iso_date to ensure we always get the
# same signature.
url = conn.generate_url_sigv4(86400, 'GET', bucket='examplebucket',
- key='test.txt', iso_date='20140625T000000Z')
+ key='test.txt',
+ iso_date='20140625T000000Z')
- self.assertIn('a937f5fbc125d98ac8f04c49e0204ea1526a7b8ca058000a54c192457be05b7d', url)
+ self.assertIn(
+ 'a937f5fbc125d98ac8f04c49e0204ea1526a7b8ca058000a54c192457be05b7d',
+ url)
def test_sigv4_presign_optional_params(self):
self.config = {
@@ -130,11 +156,32 @@ class TestSigV4Presigned(MockServiceWithConfigTestCase):
)
url = conn.generate_url_sigv4(86400, 'GET', bucket='examplebucket',
- key='test.txt', version_id=2)
+ key='test.txt', version_id=2)
self.assertIn('VersionId=2', url)
self.assertIn('X-Amz-Security-Token=token', url)
+ def test_sigv4_presign_respect_query_auth(self):
+ self.config = {
+ 's3': {
+ 'use-sigv4': True,
+ }
+ }
+
+ conn = self.connection_class(
+ aws_access_key_id='less',
+ aws_secret_access_key='more',
+ host='s3.amazonaws.com'
+ )
+
+ url_enabled = conn.generate_url(86400, 'GET', bucket='examplebucket',
+ key='test.txt', query_auth=True)
+
+ url_disabled = conn.generate_url(86400, 'GET', bucket='examplebucket',
+ key='test.txt', query_auth=False)
+ self.assertIn('Signature=', url_enabled)
+ self.assertNotIn('Signature=', url_disabled)
+
def test_sigv4_presign_headers(self):
self.config = {
's3': {
@@ -155,6 +202,26 @@ class TestSigV4Presigned(MockServiceWithConfigTestCase):
self.assertIn('host', url)
self.assertIn('x-amz-meta-key', url)
+ def test_sigv4_presign_response_headers(self):
+ self.config = {
+ 's3': {
+ 'use-sigv4': True,
+ }
+ }
+
+ conn = self.connection_class(
+ aws_access_key_id='less',
+ aws_secret_access_key='more',
+ host='s3.amazonaws.com'
+ )
+
+ response_headers = {'response-content-disposition': 'attachment; filename="file.ext"'}
+ url = conn.generate_url_sigv4(86400, 'GET', bucket='examplebucket',
+ key='test.txt', response_headers=response_headers)
+
+ self.assertIn('host', url)
+ self.assertIn('response-content-disposition', url)
+
class TestUnicodeCallingFormat(AWSMockServiceTestCase):
connection_class = S3Connection
diff --git a/tests/unit/s3/test_lifecycle.py b/tests/unit/s3/test_lifecycle.py
index da50f3a8..44aafc86 100644
--- a/tests/unit/s3/test_lifecycle.py
+++ b/tests/unit/s3/test_lifecycle.py
@@ -50,27 +50,103 @@ class TestS3LifeCycle(AWSMockServiceTestCase):
<Status>Disabled</Status>
<Transition>
<Date>2012-12-31T00:00:000Z</Date>
- <StorageClass>GLACIER</StorageClass>
+ <StorageClass>STANDARD_IA</StorageClass>
+ </Transition>
+ <Expiration>
+ <Date>2012-12-31T00:00:000Z</Date>
+ </Expiration>
+ </Rule>
+ <Rule>
+ <ID>multiple-transitions</ID>
+ <Prefix></Prefix>
+ <Status>Enabled</Status>
+ <Transition>
+ <Days>30</Days>
+ <StorageClass>STANDARD_IA</StorageClass>
+ </Transition>
+ <Transition>
+ <Days>90</Days>
+ <StorageClass>GLACIER</StorageClass>
</Transition>
</Rule>
</LifecycleConfiguration>
"""
- def test_parse_lifecycle_response(self):
+ def _get_bucket_lifecycle_config(self):
self.set_http_response(status_code=200)
bucket = Bucket(self.service_connection, 'mybucket')
- response = bucket.get_lifecycle_config()
- self.assertEqual(len(response), 2)
- rule = response[0]
+ return bucket.get_lifecycle_config()
+
+ def test_lifecycle_response_contains_all_rules(self):
+ self.assertEqual(len(self._get_bucket_lifecycle_config()), 3)
+
+ def test_parse_lifecycle_id(self):
+ rule = self._get_bucket_lifecycle_config()[0]
self.assertEqual(rule.id, 'rule-1')
+
+ def test_parse_lifecycle_prefix(self):
+ rule = self._get_bucket_lifecycle_config()[0]
self.assertEqual(rule.prefix, 'prefix/foo')
+
+ def test_parse_lifecycle_no_prefix(self):
+ rule = self._get_bucket_lifecycle_config()[2]
+        self.assertEqual(rule.prefix, '')
+
+ def test_parse_lifecycle_enabled(self):
+ rule = self._get_bucket_lifecycle_config()[0]
self.assertEqual(rule.status, 'Enabled')
+
+ def test_parse_lifecycle_disabled(self):
+ rule = self._get_bucket_lifecycle_config()[1]
+ self.assertEqual(rule.status, 'Disabled')
+
+ def test_parse_expiration_days(self):
+ rule = self._get_bucket_lifecycle_config()[0]
self.assertEqual(rule.expiration.days, 365)
- self.assertIsNone(rule.expiration.date)
- transition = rule.transition
- self.assertEqual(transition.days, 30)
+
+ def test_parse_expiration_date(self):
+ rule = self._get_bucket_lifecycle_config()[1]
+ self.assertEqual(rule.expiration.date, '2012-12-31T00:00:000Z')
+
+ def test_parse_expiration_not_required(self):
+ rule = self._get_bucket_lifecycle_config()[2]
+ self.assertIsNone(rule.expiration)
+
+ def test_parse_transition_days(self):
+ transition = self._get_bucket_lifecycle_config()[0].transition[0]
+        self.assertEqual(transition.days, 30)
+ self.assertIsNone(transition.date)
+
+ def test_parse_transition_days_deprecated(self):
+ transition = self._get_bucket_lifecycle_config()[0].transition
+        self.assertEqual(transition.days, 30)
+ self.assertIsNone(transition.date)
+
+ def test_parse_transition_date(self):
+ transition = self._get_bucket_lifecycle_config()[1].transition[0]
+        self.assertEqual(transition.date, '2012-12-31T00:00:000Z')
+ self.assertIsNone(transition.days)
+
+ def test_parse_transition_date_deprecated(self):
+ transition = self._get_bucket_lifecycle_config()[1].transition
+        self.assertEqual(transition.date, '2012-12-31T00:00:000Z')
+ self.assertIsNone(transition.days)
+
+ def test_parse_storage_class_standard_ia(self):
+ transition = self._get_bucket_lifecycle_config()[1].transition[0]
+ self.assertEqual(transition.storage_class, 'STANDARD_IA')
+
+ def test_parse_storage_class_glacier(self):
+ transition = self._get_bucket_lifecycle_config()[0].transition[0]
self.assertEqual(transition.storage_class, 'GLACIER')
- self.assertEqual(response[1].transition.date, '2012-12-31T00:00:000Z')
+
+ def test_parse_storage_class_deprecated(self):
+ transition = self._get_bucket_lifecycle_config()[1].transition
+ self.assertEqual(transition.storage_class, 'STANDARD_IA')
+
+ def test_parse_multiple_lifecycle_rules(self):
+ transition = self._get_bucket_lifecycle_config()[2].transition
+ self.assertEqual(len(transition), 2)
def test_expiration_with_no_transition(self):
lifecycle = Lifecycle()
@@ -87,7 +163,14 @@ class TestS3LifeCycle(AWSMockServiceTestCase):
'<Transition><StorageClass>GLACIER</StorageClass><Days>30</Days>',
xml)
- def test_expiration_with_expiration_and_transition(self):
+ def test_transition_is_optional(self):
+ r = Rule('myid', 'prefix', 'Enabled')
+ xml = r.to_xml()
+ self.assertEqual(
+ '<Rule><ID>myid</ID><Prefix>prefix</Prefix><Status>Enabled</Status></Rule>',
+ xml)
+
+ def test_expiration_and_transition(self):
t = Transition(date='2012-11-30T00:00:000Z', storage_class='GLACIER')
r = Rule('myid', 'prefix', 'Enabled', expiration=30, transition=t)
xml = r.to_xml()
diff --git a/tests/unit/sqs/test_connection.py b/tests/unit/sqs/test_connection.py
index b1735de1..21b05ea0 100644
--- a/tests/unit/sqs/test_connection.py
+++ b/tests/unit/sqs/test_connection.py
@@ -296,15 +296,15 @@ class SQSSendBatchMessageAttributes(AWSMockServiceTestCase):
'Action': 'SendMessageBatch',
'SendMessageBatchRequestEntry.1.DelaySeconds': 0,
'SendMessageBatchRequestEntry.1.Id': 1,
- 'SendMessageBatchRequestEntry.1.MessageAttribute.1.DataType': 'String',
'SendMessageBatchRequestEntry.1.MessageAttribute.1.Name': 'name1',
- 'SendMessageBatchRequestEntry.1.MessageAttribute.1.StringValue': 'foo',
+ 'SendMessageBatchRequestEntry.1.MessageAttribute.1.Value.DataType': 'String',
+ 'SendMessageBatchRequestEntry.1.MessageAttribute.1.Value.StringValue': 'foo',
'SendMessageBatchRequestEntry.1.MessageBody': 'Message 1',
'SendMessageBatchRequestEntry.2.DelaySeconds': 0,
'SendMessageBatchRequestEntry.2.Id': 2,
- 'SendMessageBatchRequestEntry.2.MessageAttribute.1.DataType': 'Number',
'SendMessageBatchRequestEntry.2.MessageAttribute.1.Name': 'name2',
- 'SendMessageBatchRequestEntry.2.MessageAttribute.1.StringValue': '1',
+ 'SendMessageBatchRequestEntry.2.MessageAttribute.1.Value.DataType': 'Number',
+ 'SendMessageBatchRequestEntry.2.MessageAttribute.1.Value.StringValue': '1',
'SendMessageBatchRequestEntry.2.MessageBody': 'Message 2',
'Version': '2012-11-05'
})
diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py
index 69e8816e..92ab6e43 100644
--- a/tests/unit/test_connection.py
+++ b/tests/unit/test_connection.py
@@ -145,6 +145,24 @@ class TestAWSAuthConnection(unittest.TestCase):
)
self.assertEqual(conn.get_proxy_url_with_auth(), 'http://john.doe:p4ssw0rd@127.0.0.1:8180')
+ def test_build_base_http_request_noproxy(self):
+ os.environ['no_proxy'] = 'mockservice.cc-zone-1.amazonaws.com'
+
+ conn = AWSAuthConnection(
+ 'mockservice.cc-zone-1.amazonaws.com',
+ aws_access_key_id='access_key',
+ aws_secret_access_key='secret',
+ suppress_consec_slashes=False,
+ proxy="127.0.0.1",
+ proxy_user="john.doe",
+ proxy_pass="p4ssw0rd",
+ proxy_port="8180"
+ )
+ request = conn.build_base_http_request('GET', '/', None)
+
+ del os.environ['no_proxy']
+ self.assertEqual(request.path, '/')
+
def test_connection_behind_proxy_without_explicit_port(self):
os.environ['http_proxy'] = "http://127.0.0.1"
conn = AWSAuthConnection(
@@ -504,7 +522,7 @@ class TestAWSQueryStatus(TestAWSQueryConnection):
class TestHTTPRequest(unittest.TestCase):
def test_user_agent_not_url_encoded(self):
- headers = {'Some-Header': u'should be url encoded',
+ headers = {'Some-Header': u'should be encoded \u2713',
'User-Agent': UserAgent}
request = HTTPRequest('PUT', 'https', 'amazon.com', 443, None,
None, {}, headers, 'Body')
@@ -521,7 +539,7 @@ class TestHTTPRequest(unittest.TestCase):
# Ensure the headers at authorization are as expected i.e.
# the user agent header was not url encoded but the other header was.
self.assertEqual(mock_connection.headers_at_auth,
- {'Some-Header': 'should%20be%20url%20encoded',
+ {'Some-Header': 'should be encoded %E2%9C%93',
'User-Agent': UserAgent})
def test_content_length_str(self):