summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--README.rst7
-rw-r--r--boto/__init__.py26
-rw-r--r--boto/auth.py51
-rw-r--r--boto/cloudformation/connection.py3
-rw-r--r--boto/cloudsearch2/document.py93
-rw-r--r--boto/cloudsearch2/layer1.py4
-rw-r--r--boto/cloudsearch2/layer2.py5
-rw-r--r--boto/cloudsearch2/search.py131
-rw-r--r--boto/cloudsearchdomain/__init__.py41
-rw-r--r--boto/cloudsearchdomain/exceptions.py30
-rw-r--r--boto/cloudsearchdomain/layer1.py540
-rw-r--r--boto/connection.py13
-rw-r--r--boto/dynamodb2/items.py5
-rw-r--r--boto/dynamodb2/results.py1
-rw-r--r--boto/dynamodb2/table.py40
-rw-r--r--boto/ec2/autoscale/launchconfig.py16
-rw-r--r--boto/ec2/connection.py49
-rw-r--r--boto/ec2/elb/__init__.py2
-rw-r--r--boto/ec2/reservedinstance.py3
-rw-r--r--boto/ec2/snapshot.py2
-rw-r--r--boto/emr/emrobject.py36
-rw-r--r--boto/endpoints.json15
-rw-r--r--boto/glacier/response.py7
-rw-r--r--boto/iam/connection.py96
-rw-r--r--boto/mws/connection.py2
-rw-r--r--boto/provider.py1
-rw-r--r--boto/route53/connection.py83
-rw-r--r--boto/route53/hostedzone.py9
-rw-r--r--boto/route53/record.py2
-rw-r--r--boto/s3/__init__.py10
-rw-r--r--boto/s3/acl.py8
-rw-r--r--boto/s3/key.py10
-rw-r--r--boto/ses/connection.py4
-rw-r--r--boto/sqs/connection.py12
-rw-r--r--boto/sqs/queue.py17
-rw-r--r--boto/sts/connection.py22
-rw-r--r--boto/vpc/__init__.py141
-rw-r--r--boto/vpc/vpc.py123
-rw-r--r--docs/source/dynamodb2_tut.rst1
-rw-r--r--docs/source/index.rst2
-rw-r--r--docs/source/ref/cloudsearchdomain.rst26
-rw-r--r--docs/source/ref/elb.rst10
-rw-r--r--docs/source/releasenotes/v2.35.0.rst55
-rw-r--r--docs/source/sqs_tut.rst7
-rw-r--r--setup.py3
-rw-r--r--tests/integration/dynamodb2/test_highlevel.py6
-rw-r--r--tests/integration/ec2/elb/test_connection.py6
-rw-r--r--tests/integration/ec2/test_connection.py5
-rw-r--r--tests/integration/iam/test_connection.py2
-rw-r--r--tests/integration/iam/test_password_policy.py80
-rw-r--r--tests/integration/route53/test_zone.py33
-rw-r--r--tests/integration/s3/test_connect_to_region.py73
-rw-r--r--tests/integration/s3/test_key.py75
-rw-r--r--tests/integration/sqs/test_connection.py41
-rw-r--r--tests/integration/sts/test_session_token.py2
-rw-r--r--tests/unit/__init__.py9
-rw-r--r--tests/unit/auth/test_sigv4.py19
-rw-r--r--tests/unit/auth/test_stsanon.py (renamed from tests/unit/auth/test_query.py)30
-rw-r--r--tests/unit/cloudformation/test_connection.py2
-rw-r--r--tests/unit/cloudsearchdomain/__init__.py0
-rw-r--r--tests/unit/cloudsearchdomain/test_cloudsearchdomain.py127
-rw-r--r--tests/unit/dynamodb2/test_table.py33
-rwxr-xr-xtests/unit/ec2/test_connection.py59
-rw-r--r--tests/unit/ec2/test_reservedinstance.py44
-rw-r--r--tests/unit/emr/test_connection.py16
-rw-r--r--tests/unit/emr/test_emr_responses.py15
-rw-r--r--tests/unit/glacier/test_response.py35
-rw-r--r--tests/unit/iam/test_connection.py86
-rw-r--r--tests/unit/mws/test_connection.py8
-rw-r--r--tests/unit/provider/test_provider.py11
-rw-r--r--tests/unit/route53/test_connection.py131
-rw-r--r--tests/unit/s3/test_key.py48
-rw-r--r--tests/unit/ses/test_identity.py27
-rw-r--r--tests/unit/test_connection.py20
-rw-r--r--tests/unit/vpc/test_vpc.py225
75 files changed, 2822 insertions, 210 deletions
diff --git a/README.rst b/README.rst
index 44231ab3..2d156ec6 100644
--- a/README.rst
+++ b/README.rst
@@ -1,9 +1,9 @@
####
boto
####
-boto 2.34.0
+boto 2.35.0
-Released: 23-Oct-2014
+Released: 08-Jan-2015
.. image:: https://travis-ci.org/boto/boto.svg?branch=develop
:target: https://travis-ci.org/boto/boto
@@ -62,6 +62,7 @@ At the moment, boto supports:
* Application Services
* Amazon CloudSearch (Python 3)
+ * Amazon CloudSearch Domain (Python 3)
* Amazon Elastic Transcoder (Python 3)
* Amazon Simple Workflow Service (SWF) (Python 3)
* Amazon Simple Queue Service (SQS) (Python 3)
@@ -179,7 +180,7 @@ boto config file. See `this`_ for details.
.. _github.com: http://github.com/boto/boto
.. _Online documentation: http://docs.pythonboto.org
.. _Python Cheese Shop: http://pypi.python.org/pypi/boto
-.. _this: http://code.google.com/p/boto/wiki/BotoConfig
+.. _this: http://docs.pythonboto.org/en/latest/boto_config_tut.html
.. _gitflow: http://nvie.com/posts/a-successful-git-branching-model/
.. _neo: https://github.com/boto/boto/tree/neo
.. _boto-users Google Group: https://groups.google.com/forum/?fromgroups#!forum/boto-users
diff --git a/boto/__init__.py b/boto/__init__.py
index fe5fd7d4..74223e58 100644
--- a/boto/__init__.py
+++ b/boto/__init__.py
@@ -38,7 +38,7 @@ import logging.config
from boto.compat import urlparse
from boto.exception import InvalidUriError
-__version__ = '2.34.0'
+__version__ = '2.35.0'
Version = __version__ # for backware compatibility
# http://bugs.python.org/issue7980
@@ -664,6 +664,7 @@ def connect_cloudsearch(aws_access_key_id=None,
def connect_cloudsearch2(aws_access_key_id=None,
aws_secret_access_key=None,
+ sign_request=False,
**kwargs):
"""
:type aws_access_key_id: string
@@ -672,14 +673,37 @@ def connect_cloudsearch2(aws_access_key_id=None,
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
+ :type sign_request: bool
+ :param sign_request: whether or not to sign search and
+ upload requests
+
:rtype: :class:`boto.cloudsearch2.layer2.Layer2`
:return: A connection to Amazon's CloudSearch2 service
"""
from boto.cloudsearch2.layer2 import Layer2
return Layer2(aws_access_key_id, aws_secret_access_key,
+ sign_request=sign_request,
**kwargs)
+def connect_cloudsearchdomain(aws_access_key_id=None,
+ aws_secret_access_key=None,
+ **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.cloudsearchdomain.layer1.CloudSearchDomainConnection`
+ :return: A connection to Amazon's CloudSearch Domain service
+ """
+ from boto.cloudsearchdomain.layer1 import CloudSearchDomainConnection
+ return CloudSearchDomainConnection(aws_access_key_id,
+ aws_secret_access_key, **kwargs)
+
+
def connect_beanstalk(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
diff --git a/boto/auth.py b/boto/auth.py
index 3e9ded79..f769472b 100644
--- a/boto/auth.py
+++ b/boto/auth.py
@@ -51,6 +51,16 @@ except ImportError:
sha256 = None
+# Region detection strings to determine if SigV4 should be used
+# by default.
+SIGV4_DETECT = [
+ '.cn-',
+ # In eu-central we support both host styles for S3
+ '.eu-central',
+ '-eu-central',
+]
+
+
class HmacKeys(object):
"""Key based Auth handler helper."""
@@ -359,7 +369,7 @@ class HmacAuthV4Handler(AuthHandler, HmacKeys):
for header in headers_to_sign:
c_name = header.lower().strip()
- raw_value = headers_to_sign[header]
+ raw_value = str(headers_to_sign[header])
if '"' in raw_value:
c_value = raw_value.strip()
else:
@@ -761,20 +771,22 @@ class S3HmacAuthV4Handler(HmacAuthV4Handler, AuthHandler):
urllib.parse.urlencode(req.params))
-class QueryAuthHandler(AuthHandler):
+class STSAnonHandler(AuthHandler):
"""
Provides pure query construction (no actual signing).
- Mostly useful for STS' ``assume_role_with_web_identity``.
-
- Does **NOT** escape query string values!
+ Used for making anonymous STS request for operations like
+ ``assume_role_with_web_identity``.
"""
- capability = ['pure-query']
+ capability = ['sts-anon']
def _escape_value(self, value):
- # Would normally be ``return urllib.parse.quote(value)``.
- return value
+ # This is changed from a previous version because this string is
+ # being passed to the query string and query strings must
+ # be url encoded. In particular STS requires the saml_response to
+ # be urlencoded when calling assume_role_with_saml.
+ return urllib.parse.quote(value)
def _build_query_string(self, params):
keys = list(params.keys())
@@ -790,13 +802,11 @@ class QueryAuthHandler(AuthHandler):
qs = self._build_query_string(
http_request.params
)
- boto.log.debug('query_string: %s' % qs)
- headers['Content-Type'] = 'application/json; charset=UTF-8'
- http_request.body = ''
- # if this is a retried request, the qs from the previous try will
- # already be there, we need to get rid of that and rebuild it
- http_request.path = http_request.path.split('?')[0]
- http_request.path = http_request.path + '?' + qs
+ boto.log.debug('query_string in body: %s' % qs)
+ headers['Content-Type'] = 'application/x-www-form-urlencoded'
+ # This will be a POST so the query string should go into the body
+ # as opposed to being in the uri
+ http_request.body = qs
class QuerySignatureHelper(HmacKeys):
@@ -1000,9 +1010,9 @@ def detect_potential_sigv4(func):
# ``boto/iam/connection.py``, as several things there are also
# endpoint-related.
if getattr(self.region, 'endpoint', ''):
- if '.cn-' in self.region.endpoint or \
- '.eu-central' in self.region.endpoint:
- return ['hmac-v4']
+ for test in SIGV4_DETECT:
+ if test in self.region.endpoint:
+ return ['hmac-v4']
return func(self)
return _wrapper
@@ -1020,8 +1030,9 @@ def detect_potential_s3sigv4(func):
# If you're making changes here, you should also check
# ``boto/iam/connection.py``, as several things there are also
# endpoint-related.
- if '.cn-' in self.host or '.eu-central' in self.host:
- return ['hmac-v4-s3']
+ for test in SIGV4_DETECT:
+ if test in self.host:
+ return ['hmac-v4-s3']
return func(self)
return _wrapper
diff --git a/boto/cloudformation/connection.py b/boto/cloudformation/connection.py
index 6c31ac53..84b4ea6e 100644
--- a/boto/cloudformation/connection.py
+++ b/boto/cloudformation/connection.py
@@ -884,5 +884,4 @@ class CloudFormationConnection(AWSQueryConnection):
params['StackPolicyURL'] = stack_policy_url
response = self._do_request('SetStackPolicy', params, '/', 'POST')
- return response['SetStackPolicyResponse']\
- ['SetStackPolicyResult']
+ return response['SetStackPolicyResponse']
diff --git a/boto/cloudsearch2/document.py b/boto/cloudsearch2/document.py
index 3b36f133..cfe5fe62 100644
--- a/boto/cloudsearch2/document.py
+++ b/boto/cloudsearch2/document.py
@@ -25,6 +25,7 @@ import boto.exception
from boto.compat import json
import requests
import boto
+from boto.cloudsearchdomain.layer1 import CloudSearchDomainConnection
class SearchServiceException(Exception):
@@ -93,11 +94,25 @@ class DocumentServiceConnection(object):
self.documents_batch = []
self._sdf = None
- # Copy proxy settings from connection
- if self.domain and self.domain.layer1 and self.domain.layer1.use_proxy:
- self.proxy = {'http': self.domain.layer1.get_proxy_url_with_auth()}
- else:
- self.proxy = {}
+ # Copy proxy settings from connection and check if request should be signed
+ self.proxy = {}
+ self.sign_request = False
+ if self.domain and self.domain.layer1:
+ if self.domain.layer1.use_proxy:
+ self.proxy = {'http': self.domain.layer1.get_proxy_url_with_auth()}
+
+ self.sign_request = getattr(self.domain.layer1, 'sign_request', False)
+
+ if self.sign_request:
+ # Create a domain connection to send signed requests
+ layer1 = self.domain.layer1
+ self.domain_connection = CloudSearchDomainConnection(
+ host=self.endpoint,
+ aws_access_key_id=layer1.aws_access_key_id,
+ aws_secret_access_key=layer1.aws_secret_access_key,
+ region=layer1.region,
+ provider=layer1.provider
+ )
def add(self, _id, fields):
"""
@@ -164,6 +179,26 @@ class DocumentServiceConnection(object):
self._sdf = key_obj.get_contents_as_string()
+ def _commit_with_auth(self, sdf, api_version):
+ return self.domain_connection.upload_documents(sdf, 'application/json')
+
+ def _commit_without_auth(self, sdf, api_version):
+ url = "http://%s/%s/documents/batch" % (self.endpoint, api_version)
+
+ # Keep-alive is automatic in a post-1.0 requests world.
+ session = requests.Session()
+ session.proxies = self.proxy
+ adapter = requests.adapters.HTTPAdapter(
+ pool_connections=20,
+ pool_maxsize=50,
+ max_retries=5
+ )
+ session.mount('http://', adapter)
+ session.mount('https://', adapter)
+
+ resp = session.post(url, data=sdf, headers={'Content-Type': 'application/json'})
+ return resp
+
def commit(self):
"""
Actually send an SDF to CloudSearch for processing
@@ -184,24 +219,15 @@ class DocumentServiceConnection(object):
boto.log.error(sdf[index - 100:index + 100])
api_version = '2013-01-01'
- if self.domain:
+ if self.domain and self.domain.layer1:
api_version = self.domain.layer1.APIVersion
- url = "http://%s/%s/documents/batch" % (self.endpoint, api_version)
- # Keep-alive is automatic in a post-1.0 requests world.
- session = requests.Session()
- session.proxies = self.proxy
- adapter = requests.adapters.HTTPAdapter(
- pool_connections=20,
- pool_maxsize=50,
- max_retries=5
- )
- session.mount('http://', adapter)
- session.mount('https://', adapter)
- r = session.post(url, data=sdf,
- headers={'Content-Type': 'application/json'})
+ if self.sign_request:
+ r = self._commit_with_auth(sdf, api_version)
+ else:
+ r = self._commit_without_auth(sdf, api_version)
- return CommitResponse(r, self, sdf)
+ return CommitResponse(r, self, sdf, signed_request=self.sign_request)
class CommitResponse(object):
@@ -219,20 +245,24 @@ class CommitResponse(object):
:raises: :class:`boto.cloudsearch2.document.EncodingError`
:raises: :class:`boto.cloudsearch2.document.ContentTooLongError`
"""
- def __init__(self, response, doc_service, sdf):
+ def __init__(self, response, doc_service, sdf, signed_request=False):
self.response = response
self.doc_service = doc_service
self.sdf = sdf
+ self.signed_request = signed_request
- _body = response.content.decode('utf-8')
+ if self.signed_request:
+ self.content = response
+ else:
+ _body = response.content.decode('utf-8')
- try:
- self.content = json.loads(_body)
- except:
- boto.log.error('Error indexing documents.\nResponse Content:\n{0}'
- '\n\nSDF:\n{1}'.format(_body, self.sdf))
- raise boto.exception.BotoServerError(self.response.status_code, '',
- body=_body)
+ try:
+ self.content = json.loads(_body)
+ except:
+ boto.log.error('Error indexing documents.\nResponse Content:\n{0}'
+ '\n\nSDF:\n{1}'.format(_body, self.sdf))
+ raise boto.exception.BotoServerError(self.response.status_code, '',
+ body=_body)
self.status = self.content['status']
if self.status == 'error':
@@ -266,7 +296,10 @@ class CommitResponse(object):
if d['type'] == type_])
if response_num != commit_num:
- boto.log.debug(self.response.content)
+ if self.signed_request:
+ boto.log.debug(self.response)
+ else:
+ boto.log.debug(self.response.content)
# There will always be a commit mismatch error if there is any
# errors on cloudsearch. self.errors gets lost when this
# CommitMismatchError is raised. Whoever is using boto has no idea
diff --git a/boto/cloudsearch2/layer1.py b/boto/cloudsearch2/layer1.py
index 0c41762f..a39b08f5 100644
--- a/boto/cloudsearch2/layer1.py
+++ b/boto/cloudsearch2/layer1.py
@@ -56,7 +56,6 @@ class CloudSearchConnection(AWSQueryConnection):
"BaseException": exceptions.BaseException,
}
-
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
@@ -66,6 +65,9 @@ class CloudSearchConnection(AWSQueryConnection):
if 'host' not in kwargs or kwargs['host'] is None:
kwargs['host'] = region.endpoint
+ sign_request = kwargs.pop('sign_request', False)
+ self.sign_request = sign_request
+
super(CloudSearchConnection, self).__init__(**kwargs)
self.region = region
diff --git a/boto/cloudsearch2/layer2.py b/boto/cloudsearch2/layer2.py
index c4840482..28fdc74c 100644
--- a/boto/cloudsearch2/layer2.py
+++ b/boto/cloudsearch2/layer2.py
@@ -32,7 +32,7 @@ class Layer2(object):
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
host=None, debug=0, session_token=None, region=None,
- validate_certs=True):
+ validate_certs=True, sign_request=False):
if isinstance(region, six.string_types):
import boto.cloudsearch2
@@ -52,7 +52,8 @@ class Layer2(object):
debug=debug,
security_token=session_token,
region=region,
- validate_certs=validate_certs)
+ validate_certs=validate_certs,
+ sign_request=sign_request)
def list_domains(self, domain_names=None):
"""
diff --git a/boto/cloudsearch2/search.py b/boto/cloudsearch2/search.py
index 634faf85..3db3a472 100644
--- a/boto/cloudsearch2/search.py
+++ b/boto/cloudsearch2/search.py
@@ -23,6 +23,7 @@
from math import ceil
from boto.compat import json, map, six
import requests
+from boto.cloudsearchdomain.layer1 import CloudSearchDomainConnection
SIMPLE = 'simple'
STRUCTURED = 'structured'
@@ -144,6 +145,62 @@ class Query(object):
return params
+ def to_domain_connection_params(self):
+ """
+ Transform search parameters from instance properties to a dictionary
+ that CloudSearchDomainConnection can accept
+
+ :rtype: dict
+ :return: search parameters
+ """
+ params = {'start': self.start, 'size': self.real_size}
+
+ if self.q:
+ params['q'] = self.q
+
+ if self.parser:
+ params['query_parser'] = self.parser
+
+ if self.fq:
+ params['filter_query'] = self.fq
+
+ if self.expr:
+ expr = {}
+ for k, v in six.iteritems(self.expr):
+ expr['expr.%s' % k] = v
+
+ params['expr'] = expr
+
+ if self.facet:
+ facet = {}
+ for k, v in six.iteritems(self.facet):
+ if not isinstance(v, six.string_types):
+ v = json.dumps(v)
+ facet['facet.%s' % k] = v
+
+ params['facet'] = facet
+
+ if self.highlight:
+ highlight = {}
+ for k, v in six.iteritems(self.highlight):
+ highlight['highlight.%s' % k] = v
+
+ params['highlight'] = highlight
+
+ if self.options:
+ params['query_options'] = self.options
+
+ if self.return_fields:
+ params['ret'] = ','.join(self.return_fields)
+
+ if self.partial is not None:
+ params['partial'] = self.partial
+
+ if self.sort:
+ params['sort'] = ','.join(self.sort)
+
+ return params
+
class SearchConnection(object):
@@ -152,13 +209,28 @@ class SearchConnection(object):
self.endpoint = endpoint
self.session = requests.Session()
- # Copy proxy settings from connection
- if self.domain and self.domain.layer1 and self.domain.layer1.use_proxy:
- self.session.proxies['http'] = self.domain.layer1.get_proxy_url_with_auth()
-
+ # Endpoint needs to be set before initializing CloudSearchDomainConnection
if not endpoint:
self.endpoint = domain.search_service_endpoint
+ # Copy proxy settings from connection and check if request should be signed
+ self.sign_request = False
+ if self.domain and self.domain.layer1:
+ if self.domain.layer1.use_proxy:
+ self.session.proxies['http'] = self.domain.layer1.get_proxy_url_with_auth()
+
+ self.sign_request = getattr(self.domain.layer1, 'sign_request', False)
+
+ if self.sign_request:
+ layer1 = self.domain.layer1
+ self.domain_connection = CloudSearchDomainConnection(
+ host=self.endpoint,
+ aws_access_key_id=layer1.aws_access_key_id,
+ aws_secret_access_key=layer1.aws_secret_access_key,
+ region=layer1.region,
+ provider=layer1.provider
+ )
+
def build_query(self, q=None, parser=None, fq=None, rank=None, return_fields=None,
size=10, start=0, facet=None, highlight=None, sort=None,
partial=None, options=None):
@@ -263,6 +335,15 @@ class SearchConnection(object):
partial=partial, options=options)
return self(query)
+ def _search_with_auth(self, params):
+ return self.domain_connection.search(params.pop("q", ""), **params)
+
+ def _search_without_auth(self, params, api_version):
+ url = "http://%s/%s/search" % (self.endpoint, api_version)
+ resp = self.session.get(url, params=params)
+
+ return {'body': resp.content.decode('utf-8'), 'status_code': resp.status_code}
+
def __call__(self, query):
"""Make a call to CloudSearch
@@ -273,26 +354,30 @@ class SearchConnection(object):
:return: search results
"""
api_version = '2013-01-01'
- if self.domain:
+ if self.domain and self.domain.layer1:
api_version = self.domain.layer1.APIVersion
- url = "http://%s/%s/search" % (self.endpoint, api_version)
- params = query.to_params()
-
- r = self.session.get(url, params=params)
- _body = r.content.decode('utf-8')
- try:
- data = json.loads(_body)
- except ValueError:
- if r.status_code == 403:
- msg = ''
- import re
- g = re.search('<html><body><h1>403 Forbidden</h1>([^<]+)<', _body)
- try:
- msg = ': %s' % (g.groups()[0].strip())
- except AttributeError:
- pass
- raise SearchServiceException('Authentication error from Amazon%s' % msg)
- raise SearchServiceException("Got non-json response from Amazon. %s" % _body, query)
+
+ if self.sign_request:
+ data = self._search_with_auth(query.to_domain_connection_params())
+ else:
+ r = self._search_without_auth(query.to_params(), api_version)
+
+ _body = r['body']
+ _status_code = r['status_code']
+
+ try:
+ data = json.loads(_body)
+ except ValueError:
+ if _status_code == 403:
+ msg = ''
+ import re
+ g = re.search('<html><body><h1>403 Forbidden</h1>([^<]+)<', _body)
+ try:
+ msg = ': %s' % (g.groups()[0].strip())
+ except AttributeError:
+ pass
+ raise SearchServiceException('Authentication error from Amazon%s' % msg)
+ raise SearchServiceException("Got non-json response from Amazon. %s" % _body, query)
if 'messages' in data and 'error' in data:
for m in data['messages']:
diff --git a/boto/cloudsearchdomain/__init__.py b/boto/cloudsearchdomain/__init__.py
new file mode 100644
index 00000000..1b307a0f
--- /dev/null
+++ b/boto/cloudsearchdomain/__init__.py
@@ -0,0 +1,41 @@
+# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.regioninfo import RegionInfo, get_regions
+
+
+def regions():
+ """
+ Get all available regions for the Amazon CloudSearch Domain service.
+
+ :rtype: list
+ :return: A list of :class:`boto.regioninfo.RegionInfo`
+ """
+ from boto.cloudsearchdomain.layer1 import CloudSearchDomainConnection
+ return get_regions('cloudsearchdomain',
+ connection_cls=CloudSearchDomainConnection)
+
+
+def connect_to_region(region_name, **kw_params):
+ for region in regions():
+ if region.name == region_name:
+ return region.connect(**kw_params)
+ return None
diff --git a/boto/cloudsearchdomain/exceptions.py b/boto/cloudsearchdomain/exceptions.py
new file mode 100644
index 00000000..0f996153
--- /dev/null
+++ b/boto/cloudsearchdomain/exceptions.py
@@ -0,0 +1,30 @@
+# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.exception import BotoServerError
+
+
+class SearchException(BotoServerError):
+ pass
+
+
+class DocumentServiceException(BotoServerError):
+ pass
diff --git a/boto/cloudsearchdomain/layer1.py b/boto/cloudsearchdomain/layer1.py
new file mode 100644
index 00000000..7a68bbed
--- /dev/null
+++ b/boto/cloudsearchdomain/layer1.py
@@ -0,0 +1,540 @@
+# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.compat import json
+from boto.exception import JSONResponseError
+from boto.connection import AWSAuthConnection
+from boto.regioninfo import RegionInfo
+from boto.cloudsearchdomain import exceptions
+
+
+class CloudSearchDomainConnection(AWSAuthConnection):
+ """
+ You use the AmazonCloudSearch2013 API to upload documents to a
+ search domain and search those documents.
+
+ The endpoints for submitting `UploadDocuments`, `Search`, and
+ `Suggest` requests are domain-specific. To get the endpoints for
+ your domain, use the Amazon CloudSearch configuration service
+ `DescribeDomains` action. The domain endpoints are also displayed
+ on the domain dashboard in the Amazon CloudSearch console. You
+ submit suggest requests to the search endpoint.
+
+ For more information, see the `Amazon CloudSearch Developer
+ Guide`_.
+ """
+ APIVersion = "2013-01-01"
+ AuthServiceName = 'cloudsearch'
+ DefaultRegionName = "us-east-1"
+ DefaultRegionEndpoint = "cloudsearch.us-east-1.amazonaws.com"
+ ResponseError = JSONResponseError
+
+ _faults = {
+ "SearchException": exceptions.SearchException,
+ "DocumentServiceException": exceptions.DocumentServiceException,
+ }
+
+ def __init__(self, **kwargs):
+ region = kwargs.get('region')
+ if not region:
+ region = RegionInfo(self, self.DefaultRegionName,
+ self.DefaultRegionEndpoint)
+ else:
+ del kwargs['region']
+ if kwargs.get('host', None) is None:
+ raise ValueError(
+ 'The argument, host, must be provided when creating a '
+ 'CloudSearchDomainConnection because its methods require the '
+ 'specific domain\'s endpoint in order to successfully make '
+ 'requests to that CloudSearch Domain.'
+ )
+ super(CloudSearchDomainConnection, self).__init__(**kwargs)
+ self.region = region
+
+ def _required_auth_capability(self):
+ return ['hmac-v4']
+
+ def search(self, query, cursor=None, expr=None, facet=None,
+ filter_query=None, highlight=None, partial=None,
+ query_options=None, query_parser=None, ret=None, size=None,
+ sort=None, start=None):
+ """
+ Retrieves a list of documents that match the specified search
+ criteria. How you specify the search criteria depends on which
+ query parser you use. Amazon CloudSearch supports four query
+ parsers:
+
+
+ + `simple`: search all `text` and `text-array` fields for the
+ specified string. Search for phrases, individual terms, and
+ prefixes.
+ + `structured`: search specific fields, construct compound
+ queries using Boolean operators, and use advanced features
+ such as term boosting and proximity searching.
+ + `lucene`: specify search criteria using the Apache Lucene
+ query parser syntax.
+ + `dismax`: specify search criteria using the simplified
+ subset of the Apache Lucene query parser syntax defined by the
+ DisMax query parser.
+
+
+ For more information, see `Searching Your Data`_ in the Amazon
+ CloudSearch Developer Guide .
+
+ The endpoint for submitting `Search` requests is domain-
+ specific. You submit search requests to a domain's search
+ endpoint. To get the search endpoint for your domain, use the
+ Amazon CloudSearch configuration service `DescribeDomains`
+ action. A domain's endpoints are also displayed on the domain
+ dashboard in the Amazon CloudSearch console.
+
+ :type cursor: string
+ :param cursor: Retrieves a cursor value you can use to page through
+ large result sets. Use the `size` parameter to control the number
+ of hits to include in each response. You can specify either the
+ `cursor` or `start` parameter in a request; they are mutually
+ exclusive. To get the first cursor, set the cursor value to
+ `initial`. In subsequent requests, specify the cursor value
+ returned in the hits section of the response.
+ For more information, see `Paginating Results`_ in the Amazon
+ CloudSearch Developer Guide .
+
+ :type expr: string
+ :param expr: Defines one or more numeric expressions that can be used
+ to sort results or specify search or filter criteria. You can also
+ specify expressions as return fields.
+ For more information about defining and using expressions, see
+ `Configuring Expressions`_ in the Amazon CloudSearch Developer
+ Guide .
+
+ :type facet: string
+ :param facet: Specifies one or more fields for which to get facet
+ information, and options that control how the facet information is
+ returned. Each specified field must be facet-enabled in the domain
+ configuration. The fields and options are specified in JSON using
+ the form `{"FIELD":{"OPTION":VALUE,"OPTION:"STRING"},"FIELD":{"OPTI
+ ON":VALUE,"OPTION":"STRING"}}`.
+ You can specify the following faceting options:
+
+
+ + `buckets` specifies an array of the facet values or ranges to count.
+ Ranges are specified using the same syntax that you use to search
+ for a range of values. For more information, see ` Searching for a
+ Range of Values`_ in the Amazon CloudSearch Developer Guide .
+ Buckets are returned in the order they are specified in the
+ request. The `sort` and `size` options are not valid if you specify
+ `buckets`.
+ + `size` specifies the maximum number of facets to include in the
+ results. By default, Amazon CloudSearch returns counts for the top
+ 10. The `size` parameter is only valid when you specify the `sort`
+ option; it cannot be used in conjunction with `buckets`.
+ + `sort` specifies how you want to sort the facets in the results:
+ `bucket` or `count`. Specify `bucket` to sort alphabetically or
+ numerically by facet value (in ascending order). Specify `count` to
+ sort by the facet counts computed for each facet value (in
+ descending order). To retrieve facet counts for particular values
+ or ranges of values, use the `buckets` option instead of `sort`.
+
+
+ If no facet options are specified, facet counts are computed for all
+ field values, the facets are sorted by facet count, and the top 10
+ facets are returned in the results.
+
+ For more information, see `Getting and Using Facet Information`_ in the
+ Amazon CloudSearch Developer Guide .
+
+ :type filter_query: string
+ :param filter_query: Specifies a structured query that filters the
+ results of a search without affecting how the results are scored
+ and sorted. You use `filterQuery` in conjunction with the `query`
+ parameter to filter the documents that match the constraints
+ specified in the `query` parameter. Specifying a filter controls
+ only which matching documents are included in the results, it has
+ no effect on how they are scored and sorted. The `filterQuery`
+ parameter supports the full structured query syntax.
+ For more information about using filters, see `Filtering Matching
+ Documents`_ in the Amazon CloudSearch Developer Guide .
+
+ :type highlight: string
+ :param highlight: Retrieves highlights for matches in the specified
+ `text` or `text-array` fields. Each specified field must be
+ highlight enabled in the domain configuration. The fields and
+ options are specified in JSON using the form `{"FIELD":{"OPTION":VA
+ LUE,"OPTION:"STRING"},"FIELD":{"OPTION":VALUE,"OPTION":"STRING"}}`.
+ You can specify the following highlight options:
+
+
+ + `format`: specifies the format of the data in the text field: `text`
+ or `html`. When data is returned as HTML, all non-alphanumeric
+ characters are encoded. The default is `html`.
+ + `max_phrases`: specifies the maximum number of occurrences of the
+ search term(s) you want to highlight. By default, the first
+ occurrence is highlighted.
+ + `pre_tag`: specifies the string to prepend to an occurrence of a
+ search term. The default for HTML highlights is `<em>`. The
+ default for text highlights is `*`.
+ + `post_tag`: specifies the string to append to an occurrence of a
+ search term. The default for HTML highlights is `</em>`. The
+ default for text highlights is `*`.
+
+
+ If no highlight options are specified for a field, the returned field
+ text is treated as HTML and the first match is highlighted with
+ emphasis tags: `<em>search-term</em>`.
+
+ :type partial: boolean
+ :param partial: Enables partial results to be returned if one or more
+ index partitions are unavailable. When your search index is
+ partitioned across multiple search instances, by default Amazon
+ CloudSearch only returns results if every partition can be queried.
+ This means that the failure of a single search instance can result
+ in 5xx (internal server) errors. When you enable partial results,
+ Amazon CloudSearch returns whatever results are available and
+ includes the percentage of documents searched in the search results
+ (percent-searched). This enables you to more gracefully degrade
+ your users' search experience. For example, rather than displaying
+ no results, you could display the partial results and a message
+ indicating that the results might be incomplete due to a temporary
+ system outage.
+
+ :type query: string
+ :param query: Specifies the search criteria for the request. How you
+ specify the search criteria depends on the query parser used for
+ the request and the parser options specified in the `queryOptions`
+ parameter. By default, the `simple` query parser is used to process
+ requests. To use the `structured`, `lucene`, or `dismax` query
+ parser, you must also specify the `queryParser` parameter.
+ For more information about specifying search criteria, see `Searching
+ Your Data`_ in the Amazon CloudSearch Developer Guide .
+
+ :type query_options: string
+ :param query_options:
+ Configures options for the query parser specified in the `queryParser`
+ parameter.
+
+ The options you can configure vary according to which parser you use:
+
+
+ + `defaultOperator`: The default operator used to combine individual
+ terms in the search string. For example: `defaultOperator: 'or'`.
+ For the `dismax` parser, you specify a percentage that represents
+ the percentage of terms in the search string (rounded down) that
+ must match, rather than a default operator. A value of `0%` is the
+ equivalent to OR, and a value of `100%` is equivalent to AND. The
+ percentage must be specified as a value in the range 0-100 followed
+ by the percent (%) symbol. For example, `defaultOperator: 50%`.
+ Valid values: `and`, `or`, a percentage in the range 0%-100% (
+ `dismax`). Default: `and` ( `simple`, `structured`, `lucene`) or
+ `100` ( `dismax`). Valid for: `simple`, `structured`, `lucene`, and
+ `dismax`.
+ + `fields`: An array of the fields to search when no fields are
+ specified in a search. If no fields are specified in a search and
+ this option is not specified, all text and text-array fields are
+ searched. You can specify a weight for each field to control the
+ relative importance of each field when Amazon CloudSearch
+ calculates relevance scores. To specify a field weight, append a
+ caret ( `^`) symbol and the weight to the field name. For example,
+ to boost the importance of the `title` field over the `description`
+ field you could specify: `"fields":["title^5","description"]`.
+ Valid values: The name of any configured field and an optional
+ numeric value greater than zero. Default: All `text` and `text-
+ array` fields. Valid for: `simple`, `structured`, `lucene`, and
+ `dismax`.
+ + `operators`: An array of the operators or special characters you want
+ to disable for the simple query parser. If you disable the `and`,
+ `or`, or `not` operators, the corresponding operators ( `+`, `|`,
+ `-`) have no special meaning and are dropped from the search
+ string. Similarly, disabling `prefix` disables the wildcard
+ operator ( `*`) and disabling `phrase` disables the ability to
+ search for phrases by enclosing phrases in double quotes. Disabling
+ precedence disables the ability to control order of precedence
+ using parentheses. Disabling `near` disables the ability to use the
+ ~ operator to perform a sloppy phrase search. Disabling the `fuzzy`
+ operator disables the ability to use the ~ operator to perform a
+ fuzzy search. `escape` disables the ability to use a backslash (
+ `\`) to escape special characters within the search string.
+ Disabling whitespace is an advanced option that prevents the parser
+ from tokenizing on whitespace, which can be useful for Vietnamese.
+ (It prevents Vietnamese words from being split incorrectly.) For
+ example, you could disable all operators other than the phrase
+ operator to support just simple term and phrase queries:
+ `"operators":["and","not","or", "prefix"]`. Valid values: `and`,
+ `escape`, `fuzzy`, `near`, `not`, `or`, `phrase`, `precedence`,
+ `prefix`, `whitespace`. Default: All operators and special
+ characters are enabled. Valid for: `simple`.
+ + `phraseFields`: An array of the `text` or `text-array` fields you
+ want to use for phrase searches. When the terms in the search
+ string appear in close proximity within a field, the field scores
+ higher. You can specify a weight for each field to boost that
+ score. The `phraseSlop` option controls how much the matches can
+ deviate from the search string and still be boosted. To specify a
+ field weight, append a caret ( `^`) symbol and the weight to the
+ field name. For example, to boost phrase matches in the `title`
+ field over the `abstract` field, you could specify:
+ `"phraseFields":["title^3", "plot"]` Valid values: The name of any
+ `text` or `text-array` field and an optional numeric value greater
+ than zero. Default: No fields. If you don't specify any fields with
+ `phraseFields`, proximity scoring is disabled even if `phraseSlop`
+ is specified. Valid for: `dismax`.
+ + `phraseSlop`: An integer value that specifies how much matches can
+ deviate from the search phrase and still be boosted according to
+ the weights specified in the `phraseFields` option; for example,
+ `phraseSlop: 2`. You must also specify `phraseFields` to enable
+ proximity scoring. Valid values: positive integers. Default: 0.
+ Valid for: `dismax`.
+ + `explicitPhraseSlop`: An integer value that specifies how much a
+ match can deviate from the search phrase when the phrase is
+ enclosed in double quotes in the search string. (Phrases that
+ exceed this proximity distance are not considered a match.) For
+ example, to specify a slop of three for dismax phrase queries, you
+ would specify `"explicitPhraseSlop":3`. Valid values: positive
+ integers. Default: 0. Valid for: `dismax`.
+ + `tieBreaker`: When a term in the search string is found in a
+ document's field, a score is calculated for that field based on how
+ common the word is in that field compared to other documents. If
+ the term occurs in multiple fields within a document, by default
+ only the highest scoring field contributes to the document's
+ overall score. You can specify a `tieBreaker` value to enable the
+ matches in lower-scoring fields to contribute to the document's
+ score. That way, if two documents have the same max field score for
+ a particular term, the score for the document that has matches in
+ more fields will be higher. The formula for calculating the score
+ with a tieBreaker is `(max field score) + (tieBreaker) * (sum of
+ the scores for the rest of the matching fields)`. Set `tieBreaker`
+ to 0 to disregard all but the highest scoring field (pure max):
+ `"tieBreaker":0`. Set to 1 to sum the scores from all fields (pure
+ sum): `"tieBreaker":1`. Valid values: 0.0 to 1.0. Default: 0.0.
+ Valid for: `dismax`.
+
+ :type query_parser: string
+ :param query_parser:
+ Specifies which query parser to use to process the request. If
+ `queryParser` is not specified, Amazon CloudSearch uses the
+ `simple` query parser.
+
+ Amazon CloudSearch supports four query parsers:
+
+
+ + `simple`: perform simple searches of `text` and `text-array` fields.
+ By default, the `simple` query parser searches all `text` and
+ `text-array` fields. You can specify which fields to search by with
+ the `queryOptions` parameter. If you prefix a search term with a
+ plus sign (+) documents must contain the term to be considered a
+ match. (This is the default, unless you configure the default
+ operator with the `queryOptions` parameter.) You can use the `-`
+ (NOT), `|` (OR), and `*` (wildcard) operators to exclude particular
+ terms, find results that match any of the specified terms, or
+ search for a prefix. To search for a phrase rather than individual
+ terms, enclose the phrase in double quotes. For more information,
+ see `Searching for Text`_ in the Amazon CloudSearch Developer Guide
+ .
+ + `structured`: perform advanced searches by combining multiple
+ expressions to define the search criteria. You can also search
+ within particular fields, search for values and ranges of values,
+ and use advanced options such as term boosting, `matchall`, and
+ `near`. For more information, see `Constructing Compound Queries`_
+ in the Amazon CloudSearch Developer Guide .
+ + `lucene`: search using the Apache Lucene query parser syntax. For
+ more information, see `Apache Lucene Query Parser Syntax`_.
+ + `dismax`: search using the simplified subset of the Apache Lucene
+ query parser syntax defined by the DisMax query parser. For more
+ information, see `DisMax Query Parser Syntax`_.
+
+ :type ret: string
+ :param ret: Specifies the field and expression values to include in
+ the response. Multiple fields or expressions are specified as a
+ comma-separated list. By default, a search response includes all
+ return enabled fields ( `_all_fields`). To return only the document
+ IDs for the matching documents, specify `_no_fields`. To retrieve
+ the relevance score calculated for each document, specify `_score`.
+
+ :type size: long
+ :param size: Specifies the maximum number of search hits to include in
+ the response.
+
+ :type sort: string
+ :param sort: Specifies the fields or custom expressions to use to sort
+ the search results. Multiple fields or expressions are specified as
+ a comma-separated list. You must specify the sort direction ( `asc`
+ or `desc`) for each field; for example, `year desc,title asc`. To
+ use a field to sort results, the field must be sort-enabled in the
+ domain configuration. Array type fields cannot be used for sorting.
+ If no `sort` parameter is specified, results are sorted by their
+ default relevance scores in descending order: `_score desc`. You
+ can also sort by document ID ( `_id asc`) and version ( `_version
+ desc`).
+ For more information, see `Sorting Results`_ in the Amazon CloudSearch
+ Developer Guide .
+
+ :type start: long
+ :param start: Specifies the offset of the first search hit you want to
+ return. Note that the result set is zero-based; the first result is
+ at index 0. You can specify either the `start` or `cursor`
+ parameter in a request, they are mutually exclusive.
+ For more information, see `Paginating Results`_ in the Amazon
+ CloudSearch Developer Guide .
+
+ """
+ uri = '/2013-01-01/search'
+ params = {}
+ headers = {}
+ query_params = {}
+ if cursor is not None:
+ query_params['cursor'] = cursor
+ if expr is not None:
+ query_params['expr'] = expr
+ if facet is not None:
+ query_params['facet'] = facet
+ if filter_query is not None:
+ query_params['fq'] = filter_query
+ if highlight is not None:
+ query_params['highlight'] = highlight
+ if partial is not None:
+ query_params['partial'] = partial
+ if query is not None:
+ query_params['q'] = query
+ if query_options is not None:
+ query_params['q.options'] = query_options
+ if query_parser is not None:
+ query_params['q.parser'] = query_parser
+ if ret is not None:
+ query_params['return'] = ret
+ if size is not None:
+ query_params['size'] = size
+ if sort is not None:
+ query_params['sort'] = sort
+ if start is not None:
+ query_params['start'] = start
+ return self.make_request('POST', uri, expected_status=200,
+ data=json.dumps(params), headers=headers,
+ params=query_params)
+
+ def suggest(self, query, suggester, size=None):
+ """
+ Retrieves autocomplete suggestions for a partial query string.
+        You can use suggestions to display likely matches
+ before users finish typing. In Amazon CloudSearch, suggestions
+ are based on the contents of a particular text field. When you
+ request suggestions, Amazon CloudSearch finds all of the
+ documents whose values in the suggester field start with the
+ specified query string. The beginning of the field must match
+ the query string to be considered a match.
+
+ For more information about configuring suggesters and
+ retrieving suggestions, see `Getting Suggestions`_ in the
+ Amazon CloudSearch Developer Guide .
+
+ The endpoint for submitting `Suggest` requests is domain-
+ specific. You submit suggest requests to a domain's search
+ endpoint. To get the search endpoint for your domain, use the
+ Amazon CloudSearch configuration service `DescribeDomains`
+ action. A domain's endpoints are also displayed on the domain
+ dashboard in the Amazon CloudSearch console.
+
+ :type query: string
+ :param query: Specifies the string for which you want to get
+ suggestions.
+
+ :type suggester: string
+ :param suggester: Specifies the name of the suggester to use to find
+ suggested matches.
+
+ :type size: long
+ :param size: Specifies the maximum number of suggestions to return.
+
+ """
+ uri = '/2013-01-01/suggest'
+ params = {}
+ headers = {}
+ query_params = {}
+ if query is not None:
+ query_params['q'] = query
+ if suggester is not None:
+ query_params['suggester'] = suggester
+ if size is not None:
+ query_params['size'] = size
+ return self.make_request('GET', uri, expected_status=200,
+ data=json.dumps(params), headers=headers,
+ params=query_params)
+
+ def upload_documents(self, documents, content_type):
+ """
+ Posts a batch of documents to a search domain for indexing. A
+ document batch is a collection of add and delete operations
+ that represent the documents you want to add, update, or
+ delete from your domain. Batches can be described in either
+ JSON or XML. Each item that you want Amazon CloudSearch to
+ return as a search result (such as a product) is represented
+ as a document. Every document has a unique ID and one or more
+ fields that contain the data that you want to search and
+ return in results. Individual documents cannot contain more
+ than 1 MB of data. The entire batch cannot exceed 5 MB. To get
+ the best possible upload performance, group add and delete
+ operations in batches that are close the 5 MB limit.
+ Submitting a large volume of single-document batches can
+ overload a domain's document service.
+
+ The endpoint for submitting `UploadDocuments` requests is
+ domain-specific. To get the document endpoint for your domain,
+ use the Amazon CloudSearch configuration service
+ `DescribeDomains` action. A domain's endpoints are also
+ displayed on the domain dashboard in the Amazon CloudSearch
+ console.
+
+ For more information about formatting your data for Amazon
+ CloudSearch, see `Preparing Your Data`_ in the Amazon
+ CloudSearch Developer Guide . For more information about
+ uploading data for indexing, see `Uploading Data`_ in the
+ Amazon CloudSearch Developer Guide .
+
+ :type documents: blob
+        :param documents: A batch of documents formatted in JSON or XML.
+
+ :type content_type: string
+ :param content_type:
+ The format of the batch you are uploading. Amazon CloudSearch supports
+ two document batch formats:
+
+
+ + application/json
+ + application/xml
+
+ """
+ uri = '/2013-01-01/documents/batch'
+ headers = {}
+ query_params = {}
+ if content_type is not None:
+ headers['Content-Type'] = content_type
+ return self.make_request('POST', uri, expected_status=200,
+ data=documents, headers=headers,
+ params=query_params)
+
+ def make_request(self, verb, resource, headers=None, data='',
+ expected_status=None, params=None):
+ if headers is None:
+ headers = {}
+ response = AWSAuthConnection.make_request(
+ self, verb, resource, headers=headers, data=data, params=params)
+ body = json.loads(response.read().decode('utf-8'))
+ if response.status == expected_status:
+ return body
+ else:
+ raise JSONResponseError(response.status, response.reason, body)
diff --git a/boto/connection.py b/boto/connection.py
index 40db69a7..ae948096 100644
--- a/boto/connection.py
+++ b/boto/connection.py
@@ -903,7 +903,7 @@ class AWSAuthConnection(object):
boto.log.debug('Params: %s' % request.params)
response = None
body = None
- e = None
+ ex = None
if override_num_retries is None:
num_retries = config.getint('Boto', 'num_retries', self.num_retries)
else:
@@ -1002,6 +1002,7 @@ class AWSAuthConnection(object):
connection = self.new_http_connection(request.host, request.port,
self.is_secure)
response = e.response
+ ex = e
except self.http_exceptions as e:
for unretryable in self.http_unretryable_exceptions:
if isinstance(e, unretryable):
@@ -1013,6 +1014,7 @@ class AWSAuthConnection(object):
e.__class__.__name__)
connection = self.new_http_connection(request.host, request.port,
self.is_secure)
+ ex = e
time.sleep(next_sleep)
i += 1
# If we made it here, it's because we have exhausted our retries
@@ -1023,8 +1025,8 @@ class AWSAuthConnection(object):
self.request_hook.handle_request_data(request, response, error=True)
if response:
raise BotoServerError(response.status, response.reason, body)
- elif e:
- raise
+ elif ex:
+ raise ex
else:
msg = 'Please report this exception as a Boto Issue!'
raise BotoClientError(msg)
@@ -1084,7 +1086,7 @@ class AWSQueryConnection(AWSAuthConnection):
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, host=None, debug=0,
https_connection_factory=None, path='/', security_token=None,
- validate_certs=True, profile_name=None):
+ validate_certs=True, profile_name=None, provider='aws'):
super(AWSQueryConnection, self).__init__(
host, aws_access_key_id,
aws_secret_access_key,
@@ -1093,7 +1095,8 @@ class AWSQueryConnection(AWSAuthConnection):
debug, https_connection_factory, path,
security_token=security_token,
validate_certs=validate_certs,
- profile_name=profile_name)
+ profile_name=profile_name,
+ provider=provider)
def _required_auth_capability(self):
return []
diff --git a/boto/dynamodb2/items.py b/boto/dynamodb2/items.py
index 463feee7..b1d72139 100644
--- a/boto/dynamodb2/items.py
+++ b/boto/dynamodb2/items.py
@@ -35,7 +35,8 @@ class Item(object):
being table-level. It's also for persisting schema around many objects.
Optionally accepts a ``data`` parameter, which should be a dictionary
- of the fields & values of the item.
+ of the fields & values of the item. Alternatively, an ``Item`` instance
+ may be provided from which to extract the data.
Optionally accepts a ``loaded`` parameter, which should be a boolean.
``True`` if it was preexisting data loaded from DynamoDB, ``False`` if
@@ -71,6 +72,8 @@ class Item(object):
self._data = data
self._dynamizer = Dynamizer()
+ if isinstance(self._data, Item):
+ self._data = self._data._data
if self._data is None:
self._data = {}
diff --git a/boto/dynamodb2/results.py b/boto/dynamodb2/results.py
index 3d80ecf3..cba35ea1 100644
--- a/boto/dynamodb2/results.py
+++ b/boto/dynamodb2/results.py
@@ -183,7 +183,6 @@ class BatchGetResultSet(ResultSet):
results = self.the_callable(*args, **kwargs)
if not len(results.get('results', [])):
- self._results_left = False
return
self._results.extend(results['results'])
diff --git a/boto/dynamodb2/table.py b/boto/dynamodb2/table.py
index 052758f7..cddd296d 100644
--- a/boto/dynamodb2/table.py
+++ b/boto/dynamodb2/table.py
@@ -1003,7 +1003,7 @@ class Table(object):
def query_count(self, index=None, consistent=False, conditional_operator=None,
query_filter=None, scan_index_forward=True, limit=None,
- **filter_kwargs):
+ exclusive_start_key=None, **filter_kwargs):
"""
Queries the exact count of matching items in a DynamoDB table.
@@ -1034,6 +1034,9 @@ class Table(object):
+ `AND` - True if all filter conditions evaluate to true (default)
+ `OR` - True if at least one filter condition evaluates to true
+    Optionally accepts an ``exclusive_start_key`` which is used to get
+ the remaining items when a query cannot return the complete count.
+
Returns an integer which represents the exact amount of matched
items.
@@ -1079,18 +1082,29 @@ class Table(object):
using=FILTER_OPERATORS
)
- raw_results = self.connection.query(
- self.table_name,
- index_name=index,
- consistent_read=consistent,
- select='COUNT',
- key_conditions=key_conditions,
- query_filter=built_query_filter,
- conditional_operator=conditional_operator,
- limit=limit,
- scan_index_forward=scan_index_forward,
- )
- return int(raw_results.get('Count', 0))
+ count_buffer = 0
+ last_evaluated_key = exclusive_start_key
+
+ while True:
+ raw_results = self.connection.query(
+ self.table_name,
+ index_name=index,
+ consistent_read=consistent,
+ select='COUNT',
+ key_conditions=key_conditions,
+ query_filter=built_query_filter,
+ conditional_operator=conditional_operator,
+ limit=limit,
+ scan_index_forward=scan_index_forward,
+ exclusive_start_key=last_evaluated_key
+ )
+
+ count_buffer += int(raw_results.get('Count', 0))
+ last_evaluated_key = raw_results.get('LastEvaluatedKey')
+ if not last_evaluated_key or count_buffer < 1:
+ break
+
+ return count_buffer
def _query(self, limit=None, index=None, reverse=False, consistent=False,
exclusive_start_key=None, select=None, attributes_to_get=None,
diff --git a/boto/ec2/autoscale/launchconfig.py b/boto/ec2/autoscale/launchconfig.py
index 889cee21..6a94f7db 100644
--- a/boto/ec2/autoscale/launchconfig.py
+++ b/boto/ec2/autoscale/launchconfig.py
@@ -159,6 +159,22 @@ class LaunchConfiguration(object):
:type associate_public_ip_address: bool
:param associate_public_ip_address: Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud.
+        Specifies whether to assign a public IP address to each instance launched in an Amazon VPC.
+
+ :type volume_type: str
+ :param volume_type: The type of the volume.
+ Valid values are: standard | io1 | gp2.
+
+ :type delete_on_termination: bool
+ :param delete_on_termination: Whether the device will be deleted
+ when the instance is terminated.
+
+ :type iops: int
+ :param iops: The provisioned IOPs you want to associate with this volume.
+
+ :type use_block_device_types: bool
+    :param use_block_device_types: Specifies whether to return
+        described Launch Configs with their block device mappings included.
+
"""
self.connection = connection
self.name = name
diff --git a/boto/ec2/connection.py b/boto/ec2/connection.py
index f301ae5d..b2d7d45c 100644
--- a/boto/ec2/connection.py
+++ b/boto/ec2/connection.py
@@ -72,7 +72,7 @@ from boto.compat import six
class EC2Connection(AWSQueryConnection):
- APIVersion = boto.config.get('Boto', 'ec2_version', '2014-05-01')
+ APIVersion = boto.config.get('Boto', 'ec2_version', '2014-10-01')
DefaultRegionName = boto.config.get('Boto', 'ec2_region_name', 'us-east-1')
DefaultRegionEndpoint = boto.config.get('Boto', 'ec2_region_endpoint',
'ec2.us-east-1.amazonaws.com')
@@ -104,9 +104,8 @@ class EC2Connection(AWSQueryConnection):
if api_version:
self.APIVersion = api_version
- @detect_potential_sigv4
def _required_auth_capability(self):
- return ['ec2']
+ return ['hmac-v4']
def get_params(self):
"""
@@ -4459,3 +4458,47 @@ class EC2Connection(AWSQueryConnection):
if dry_run:
params['DryRun'] = 'true'
return self.get_status('ModifyVpcAttribute', params, verb='POST')
+
+ def get_all_classic_link_instances(self, instance_ids=None, filters=None,
+ dry_run=False, max_results=None,
+ next_token=None):
+ """
+ Get all of your linked EC2-Classic instances. This request only
+ returns information about EC2-Classic instances linked to
+ a VPC through ClassicLink
+
+ :type instance_ids: list
+ :param instance_ids: A list of strings of instance IDs. Must be
+ instances linked to a VPC through ClassicLink.
+
+ :type filters: dict
+ :param filters: Optional filters that can be used to limit the
+ results returned. Filters are provided in the form of a
+ dictionary consisting of filter names as the key and
+ filter values as the value. The set of allowable filter
+ names/values is dependent on the request being performed.
+ Check the EC2 API guide for details.
+
+ :type dry_run: bool
+ :param dry_run: Set to True if the operation should not actually run.
+
+ :type max_results: int
+ :param max_results: The maximum number of paginated instance
+ items per response.
+
+ :rtype: list
+ :return: A list of :class:`boto.ec2.instance.Instance`
+ """
+ params = {}
+ if instance_ids:
+ self.build_list_params(params, instance_ids, 'InstanceId')
+ if filters:
+ self.build_filter_params(params, filters)
+ if dry_run:
+ params['DryRun'] = 'true'
+ if max_results is not None:
+ params['MaxResults'] = max_results
+ if next_token:
+ params['NextToken'] = next_token
+ return self.get_list('DescribeClassicLinkInstances', params,
+ [('item', Instance)], verb='POST')
diff --git a/boto/ec2/elb/__init__.py b/boto/ec2/elb/__init__.py
index 6123909c..a1d8f185 100644
--- a/boto/ec2/elb/__init__.py
+++ b/boto/ec2/elb/__init__.py
@@ -98,7 +98,7 @@ class ELBConnection(AWSQueryConnection):
profile_name=profile_name)
def _required_auth_capability(self):
- return ['ec2']
+ return ['hmac-v4']
def build_list_params(self, params, items, label):
if isinstance(items, six.string_types):
diff --git a/boto/ec2/reservedinstance.py b/boto/ec2/reservedinstance.py
index 02d15276..5ccc008e 100644
--- a/boto/ec2/reservedinstance.py
+++ b/boto/ec2/reservedinstance.py
@@ -134,6 +134,7 @@ class ReservedInstance(ReservedInstancesOffering):
self.instance_count = instance_count
self.state = state
self.start = None
+ self.end = None
def __repr__(self):
return 'ReservedInstance:%s' % self.id
@@ -147,6 +148,8 @@ class ReservedInstance(ReservedInstancesOffering):
self.state = value
elif name == 'start':
self.start = value
+ elif name == 'end':
+ self.end = value
else:
super(ReservedInstance, self).endElement(name, value, connection)
diff --git a/boto/ec2/snapshot.py b/boto/ec2/snapshot.py
index 8a84cdee..eaf7164c 100644
--- a/boto/ec2/snapshot.py
+++ b/boto/ec2/snapshot.py
@@ -141,7 +141,7 @@ class Snapshot(TaggedEC2Object):
:type volume_type: string
:param volume_type: The type of the volume. (optional). Valid
- values are: standard | io1.
+ values are: standard | io1 | gp2.
:type iops: int
:param iops: The provisioned IOPs you want to associate with
diff --git a/boto/emr/emrobject.py b/boto/emr/emrobject.py
index f605834c..73f7060b 100644
--- a/boto/emr/emrobject.py
+++ b/boto/emr/emrobject.py
@@ -64,6 +64,10 @@ class StepId(Arg):
pass
+class SupportedProduct(Arg):
+ pass
+
+
class JobFlowStepList(EmrObject):
def __ini__(self, connection=None):
self.connection = connection
@@ -190,6 +194,9 @@ class JobFlow(EmrObject):
elif name == 'BootstrapActions':
self.bootstrapactions = ResultSet([('member', BootstrapAction)])
return self.bootstrapactions
+ elif name == 'SupportedProducts':
+ self.supported_products = ResultSet([('member', SupportedProduct)])
+ return self.supported_products
else:
return None
@@ -201,6 +208,11 @@ class ClusterTimeline(EmrObject):
'EndDateTime'
])
+class ClusterStateChangeReason(EmrObject):
+ Fields = set([
+ 'Code',
+ 'Message'
+ ])
class ClusterStatus(EmrObject):
Fields = set([
@@ -217,6 +229,9 @@ class ClusterStatus(EmrObject):
if name == 'Timeline':
self.timeline = ClusterTimeline()
return self.timeline
+ elif name == 'StateChangeReason':
+ self.statechangereason = ClusterStateChangeReason()
+ return self.statechangereason
else:
return None
@@ -248,7 +263,10 @@ class Cluster(EmrObject):
'RunningAmiVersion',
'AutoTerminate',
'TerminationProtected',
- 'VisibleToAllUsers'
+ 'VisibleToAllUsers',
+ 'MasterPublicDnsName',
+ 'NormalizedInstanceHours',
+ 'ServiceRole'
])
def __init__(self, connection=None):
@@ -275,12 +293,24 @@ class Cluster(EmrObject):
return None
-class ClusterSummary(Cluster):
+class ClusterSummary(EmrObject):
Fields = set([
'Id',
- 'Name'
+ 'Name',
+ 'NormalizedInstanceHours'
])
+ def __init__(self, connection):
+ self.connection = connection
+ self.status = None
+
+ def startElement(self, name, attrs, connection):
+ if name == 'Status':
+ self.status = ClusterStatus()
+ return self.status
+ else:
+ return None
+
class ClusterSummaryList(EmrObject):
Fields = set([
diff --git a/boto/endpoints.json b/boto/endpoints.json
index 14673142..d9b4f6bb 100644
--- a/boto/endpoints.json
+++ b/boto/endpoints.json
@@ -47,6 +47,17 @@
"us-west-2": "cloudsearch.us-west-2.amazonaws.com",
"eu-central-1": "cloudsearch.eu-central-1.amazonaws.com"
},
+ "cloudsearchdomain": {
+ "ap-southeast-1": "cloudsearch.ap-southeast-1.amazonaws.com",
+ "ap-southeast-2": "cloudsearch.ap-southeast-2.amazonaws.com",
+ "ap-northeast-1": "cloudsearch.ap-northeast-1.amazonaws.com",
+ "sa-east-1": "cloudsearch.sa-east-1.amazonaws.com",
+ "eu-west-1": "cloudsearch.eu-west-1.amazonaws.com",
+ "us-east-1": "cloudsearch.us-east-1.amazonaws.com",
+ "us-west-1": "cloudsearch.us-west-1.amazonaws.com",
+ "us-west-2": "cloudsearch.us-west-2.amazonaws.com",
+ "eu-central-1": "cloudsearch.eu-central-1.amazonaws.com"
+ },
"cloudtrail": {
"ap-northeast-1": "cloudtrail.ap-northeast-1.amazonaws.com",
"ap-southeast-1": "cloudtrail.ap-southeast-1.amazonaws.com",
@@ -169,7 +180,7 @@
"us-gov-west-1": "us-gov-west-1.elasticmapreduce.amazonaws.com",
"us-west-1": "us-west-1.elasticmapreduce.amazonaws.com",
"us-west-2": "us-west-2.elasticmapreduce.amazonaws.com",
- "eu-central-1": "eu-central-1.elasticmapreduce.amazonaws.com"
+ "eu-central-1": "elasticmapreduce.eu-central-1.amazonaws.com"
},
"elastictranscoder": {
"ap-northeast-1": "elastictranscoder.ap-northeast-1.amazonaws.com",
@@ -313,7 +324,7 @@
"ap-northeast-1": "ap-northeast-1.queue.amazonaws.com",
"ap-southeast-1": "ap-southeast-1.queue.amazonaws.com",
"ap-southeast-2": "ap-southeast-2.queue.amazonaws.com",
- "cn-north-1": "sqs.cn-north-1.amazonaws.com.cn",
+ "cn-north-1": "cn-north-1.queue.amazonaws.com.cn",
"eu-west-1": "eu-west-1.queue.amazonaws.com",
"sa-east-1": "sa-east-1.queue.amazonaws.com",
"us-east-1": "queue.amazonaws.com",
diff --git a/boto/glacier/response.py b/boto/glacier/response.py
index a67ec61d..c7a2612c 100644
--- a/boto/glacier/response.py
+++ b/boto/glacier/response.py
@@ -36,9 +36,10 @@ class GlacierResponse(dict):
if response_headers:
for header_name, item_name in response_headers:
self[item_name] = http_response.getheader(header_name)
- if http_response.getheader('Content-Type') == 'application/json':
- body = json.loads(http_response.read().decode('utf-8'))
- self.update(body)
+ if http_response.status != 204:
+ if http_response.getheader('Content-Type') == 'application/json':
+ body = json.loads(http_response.read().decode('utf-8'))
+ self.update(body)
size = http_response.getheader('Content-Length', None)
if size is not None:
self.size = size
diff --git a/boto/iam/connection.py b/boto/iam/connection.py
index 8590971d..392d3f35 100644
--- a/boto/iam/connection.py
+++ b/boto/iam/connection.py
@@ -1544,3 +1544,99 @@ class IAMConnection(AWSQueryConnection):
'VirtualMFADeviceName': device_name
}
return self.get_response('CreateVirtualMFADevice', params)
+
+ #
+ # IAM password policy
+ #
+
+ def get_account_password_policy(self):
+ """
+ Returns the password policy for the AWS account.
+ """
+ params = {}
+ return self.get_response('GetAccountPasswordPolicy', params)
+
+ def delete_account_password_policy(self):
+ """
+ Delete the password policy currently set for the AWS account.
+ """
+ params = {}
+ return self.get_response('DeleteAccountPasswordPolicy', params)
+
+ def update_account_password_policy(self, allow_users_to_change_password=None,
+ hard_expiry=None, max_password_age=None ,
+ minimum_password_length=None ,
+ password_reuse_prevention=None,
+ require_lowercase_characters=None,
+ require_numbers=None, require_symbols=None ,
+ require_uppercase_characters=None):
+ """
+ Update the password policy for the AWS account.
+
+ Notes: unset parameters will be reset to Amazon default settings!
+ Most of the password policy settings are enforced the next time your users
+ change their passwords. When you set minimum length and character type
+ requirements, they are enforced the next time your users change their
+ passwords - users are not forced to change their existing passwords, even
+ if the pre-existing passwords do not adhere to the updated password
+ policy. When you set a password expiration period, the expiration period
+ is enforced immediately.
+
+ :type allow_users_to_change_password: bool
+ :param allow_users_to_change_password: Allows all IAM users in your account
+ to use the AWS Management Console to change their own passwords.
+
+ :type hard_expiry: bool
+ :param hard_expiry: Prevents IAM users from setting a new password after
+ their password has expired.
+
+ :type max_password_age: int
+ :param max_password_age: The number of days that an IAM user password is valid.
+
+ :type minimum_password_length: int
+ :param minimum_password_length: The minimum number of characters allowed in
+ an IAM user password.
+
+ :type password_reuse_prevention: int
+ :param password_reuse_prevention: Specifies the number of previous passwords
+ that IAM users are prevented from reusing.
+
+ :type require_lowercase_characters: bool
+ :param require_lowercase_characters: Specifies whether IAM user passwords
+ must contain at least one lowercase character from the ISO basic Latin
+ alphabet (``a`` to ``z``).
+
+ :type require_numbers: bool
+ :param require_numbers: Specifies whether IAM user passwords must contain at
+ least one numeric character (``0`` to ``9``).
+
+ :type require_symbols: bool
+ :param require_symbols: Specifies whether IAM user passwords must contain at
+ least one of the following non-alphanumeric characters:
+ ``! @ # $ % ^ & * ( ) _ + - = [ ] { } | '``
+
+ :type require_uppercase_characters: bool
+ :param require_uppercase_characters: Specifies whether IAM user passwords
+ must contain at least one uppercase character from the ISO basic Latin
+ alphabet (``A`` to ``Z``).
+ """
+ params = {}
+ if allow_users_to_change_password is not None and type(allow_users_to_change_password) is bool:
+ params['AllowUsersToChangePassword'] = str(allow_users_to_change_password).lower()
+ if hard_expiry is not None and type(allow_users_to_change_password) is bool:
+ params['HardExpiry'] = str(hard_expiry).lower()
+ if max_password_age is not None:
+ params['MaxPasswordAge'] = max_password_age
+ if minimum_password_length is not None:
+ params['MinimumPasswordLength'] = minimum_password_length
+ if password_reuse_prevention is not None:
+ params['PasswordReusePrevention'] = password_reuse_prevention
+ if require_lowercase_characters is not None and type(allow_users_to_change_password) is bool:
+ params['RequireLowercaseCharacters'] = str(require_lowercase_characters).lower()
+ if require_numbers is not None and type(allow_users_to_change_password) is bool:
+ params['RequireNumbers'] = str(require_numbers).lower()
+ if require_symbols is not None and type(allow_users_to_change_password) is bool:
+ params['RequireSymbols'] = str(require_symbols).lower()
+ if require_uppercase_characters is not None and type(allow_users_to_change_password) is bool:
+ params['RequireUppercaseCharacters'] = str(require_uppercase_characters).lower()
+ return self.get_response('UpdateAccountPasswordPolicy', params)
diff --git a/boto/mws/connection.py b/boto/mws/connection.py
index b372ffb4..687fae74 100644
--- a/boto/mws/connection.py
+++ b/boto/mws/connection.py
@@ -292,7 +292,7 @@ class MWSConnection(AWSQueryConnection):
return path
splat = path.split('/')
splat[-2] += '_Sandbox'
- return splat.join('/')
+ return '/'.join(splat)
def _required_auth_capability(self):
return ['mws']
diff --git a/boto/provider.py b/boto/provider.py
index 0da2f78a..349a7a6c 100644
--- a/boto/provider.py
+++ b/boto/provider.py
@@ -67,6 +67,7 @@ STORAGE_CREATE_ERROR = 'StorageCreateError'
STORAGE_DATA_ERROR = 'StorageDataError'
STORAGE_PERMISSIONS_ERROR = 'StoragePermissionsError'
STORAGE_RESPONSE_ERROR = 'StorageResponseError'
+NO_CREDENTIALS_PROVIDED = object()
class ProfileNotFoundError(ValueError):
diff --git a/boto/route53/connection.py b/boto/route53/connection.py
index c13ab2e0..23e05ea5 100644
--- a/boto/route53/connection.py
+++ b/boto/route53/connection.py
@@ -47,6 +47,19 @@ HZXML = """<?xml version="1.0" encoding="UTF-8"?>
</HostedZoneConfig>
</CreateHostedZoneRequest>"""
+HZPXML = """<?xml version="1.0" encoding="UTF-8"?>
+<CreateHostedZoneRequest xmlns="%(xmlns)s">
+ <Name>%(name)s</Name>
+ <VPC>
+ <VPCId>%(vpc_id)s</VPCId>
+ <VPCRegion>%(vpc_region)s</VPCRegion>
+ </VPC>
+ <CallerReference>%(caller_ref)s</CallerReference>
+ <HostedZoneConfig>
+ <Comment>%(comment)s</Comment>
+ </HostedZoneConfig>
+</CreateHostedZoneRequest>"""
+
# boto.set_stream_logger('dns')
@@ -162,7 +175,8 @@ class Route53Connection(AWSAuthConnection):
if zone['Name'] == hosted_zone_name:
return self.get_hosted_zone(zone['Id'].split('/')[-1])
- def create_hosted_zone(self, domain_name, caller_ref=None, comment=''):
+ def create_hosted_zone(self, domain_name, caller_ref=None, comment='',
+ private_zone=False, vpc_id=None, vpc_region=None):
"""
Create a new Hosted Zone. Returns a Python data structure with
information about the newly created Hosted Zone.
@@ -189,14 +203,34 @@ class Route53Connection(AWSAuthConnection):
:param comment: Any comments you want to include about the hosted
zone.
+ :type private_zone: bool
+ :param private_zone: Set True if creating a private hosted zone.
+
+ :type vpc_id: str
+ :param vpc_id: When creating a private hosted zone, the VPC Id to
+ associate to is required.
+
+ :type vpc_region: str
+ :param vpc_region: When creating a private hosted zone, the region of
+ the associated VPC is required.
+
"""
if caller_ref is None:
caller_ref = str(uuid.uuid4())
- params = {'name': domain_name,
- 'caller_ref': caller_ref,
- 'comment': comment,
- 'xmlns': self.XMLNameSpace}
- xml_body = HZXML % params
+ if private_zone:
+ params = {'name': domain_name,
+ 'caller_ref': caller_ref,
+ 'comment': comment,
+ 'vpc_id': vpc_id,
+ 'vpc_region': vpc_region,
+ 'xmlns': self.XMLNameSpace}
+ xml_body = HZPXML % params
+ else:
+ params = {'name': domain_name,
+ 'caller_ref': caller_ref,
+ 'comment': comment,
+ 'xmlns': self.XMLNameSpace}
+ xml_body = HZXML % params
uri = '/%s/hostedzone' % self.Version
response = self.make_request('POST', uri,
{'Content-Type': 'text/xml'}, xml_body)
@@ -301,7 +335,25 @@ class Route53Connection(AWSAuthConnection):
raise exception.DNSServerError(response.status,
response.reason,
body)
- e = boto.jsonresponse.Element(list_marker='HealthChecks', item_marker=('HealthCheck',))
+ e = boto.jsonresponse.Element(list_marker='HealthChecks',
+ item_marker=('HealthCheck',))
+ h = boto.jsonresponse.XmlHandler(e, None)
+ h.parse(body)
+ return e
+
+ def get_checker_ip_ranges(self):
+ """
+ Return a list of Route53 healthcheck IP ranges
+ """
+ uri = '/%s/checkeripranges' % self.Version
+ response = self.make_request('GET', uri)
+ body = response.read()
+ boto.log.debug(body)
+ if response.status >= 300:
+ raise exception.DNSServerError(response.status,
+ response.reason,
+ body)
+ e = boto.jsonresponse.Element(list_marker='CheckerIpRanges', item_marker=('member',))
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
@@ -451,7 +503,8 @@ class Route53Connection(AWSAuthConnection):
h.parse(body)
return e
- def create_zone(self, name):
+ def create_zone(self, name, private_zone=False,
+ vpc_id=None, vpc_region=None):
"""
Create a new Hosted Zone. Returns a Zone object for the newly
created Hosted Zone.
@@ -465,8 +518,20 @@ class Route53Connection(AWSAuthConnection):
It is also the name you will delegate from your registrar to
the Amazon Route 53 delegation servers returned in
response to this request.
+
+ :type private_zone: bool
+ :param private_zone: Set True if creating a private hosted zone.
+
+ :type vpc_id: str
+ :param vpc_id: When creating a private hosted zone, the VPC Id to
+ associate to is required.
+
+ :type vpc_region: str
+ :param vpc_region: When creating a private hosted zone, the region of
+ the associated VPC is required.
"""
- zone = self.create_hosted_zone(name)
+ zone = self.create_hosted_zone(name, private_zone=private_zone,
+ vpc_id=vpc_id, vpc_region=vpc_region)
return Zone(self, zone['CreateHostedZoneResponse']['HostedZone'])
def get_zone(self, name):
diff --git a/boto/route53/hostedzone.py b/boto/route53/hostedzone.py
index fd8420c4..93215382 100644
--- a/boto/route53/hostedzone.py
+++ b/boto/route53/hostedzone.py
@@ -26,20 +26,15 @@
class HostedZone(object):
def __init__(self, id=None, name=None, owner=None, version=None,
- caller_reference=None, config=None):
+ caller_reference=None):
self.id = id
self.name = name
self.owner = owner
self.version = version
self.caller_reference = caller_reference
- self.config = config
def startElement(self, name, attrs, connection):
- if name == 'Config':
- self.config = Config()
- return self.config
- else:
- return None
+ return None
def endElement(self, name, value, connection):
if name == 'Id':
diff --git a/boto/route53/record.py b/boto/route53/record.py
index d871e0bd..05cddce6 100644
--- a/boto/route53/record.py
+++ b/boto/route53/record.py
@@ -123,7 +123,7 @@ class ResourceRecordSets(ResultSet):
a value that determines which region this should be associated with
for the latency-based routing
- :type alias_evaluate_target_health: Boolean
+ :type alias_evaluate_target_health: bool
:param alias_evaluate_target_health: *Required for alias resource record
sets* Indicates whether this Resource Record Set should respect the
health status of any health checks associated with the ALIAS target
diff --git a/boto/s3/__init__.py b/boto/s3/__init__.py
index 67d53e3b..b1994b9d 100644
--- a/boto/s3/__init__.py
+++ b/boto/s3/__init__.py
@@ -1,5 +1,6 @@
# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# Copyright (c) 2014, Steven Richards <sbrichards@mit.edu>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
@@ -59,6 +60,15 @@ def regions():
def connect_to_region(region_name, **kw_params):
for region in regions():
+ if 'host' in kw_params.keys():
+ # Make sure the host specified is not nothing
+ if kw_params['host'] not in ['', None]:
+ region.endpoint = kw_params['host']
+ del kw_params['host']
+ return region.connect(**kw_params)
+ # If it is nothing then remove it from kw_params and proceed with default
+ else:
+ del kw_params['host']
if region.name == region_name:
return region.connect(**kw_params)
return None
diff --git a/boto/s3/acl.py b/boto/s3/acl.py
index 51613883..9d73ddfe 100644
--- a/boto/s3/acl.py
+++ b/boto/s3/acl.py
@@ -53,7 +53,7 @@ class Policy(object):
def startElement(self, name, attrs, connection):
if name == 'AccessControlPolicy':
self.namespace = attrs.get('xmlns', None)
- return None
+ return None
if name == 'Owner':
self.owner = User(self)
return self.owner
@@ -75,12 +75,13 @@ class Policy(object):
if self.namespace is not None:
s = '<AccessControlPolicy xmlns="{0}">'.format(self.namespace)
else:
- s = '<AccessControlPolicy>'
+ s = '<AccessControlPolicy>'
s += self.owner.to_xml()
s += self.acl.to_xml()
s += '</AccessControlPolicy>'
return s
+
class ACL(object):
def __init__(self, policy=None):
@@ -119,6 +120,7 @@ class ACL(object):
s += '</AccessControlList>'
return s
+
class Grant(object):
NameSpace = 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
@@ -167,5 +169,3 @@ class Grant(object):
s += '<Permission>%s</Permission>' % self.permission
s += '</Grant>'
return s
-
-
diff --git a/boto/s3/key.py b/boto/s3/key.py
index bc490e30..194c6b6e 100644
--- a/boto/s3/key.py
+++ b/boto/s3/key.py
@@ -426,12 +426,13 @@ class Key(object):
:param validate_dst_bucket: If True, will validate the dst_bucket
by using an extra list request.
"""
+ bucket_name = dst_bucket or self.bucket.name
if new_storage_class == 'STANDARD':
- return self.copy(self.bucket.name, self.name,
+ return self.copy(bucket_name, self.name,
reduced_redundancy=False, preserve_acl=True,
validate_dst_bucket=validate_dst_bucket)
elif new_storage_class == 'REDUCED_REDUNDANCY':
- return self.copy(self.bucket.name, self.name,
+ return self.copy(bucket_name, self.name,
reduced_redundancy=True, preserve_acl=True,
validate_dst_bucket=validate_dst_bucket)
else:
@@ -495,7 +496,8 @@ class Key(object):
self.name, metadata,
storage_class=storage_class,
preserve_acl=preserve_acl,
- encrypt_key=encrypt_key)
+ encrypt_key=encrypt_key,
+ src_version_id=self.version_id)
def startElement(self, name, attrs, connection):
if name == 'Owner':
@@ -1673,7 +1675,7 @@ class Key(object):
the second representing the size of the to be transmitted
object.
- :type cb: int
+ :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
diff --git a/boto/ses/connection.py b/boto/ses/connection.py
index d9774125..244029a0 100644
--- a/boto/ses/connection.py
+++ b/boto/ses/connection.py
@@ -104,8 +104,8 @@ class SESConnection(AWSAuthConnection):
body = response.read().decode('utf-8')
if response.status == 200:
list_markers = ('VerifiedEmailAddresses', 'Identities',
- 'DkimTokens', 'VerificationAttributes',
- 'SendDataPoints')
+ 'DkimTokens', 'DkimAttributes',
+ 'VerificationAttributes', 'SendDataPoints')
item_markers = ('member', 'item', 'entry')
e = boto.jsonresponse.Element(list_marker=list_markers,
diff --git a/boto/sqs/connection.py b/boto/sqs/connection.py
index e2f59d79..0eafc1fa 100644
--- a/boto/sqs/connection.py
+++ b/boto/sqs/connection.py
@@ -111,6 +111,18 @@ class SQSConnection(AWSQueryConnection):
"""
return self.get_status('DeleteQueue', None, queue.id)
+ def purge_queue(self, queue):
+ """
+ Purge all messages in an SQS Queue.
+
+ :type queue: A Queue object
+ :param queue: The SQS queue to be purged
+
+ :rtype: bool
+ :return: True if the command succeeded, False otherwise
+ """
+ return self.get_status('PurgeQueue', None, queue.id)
+
def get_queue_attributes(self, queue, attribute='All'):
"""
Gets one or all attributes of a Queue
diff --git a/boto/sqs/queue.py b/boto/sqs/queue.py
index 162ec93c..bf3720d9 100644
--- a/boto/sqs/queue.py
+++ b/boto/sqs/queue.py
@@ -340,16 +340,15 @@ class Queue(object):
"""
return self.connection.delete_queue(self)
+ def purge(self):
+ """
+ Purge all messages in the queue.
+ """
+ return self.connection.purge_queue(self)
+
def clear(self, page_size=10, vtimeout=10):
- """Utility function to remove all messages from a queue"""
- n = 0
- l = self.get_messages(page_size, vtimeout)
- while l:
- for m in l:
- self.delete_message(m)
- n += 1
- l = self.get_messages(page_size, vtimeout)
- return n
+ """Deprecated utility function to remove all messages from a queue"""
+ return self.purge()
def count(self, page_size=10, vtimeout=10):
"""
diff --git a/boto/sts/connection.py b/boto/sts/connection.py
index e02f0f1e..8c0cf4b2 100644
--- a/boto/sts/connection.py
+++ b/boto/sts/connection.py
@@ -22,6 +22,7 @@
# IN THE SOFTWARE.
from boto.connection import AWSQueryConnection
+from boto.provider import Provider, NO_CREDENTIALS_PROVIDED
from boto.regioninfo import RegionInfo
from boto.sts.credentials import Credentials, FederationToken, AssumedRole
from boto.sts.credentials import DecodeAuthorizationMessage
@@ -71,6 +72,13 @@ class STSConnection(AWSQueryConnection):
https_connection_factory=None, region=None, path='/',
converter=None, validate_certs=True, anon=False,
security_token=None, profile_name=None):
+ """
+ :type anon: boolean
+ :param anon: If this parameter is True, the ``STSConnection`` object
+ will make anonymous requests, and it will not use AWS
+ Credentials or even search for AWS Credentials to make these
+ requests.
+ """
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint,
@@ -78,6 +86,15 @@ class STSConnection(AWSQueryConnection):
self.region = region
self.anon = anon
self._mutex = threading.Semaphore()
+ provider = 'aws'
+ # If an anonymous request is sent, do not try to look for credentials.
+ # So we pass in dummy values for the access key id, secret access
+ # key, and session token. It does not matter that they are
+ # not actual values because the request is anonymous.
+ if self.anon:
+ provider = Provider('aws', NO_CREDENTIALS_PROVIDED,
+ NO_CREDENTIALS_PROVIDED,
+ NO_CREDENTIALS_PROVIDED)
super(STSConnection, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
@@ -86,11 +103,12 @@ class STSConnection(AWSQueryConnection):
https_connection_factory, path,
validate_certs=validate_certs,
security_token=security_token,
- profile_name=profile_name)
+ profile_name=profile_name,
+ provider=provider)
def _required_auth_capability(self):
if self.anon:
- return ['pure-query']
+ return ['sts-anon']
else:
return ['hmac-v4']
diff --git a/boto/vpc/__init__.py b/boto/vpc/__init__.py
index 868eabcb..2c87adfc 100644
--- a/boto/vpc/__init__.py
+++ b/boto/vpc/__init__.py
@@ -1685,3 +1685,144 @@ class VPCConnection(EC2Connection):
return self.get_object('AcceptVpcPeeringConnection', params,
VpcPeeringConnection)
+ def get_all_classic_link_vpcs(self, vpc_ids=None, filters=None,
+ dry_run=False):
+ """
+ Describes the ClassicLink status of one or more VPCs.
+
+ :type vpc_ids: list
+ :param vpc_ids: A list of strings with the desired VPC ID's
+
+ :type dry_run: bool
+ :param dry_run: Set to True if the operation should not actually run.
+
+ :type filters: list of tuples or dict
+ :param filters: A list of tuples or dict containing filters. Each tuple
+ or dict item consists of a filter key and a filter value.
+
+ :rtype: list
+ :return: A list of :class:`boto.vpc.vpc.VPC`
+ """
+ params = {}
+ if vpc_ids:
+ self.build_list_params(params, vpc_ids, 'VpcId')
+ if filters:
+ self.build_filter_params(params, filters)
+ if dry_run:
+ params['DryRun'] = 'true'
+ return self.get_list('DescribeVpcClassicLink', params, [('item', VPC)],
+ verb='POST')
+
+ def attach_classic_link_vpc(self, vpc_id, instance_id, groups,
+ dry_run=False):
+ """
+ Links an EC2-Classic instance to a ClassicLink-enabled VPC through one
+ or more of the VPC's security groups. You cannot link an EC2-Classic
+ instance to more than one VPC at a time. You can only link an instance
+ that's in the running state. An instance is automatically unlinked from
+ a VPC when it's stopped. You can link it to the VPC again when you
+ restart it.
+
+ After you've linked an instance, you cannot change the VPC security
+ groups that are associated with it. To change the security groups, you
+ must first unlink the instance, and then link it again.
+
+ Linking your instance to a VPC is sometimes referred to as attaching
+ your instance.
+
+ :type vpc_id: str
+ :param vpc_id: The ID of a ClassicLink-enabled VPC.
+
+ :type instance_id: str
+ :param instance_id: The ID of the EC2-Classic instance to link.
+
+ :type groups: list
+ :param groups: The ID of one or more of the VPC's security groups.
+ You cannot specify security groups from a different VPC. The
+ members of the list can be
+ :class:`boto.ec2.securitygroup.SecurityGroup` objects or
+ strings of the id's of the security groups.
+
+ :type dry_run: bool
+ :param dry_run: Set to True if the operation should not actually run.
+
+ :rtype: bool
+ :return: True if successful
+ """
+ params = {'VpcId': vpc_id, 'InstanceId': instance_id}
+ if dry_run:
+ params['DryRun'] = 'true'
+ l = []
+ for group in groups:
+ if hasattr(group, 'id'):
+ l.append(group.id)
+ else:
+ l.append(group)
+ self.build_list_params(params, l, 'SecurityGroupId')
+ return self.get_status('AttachClassicLinkVpc', params)
+
+ def detach_classic_link_vpc(self, vpc_id, instance_id, dry_run=False):
+ """
+ Unlinks a linked EC2-Classic instance from a VPC. After the instance
+ has been unlinked, the VPC security groups are no longer associated
+ with it. An instance is automatically unlinked from a VPC when
+ it's stopped.
+
+ :type vpc_id: str
+ :param vpc_id: The ID of the VPC to which the instance is linked.
+
+ :type instance_id: str
+ :param instance_id: The ID of the instance to unlink from the VPC.
+
+ :type dry_run: bool
+ :param dry_run: Set to True if the operation should not actually run.
+
+ :rtype: bool
+ :return: True if successful
+ """
+ params = {'VpcId': vpc_id, 'InstanceId': instance_id}
+ if dry_run:
+ params['DryRun'] = 'true'
+ return self.get_status('DetachClassicLinkVpc', params)
+
+ def disable_vpc_classic_link(self, vpc_id, dry_run=False):
+ """
+ Disables ClassicLink for a VPC. You cannot disable ClassicLink for a
+ VPC that has EC2-Classic instances linked to it.
+
+ :type vpc_id: str
+ :param vpc_id: The ID of the VPC.
+
+ :type dry_run: bool
+ :param dry_run: Set to True if the operation should not actually run.
+
+ :rtype: bool
+ :return: True if successful
+ """
+ params = {'VpcId': vpc_id}
+ if dry_run:
+ params['DryRun'] = 'true'
+ return self.get_status('DisableVpcClassicLink', params)
+
+ def enable_vpc_classic_link(self, vpc_id, dry_run=False):
+ """
+ Enables a VPC for ClassicLink. You can then link EC2-Classic instances
+ to your ClassicLink-enabled VPC to allow communication over private IP
+ addresses. You cannot enable your VPC for ClassicLink if any of your
+ VPC's route tables have existing routes for address ranges within the
+ 10.0.0.0/8 IP address range, excluding local routes for VPCs in the
+ 10.0.0.0/16 and 10.1.0.0/16 IP address ranges.
+
+ :type vpc_id: str
+ :param vpc_id: The ID of the VPC.
+
+ :type dry_run: bool
+ :param dry_run: Set to True if the operation should not actually run.
+
+ :rtype: bool
+ :return: True if successful
+ """
+ params = {'VpcId': vpc_id}
+ if dry_run:
+ params['DryRun'] = 'true'
+ return self.get_status('EnableVpcClassicLink', params)
diff --git a/boto/vpc/vpc.py b/boto/vpc/vpc.py
index 575a1c45..219a0b59 100644
--- a/boto/vpc/vpc.py
+++ b/boto/vpc/vpc.py
@@ -38,6 +38,7 @@ class VPC(TaggedEC2Object):
:ivar cidr_block: The CIDR block for the VPC.
:ivar is_default: Indicates whether the VPC is the default VPC.
:ivar instance_tenancy: The allowed tenancy of instances launched into the VPC.
+ :ivar classic_link_enabled: Indicates whether ClassicLink is enabled.
"""
super(VPC, self).__init__(connection)
self.id = None
@@ -46,6 +47,7 @@ class VPC(TaggedEC2Object):
self.cidr_block = None
self.is_default = None
self.instance_tenancy = None
+ self.classic_link_enabled = None
def __repr__(self):
return 'VPC:%s' % self.id
@@ -63,6 +65,8 @@ class VPC(TaggedEC2Object):
self.is_default = True if value == 'true' else False
elif name == 'instanceTenancy':
self.instance_tenancy = value
+ elif name == 'classicLinkEnabled':
+ self.classic_link_enabled = value
else:
setattr(self, name, value)
@@ -72,8 +76,9 @@ class VPC(TaggedEC2Object):
def _update(self, updated):
self.__dict__.update(updated.__dict__)
- def update(self, validate=False, dry_run=False):
- vpc_list = self.connection.get_all_vpcs(
+ def _get_status_then_update_vpc(self, get_status_method, validate=False,
+ dry_run=False):
+ vpc_list = get_status_method(
[self.id],
dry_run=dry_run
)
@@ -82,4 +87,118 @@ class VPC(TaggedEC2Object):
self._update(updated_vpc)
elif validate:
raise ValueError('%s is not a valid VPC ID' % (self.id,))
+
+ def update(self, validate=False, dry_run=False):
+ self._get_status_then_update_vpc(
+ self.connection.get_all_vpcs,
+ validate=validate,
+ dry_run=dry_run
+ )
return self.state
+
+ def update_classic_link_enabled(self, validate=False, dry_run=False):
+ """
+ Updates instance's classic_link_enabled attribute
+
+ :rtype: bool
+ :return: self.classic_link_enabled after update has occurred.
+ """
+ self._get_status_then_update_vpc(
+ self.connection.get_all_classic_link_vpcs,
+ validate=validate,
+ dry_run=dry_run
+ )
+ return self.classic_link_enabled
+
+ def disable_classic_link(self, dry_run=False):
+ """
+ Disables ClassicLink for a VPC. You cannot disable ClassicLink for a
+ VPC that has EC2-Classic instances linked to it.
+
+ :type dry_run: bool
+ :param dry_run: Set to True if the operation should not actually run.
+
+ :rtype: bool
+ :return: True if successful
+ """
+ return self.connection.disable_vpc_classic_link(self.id,
+ dry_run=dry_run)
+
+ def enable_classic_link(self, dry_run=False):
+ """
+ Enables a VPC for ClassicLink. You can then link EC2-Classic instances
+ to your ClassicLink-enabled VPC to allow communication over private IP
+ addresses. You cannot enable your VPC for ClassicLink if any of your
+ VPC's route tables have existing routes for address ranges within the
+ 10.0.0.0/8 IP address range, excluding local routes for VPCs in the
+ 10.0.0.0/16 and 10.1.0.0/16 IP address ranges.
+
+ :type dry_run: bool
+ :param dry_run: Set to True if the operation should not actually run.
+
+ :rtype: bool
+ :return: True if successful
+ """
+ return self.connection.enable_vpc_classic_link(self.id,
+ dry_run=dry_run)
+
+ def attach_classic_instance(self, instance_id, groups, dry_run=False):
+ """
+ Links an EC2-Classic instance to a ClassicLink-enabled VPC through one
+ or more of the VPC's security groups. You cannot link an EC2-Classic
+ instance to more than one VPC at a time. You can only link an instance
+ that's in the running state. An instance is automatically unlinked from
+ a VPC when it's stopped. You can link it to the VPC again when you
+ restart it.
+
+ After you've linked an instance, you cannot change the VPC security
+ groups that are associated with it. To change the security groups, you
+ must first unlink the instance, and then link it again.
+
+ Linking your instance to a VPC is sometimes referred to as attaching
+ your instance.
+
+ :type instance_id: str
+ :param instance_id: The ID of the EC2-Classic instance to link to the
+ ClassicLink-enabled VPC.
+
+ :type groups: list
+ :param groups: The ID of one or more of the VPC's security groups.
+ You cannot specify security groups from a different VPC. The
+ members of the list can be
+ :class:`boto.ec2.securitygroup.SecurityGroup` objects or
+ strings of the id's of the security groups.
+
+ :type dry_run: bool
+ :param dry_run: Set to True if the operation should not actually run.
+
+ :rtype: bool
+ :return: True if successful
+ """
+ return self.connection.attach_classic_link_vpc(
+ vpc_id=self.id,
+ instance_id=instance_id,
+ groups=groups,
+ dry_run=dry_run
+ )
+
+ def detach_classic_instance(self, instance_id, dry_run=False):
+ """
+ Unlinks a linked EC2-Classic instance from a VPC. After the instance
+ has been unlinked, the VPC security groups are no longer associated
+ with it. An instance is automatically unlinked from a VPC when
+ it's stopped.
+
+ :type instance_id: str
+ :param instance_id: The ID of the instance to unlink from the VPC.
+
+ :type dry_run: bool
+ :param dry_run: Set to True if the operation should not actually run.
+
+ :rtype: bool
+ :return: True if successful
+ """
+ return self.connection.detach_classic_link_vpc(
+ vpc_id=self.id,
+ instance_id=instance_id,
+ dry_run=dry_run
+ )
diff --git a/docs/source/dynamodb2_tut.rst b/docs/source/dynamodb2_tut.rst
index bca39606..ae012a58 100644
--- a/docs/source/dynamodb2_tut.rst
+++ b/docs/source/dynamodb2_tut.rst
@@ -629,6 +629,7 @@ during development. Connecting to a running DynamoDB Local server is easy::
conn = DynamoDBConnection(
host='localhost',
port=8000,
+ aws_access_key_id='anything',
aws_secret_access_key='anything',
is_secure=False)
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 8e3daa1f..ab9a55a6 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -68,6 +68,7 @@ Currently Supported Services
* Cloudsearch 2 -- (:doc:`API Reference <ref/cloudsearch2>`) (Python 3)
* :doc:`Cloudsearch <cloudsearch_tut>` -- (:doc:`API Reference <ref/cloudsearch>`) (Python 3)
+ * CloudSearch Domain -- (:doc:`API Reference <ref/cloudsearchdomain>`) (Python 3)
* Elastic Transcoder -- (:doc:`API Reference <ref/elastictranscoder>`) (Python 3)
* :doc:`Simple Workflow Service (SWF) <swf_tut>` -- (:doc:`API Reference <ref/swf>`) (Python 3)
* :doc:`Simple Queue Service (SQS) <sqs_tut>` -- (:doc:`API Reference <ref/sqs>`) (Python 3)
@@ -135,6 +136,7 @@ Release Notes
.. toctree::
:titlesonly:
+ releasenotes/v2.35.0
releasenotes/v2.34.0
releasenotes/v2.33.0
releasenotes/v2.32.1
diff --git a/docs/source/ref/cloudsearchdomain.rst b/docs/source/ref/cloudsearchdomain.rst
new file mode 100644
index 00000000..72c0f0ad
--- /dev/null
+++ b/docs/source/ref/cloudsearchdomain.rst
@@ -0,0 +1,26 @@
+.. ref-cloudsearchdomain
+
+==================
+CloudSearch Domain
+==================
+
+boto.cloudsearchdomain
+----------------------
+
+.. automodule:: boto.cloudsearchdomain
+ :members:
+ :undoc-members:
+
+boto.cloudsearchdomain.layer1
+-----------------------------
+
+.. automodule:: boto.cloudsearchdomain.layer1
+ :members:
+ :undoc-members:
+
+boto.cloudsearchdomain.exceptions
+---------------------------------
+
+.. automodule:: boto.cloudsearchdomain.exceptions
+ :members:
+ :undoc-members:
diff --git a/docs/source/ref/elb.rst b/docs/source/ref/elb.rst
index aef0c5b4..f64517e3 100644
--- a/docs/source/ref/elb.rst
+++ b/docs/source/ref/elb.rst
@@ -47,15 +47,21 @@ boto.ec2.elb.loadbalancer
:undoc-members:
boto.ec2.elb.policies
--------------------------
+---------------------
.. automodule:: boto.ec2.elb.policies
:members:
:undoc-members:
boto.ec2.elb.securitygroup
--------------------------
+--------------------------
.. automodule:: boto.ec2.elb.securitygroup
:members:
:undoc-members:
+
+boto.ec2.elb.attributes
+-----------------------
+.. automodule:: boto.ec2.elb.attributes
+ :members:
+ :undoc-members:
diff --git a/docs/source/releasenotes/v2.35.0.rst b/docs/source/releasenotes/v2.35.0.rst
new file mode 100644
index 00000000..b81d4c4f
--- /dev/null
+++ b/docs/source/releasenotes/v2.35.0.rst
@@ -0,0 +1,55 @@
+boto v2.35.0
+============
+
+:date: 2015/01/08
+
+This release adds support for Amazon EC2 Classic Link which allows users
+to link classic instances to Classic Link enabled VPCs,
+adds support for Amazon CloudSearch Domain, adds sigv4 support
+for Elastic Load Balancing, and fixes several other issues including issues
+making anonymous AWS Security Token Service requests.
+
+
+Changes
+-------
+* Add Amazon EC2 Classic Link support (:sha:`5dbd2d7`)
+* Add query string to body for anon STS POST (:issue:`2812`, :sha:`6513789`)
+* Fix bug that prevented initializing a dynamo item from existing item (:issue:`2764`, :sha:`743e814`)
+* switchover-sigv4: Add integ tests for sigv4 switchover; switch elb/ec2 over to signature version 4 (:sha:`0dadce8`)
+* Return SetStackPolicyResponse - (:issue:`2822`, :issue:`2346`, :issue:`2639`, :sha:`c4defb4`)
+* Added ELB Attributes to docs. (:issue:`2821`, :sha:`5dfeba9`)
+* Fix bug by using correct string joining syntax. (:issue:`2817`, :sha:`8426148`)
+* Fix SES get_identity_dkim_attributes when input length > 1. (:issue:`2810`, :sha:`cc4d42d`)
+* DynamoDB table batch_get fails to process all remaining results if single batch result is empty. (:issue:`2809`, :sha:`a193bc0`)
+* Added support for additional fields in EMR objects. (:issue:`2807`, :sha:`2936ac0`)
+* Pass version_id in copy if key is versioned. (:issue:`2803`, :sha:`66b3604`)
+* Add support for SQS PurgeQueue operation. (:issue:`2806`, :sha:`90a5d44`)
+* Update documentation for launchconfig. (:issue:`2802`, :sha:`0dc8412`)
+* Remove unimplemented config param. (:issue:`2801`, :issue:`2572`, :sha:`f1a5ebd`)
+* Add support for private hosted zones. (:issue:`2785`, :sha:`2e7829b`)
+* Fix Key.change_storage_class so that it obeys dst_bucket. (:issue:`2752`, :sha:`55ed184`)
+* Fix for s3put host specification. (:issue:`2736`, :issue:`2522`, :sha:`1af31f2`)
+* Improve handling of Glacier HTTP 204 responses. (:issue:`2726`, :sha:`c314298`)
+* Fix raising exception syntax in Python 3. (:issue:`2735`, :issue:`2563`, :sha:`58f76f6`)
+* Privatezone: Adding unit/integration test coverage (:issue:`1`, :sha:`d1ff14e`)
+* Minor documentation/pep8 fixes. (:issue:`2753`, :sha:`6a853be`)
+* Correct argument type in doc string. (:issue:`2728`, :sha:`1ddf6df`)
+* Use exclusive start key to get all items from DynamoDB query. (:issue:`2676`, :issue:`2573`, :sha:`419d8a5`)
+* Updated link to current config documentation. (:issue:`2755`, :sha:`9be3f85`)
+* Fix the SQS certificate error for region cn-north-1. (:issue:`2766`, :sha:`1d5368a`)
+* Adds support for getting health checker IP ranges from Route53. (:issue:`2792`, :sha:`ee14911`)
+* fix: snap.create_volume documentation lists general purpose ssd. Fixes #2774. (:issue:`2774`, :sha:`36fae2b`)
+* Fixed param type in get_contents_to_filename docstring. (:issue:`2783`, :sha:`478f66a`)
+* Update DynamoDB local example to include fake access key id. (:issue:`2791`, :sha:`2c1f8d5`)
+* Added 'end' attribute to ReservedInstance. (:issue:`2793`, :issue:`2757`, :sha:`28814d8`)
+* Parse ClusterStatus’s StateChangeReason. (:issue:`2696`, :sha:`48c5d17`)
+* Adds SupportedProducts field to EMR JobFlow objects. (:issue:`2775`, :sha:`6771d04`)
+* Fix EMR endpoint. (:issue:`2750`, :sha:`8329e02`)
+* Detect old-style S3 URL for auto-sigv4. (:issue:`2773`, :sha:`f5be409`)
+* Throw host warning for cloudsearch domain (:issue:`2765`, :sha:`9af6f41`)
+* Fix CloudSearch2 to work with IAM-based search and upload requests (:issue:`2717`, :sha:`9f4fe8b`)
+* iam: add support for Account Password Policy APIs (:issue:`2574`, :sha:`6c9bd53`)
+* Handle sigv4 non-string header values properly (:issue:`2744`, :sha:`e043e4b`)
+* Url encode query string for pure query (:issue:`2720`, :sha:`bbbf9d2`)
+
+
diff --git a/docs/source/sqs_tut.rst b/docs/source/sqs_tut.rst
index 9b8e508c..06cdc440 100644
--- a/docs/source/sqs_tut.rst
+++ b/docs/source/sqs_tut.rst
@@ -262,12 +262,9 @@ to count the number of messages in a queue:
>>> q.count()
10
-This can be handy but this command as well as the other two utility methods
-I'll describe in a minute are inefficient and should be used with caution
-on queues with lots of messages (e.g. many hundreds or more). Similarly,
-you can clear (delete) all messages in a queue with:
+Removing all messages in a queue is as simple as calling purge:
->>> q.clear()
+>>> q.purge()
Be REAL careful with that one! Finally, if you want to dump all of the
messages in a queue to a local file:
diff --git a/setup.py b/setup.py
index b46e683e..89c1bc65 100644
--- a/setup.py
+++ b/setup.py
@@ -78,7 +78,8 @@ setup(name = "boto",
"boto.directconnect", "boto.kinesis", "boto.rds2",
"boto.cloudsearch2", "boto.logs", "boto.vendored",
"boto.route53.domains", "boto.cognito",
- "boto.cognito.identity", "boto.cognito.sync"],
+ "boto.cognito.identity", "boto.cognito.sync",
+ "boto.cloudsearchdomain"],
package_data = {
"boto.cacerts": ["cacerts.txt"],
"boto": ["endpoints.json"],
diff --git a/tests/integration/dynamodb2/test_highlevel.py b/tests/integration/dynamodb2/test_highlevel.py
index 0f893b14..8e8b1a05 100644
--- a/tests/integration/dynamodb2/test_highlevel.py
+++ b/tests/integration/dynamodb2/test_highlevel.py
@@ -279,8 +279,14 @@ class DynamoDBv2Test(unittest.TestCase):
johndoe = users.get_item(username='johndoe', friend_count=4)
johndoe.delete()
+ # Set batch get limit to ensure keys with no results are
+ # handled correctly.
+ users.max_batch_get = 2
+
# Test the eventually consistent batch get.
results = users.batch_get(keys=[
+ {'username': 'noone', 'friend_count': 4},
+ {'username': 'nothere', 'friend_count': 10},
{'username': 'bob', 'friend_count': 1},
{'username': 'jane', 'friend_count': 3}
])
diff --git a/tests/integration/ec2/elb/test_connection.py b/tests/integration/ec2/elb/test_connection.py
index 5dc44141..7f1ca80b 100644
--- a/tests/integration/ec2/elb/test_connection.py
+++ b/tests/integration/ec2/elb/test_connection.py
@@ -28,6 +28,7 @@ import boto
import time
from tests.compat import unittest
from boto.ec2.elb import ELBConnection
+import boto.ec2.elb
class ELBConnectionTest(unittest.TestCase):
@@ -286,6 +287,11 @@ class ELBConnectionTest(unittest.TestCase):
[]
)
+ def test_can_make_sigv4_call(self):
+ connection = boto.ec2.elb.connect_to_region('eu-central-1')
+ lbs = connection.get_all_load_balancers()
+ self.assertTrue(isinstance(lbs, list))
+
if __name__ == '__main__':
unittest.main()
diff --git a/tests/integration/ec2/test_connection.py b/tests/integration/ec2/test_connection.py
index ec3721d4..f0117987 100644
--- a/tests/integration/ec2/test_connection.py
+++ b/tests/integration/ec2/test_connection.py
@@ -33,6 +33,7 @@ import socket
from nose.plugins.attrib import attr
from boto.ec2.connection import EC2Connection
from boto.exception import EC2ResponseError
+import boto.ec2
class EC2ConnectionTest(unittest.TestCase):
@@ -239,3 +240,7 @@ class EC2ConnectionTest(unittest.TestCase):
# And kill it.
rs.instances[0].terminate()
+
+ def test_can_get_all_instances_sigv4(self):
+ connection = boto.ec2.connect_to_region('eu-central-1')
+ self.assertTrue(isinstance(connection.get_all_instances(), list))
diff --git a/tests/integration/iam/test_connection.py b/tests/integration/iam/test_connection.py
index 61953f17..6e536413 100644
--- a/tests/integration/iam/test_connection.py
+++ b/tests/integration/iam/test_connection.py
@@ -27,6 +27,8 @@ from tests.compat import unittest
class TestIAM(unittest.TestCase):
+ iam = True
+
def test_group_users(self):
# A very basic test to create a group, a user, add the user
# to the group and then delete everything
diff --git a/tests/integration/iam/test_password_policy.py b/tests/integration/iam/test_password_policy.py
new file mode 100644
index 00000000..aa86fc57
--- /dev/null
+++ b/tests/integration/iam/test_password_policy.py
@@ -0,0 +1,80 @@
+# Copyright (c) 2014 Rocket Internet AG.
+# Luca Bruno <luca.bruno@rocket-internet.de>
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import boto
+import time
+
+from tests.compat import unittest
+
+class IAMAccountPasswordPolicy(unittest.TestCase):
+ iam = True
+
+ def test_password_policy(self):
+ # A series of tests to check the password policy API
+ iam = boto.connect_iam()
+
+ # First preserve what is the current password policy
+ try:
+ initial_policy_result = iam.get_account_password_policy()
+ except boto.exception.BotoServerError as srv_error:
+ initial_policy = None
+ if srv_error.status != 404:
+ raise srv_error
+
+ # Update the policy and check it back
+ test_min_length = 88
+ iam.update_account_password_policy(minimum_password_length=test_min_length)
+ new_policy = iam.get_account_password_policy()
+ new_min_length = new_policy['get_account_password_policy_response']\
+ ['get_account_password_policy_result']['password_policy']\
+ ['minimum_password_length']
+
+ if test_min_length != int(new_min_length):
+ raise Exception("Failed to update account password policy")
+
+ # Delete the policy and check the correct deletion
+ test_policy = ''
+ iam.delete_account_password_policy()
+ try:
+ test_policy = iam.get_account_password_policy()
+ except boto.exception.BotoServerError as srv_error:
+ test_policy = None
+ if srv_error.status != 404:
+ raise srv_error
+
+ if test_policy is not None:
+ raise Exception("Failed to delete account password policy")
+
+ # Restore initial account password policy
+ if initial_policy:
+ p = initial_policy['get_account_password_policy_response']\
+ ['get_account_password_policy_result']['password_policy']
+ iam.update_account_password_policy(minimum_password_length=int(p['minimum_password_length']),
+ allow_users_to_change_password=bool(p['allow_users_to_change_password']),
+ hard_expiry=bool(p['hard_expiry']),
+ max_password_age=int(p['max_password_age']),
+ password_reuse_prevention=int(p['password_reuse_prevention']),
+ require_lowercase_characters=bool(p['require_lowercase_characters']),
+ require_numbers=bool(p['require_numbers']),
+ require_symbols=bool(p['require_symbols']),
+ require_uppercase_characters=bool(p['require_uppercase_characters']))
diff --git a/tests/integration/route53/test_zone.py b/tests/integration/route53/test_zone.py
index 9ba9e452..d6351dd6 100644
--- a/tests/integration/route53/test_zone.py
+++ b/tests/integration/route53/test_zone.py
@@ -27,6 +27,7 @@ from tests.compat import unittest
from nose.plugins.attrib import attr
from boto.route53.connection import Route53Connection
from boto.exception import TooManyRecordsException
+from boto.vpc import VPCConnection
@attr(route53=True)
@@ -151,7 +152,9 @@ class TestRoute53Zone(unittest.TestCase):
identifier=('baz', 'us-east-1'))
self.zone.add_a('exception.%s' % self.base_domain, '8.7.6.5',
identifier=('bam', 'us-west-1'))
- self.assertRaises(TooManyRecordsException, lambda: self.zone.get_a('exception.%s' % self.base_domain))
+ self.assertRaises(TooManyRecordsException,
+ lambda: self.zone.get_a('exception.%s' %
+ self.base_domain))
self.zone.delete_a('exception.%s' % self.base_domain, all=True)
@classmethod
@@ -161,5 +164,33 @@ class TestRoute53Zone(unittest.TestCase):
self.zone.delete_mx(self.base_domain)
self.zone.delete()
+
+@attr(route53=True)
+class TestRoute53PrivateZone(unittest.TestCase):
+ @classmethod
+ def setUpClass(self):
+ time_str = str(int(time.time()))
+ self.route53 = Route53Connection()
+ self.base_domain = 'boto-private-zone-test-%s.com' % time_str
+ self.vpc = VPCConnection()
+ self.test_vpc = self.vpc.create_vpc(cidr_block='10.11.0.0/16')
+ # tag the vpc to make it easily identifiable if things go spang
+ self.test_vpc.add_tag("Name", self.base_domain)
+ self.zone = self.route53.get_zone(self.base_domain)
+ if self.zone is not None:
+ self.zone.delete()
+
+ def test_create_private_zone(self):
+ self.zone = self.route53.create_hosted_zone(self.base_domain,
+ private_zone=True,
+ vpc_id=self.test_vpc.id,
+ vpc_region='us-east-1')
+
+ @classmethod
+ def tearDownClass(self):
+ if self.zone is not None:
+ self.zone.delete()
+ self.test_vpc.delete()
+
if __name__ == '__main__':
unittest.main(verbosity=3)
diff --git a/tests/integration/s3/test_connect_to_region.py b/tests/integration/s3/test_connect_to_region.py
new file mode 100644
index 00000000..5c76ada9
--- /dev/null
+++ b/tests/integration/s3/test_connect_to_region.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014 Steven Richards <sbrichards@mit.edu>
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Unit test for passing in 'host' parameter and overriding the region
+See issue: #2522
+"""
+from tests.compat import unittest
+
+from boto.s3.connection import S3Connection
+from boto.s3 import connect_to_region
+
+class S3SpecifyHost(unittest.TestCase):
+ s3 = True
+
+ def testWithNonAWSHost(self):
+ connect_args = dict({'host':'www.not-a-website.com'})
+ connection = connect_to_region('us-east-1', **connect_args)
+ self.assertEquals('www.not-a-website.com', connection.host)
+ self.assertIsInstance(connection, S3Connection)
+
+ def testSuccessWithHostOverrideRegion(self):
+ connect_args = dict({'host':'s3.amazonaws.com'})
+ connection = connect_to_region('us-west-2', **connect_args)
+ self.assertEquals('s3.amazonaws.com', connection.host)
+ self.assertIsInstance(connection, S3Connection)
+
+
+ def testSuccessWithDefaultUSWest1(self):
+ connection = connect_to_region('us-west-2')
+ self.assertEquals('s3-us-west-2.amazonaws.com', connection.host)
+ self.assertIsInstance(connection, S3Connection)
+
+ def testSuccessWithDefaultUSEast1(self):
+ connection = connect_to_region('us-east-1')
+ self.assertEquals('s3.amazonaws.com', connection.host)
+ self.assertIsInstance(connection, S3Connection)
+
+ def testDefaultWithInvalidHost(self):
+ connect_args = dict({'host':''})
+ connection = connect_to_region('us-west-2', **connect_args)
+ self.assertEquals('s3-us-west-2.amazonaws.com', connection.host)
+ self.assertIsInstance(connection, S3Connection)
+
+ def testDefaultWithInvalidHostNone(self):
+ connect_args = dict({'host':None})
+ connection = connect_to_region('us-east-1', **connect_args)
+ self.assertEquals('s3.amazonaws.com', connection.host)
+ self.assertIsInstance(connection, S3Connection)
+
+ def tearDown(self):
+ self = connection = connect_args = None
diff --git a/tests/integration/s3/test_key.py b/tests/integration/s3/test_key.py
index f7a67b2b..8d426a26 100644
--- a/tests/integration/s3/test_key.py
+++ b/tests/integration/s3/test_key.py
@@ -27,8 +27,9 @@ Some unit tests for S3 Key
from tests.unit import unittest
import time
-from boto.compat import six, StringIO, urllib
+import boto.s3
+from boto.compat import six, StringIO, urllib
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.exception import S3ResponseError
@@ -459,3 +460,75 @@ class S3KeyTest(unittest.TestCase):
kn = self.bucket.new_key("testkey_for_sse_c")
ks = kn.get_contents_as_string(headers=header)
self.assertEqual(ks, content.encode('utf-8'))
+
+
+class S3KeySigV4Test(unittest.TestCase):
+ def setUp(self):
+ self.conn = boto.s3.connect_to_region('eu-central-1')
+ self.bucket_name = 'boto-sigv4-key-%d' % int(time.time())
+ self.bucket = self.conn.create_bucket(self.bucket_name,
+ location='eu-central-1')
+
+ def tearDown(self):
+ for key in self.bucket:
+ key.delete()
+ self.bucket.delete()
+
+ def test_put_get_with_non_string_headers_key(self):
+ k = Key(self.bucket)
+ k.key = 'foobar'
+ body = 'This is a test of S3'
+ # A content-length header will be added to this request since it
+ # has a body.
+ k.set_contents_from_string(body)
+ # Set a header that has an integer. This checks for a bug where
+ # the sigv4 signer assumes that all of the headers are strings.
+ headers = {'Content-Length': 0}
+ from_s3_key = self.bucket.get_key('foobar', headers=headers)
+ self.assertEqual(from_s3_key.get_contents_as_string().decode('utf-8'),
+ body)
+
+
+class S3KeyVersionCopyTest(unittest.TestCase):
+ def setUp(self):
+ self.conn = S3Connection()
+ self.bucket_name = 'boto-key-version-copy-%d' % int(time.time())
+ self.bucket = self.conn.create_bucket(self.bucket_name)
+ self.bucket.configure_versioning(True)
+
+ def tearDown(self):
+ for key in self.bucket.list_versions():
+ key.delete()
+ self.bucket.delete()
+
+ def test_key_overwrite_and_copy(self):
+ first_content = "abcdefghijklm"
+ second_content = "nopqrstuvwxyz"
+ k = Key(self.bucket, 'testkey')
+ k.set_contents_from_string(first_content)
+ # Wait for S3's eventual consistency (may not be necessary)
+ while self.bucket.get_key('testkey') is None:
+ time.sleep(5)
+ # Get the first version_id
+ first_key = self.bucket.get_key('testkey')
+ first_version_id = first_key.version_id
+ # Overwrite the key
+ k = Key(self.bucket, 'testkey')
+ k.set_contents_from_string(second_content)
+ # Wait for eventual consistency
+ while True:
+ second_key = self.bucket.get_key('testkey')
+ if second_key is None or second_key.version_id == first_version_id:
+ time.sleep(5)
+ else:
+ break
+ # Copy first key (no longer the current version) to a new key
+ source_key = self.bucket.get_key('testkey',
+ version_id=first_version_id)
+ source_key.copy(self.bucket, 'copiedkey')
+ while self.bucket.get_key('copiedkey') is None:
+ time.sleep(5)
+ copied_key = self.bucket.get_key('copiedkey')
+ copied_key_contents = copied_key.get_contents_as_string()
+ self.assertEqual(first_content, copied_key_contents)
+
diff --git a/tests/integration/sqs/test_connection.py b/tests/integration/sqs/test_connection.py
index 43648625..5ab80924 100644
--- a/tests/integration/sqs/test_connection.py
+++ b/tests/integration/sqs/test_connection.py
@@ -242,15 +242,11 @@ class SQSConnectionTest(unittest.TestCase):
def test_get_messages_attributes(self):
conn = SQSConnection()
current_timestamp = int(time.time())
- queue_name = 'test%d' % int(time.time())
- test = conn.create_queue(queue_name)
- self.addCleanup(conn.delete_queue, test)
+ test = self.create_temp_queue(conn)
time.sleep(65)
# Put a message in the queue.
- m1 = Message()
- m1.set_body('This is a test message.')
- test.write(m1)
+ self.put_queue_message(test)
self.assertEqual(test.count(), 1)
# Check all attributes.
@@ -265,9 +261,7 @@ class SQSConnectionTest(unittest.TestCase):
self.assertTrue(first_rec >= current_timestamp)
# Put another message in the queue.
- m2 = Message()
- m2.set_body('This is another test message.')
- test.write(m2)
+ self.put_queue_message(test)
self.assertEqual(test.count(), 1)
# Check a specific attribute.
@@ -279,3 +273,32 @@ class SQSConnectionTest(unittest.TestCase):
self.assertEqual(msg.attributes['ApproximateReceiveCount'], '1')
with self.assertRaises(KeyError):
msg.attributes['ApproximateFirstReceiveTimestamp']
+
+ def test_queue_purge(self):
+ conn = SQSConnection()
+ test = self.create_temp_queue(conn)
+ time.sleep(65)
+
+ # Put some messages in the queue.
+ for x in range(0, 4):
+ self.put_queue_message(test)
+ self.assertEqual(test.count(), 4)
+
+ # Now purge the queue
+ conn.purge_queue(test)
+
+ # Now assert queue count is 0
+ self.assertEqual(test.count(), 0)
+
+ def create_temp_queue(self, conn):
+ current_timestamp = int(time.time())
+ queue_name = 'test%d' % int(time.time())
+ test = conn.create_queue(queue_name)
+ self.addCleanup(conn.delete_queue, test)
+
+ return test
+
+ def put_queue_message(self, queue):
+ m1 = Message()
+ m1.set_body('This is a test message.')
+ queue.write(m1)
diff --git a/tests/integration/sts/test_session_token.py b/tests/integration/sts/test_session_token.py
index a441fb63..2c911d3a 100644
--- a/tests/integration/sts/test_session_token.py
+++ b/tests/integration/sts/test_session_token.py
@@ -88,4 +88,4 @@ class SessionTokenTest(unittest.TestCase):
creds = c.decode_authorization_message('b94d27b9934')
except BotoServerError as err:
self.assertEqual(err.status, 400)
- self.assertTrue('Invalid token' in err.body)
+ self.assertIn('InvalidAuthorizationMessageException', err.body)
diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py
index 9751decd..33329657 100644
--- a/tests/unit/__init__.py
+++ b/tests/unit/__init__.py
@@ -63,11 +63,10 @@ class AWSMockServiceTestCase(unittest.TestCase):
request_params = self.actual_request.params.copy()
if ignore_params_values is not None:
for param in ignore_params_values:
- # We still want to check that the ignore_params_values params
- # are in the request parameters, we just don't need to check
- # their value.
- self.assertIn(param, request_params)
- del request_params[param]
+ try:
+ del request_params[param]
+ except KeyError:
+ pass
self.assertDictEqual(request_params, params)
def set_http_response(self, status_code, reason='', header=[], body=None):
diff --git a/tests/unit/auth/test_sigv4.py b/tests/unit/auth/test_sigv4.py
index 7b4afa5c..8f7876b6 100644
--- a/tests/unit/auth/test_sigv4.py
+++ b/tests/unit/auth/test_sigv4.py
@@ -483,6 +483,19 @@ e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"""
authed_req = self.auth.canonical_request(request)
self.assertEqual(authed_req, expected)
+ def test_non_string_headers(self):
+ self.awesome_bucket_request.headers['Content-Length'] = 8
+ canonical_headers = self.auth.canonical_headers(
+ self.awesome_bucket_request.headers)
+ self.assertEqual(
+ canonical_headers,
+ 'content-length:8\n'
+ 'user-agent:Boto\n'
+ 'x-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae'
+ '41e4649b934ca495991b7852b855\n'
+ 'x-amz-date:20130605T193245Z'
+ )
+
class FakeS3Connection(object):
def __init__(self, *args, **kwargs):
@@ -518,8 +531,10 @@ class TestS3SigV4OptIn(MockServiceWithConfigTestCase):
def test_sigv4_non_optional(self):
# Requires SigV4.
- fake = FakeS3Connection(host='s3.cn-north-1.amazonaws.com.cn')
- self.assertEqual(fake._required_auth_capability(), ['hmac-v4-s3'])
+ for region in ['.cn-north', '.eu-central', '-eu-central']:
+ fake = FakeS3Connection(host='s3' + region + '-1.amazonaws.com')
+ self.assertEqual(
+ fake._required_auth_capability(), ['hmac-v4-s3'])
def test_sigv4_opt_in_config(self):
# Opt-in via the config.
diff --git a/tests/unit/auth/test_query.py b/tests/unit/auth/test_stsanon.py
index fa5882c9..9079a7d8 100644
--- a/tests/unit/auth/test_query.py
+++ b/tests/unit/auth/test_stsanon.py
@@ -23,11 +23,11 @@ import copy
from mock import Mock
from tests.unit import unittest
-from boto.auth import QueryAuthHandler
+from boto.auth import STSAnonHandler
from boto.connection import HTTPRequest
-class TestQueryAuthHandler(unittest.TestCase):
+class TestSTSAnonHandler(unittest.TestCase):
def setUp(self):
self.provider = Mock()
self.provider.access_key = 'access_key'
@@ -51,26 +51,28 @@ class TestQueryAuthHandler(unittest.TestCase):
)
def test_escape_value(self):
- auth = QueryAuthHandler('sts.amazonaws.com',
- Mock(), self.provider)
- # This should **NOT** get escaped.
+ auth = STSAnonHandler('sts.amazonaws.com',
+ Mock(), self.provider)
+ # This is changed from a previous version because this string is
+ # being passed to the query string and query strings must
+ # be url encoded.
value = auth._escape_value('Atza|IQEBLjAsAhRkcxQ')
- self.assertEqual(value, 'Atza|IQEBLjAsAhRkcxQ')
+ self.assertEqual(value, 'Atza%7CIQEBLjAsAhRkcxQ')
def test_build_query_string(self):
- auth = QueryAuthHandler('sts.amazonaws.com',
- Mock(), self.provider)
+ auth = STSAnonHandler('sts.amazonaws.com',
+ Mock(), self.provider)
query_string = auth._build_query_string(self.request.params)
self.assertEqual(query_string, 'Action=AssumeRoleWithWebIdentity' + \
'&ProviderId=2012-06-01&RoleSessionName=web-identity-federation' + \
- '&Version=2011-06-15&WebIdentityToken=Atza|IQEBLjAsAhRkcxQ')
+ '&Version=2011-06-15&WebIdentityToken=Atza%7CIQEBLjAsAhRkcxQ')
def test_add_auth(self):
- auth = QueryAuthHandler('sts.amazonaws.com',
- Mock(), self.provider)
+ auth = STSAnonHandler('sts.amazonaws.com',
+ Mock(), self.provider)
req = copy.copy(self.request)
auth.add_auth(req)
- self.assertEqual(req.path,
- '/?Action=AssumeRoleWithWebIdentity' + \
+ self.assertEqual(req.body,
+ 'Action=AssumeRoleWithWebIdentity' + \
'&ProviderId=2012-06-01&RoleSessionName=web-identity-federation' + \
- '&Version=2011-06-15&WebIdentityToken=Atza|IQEBLjAsAhRkcxQ')
+ '&Version=2011-06-15&WebIdentityToken=Atza%7CIQEBLjAsAhRkcxQ')
diff --git a/tests/unit/cloudformation/test_connection.py b/tests/unit/cloudformation/test_connection.py
index a863a2df..61d9fe7d 100644
--- a/tests/unit/cloudformation/test_connection.py
+++ b/tests/unit/cloudformation/test_connection.py
@@ -698,7 +698,7 @@ class TestCloudFormationSetStackPolicy(CloudFormationConnectionBase):
self.set_http_response(status_code=200)
api_response = self.service_connection.set_stack_policy('stack-id',
stack_policy_body='{}')
- self.assertEqual(api_response['Some'], 'content')
+ self.assertDictEqual(api_response, {'SetStackPolicyResult': {'Some': 'content'}})
self.assert_request_parameters({
'Action': 'SetStackPolicy',
'ContentType': 'JSON',
diff --git a/tests/unit/cloudsearchdomain/__init__.py b/tests/unit/cloudsearchdomain/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/unit/cloudsearchdomain/__init__.py
diff --git a/tests/unit/cloudsearchdomain/test_cloudsearchdomain.py b/tests/unit/cloudsearchdomain/test_cloudsearchdomain.py
new file mode 100644
index 00000000..694e98ff
--- /dev/null
+++ b/tests/unit/cloudsearchdomain/test_cloudsearchdomain.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python
+import json
+import mock
+from tests.unit import AWSMockServiceTestCase
+from boto.cloudsearch2.domain import Domain
+from boto.cloudsearch2.layer1 import CloudSearchConnection
+from boto.cloudsearchdomain.layer1 import CloudSearchDomainConnection
+
+
+class CloudSearchDomainConnectionTest(AWSMockServiceTestCase):
+ connection_class = CloudSearchDomainConnection
+
+ domain_status = """{
+ "SearchInstanceType": null,
+ "DomainId": "1234567890/demo",
+ "DomainName": "demo",
+ "Deleted": false,
+ "SearchInstanceCount": 0,
+ "Created": true,
+ "SearchService": {
+ "Endpoint": "search-demo.us-east-1.cloudsearch.amazonaws.com"
+ },
+ "RequiresIndexDocuments": false,
+ "Processing": false,
+ "DocService": {
+ "Endpoint": "doc-demo.us-east-1.cloudsearch.amazonaws.com"
+ },
+ "ARN": "arn:aws:cs:us-east-1:1234567890:domain/demo",
+ "SearchPartitionCount": 0
+ }"""
+
+ def create_service_connection(self, **kwargs):
+ if kwargs.get('host', None) is None:
+ kwargs['host'] = 'search-demo.us-east-1.cloudsearch.amazonaws.com'
+ return super(CloudSearchDomainConnectionTest, self).\
+ create_service_connection(**kwargs)
+
+ def test_get_search_service(self):
+ layer1 = CloudSearchConnection(aws_access_key_id='aws_access_key_id',
+ aws_secret_access_key='aws_secret_access_key',
+ sign_request=True)
+ domain = Domain(layer1=layer1, data=json.loads(self.domain_status))
+ search_service = domain.get_search_service()
+
+ self.assertEqual(search_service.sign_request, True)
+
+ def test_get_document_service(self):
+ layer1 = CloudSearchConnection(aws_access_key_id='aws_access_key_id',
+ aws_secret_access_key='aws_secret_access_key',
+ sign_request=True)
+ domain = Domain(layer1=layer1, data=json.loads(self.domain_status))
+ document_service = domain.get_document_service()
+
+ self.assertEqual(document_service.sign_request, True)
+
+ def test_search_with_auth(self):
+ layer1 = CloudSearchConnection(aws_access_key_id='aws_access_key_id',
+ aws_secret_access_key='aws_secret_access_key',
+ sign_request=True)
+ domain = Domain(layer1=layer1, data=json.loads(self.domain_status))
+ search_service = domain.get_search_service()
+
+ response = {
+ 'rank': '-text_relevance',
+ 'match-expr': "Test",
+ 'hits': {
+ 'found': 30,
+ 'start': 0,
+ 'hit': {
+ 'id': '12341',
+ 'fields': {
+ 'title': 'Document 1',
+ 'rank': 1
+ }
+ }
+ },
+ 'status': {
+ 'rid': 'b7c167f6c2da6d93531b9a7b314ad030b3a74803b4b7797edb905ba5a6a08',
+ 'time-ms': 2,
+ 'cpu-time-ms': 0
+ }
+
+ }
+
+ self.set_http_response(status_code=200, body=json.dumps(response))
+ search_service.domain_connection = self.service_connection
+ resp = search_service.search()
+
+ headers = self.actual_request.headers
+
+ self.assertIsNotNone(headers.get('Authorization'))
+
+ def test_upload_documents_with_auth(self):
+ layer1 = CloudSearchConnection(aws_access_key_id='aws_access_key_id',
+ aws_secret_access_key='aws_secret_access_key',
+ sign_request=True)
+ domain = Domain(layer1=layer1, data=json.loads(self.domain_status))
+ document_service = domain.get_document_service()
+
+ response = {
+ 'status': 'success',
+ 'adds': 1,
+ 'deletes': 0,
+ }
+
+ document = {
+ "id": "1234",
+ "title": "Title 1",
+ "category": ["cat_a", "cat_b", "cat_c"]
+ }
+
+ self.set_http_response(status_code=200, body=json.dumps(response))
+ document_service.domain_connection = self.service_connection
+ document_service.add("1234", document)
+ resp = document_service.commit()
+
+ headers = self.actual_request.headers
+
+ self.assertIsNotNone(headers.get('Authorization'))
+
+ def test_no_host_provided(self):
+ # A host must be provided or an error is raised.
+ with self.assertRaises(ValueError):
+ CloudSearchDomainConnection(
+ aws_access_key_id='aws_access_key_id',
+ aws_secret_access_key='aws_secret_access_key'
+ )
diff --git a/tests/unit/dynamodb2/test_table.py b/tests/unit/dynamodb2/test_table.py
index 49864cf5..21d0a75f 100644
--- a/tests/unit/dynamodb2/test_table.py
+++ b/tests/unit/dynamodb2/test_table.py
@@ -823,6 +823,12 @@ class ItemTestCase(unittest.TestCase):
self.assertFalse(self.create_item({}))
+class ItemFromItemTestCase(ItemTestCase):
+ def setUp(self):
+ super(ItemFromItemTestCase, self).setUp()
+ self.johndoe = self.create_item(self.johndoe)
+
+
def fake_results(name, greeting='hello', exclusive_start_key=None, limit=None):
if exclusive_start_key is None:
exclusive_start_key = -1
@@ -2663,6 +2669,33 @@ class TableTestCase(unittest.TestCase):
self.assertIn('limit', mock_query.call_args[1])
self.assertEqual(10, mock_query.call_args[1]['limit'])
+ def test_query_count_paginated(self):
+ def return_side_effect(*args, **kwargs):
+ if kwargs.get('exclusive_start_key'):
+ return {'Count': 10, 'LastEvaluatedKey': None}
+ else:
+ return {
+ 'Count': 20,
+ 'LastEvaluatedKey': {
+ 'username': {
+ 'S': 'johndoe'
+ },
+ 'date_joined': {
+ 'N': '4118642633'
+ }
+ }
+ }
+
+ with mock.patch.object(
+ self.users.connection,
+ 'query',
+ side_effect=return_side_effect
+ ) as mock_query:
+ count = self.users.query_count(username__eq='johndoe')
+ self.assertTrue(isinstance(count, int))
+ self.assertEqual(30, count)
+ self.assertEqual(mock_query.call_count, 2)
+
def test_private_batch_get(self):
expected = {
"ConsumedCapacity": {
diff --git a/tests/unit/ec2/test_connection.py b/tests/unit/ec2/test_connection.py
index 287e1a72..45a85846 100755
--- a/tests/unit/ec2/test_connection.py
+++ b/tests/unit/ec2/test_connection.py
@@ -1638,5 +1638,64 @@ class TestCreateVolume(TestEC2ConnectionBase):
self.assertEqual(result.id, 'vol-1a2b3c4d')
self.assertTrue(result.encrypted)
+
+class TestGetClassicLinkInstances(TestEC2ConnectionBase):
+ def default_body(self):
+ return b"""
+ <DescribeClassicLinkInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2014-09-01/">
+ <requestId>f4bf0cc6-5967-4687-9355-90ce48394bd3</requestId>
+ <instancesSet>
+ <item>
+ <instanceId>i-31489bd8</instanceId>
+ <vpcId>vpc-9d24f8f8</vpcId>
+ <groupSet>
+ <item>
+ <groupId>sg-9b4343fe</groupId>
+ </item>
+ </groupSet>
+ <tagSet>
+ <item>
+ <key>Name</key>
+ <value>hello</value>
+ </item>
+ </tagSet>
+ </item>
+ </instancesSet>
+ </DescribeClassicLinkInstancesResponse>
+ """
+ def test_get_classic_link_instances(self):
+ self.set_http_response(status_code=200)
+ response = self.ec2.get_all_classic_link_instances()
+ self.assertEqual(len(response), 1)
+ instance = response[0]
+ self.assertEqual(instance.id, 'i-31489bd8')
+ self.assertEqual(instance.vpc_id, 'vpc-9d24f8f8')
+ self.assertEqual(len(instance.groups), 1)
+ self.assertEqual(instance.groups[0].id, 'sg-9b4343fe')
+ self.assertEqual(instance.tags, {'Name': 'hello'})
+
+
+ def test_get_classic_link_instances_params(self):
+ self.set_http_response(status_code=200)
+ self.ec2.get_all_classic_link_instances(
+ instance_ids=['id1', 'id2'],
+ filters={'GroupId': 'sg-9b4343fe'},
+ dry_run=True,
+ next_token='next_token',
+ max_results=10
+ )
+ self.assert_request_parameters({
+ 'Action': 'DescribeClassicLinkInstances',
+ 'InstanceId.1': 'id1',
+ 'InstanceId.2': 'id2',
+ 'Filter.1.Name': 'GroupId',
+ 'Filter.1.Value.1': 'sg-9b4343fe',
+ 'DryRun': 'true',
+ 'NextToken': 'next_token',
+ 'MaxResults': 10},
+ ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
+ 'SignatureVersion', 'Timestamp', 'Version'])
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/tests/unit/ec2/test_reservedinstance.py b/tests/unit/ec2/test_reservedinstance.py
new file mode 100644
index 00000000..c1ec688b
--- /dev/null
+++ b/tests/unit/ec2/test_reservedinstance.py
@@ -0,0 +1,44 @@
+from tests.unit import AWSMockServiceTestCase
+from boto.ec2.connection import EC2Connection
+from boto.ec2.reservedinstance import ReservedInstance
+
+
+class TestReservedInstancesSet(AWSMockServiceTestCase):
+ connection_class = EC2Connection
+
+ def default_body(self):
+ return b"""
+<reservedInstancesSet>
+ <item>
+ <reservedInstancesId>ididididid</reservedInstancesId>
+ <instanceType>t1.micro</instanceType>
+ <start>2014-05-03T14:10:10.944Z</start>
+ <end>2014-05-03T14:10:11.000Z</end>
+ <duration>64800000</duration>
+ <fixedPrice>62.5</fixedPrice>
+ <usagePrice>0.0</usagePrice>
+ <instanceCount>5</instanceCount>
+ <productDescription>Linux/UNIX</productDescription>
+ <state>retired</state>
+ <instanceTenancy>default</instanceTenancy>
+ <currencyCode>USD</currencyCode>
+ <offeringType>Heavy Utilization</offeringType>
+ <recurringCharges>
+ <item>
+ <frequency>Hourly</frequency>
+ <amount>0.005</amount>
+ </item>
+ </recurringCharges>
+ </item>
+</reservedInstancesSet>"""
+
+ def test_get_all_reserved_instaces(self):
+ self.set_http_response(status_code=200)
+ response = self.service_connection.get_all_reserved_instances()
+
+ self.assertEqual(len(response), 1)
+ self.assertTrue(isinstance(response[0], ReservedInstance))
+ self.assertEquals(response[0].id, 'ididididid')
+ self.assertEquals(response[0].instance_count, 5)
+ self.assertEquals(response[0].start, '2014-05-03T14:10:10.944Z')
+ self.assertEquals(response[0].end, '2014-05-03T14:10:11.000Z')
diff --git a/tests/unit/emr/test_connection.py b/tests/unit/emr/test_connection.py
index c60f04a4..84d3ff8f 100644
--- a/tests/unit/emr/test_connection.py
+++ b/tests/unit/emr/test_connection.py
@@ -27,7 +27,7 @@ from tests.unit import AWSMockServiceTestCase
from boto.emr.connection import EmrConnection
from boto.emr.emrobject import BootstrapAction, BootstrapActionList, \
- ClusterStatus, ClusterSummaryList, \
+ ClusterStateChangeReason, ClusterStatus, ClusterSummaryList, \
ClusterSummary, ClusterTimeline, InstanceInfo, \
InstanceList, InstanceGroupInfo, \
InstanceGroup, InstanceGroupList, JobFlow, \
@@ -62,6 +62,7 @@ class TestListClusters(AWSMockServiceTestCase):
</Timeline>
</Status>
<Name>analytics test</Name>
+ <NormalizedInstanceHours>10</NormalizedInstanceHours>
</member>
<member>
<Id>j-aaaaaaaaaaaab</Id>
@@ -78,6 +79,7 @@ class TestListClusters(AWSMockServiceTestCase):
</Timeline>
</Status>
<Name>test job</Name>
+ <NormalizedInstanceHours>20</NormalizedInstanceHours>
</member>
</Clusters>
</ListClustersResult>
@@ -99,10 +101,13 @@ class TestListClusters(AWSMockServiceTestCase):
self.assertTrue(isinstance(response, ClusterSummaryList))
self.assertEqual(len(response.clusters), 2)
+
self.assertTrue(isinstance(response.clusters[0], ClusterSummary))
self.assertEqual(response.clusters[0].name, 'analytics test')
+ self.assertEqual(response.clusters[0].normalizedinstancehours, '10')
self.assertTrue(isinstance(response.clusters[0].status, ClusterStatus))
+ self.assertEqual(response.clusters[0].status.state, 'TERMINATED')
self.assertTrue(isinstance(response.clusters[0].status.timeline, ClusterTimeline))
@@ -110,6 +115,9 @@ class TestListClusters(AWSMockServiceTestCase):
self.assertEqual(response.clusters[0].status.timeline.readydatetime, '2014-01-24T01:25:26Z')
self.assertEqual(response.clusters[0].status.timeline.enddatetime, '2014-01-24T02:19:46Z')
+ self.assertTrue(isinstance(response.clusters[0].status.statechangereason, ClusterStateChangeReason))
+ self.assertEqual(response.clusters[0].status.statechangereason.code, 'USER_REQUEST')
+ self.assertEqual(response.clusters[0].status.statechangereason.message, 'Terminated by user request')
def test_list_clusters_created_before(self):
self.set_http_response(status_code=200)
@@ -558,6 +566,9 @@ class TestDescribeCluster(AWSMockServiceTestCase):
</member>
</Applications>
<TerminationProtected>false</TerminationProtected>
+ <MasterPublicDnsName>ec2-184-0-0-1.us-west-1.compute.amazonaws.com</MasterPublicDnsName>
+ <NormalizedInstanceHours>10</NormalizedInstanceHours>
+ <ServiceRole>my-service-role</ServiceRole>
</Cluster>
</DescribeClusterResult>
<ResponseMetadata>
@@ -587,6 +598,9 @@ class TestDescribeCluster(AWSMockServiceTestCase):
self.assertEqual(response.status.state, 'TERMINATED')
self.assertEqual(response.applications[0].name, 'hadoop')
self.assertEqual(response.applications[0].version, '1.0.3')
+ self.assertEqual(response.masterpublicdnsname, 'ec2-184-0-0-1.us-west-1.compute.amazonaws.com')
+ self.assertEqual(response.normalizedinstancehours, '10')
+ self.assertEqual(response.servicerole, 'my-service-role')
self.assert_request_parameters({
'Action': 'DescribeCluster',
diff --git a/tests/unit/emr/test_emr_responses.py b/tests/unit/emr/test_emr_responses.py
index 636e8023..dda6b928 100644
--- a/tests/unit/emr/test_emr_responses.py
+++ b/tests/unit/emr/test_emr_responses.py
@@ -42,6 +42,21 @@ JOB_FLOW_EXAMPLE = b"""
<StartDateTime>2009-01-28T21:49:16Z</StartDateTime>
<State>STARTING</State>
</ExecutionStatusDetail>
+ <BootstrapActions>
+ <member>
+ <BootstrapActionConfig>
+ <ScriptBootstrapAction>
+ <Args/>
+ <Path>s3://elasticmapreduce/libs/hue/install-hue</Path>
+ </ScriptBootstrapAction>
+ <Name>Install Hue</Name>
+ </BootstrapActionConfig>
+ </member>
+ </BootstrapActions>
+ <VisibleToAllUsers>true</VisibleToAllUsers>
+ <SupportedProducts>
+ <member>Hue</member>
+ </SupportedProducts>
<Name>MyJobFlowName</Name>
<LogUri>mybucket/subdir/</LogUri>
<Steps>
diff --git a/tests/unit/glacier/test_response.py b/tests/unit/glacier/test_response.py
new file mode 100644
index 00000000..1f75f64b
--- /dev/null
+++ b/tests/unit/glacier/test_response.py
@@ -0,0 +1,35 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from tests.unit import AWSMockServiceTestCase
+from boto.glacier.layer1 import Layer1
+from boto.glacier.response import GlacierResponse
+
+class TestResponse(AWSMockServiceTestCase):
+ connection_class = Layer1
+
+ def test_204_body_isnt_passed_to_json(self):
+ response = self.create_response(status_code=204,header=[('Content-Type','application/json')])
+ result = GlacierResponse(response,response.getheaders())
+ self.assertEquals(result.status, response.status)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tests/unit/iam/test_connection.py b/tests/unit/iam/test_connection.py
index e1a521ff..5c760811 100644
--- a/tests/unit/iam/test_connection.py
+++ b/tests/unit/iam/test_connection.py
@@ -393,3 +393,89 @@ class TestCreateVirtualMFADevice(AWSMockServiceTestCase):
['create_virtual_mfa_device_result']
['virtual_mfa_device']
['serial_number'], 'arn:aws:iam::123456789012:mfa/ExampleName')
+
+class TestGetAccountPasswordPolicy(AWSMockServiceTestCase):
+ connection_class = IAMConnection
+
+ def default_body(self):
+ return b"""
+ <GetAccountPasswordPolicyResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
+ <GetAccountPasswordPolicyResult>
+ <PasswordPolicy>
+ <AllowUsersToChangePassword>true</AllowUsersToChangePassword>
+ <RequireUppercaseCharacters>true</RequireUppercaseCharacters>
+ <RequireSymbols>true</RequireSymbols>
+ <ExpirePasswords>false</ExpirePasswords>
+ <PasswordReusePrevention>12</PasswordReusePrevention>
+ <RequireLowercaseCharacters>true</RequireLowercaseCharacters>
+ <MaxPasswordAge>90</MaxPasswordAge>
+ <HardExpiry>false</HardExpiry>
+ <RequireNumbers>true</RequireNumbers>
+ <MinimumPasswordLength>12</MinimumPasswordLength>
+ </PasswordPolicy>
+ </GetAccountPasswordPolicyResult>
+ <ResponseMetadata>
+ <RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
+ </ResponseMetadata>
+ </GetAccountPasswordPolicyResponse>
+ """
+
+ def test_get_account_password_policy(self):
+ self.set_http_response(status_code=200)
+ response = self.service_connection.get_account_password_policy()
+
+ self.assert_request_parameters(
+ {
+ 'Action': 'GetAccountPasswordPolicy',
+ },
+ ignore_params_values=['Version'])
+ self.assertEquals(response['get_account_password_policy_response']
+ ['get_account_password_policy_result']['password_policy']
+ ['minimum_password_length'], '12')
+
+
+class TestUpdateAccountPasswordPolicy(AWSMockServiceTestCase):
+ connection_class = IAMConnection
+
+ def default_body(self):
+ return b"""
+ <UpdateAccountPasswordPolicyResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
+ <ResponseMetadata>
+ <RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
+ </ResponseMetadata>
+ </UpdateAccountPasswordPolicyResponse>
+ """
+
+ def test_update_account_password_policy(self):
+ self.set_http_response(status_code=200)
+ response = self.service_connection.update_account_password_policy(minimum_password_length=88)
+
+ self.assert_request_parameters(
+ {
+ 'Action': 'UpdateAccountPasswordPolicy',
+ 'MinimumPasswordLength': 88
+ },
+ ignore_params_values=['Version'])
+
+
+class TestDeleteAccountPasswordPolicy(AWSMockServiceTestCase):
+ connection_class = IAMConnection
+
+ def default_body(self):
+ return b"""
+ <DeleteAccountPasswordPolicyResponse>
+ <ResponseMetadata>
+ <RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
+ </ResponseMetadata>
+ </DeleteAccountPasswordPolicyResponse>
+ """
+
+ def test_delete_account_password_policy(self):
+ self.set_http_response(status_code=200)
+ response = self.service_connection.delete_account_password_policy()
+
+ self.assert_request_parameters(
+ {
+ 'Action': 'DeleteAccountPasswordPolicy'
+ },
+ ignore_params_values=['Version'])
diff --git a/tests/unit/mws/test_connection.py b/tests/unit/mws/test_connection.py
index e3830492..fade578d 100644
--- a/tests/unit/mws/test_connection.py
+++ b/tests/unit/mws/test_connection.py
@@ -192,6 +192,14 @@ doc/2009-01-01/">
self.assertTrue('throttled' in str(err.reason))
self.assertEqual(int(err.status), 200)
+
+ def test_sandboxify(self):
+ # Create one-off connection class that has self._sandboxed = True
+ conn = MWSConnection(https_connection_factory=self.https_connection_factory,
+ aws_access_key_id='aws_access_key_id',
+ aws_secret_access_key='aws_secret_access_key',
+ sandbox=True)
+ self.assertEqual(conn._sandboxify('a/bogus/path'), 'a/bogus_Sandbox/path')
if __name__ == '__main__':
unittest.main()
diff --git a/tests/unit/provider/test_provider.py b/tests/unit/provider/test_provider.py
index 4decd4a6..89092253 100644
--- a/tests/unit/provider/test_provider.py
+++ b/tests/unit/provider/test_provider.py
@@ -104,6 +104,17 @@ class TestProvider(unittest.TestCase):
self.assertEqual(p.secret_key, 'env_secret_key')
self.assertEqual(p.security_token, 'env_security_token')
+ def test_no_credentials_provided(self):
+ p = provider.Provider(
+ 'aws',
+ provider.NO_CREDENTIALS_PROVIDED,
+ provider.NO_CREDENTIALS_PROVIDED,
+ provider.NO_CREDENTIALS_PROVIDED
+ )
+ self.assertEqual(p.access_key, provider.NO_CREDENTIALS_PROVIDED)
+ self.assertEqual(p.secret_key, provider.NO_CREDENTIALS_PROVIDED)
+ self.assertEqual(p.security_token, provider.NO_CREDENTIALS_PROVIDED)
+
def test_config_profile_values_are_used(self):
self.config = {
'profile dev': {
diff --git a/tests/unit/route53/test_connection.py b/tests/unit/route53/test_connection.py
index 3c696c7a..d1a80152 100644
--- a/tests/unit/route53/test_connection.py
+++ b/tests/unit/route53/test_connection.py
@@ -101,6 +101,16 @@ class TestRoute53Connection(AWSMockServiceTestCase):
# Unpatch.
self.service_connection._retry_handler = orig_retry
+ def test_private_zone_invalid_vpc_400(self):
+ self.set_http_response(status_code=400, header=[
+ ['Code', 'InvalidVPCId'],
+ ])
+
+ with self.assertRaises(DNSServerError) as err:
+ self.service_connection.create_hosted_zone("example.com.",
+ private_zone=True)
+ self.assertTrue('It failed.' in str(err.exception))
+
@attr(route53=True)
class TestCreateZoneRoute53(AWSMockServiceTestCase):
@@ -118,6 +128,7 @@ class TestCreateZoneRoute53(AWSMockServiceTestCase):
<CallerReference>aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee</CallerReference>
<Config>
<Comment></Comment>
+ <PrivateZone>false</PrivateZone>
</Config>
<ResourceRecordSetCount>2</ResourceRecordSetCount>
</HostedZone>
@@ -147,10 +158,76 @@ class TestCreateZoneRoute53(AWSMockServiceTestCase):
def test_create_hosted_zone(self):
self.set_http_response(status_code=201)
- response = self.service_connection.create_hosted_zone("example.com.", "my_ref", "this is a comment")
+ response = self.service_connection.create_hosted_zone("example.com.",
+ "my_ref",
+ "a comment")
+
+ self.assertEqual(response['CreateHostedZoneResponse']
+ ['DelegationSet']['NameServers'],
+ ['ns-100.awsdns-01.com',
+ 'ns-1000.awsdns-01.co.uk',
+ 'ns-1000.awsdns-01.org',
+ 'ns-900.awsdns-01.net'])
+
+ self.assertEqual(response['CreateHostedZoneResponse']
+ ['HostedZone']['Config']['PrivateZone'],
+ u'false')
+
+
+@attr(route53=True)
+class TestCreatePrivateZoneRoute53(AWSMockServiceTestCase):
+ connection_class = Route53Connection
+
+ def setUp(self):
+ super(TestCreatePrivateZoneRoute53, self).setUp()
+
+ def default_body(self):
+ return b"""
+<CreateHostedZoneResponse xmlns="https://route53.amazonaws.com/doc/2012-02-29/">
+ <HostedZone>
+ <Id>/hostedzone/Z11111</Id>
+ <Name>example.com.</Name>
+ <VPC>
+ <VPCId>vpc-1a2b3c4d</VPCId>
+ <VPCRegion>us-east-1</VPCRegion>
+ </VPC>
+ <CallerReference>aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee</CallerReference>
+ <Config>
+ <Comment></Comment>
+ <PrivateZone>true</PrivateZone>
+ </Config>
+ <ResourceRecordSetCount>2</ResourceRecordSetCount>
+ </HostedZone>
+ <ChangeInfo>
+ <Id>/change/C1111111111111</Id>
+ <Status>PENDING</Status>
+ <SubmittedAt>2014-02-02T10:19:29.928Z</SubmittedAt>
+ </ChangeInfo>
+ <DelegationSet>
+ <NameServers>
+ <NameServer>ns-100.awsdns-01.com</NameServer>
+ <NameServer>ns-1000.awsdns-01.co.uk</NameServer>
+ <NameServer>ns-1000.awsdns-01.org</NameServer>
+ <NameServer>ns-900.awsdns-01.net</NameServer>
+ </NameServers>
+ </DelegationSet>
+</CreateHostedZoneResponse>
+ """
+
+ def test_create_private_zone(self):
+ self.set_http_response(status_code=201)
+ r = self.service_connection.create_hosted_zone("example.com.",
+ private_zone=True,
+ vpc_id='vpc-1a2b3c4d',
+ vpc_region='us-east-1'
+ )
- self.assertEqual(response['CreateHostedZoneResponse']['DelegationSet']['NameServers'],
- ['ns-100.awsdns-01.com', 'ns-1000.awsdns-01.co.uk', 'ns-1000.awsdns-01.org', 'ns-900.awsdns-01.net'])
+ self.assertEqual(r['CreateHostedZoneResponse']['HostedZone']
+ ['Config']['PrivateZone'], u'true')
+ self.assertEqual(r['CreateHostedZoneResponse']['HostedZone']
+ ['VPC']['VPCId'], u'vpc-1a2b3c4d')
+ self.assertEqual(r['CreateHostedZoneResponse']['HostedZone']
+ ['VPC']['VPCRegion'], u'us-east-1')
@attr(route53=True)
@@ -243,10 +320,16 @@ class TestGetHostedZoneRoute53(AWSMockServiceTestCase):
self.set_http_response(status_code=201)
response = self.service_connection.get_hosted_zone("Z1111")
- self.assertEqual(response['GetHostedZoneResponse']['HostedZone']['Id'], '/hostedzone/Z1111')
- self.assertEqual(response['GetHostedZoneResponse']['HostedZone']['Name'], 'example.com.')
- self.assertEqual(response['GetHostedZoneResponse']['DelegationSet']['NameServers'],
- ['ns-1000.awsdns-40.org', 'ns-200.awsdns-30.com', 'ns-900.awsdns-50.net', 'ns-1000.awsdns-00.co.uk'])
+ self.assertEqual(response['GetHostedZoneResponse']
+ ['HostedZone']['Id'],
+ '/hostedzone/Z1111')
+ self.assertEqual(response['GetHostedZoneResponse']
+ ['HostedZone']['Name'],
+ 'example.com.')
+ self.assertEqual(response['GetHostedZoneResponse']
+ ['DelegationSet']['NameServers'],
+ ['ns-1000.awsdns-40.org', 'ns-200.awsdns-30.com',
+ 'ns-900.awsdns-50.net', 'ns-1000.awsdns-00.co.uk'])
@attr(route53=True)
@@ -336,7 +419,9 @@ class TestGetAllRRSetsRoute53(AWSMockServiceTestCase):
def test_get_all_rr_sets(self):
self.set_http_response(status_code=200)
- response = self.service_connection.get_all_rrsets("Z1111", "A", "example.com.")
+ response = self.service_connection.get_all_rrsets("Z1111",
+ "A",
+ "example.com.")
self.assertIn(self.actual_request.path,
("/2013-04-01/hostedzone/Z1111/rrset?type=A&name=example.com.",
@@ -530,6 +615,36 @@ class TestCreateHealthCheckRoute53IpAddress(AWSMockServiceTestCase):
@attr(route53=True)
+class TestGetCheckerIpRanges(AWSMockServiceTestCase):
+ connection_class = Route53Connection
+
+ def default_body(self):
+ return b"""
+<GetCheckerIpRangesResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
+ <CheckerIpRanges>
+ <member>54.183.255.128/26</member>
+ <member>54.228.16.0/26</member>
+ <member>54.232.40.64/26</member>
+ <member>177.71.207.128/26</member>
+ <member>176.34.159.192/26</member>
+ </CheckerIpRanges>
+</GetCheckerIpRangesResponse>
+ """
+
+ def test_get_checker_ip_ranges(self):
+ self.set_http_response(status_code=200)
+ response = self.service_connection.get_checker_ip_ranges()
+ ip_ranges = response['GetCheckerIpRangesResponse']['CheckerIpRanges']
+
+ self.assertEqual(len(ip_ranges), 5)
+ self.assertIn('54.183.255.128/26', ip_ranges)
+ self.assertIn('54.228.16.0/26', ip_ranges)
+ self.assertIn('54.232.40.64/26', ip_ranges)
+ self.assertIn('177.71.207.128/26', ip_ranges)
+ self.assertIn('176.34.159.192/26', ip_ranges)
+
+
+@attr(route53=True)
class TestCreateHealthCheckRoute53FQDN(AWSMockServiceTestCase):
connection_class = Route53Connection
diff --git a/tests/unit/s3/test_key.py b/tests/unit/s3/test_key.py
index 6f6f6430..26e2fc82 100644
--- a/tests/unit/s3/test_key.py
+++ b/tests/unit/s3/test_key.py
@@ -103,6 +103,54 @@ class TestS3Key(AWSMockServiceTestCase):
k.set_contents_from_string('test')
k.bucket.list.assert_not_called()
+ def test_change_storage_class(self):
+ self.set_http_response(status_code=200)
+ b = Bucket(self.service_connection, 'mybucket')
+ k = b.get_key('fookey')
+
+ # Mock out Key.copy so we can record calls to it
+ k.copy = mock.MagicMock()
+ # Mock out the bucket so we don't actually need to have fake responses
+ k.bucket = mock.MagicMock()
+ k.bucket.name = 'mybucket'
+
+ self.assertEqual(k.storage_class, 'STANDARD')
+
+ # The default change_storage_class call should result in a copy to our
+ # bucket
+ k.change_storage_class('REDUCED_REDUNDANCY')
+ k.copy.assert_called_with(
+ 'mybucket',
+ 'fookey',
+ reduced_redundancy=True,
+ preserve_acl=True,
+ validate_dst_bucket=True,
+ )
+
+ def test_change_storage_class_new_bucket(self):
+ self.set_http_response(status_code=200)
+ b = Bucket(self.service_connection, 'mybucket')
+ k = b.get_key('fookey')
+
+ # Mock out Key.copy so we can record calls to it
+ k.copy = mock.MagicMock()
+ # Mock out the bucket so we don't actually need to have fake responses
+ k.bucket = mock.MagicMock()
+ k.bucket.name = 'mybucket'
+
+ self.assertEqual(k.storage_class, 'STANDARD')
+ # Specifying a different dst_bucket should result in a copy to the new
+ # bucket
+ k.copy.reset_mock()
+ k.change_storage_class('REDUCED_REDUNDANCY', dst_bucket='yourbucket')
+ k.copy.assert_called_with(
+ 'yourbucket',
+ 'fookey',
+ reduced_redundancy=True,
+ preserve_acl=True,
+ validate_dst_bucket=True,
+ )
+
def counter(fn):
def _wrapper(*args, **kwargs):
diff --git a/tests/unit/ses/test_identity.py b/tests/unit/ses/test_identity.py
index 014d68ab..1187514a 100644
--- a/tests/unit/ses/test_identity.py
+++ b/tests/unit/ses/test_identity.py
@@ -39,7 +39,7 @@ xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
<GetIdentityDkimAttributesResult>
<DkimAttributes>
<entry>
- <key>amazon.com</key>
+ <key>test@amazon.com</key>
<value>
<DkimEnabled>true</DkimEnabled>
<DkimVerificationStatus>Success</DkimVerificationStatus>
@@ -50,6 +50,13 @@ xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
</DkimTokens>
</value>
</entry>
+ <entry>
+ <key>secondtest@amazon.com</key>
+ <value>
+ <DkimEnabled>false</DkimEnabled>
+ <DkimVerificationStatus>NotStarted</DkimVerificationStatus>
+ </value>
+ </entry>
</DkimAttributes>
</GetIdentityDkimAttributesResult>
<ResponseMetadata>
@@ -61,13 +68,17 @@ xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
self.set_http_response(status_code=200)
response = self.service_connection\
- .get_identity_dkim_attributes(['test@amazon.com'])
+ .get_identity_dkim_attributes(['test@amazon.com', 'secondtest@amazon.com'])
response = response['GetIdentityDkimAttributesResponse']
result = response['GetIdentityDkimAttributesResult']
- attributes = result['DkimAttributes']['entry']['value']
+
+ first_entry = result['DkimAttributes'][0]
+ entry_key = first_entry['key']
+ attributes = first_entry['value']
tokens = attributes['DkimTokens']
+ self.assertEqual(entry_key, 'test@amazon.com')
self.assertEqual(ListElement, type(tokens))
self.assertEqual(3, len(tokens))
self.assertEqual('vvjuipp74whm76gqoni7qmwwn4w4qusjiainivf6f',
@@ -77,6 +88,16 @@ xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
self.assertEqual('wrqplteh7oodxnad7hsl4mixg2uavzneazxv5sxi2',
tokens[2])
+ second_entry = result['DkimAttributes'][1]
+ entry_key = second_entry['key']
+ attributes = second_entry['value']
+ dkim_enabled = attributes['DkimEnabled']
+ dkim_verification_status = attributes['DkimVerificationStatus']
+
+ self.assertEqual(entry_key, 'secondtest@amazon.com')
+ self.assertEqual(dkim_enabled, 'false')
+ self.assertEqual(dkim_verification_status, 'NotStarted')
+
class TestSESSetIdentityNotificationTopic(AWSMockServiceTestCase):
connection_class = SESConnection
diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py
index ec9fe997..1ad3df6e 100644
--- a/tests/unit/test_connection.py
+++ b/tests/unit/test_connection.py
@@ -387,6 +387,26 @@ class TestAWSQueryConnectionSimple(TestAWSQueryConnection):
'POST')
self.assertEqual(resp.read(), b"{'test': 'success'}")
+ def test_unhandled_exception(self):
+ HTTPretty.register_uri(HTTPretty.POST,
+ 'https://%s/temp_exception/' % self.region.endpoint,
+ responses=[])
+
+ def fake_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
+ source_address=None):
+ raise socket.timeout('fake error')
+
+ socket.create_connection = fake_connection
+
+ conn = self.region.connect(aws_access_key_id='access_key',
+ aws_secret_access_key='secret')
+ conn.num_retries = 0
+ with self.assertRaises(socket.error):
+ resp = conn.make_request('myCmd1',
+ {'par1': 'foo', 'par2': 'baz'},
+ '/temp_exception/',
+ 'POST')
+
def test_connection_close(self):
"""Check connection re-use after close header is received"""
HTTPretty.register_uri(HTTPretty.POST,
diff --git a/tests/unit/vpc/test_vpc.py b/tests/unit/vpc/test_vpc.py
index 3c02c29c..f8adaf8a 100644
--- a/tests/unit/vpc/test_vpc.py
+++ b/tests/unit/vpc/test_vpc.py
@@ -3,6 +3,7 @@ from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
from boto.vpc import VPCConnection, VPC
+from boto.ec2.securitygroup import SecurityGroup
DESCRIBE_VPCS = b'''<?xml version="1.0" encoding="UTF-8"?>
@@ -138,5 +139,229 @@ class TestModifyVpcAttribute(AWSMockServiceTestCase):
'Version'])
self.assertEquals(api_response, True)
+
+class TestGetAllClassicLinkVpc(AWSMockServiceTestCase):
+
+ connection_class = VPCConnection
+
+ def default_body(self):
+ return b"""
+ <DescribeVpcClassicLinkResponse xmlns="http://ec2.amazonaws.com/doc/2014-09-01/">
+ <requestId>2484655d-d669-4950-bf55-7ba559805d36</requestId>
+ <vpcSet>
+ <item>
+ <vpcId>vpc-6226ab07</vpcId>
+ <classicLinkEnabled>false</classicLinkEnabled>
+ <tagSet>
+ <item>
+ <key>Name</key>
+                <value>hello</value>
+ </item>
+ </tagSet>
+ </item>
+ <item>
+ <vpcId>vpc-9d24f8f8</vpcId>
+ <classicLinkEnabled>true</classicLinkEnabled>
+ <tagSet/>
+ </item>
+ </vpcSet>
+ </DescribeVpcClassicLinkResponse>
+ """
+
+ def test_get_all_classic_link_vpcs(self):
+ self.set_http_response(status_code=200)
+ response = self.service_connection.get_all_classic_link_vpcs()
+ self.assertEqual(len(response), 2)
+ vpc = response[0]
+ self.assertEqual(vpc.id, 'vpc-6226ab07')
+ self.assertEqual(vpc.classic_link_enabled, 'false')
+ self.assertEqual(vpc.tags, {'Name': 'hello'})
+
+ def test_get_all_classic_link_vpcs_params(self):
+ self.set_http_response(status_code=200)
+ self.service_connection.get_all_classic_link_vpcs(
+ vpc_ids=['id1', 'id2'],
+ filters={'GroupId': 'sg-9b4343fe'},
+ dry_run=True,
+ )
+ self.assert_request_parameters({
+ 'Action': 'DescribeVpcClassicLink',
+ 'VpcId.1': 'id1',
+ 'VpcId.2': 'id2',
+ 'Filter.1.Name': 'GroupId',
+ 'Filter.1.Value.1': 'sg-9b4343fe',
+ 'DryRun': 'true'},
+ ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
+ 'SignatureVersion', 'Timestamp', 'Version'])
+
+
+class TestVpcClassicLink(AWSMockServiceTestCase):
+ connection_class = VPCConnection
+
+ def setUp(self):
+ super(TestVpcClassicLink, self).setUp()
+ self.vpc = VPC(self.service_connection)
+ self.vpc_id = 'myid'
+ self.vpc.id = self.vpc_id
+
+
+class TestAttachClassicLinkVpc(TestVpcClassicLink):
+ def default_body(self):
+ return b"""
+ <AttachClassicLinkVpcResponse xmlns="http://ec2.amazonaws.com/doc/2014-09-01/">
+ <requestId>88673bdf-cd16-40bf-87a1-6132fec47257</requestId>
+ <return>true</return>
+ </AttachClassicLinkVpcResponse>
+ """
+
+ def test_attach_classic_link_instance_string_groups(self):
+ groups = ['sg-foo', 'sg-bar']
+
+ self.set_http_response(status_code=200)
+ response = self.vpc.attach_classic_instance(
+ instance_id='my_instance_id',
+ groups=groups,
+ dry_run=True
+ )
+ self.assertTrue(response)
+ self.assert_request_parameters({
+ 'Action': 'AttachClassicLinkVpc',
+ 'VpcId': self.vpc_id,
+ 'InstanceId': 'my_instance_id',
+ 'SecurityGroupId.1': 'sg-foo',
+ 'SecurityGroupId.2': 'sg-bar',
+ 'DryRun': 'true'},
+ ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
+ 'SignatureVersion', 'Timestamp', 'Version'])
+
+ def test_attach_classic_link_instance_object_groups(self):
+ sec_group_1 = SecurityGroup()
+ sec_group_1.id = 'sg-foo'
+
+ sec_group_2 = SecurityGroup()
+ sec_group_2.id = 'sg-bar'
+
+ groups = [sec_group_1, sec_group_2]
+
+ self.set_http_response(status_code=200)
+ response = self.vpc.attach_classic_instance(
+ instance_id='my_instance_id',
+ groups=groups,
+ dry_run=True
+ )
+ self.assertTrue(response)
+ self.assert_request_parameters({
+ 'Action': 'AttachClassicLinkVpc',
+ 'VpcId': self.vpc_id,
+ 'InstanceId': 'my_instance_id',
+ 'SecurityGroupId.1': 'sg-foo',
+ 'SecurityGroupId.2': 'sg-bar',
+ 'DryRun': 'true'},
+ ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
+ 'SignatureVersion', 'Timestamp', 'Version'])
+
+
+class TestDetachClassicLinkVpc(TestVpcClassicLink):
+ def default_body(self):
+ return b"""
+ <DetachClassicLinkVpcResponse xmlns="http://ec2.amazonaws.com/doc/2014-09-01/">
+ <requestId>5565033d-1321-4eef-b121-6aa46f152ed7</requestId>
+ <return>true</return>
+ </DetachClassicLinkVpcResponse>
+ """
+
+ def test_detach_classic_link_instance(self):
+ self.set_http_response(status_code=200)
+ response = self.vpc.detach_classic_instance(
+ instance_id='my_instance_id',
+ dry_run=True
+ )
+ self.assertTrue(response)
+ self.assert_request_parameters({
+ 'Action': 'DetachClassicLinkVpc',
+ 'VpcId': self.vpc_id,
+ 'InstanceId': 'my_instance_id',
+ 'DryRun': 'true'},
+ ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
+ 'SignatureVersion', 'Timestamp', 'Version'])
+
+
+class TestEnableClassicLinkVpc(TestVpcClassicLink):
+ def default_body(self):
+ return b"""
+ <EnableVpcClassicLinkResponse xmlns="http://ec2.amazonaws.com/doc/2014-09-01/">
+ <requestId>4ab2b2b3-a267-4366-a070-bab853b5927d</requestId>
+ <return>true</return>
+ </EnableVpcClassicLinkResponse>
+ """
+
+ def test_enable_classic_link(self):
+ self.set_http_response(status_code=200)
+ response = self.vpc.enable_classic_link(
+ dry_run=True
+ )
+ self.assertTrue(response)
+ self.assert_request_parameters({
+ 'Action': 'EnableVpcClassicLink',
+ 'VpcId': self.vpc_id,
+ 'DryRun': 'true'},
+ ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
+ 'SignatureVersion', 'Timestamp', 'Version'])
+
+
+class TestDisableClassicLinkVpc(TestVpcClassicLink):
+ def default_body(self):
+ return b"""
+ <DisableVpcClassicLinkResponse xmlns="http://ec2.amazonaws.com/doc/2014-09-01/">
+ <requestId>4ab2b2b3-a267-4366-a070-bab853b5927d</requestId>
+ <return>true</return>
+ </DisableVpcClassicLinkResponse>
+ """
+
+    def test_disable_classic_link(self):
+ self.set_http_response(status_code=200)
+ response = self.vpc.disable_classic_link(
+ dry_run=True
+ )
+ self.assertTrue(response)
+ self.assert_request_parameters({
+ 'Action': 'DisableVpcClassicLink',
+ 'VpcId': self.vpc_id,
+ 'DryRun': 'true'},
+ ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
+ 'SignatureVersion', 'Timestamp', 'Version'])
+
+
+class TestUpdateClassicLinkVpc(TestVpcClassicLink):
+ def default_body(self):
+ return b"""
+ <DescribeVpcClassicLinkResponse xmlns="http://ec2.amazonaws.com/doc/2014-09-01/">
+ <requestId>2484655d-d669-4950-bf55-7ba559805d36</requestId>
+ <vpcSet>
+ <item>
+ <vpcId>myid</vpcId>
+ <classicLinkEnabled>true</classicLinkEnabled>
+ <tagSet/>
+ </item>
+ </vpcSet>
+ </DescribeVpcClassicLinkResponse>
+ """
+
+ def test_vpc_update_classic_link_enabled(self):
+ self.vpc.classic_link_enabled = False
+ self.set_http_response(status_code=200)
+ self.vpc.update_classic_link_enabled(
+ dry_run=True,
+ validate=True
+ )
+ self.assert_request_parameters({
+ 'Action': 'DescribeVpcClassicLink',
+ 'VpcId.1': self.vpc_id,
+ 'DryRun': 'true'},
+ ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
+ 'SignatureVersion', 'Timestamp', 'Version'])
+ self.assertEqual(self.vpc.classic_link_enabled, 'true')
+
+
if __name__ == '__main__':
unittest.main()