summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDaniel G. Taylor <danielgtaylor@gmail.com>2013-12-19 16:12:26 -0800
committerDaniel G. Taylor <danielgtaylor@gmail.com>2013-12-19 16:12:26 -0800
commit87a45cd2b7b5557e22722da15f22c915ce2bec66 (patch)
tree6284f740019e5457f00721d34fc0f6e0002ad4ee
parentb98f85c8dcb5294b6764b87d60acc3344e18a631 (diff)
parentd24ca8c9c9a8468cfcad46d579f386d01b075837 (diff)
downloadboto-87a45cd2b7b5557e22722da15f22c915ce2bec66.tar.gz
Merge branch 'release-2.21.0'2.21.0
-rw-r--r--README.rst4
-rw-r--r--boto/__init__.py2
-rw-r--r--boto/auth.py232
-rw-r--r--boto/beanstalk/layer1.py58
-rw-r--r--boto/cloudformation/template.py4
-rw-r--r--boto/cloudfront/identity.py17
-rw-r--r--boto/cloudfront/signers.py13
-rw-r--r--boto/cloudtrail/layer1.py273
-rw-r--r--boto/dynamodb2/fields.py130
-rw-r--r--boto/dynamodb2/table.py126
-rw-r--r--boto/ec2/autoscale/__init__.py2
-rw-r--r--boto/ec2/connection.py8
-rw-r--r--boto/ec2/group.py7
-rw-r--r--boto/ec2/image.py6
-rw-r--r--boto/ec2/instance.py5
-rw-r--r--boto/ec2/snapshot.py5
-rw-r--r--boto/ec2/volume.py4
-rw-r--r--boto/elasticache/layer1.py2
-rw-r--r--boto/elastictranscoder/layer1.py46
-rw-r--r--boto/emr/connection.py57
-rw-r--r--boto/emr/emrobject.py4
-rw-r--r--boto/exception.py26
-rw-r--r--boto/mturk/connection.py2
-rw-r--r--boto/mturk/layoutparam.py6
-rw-r--r--boto/mturk/notification.py4
-rw-r--r--boto/mturk/price.py4
-rw-r--r--boto/mturk/qualification.py10
-rw-r--r--boto/mturk/question.py6
-rw-r--r--boto/opsworks/layer1.py549
-rw-r--r--boto/pyami/scriptbase.py4
-rw-r--r--boto/rds/__init__.py30
-rw-r--r--boto/s3/acl.py14
-rw-r--r--boto/s3/bucket.py122
-rw-r--r--boto/s3/bucketlistresultset.py49
-rw-r--r--boto/s3/bucketlogging.py6
-rw-r--r--boto/s3/connection.py4
-rw-r--r--boto/s3/deletemarker.py4
-rw-r--r--boto/s3/key.py17
-rw-r--r--boto/s3/multipart.py13
-rw-r--r--boto/s3/user.py6
-rw-r--r--boto/sdb/db/manager/xmlmanager.py18
-rw-r--r--boto/sdb/domain.py36
-rw-r--r--boto/sdb/queryresultset.py8
-rw-r--r--boto/services/result.py8
-rw-r--r--boto/services/submit.py4
-rw-r--r--boto/sqs/bigmessage.py119
-rw-r--r--boto/sqs/message.py18
-rw-r--r--boto/sqs/queue.py6
-rw-r--r--boto/support/layer1.py202
-rw-r--r--boto/swf/layer1_decisions.py2
-rw-r--r--docs/source/index.rst2
-rw-r--r--docs/source/ref/index.rst1
-rw-r--r--docs/source/ref/kinesis.rst26
-rw-r--r--docs/source/releasenotes/v2.21.0.rst32
-rw-r--r--tests/integration/dynamodb2/test_highlevel.py39
-rw-r--r--tests/integration/ec2/autoscale/test_connection.py18
-rw-r--r--tests/integration/opsworks/test_layer1.py4
-rw-r--r--tests/integration/sqs/test_bigmessage.py80
-rw-r--r--tests/unit/auth/test_sigv4.py204
-rw-r--r--tests/unit/beanstalk/test_layer1.py30
-rw-r--r--tests/unit/dynamodb2/test_table.py221
-rw-r--r--tests/unit/ec2/test_connection.py24
-rw-r--r--tests/unit/elasticache/test_api_interface.py4
-rw-r--r--tests/unit/emr/test_connection.py119
-rw-r--r--tests/unit/s3/test_bucket.py51
-rw-r--r--tests/unit/s3/test_connection.py50
-rw-r--r--tests/unit/sqs/test_message.py28
67 files changed, 2719 insertions, 516 deletions
diff --git a/README.rst b/README.rst
index b51d0759..08d51ab9 100644
--- a/README.rst
+++ b/README.rst
@@ -1,9 +1,9 @@
####
boto
####
-boto 2.20.1
+boto 2.21.0
-Released: 13-December-2013
+Released: 19-December-2013
.. image:: https://travis-ci.org/boto/boto.png?branch=develop
:target: https://travis-ci.org/boto/boto
diff --git a/boto/__init__.py b/boto/__init__.py
index c145cf5d..344306f6 100644
--- a/boto/__init__.py
+++ b/boto/__init__.py
@@ -36,7 +36,7 @@ import logging.config
import urlparse
from boto.exception import InvalidUriError
-__version__ = '2.20.1'
+__version__ = '2.21.0'
Version = __version__ # for backware compatibility
UserAgent = 'Boto/%s Python/%s %s/%s' % (
diff --git a/boto/auth.py b/boto/auth.py
index 0d4221d6..2192bc47 100644
--- a/boto/auth.py
+++ b/boto/auth.py
@@ -39,35 +39,15 @@ import hmac
import sys
import time
import urllib
+import urlparse
import posixpath
from boto.auth_handler import AuthHandler
from boto.exception import BotoClientError
-#
-# the following is necessary because of the incompatibilities
-# between Python 2.4, 2.5, and 2.6 as well as the fact that some
-# people running 2.4 have installed hashlib as a separate module
-# this fix was provided by boto user mccormix.
-# see: http://code.google.com/p/boto/issues/detail?id=172
-# for more details.
-#
+
try:
from hashlib import sha1 as sha
from hashlib import sha256 as sha256
-
- if sys.version[:3] == "2.4":
- # we are using an hmac that expects a .new() method.
- class Faker:
- def __init__(self, which):
- self.which = which
- self.digest_size = self.which().digest_size
-
- def new(self, *args, **kwargs):
- return self.which(*args, **kwargs)
-
- sha = Faker(sha)
- sha256 = Faker(sha256)
-
except ImportError:
import sha
sha256 = None
@@ -373,10 +353,15 @@ class HmacAuthV4Handler(AuthHandler, HmacKeys):
case, sorting them in alphabetical order and then joining
them into a string, separated by newlines.
"""
- l = sorted(['%s:%s' % (n.lower().strip(),
- ' '.join(headers_to_sign[n].strip().split()))
- for n in headers_to_sign])
- return '\n'.join(l)
+ canonical = []
+
+ for header in headers_to_sign:
+ c_name = header.lower().strip()
+ raw_value = headers_to_sign[header]
+ c_value = ' '.join(raw_value.strip().split())
+ canonical.append('%s:%s' % (c_name, c_value))
+
+ return '\n'.join(sorted(canonical))
def signed_headers(self, headers_to_sign):
l = ['%s' % n.lower().strip() for n in headers_to_sign]
@@ -421,14 +406,11 @@ class HmacAuthV4Handler(AuthHandler, HmacKeys):
scope.append('aws4_request')
return '/'.join(scope)
- def credential_scope(self, http_request):
- scope = []
- http_request.timestamp = http_request.headers['X-Amz-Date'][0:8]
- scope.append(http_request.timestamp)
- # The service_name and region_name either come from:
- # * The service_name/region_name attrs or (if these values are None)
- # * parsed from the endpoint <service>.<region>.amazonaws.com.
- parts = http_request.host.split('.')
+ def split_host_parts(self, host):
+ return host.split('.')
+
+ def determine_region_name(self, host):
+ parts = self.split_host_parts(host)
if self.region_name is not None:
region_name = self.region_name
elif len(parts) > 1:
@@ -442,11 +424,25 @@ class HmacAuthV4Handler(AuthHandler, HmacKeys):
else:
region_name = parts[0]
+ return region_name
+
+ def determine_service_name(self, host):
+ parts = self.split_host_parts(host)
if self.service_name is not None:
service_name = self.service_name
else:
service_name = parts[0]
+ return service_name
+ def credential_scope(self, http_request):
+ scope = []
+ http_request.timestamp = http_request.headers['X-Amz-Date'][0:8]
+ scope.append(http_request.timestamp)
+ # The service_name and region_name either come from:
+ # * The service_name/region_name attrs or (if these values are None)
+ # * parsed from the endpoint <service>.<region>.amazonaws.com.
+ region_name = self.determine_region_name(http_request.host)
+ service_name = self.determine_service_name(http_request.host)
http_request.service_name = service_name
http_request.region_name = region_name
@@ -516,6 +512,153 @@ class HmacAuthV4Handler(AuthHandler, HmacKeys):
req.headers['Authorization'] = ','.join(l)
+class S3HmacAuthV4Handler(HmacAuthV4Handler, AuthHandler):
+ """
+ Implements a variant of Version 4 HMAC authorization specific to S3.
+ """
+ capability = ['hmac-v4-s3']
+
+ def __init__(self, *args, **kwargs):
+ super(S3HmacAuthV4Handler, self).__init__(*args, **kwargs)
+
+ if self.region_name:
+ self.region_name = self.clean_region_name(self.region_name)
+
+ def clean_region_name(self, region_name):
+ if region_name.startswith('s3-'):
+ return region_name[3:]
+
+ return region_name
+
+ def canonical_uri(self, http_request):
+ # S3 does **NOT** do path normalization that SigV4 typically does.
+ # Urlencode the path, **NOT** ``auth_path`` (because vhosting).
+ path = urlparse.urlparse(http_request.path)
+ encoded = urllib.quote(path.path)
+ return encoded
+
+ def host_header(self, host, http_request):
+ port = http_request.port
+ secure = http_request.protocol == 'https'
+ if ((port == 80 and not secure) or (port == 443 and secure)):
+ return http_request.host
+ return '%s:%s' % (http_request.host, port)
+
+ def headers_to_sign(self, http_request):
+ """
+ Select the headers from the request that need to be included
+ in the StringToSign.
+ """
+ host_header_value = self.host_header(self.host, http_request)
+ headers_to_sign = {}
+ headers_to_sign = {'Host': host_header_value}
+ for name, value in http_request.headers.items():
+ lname = name.lower()
+ # Hooray for the only difference! The main SigV4 signer only does
+ # ``Host`` + ``x-amz-*``. But S3 wants pretty much everything
+ # signed, except for authorization itself.
+ if not lname in ['authorization']:
+ headers_to_sign[name] = value
+ return headers_to_sign
+
+ def determine_region_name(self, host):
+ # S3's different format(s) of representing region/service from the
+ # rest of AWS makes this hurt too.
+ #
+ # Possible domain formats:
+ # - s3.amazonaws.com (Classic)
+ # - s3-us-west-2.amazonaws.com (Specific region)
+ # - bukkit.s3.amazonaws.com (Vhosted Classic)
+ # - bukkit.s3-ap-northeast-1.amazonaws.com (Vhosted specific region)
+        #   - s3.cn-north-1.amazonaws.com.cn - (Beijing region)
+        #   - bukkit.s3.cn-north-1.amazonaws.com.cn - (Vhosted Beijing region)
+ parts = self.split_host_parts(host)
+
+ if self.region_name is not None:
+ region_name = self.region_name
+ else:
+ # Classic URLs - s3-us-west-2.amazonaws.com
+ if len(parts) == 3:
+ region_name = self.clean_region_name(parts[0])
+
+ # Special-case for Classic.
+ if region_name == 's3':
+ region_name = 'us-east-1'
+ else:
+ # Iterate over the parts in reverse order.
+ for offset, part in enumerate(reversed(parts)):
+ part = part.lower()
+
+ # Look for the first thing starting with 's3'.
+ # Until there's a ``.s3`` TLD, we should be OK. :P
+ if part == 's3':
+ # If it's by itself, the region is the previous part.
+ region_name = parts[-offset]
+ break
+ elif part.startswith('s3-'):
+ region_name = self.clean_region_name(part)
+ break
+
+ return region_name
+
+ def determine_service_name(self, host):
+ # Should this signing mechanism ever be used for anything else, this
+ # will fail. Consider utilizing the logic from the parent class should
+ # you find yourself here.
+ return 's3'
+
+ def mangle_path_and_params(self, req):
+ """
+ Returns a copy of the request object with fixed ``auth_path/params``
+ attributes from the original.
+ """
+ modified_req = copy.copy(req)
+
+ # Unlike the most other services, in S3, ``req.params`` isn't the only
+ # source of query string parameters.
+ # Because of the ``query_args``, we may already have a query string
+ # **ON** the ``path/auth_path``.
+ # Rip them apart, so the ``auth_path/params`` can be signed
+ # appropriately.
+ parsed_path = urlparse.urlparse(modified_req.auth_path)
+ modified_req.auth_path = parsed_path.path
+
+ if modified_req.params is None:
+ modified_req.params = {}
+
+ raw_qs = parsed_path.query
+ existing_qs = urlparse.parse_qs(
+ raw_qs,
+ keep_blank_values=True
+ )
+
+ # ``parse_qs`` will return lists. Don't do that unless there's a real,
+ # live list provided.
+ for key, value in existing_qs.items():
+ if isinstance(value, (list, tuple)):
+ if len(value) == 1:
+ existing_qs[key] = value[0]
+
+ modified_req.params.update(existing_qs)
+ return modified_req
+
+ def payload(self, http_request):
+ if http_request.headers.get('x-amz-content-sha256'):
+ return http_request.headers['x-amz-content-sha256']
+
+ return super(S3HmacAuthV4Handler, self).payload(http_request)
+
+ def add_auth(self, req, **kwargs):
+ if not 'x-amz-content-sha256' in req.headers:
+ if '_sha256' in req.headers:
+ req.headers['x-amz-content-sha256'] = req.headers.pop('_sha256')
+ else:
+ req.headers['x-amz-content-sha256'] = self.payload(req)
+
+ req = self.mangle_path_and_params(req)
+ return super(S3HmacAuthV4Handler, self).add_auth(req, **kwargs)
+
+
class QueryAuthHandler(AuthHandler):
"""
Provides pure query construction (no actual signing).
@@ -742,3 +885,24 @@ def get_auth_handler(host, config, provider, requested_capability=None):
# user could override this with a .boto config that includes user-specific
# credentials (for access to user data).
return ready_handlers[-1]
+
+
+def detect_potential_sigv4(func):
+ def _wrapper(self):
+ if hasattr(self, 'region'):
+ if getattr(self.region, 'endpoint', ''):
+ if '.cn-' in self.region.endpoint:
+ return ['hmac-v4']
+
+ return func(self)
+ return _wrapper
+
+
+def detect_potential_s3sigv4(func):
+ def _wrapper(self):
+ if hasattr(self, 'host'):
+ if '.cn-' in self.host:
+ return ['hmac-v4-s3']
+
+ return func(self)
+ return _wrapper
diff --git a/boto/beanstalk/layer1.py b/boto/beanstalk/layer1.py
index 09612f14..fc8ca83f 100644
--- a/boto/beanstalk/layer1.py
+++ b/boto/beanstalk/layer1.py
@@ -237,7 +237,8 @@ class Layer1(AWSQueryConnection):
version_label=None, template_name=None,
solution_stack_name=None, cname_prefix=None,
description=None, option_settings=None,
- options_to_remove=None):
+ options_to_remove=None, tier_name=None,
+ tier_type=None, tier_version='1.0'):
"""Launches an environment for the application using a configuration.
:type application_name: string
@@ -308,6 +309,25 @@ class Layer1(AWSQueryConnection):
options to remove from the configuration set for this new
environment.
+ :type tier_name: string
+ :param tier_name: The name of the tier. Valid values are
+ "WebServer" and "Worker". Defaults to "WebServer".
+ The ``tier_name`` and a ``tier_type`` parameters are
+ related and the values provided must be valid.
+ The possible combinations are:
+
+ * "WebServer" and "Standard" (the default)
+ * "Worker" and "SQS/HTTP"
+
+ :type tier_type: string
+ :param tier_type: The type of the tier. Valid values are
+ "Standard" if ``tier_name`` is "WebServer" and "SQS/HTTP"
+ if ``tier_name`` is "Worker". Defaults to "Standard".
+
+        :type tier_version: string
+        :param tier_version: The version of the tier. Valid values
+            currently are "1.0". Defaults to "1.0".
+
:raises: TooManyEnvironmentsException, InsufficientPrivilegesException
"""
@@ -330,6 +350,10 @@ class Layer1(AWSQueryConnection):
if options_to_remove:
self.build_list_params(params, options_to_remove,
'OptionsToRemove.member')
+ if tier_name and tier_type and tier_version:
+ params['Tier.member.Name'] = tier_name
+ params['Tier.member.Type'] = tier_type
+ params['Tier.member.Version'] = tier_version
return self._get_response('CreateEnvironment', params)
def create_storage_location(self):
@@ -848,9 +872,9 @@ class Layer1(AWSQueryConnection):
return self._get_response('RetrieveEnvironmentInfo', params)
def swap_environment_cnames(self, source_environment_id=None,
- source_environment_name=None,
- destination_environment_id=None,
- destination_environment_name=None):
+ source_environment_name=None,
+ destination_environment_id=None,
+ destination_environment_name=None):
"""Swaps the CNAMEs of two environments.
:type source_environment_id: string
@@ -1021,7 +1045,8 @@ class Layer1(AWSQueryConnection):
def update_environment(self, environment_id=None, environment_name=None,
version_label=None, template_name=None,
description=None, option_settings=None,
- options_to_remove=None):
+ options_to_remove=None, tier_name=None,
+ tier_type=None, tier_version='1.0'):
"""
Updates the environment description, deploys a new application
version, updates the configuration settings to an entirely new
@@ -1073,6 +1098,25 @@ class Layer1(AWSQueryConnection):
:param options_to_remove: A list of custom user-defined configuration
options to remove from the configuration set for this environment.
+ :type tier_name: string
+ :param tier_name: The name of the tier. Valid values are
+ "WebServer" and "Worker". Defaults to "WebServer".
+ The ``tier_name`` and a ``tier_type`` parameters are
+ related and the values provided must be valid.
+ The possible combinations are:
+
+ * "WebServer" and "Standard" (the default)
+ * "Worker" and "SQS/HTTP"
+
+ :type tier_type: string
+ :param tier_type: The type of the tier. Valid values are
+ "Standard" if ``tier_name`` is "WebServer" and "SQS/HTTP"
+ if ``tier_name`` is "Worker". Defaults to "Standard".
+
+        :type tier_version: string
+        :param tier_version: The version of the tier. Valid values
+            currently are "1.0". Defaults to "1.0".
+
:raises: InsufficientPrivilegesException
"""
params = {}
@@ -1093,6 +1137,10 @@ class Layer1(AWSQueryConnection):
if options_to_remove:
self.build_list_params(params, options_to_remove,
'OptionsToRemove.member')
+ if tier_name and tier_type and tier_version:
+ params['Tier.member.Name'] = tier_name
+ params['Tier.member.Type'] = tier_type
+ params['Tier.member.Version'] = tier_version
return self._get_response('UpdateEnvironment', params)
def validate_configuration_settings(self, application_name,
diff --git a/boto/cloudformation/template.py b/boto/cloudformation/template.py
index f1f85018..762efce5 100644
--- a/boto/cloudformation/template.py
+++ b/boto/cloudformation/template.py
@@ -1,6 +1,6 @@
from boto.resultset import ResultSet
-class Template:
+class Template(object):
def __init__(self, connection=None):
self.connection = connection
self.description = None
@@ -19,7 +19,7 @@ class Template:
else:
setattr(self, name, value)
-class TemplateParameter:
+class TemplateParameter(object):
def __init__(self, parent):
self.parent = parent
self.default_value = None
diff --git a/boto/cloudfront/identity.py b/boto/cloudfront/identity.py
index 1571e87a..123773d1 100644
--- a/boto/cloudfront/identity.py
+++ b/boto/cloudfront/identity.py
@@ -14,15 +14,14 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import uuid
-class OriginAccessIdentity:
-
+class OriginAccessIdentity(object):
def __init__(self, connection=None, config=None, id='',
s3_user_id='', comment=''):
self.connection = connection
@@ -31,7 +30,7 @@ class OriginAccessIdentity:
self.s3_user_id = s3_user_id
self.comment = comment
self.etag = None
-
+
def startElement(self, name, attrs, connection):
if name == 'CloudFrontOriginAccessIdentityConfig':
self.config = OriginAccessIdentityConfig()
@@ -63,9 +62,9 @@ class OriginAccessIdentity:
def uri(self):
return 'origin-access-identity/cloudfront/%s' % self.id
-
-class OriginAccessIdentityConfig:
+
+class OriginAccessIdentityConfig(object):
def __init__(self, connection=None, caller_reference='', comment=''):
self.connection = connection
if caller_reference:
@@ -94,8 +93,8 @@ class OriginAccessIdentityConfig:
else:
setattr(self, name, value)
-class OriginAccessIdentitySummary:
+class OriginAccessIdentitySummary(object):
def __init__(self, connection=None, id='',
s3_user_id='', comment=''):
self.connection = connection
@@ -103,7 +102,7 @@ class OriginAccessIdentitySummary:
self.s3_user_id = s3_user_id
self.comment = comment
self.etag = None
-
+
def startElement(self, name, attrs, connection):
return None
@@ -119,4 +118,4 @@ class OriginAccessIdentitySummary:
def get_origin_access_identity(self):
return self.connection.get_origin_access_identity_info(self.id)
-
+
diff --git a/boto/cloudfront/signers.py b/boto/cloudfront/signers.py
index 0b0cd50a..dcc9fc9e 100644
--- a/boto/cloudfront/signers.py
+++ b/boto/cloudfront/signers.py
@@ -14,17 +14,16 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
-class Signer:
-
+class Signer(object):
def __init__(self):
self.id = None
self.key_pair_ids = []
-
+
def startElement(self, name, attrs, connection):
return None
@@ -35,9 +34,9 @@ class Signer:
self.id = value
elif name == 'KeyPairId':
self.key_pair_ids.append(value)
-
-class ActiveTrustedSigners(list):
+
+class ActiveTrustedSigners(list):
def startElement(self, name, attrs, connection):
if name == 'Signer':
s = Signer()
@@ -47,8 +46,8 @@ class ActiveTrustedSigners(list):
def endElement(self, name, value, connection):
pass
-class TrustedSigners(list):
+class TrustedSigners(list):
def startElement(self, name, attrs, connection):
return None
diff --git a/boto/cloudtrail/layer1.py b/boto/cloudtrail/layer1.py
index 1ee41d4a..026bdab3 100644
--- a/boto/cloudtrail/layer1.py
+++ b/boto/cloudtrail/layer1.py
@@ -42,8 +42,8 @@ class CloudTrailConnection(AWSQueryConnection):
CloudTrail is a web service that records AWS API calls for your
AWS account and delivers log files to an Amazon S3 bucket. The
recorded information includes the identity of the user, the start
- time of the event, the source IP address, the request parameters,
- and the response elements returned by the service.
+ time of the AWS API call, the source IP address, the request
+ parameters, and the response elements returned by the service.
As an alternative to using the API, you can use one of the AWS
SDKs, which consist of libraries and sample code for various
@@ -52,11 +52,11 @@ class CloudTrailConnection(AWSQueryConnection):
programmatic access to AWSCloudTrail. For example, the SDKs take
care of cryptographically signing requests, managing errors, and
retrying requests automatically. For information about the AWS
- SDKs, including how to download and install them, see the Tools
- for Amazon Web Services page.
+ SDKs, including how to download and install them, see the `Tools
+ for Amazon Web Services page`_.
See the CloudTrail User Guide for information about the data that
- is included with each event listed in the log files.
+ is included with each AWS API call listed in the log files.
"""
APIVersion = "2013-11-01"
DefaultRegionName = "us-east-1"
@@ -71,10 +71,9 @@ class CloudTrailConnection(AWSQueryConnection):
"TrailAlreadyExistsException": exceptions.TrailAlreadyExistsException,
"InsufficientSnsTopicPolicyException": exceptions.InsufficientSnsTopicPolicyException,
"InvalidTrailNameException": exceptions.InvalidTrailNameException,
- "InternalErrorException": exceptions.InternalErrorException,
+ "TrailNotProvidedException": exceptions.TrailNotProvidedException,
"TrailNotFoundException": exceptions.TrailNotFoundException,
"S3BucketDoesNotExistException": exceptions.S3BucketDoesNotExistException,
- "TrailNotProvidedException": exceptions.TrailNotProvidedException,
"InvalidS3PrefixException": exceptions.InvalidS3PrefixException,
"MaximumNumberOfTrailsExceededException": exceptions.MaximumNumberOfTrailsExceededException,
"InsufficientS3BucketPolicyException": exceptions.InsufficientS3BucketPolicyException,
@@ -96,69 +95,65 @@ class CloudTrailConnection(AWSQueryConnection):
def _required_auth_capability(self):
return ['hmac-v4']
- def create_trail(self, trail=None):
+ def create_trail(self, name=None, s3_bucket_name=None,
+ s3_key_prefix=None, sns_topic_name=None,
+ include_global_service_events=None, trail=None):
"""
- From the command line, use create-subscription.
+ From the command line, use `create-subscription`.
Creates a trail that specifies the settings for delivery of
- log data to an Amazon S3 bucket. The request includes a Trail
- structure that specifies the following:
-
-
- + Trail name.
- + The name of the Amazon S3 bucket to which CloudTrail
- delivers your log files.
- + The name of the Amazon S3 key prefix that precedes each log
- file.
- + The name of the Amazon SNS topic that notifies you that a
- new file is available in your bucket.
- + Whether the log file should include events from global
- services. Currently, the only events included in CloudTrail
- log files are from IAM and AWS STS.
-
-
- Returns the appropriate HTTP status code if successful. If
- not, it returns either one of the CommonErrors or a
- FrontEndException with one of the following error codes:
-
- **MaximumNumberOfTrailsExceeded**
-
- An attempt was made to create more trails than allowed. You
- can only create one trail for each account in each region.
-
- **TrailAlreadyExists**
+ log data to an Amazon S3 bucket.
- An attempt was made to create a trail with a name that already
- exists.
+ Support for passing Trail as a parameter ends as early as
+ February 25, 2014. The request and response examples in this
+ topic show the use of parameters as well as a Trail object.
+ Until Trail is removed, you can use either Trail or the
+ parameter list.
- **S3BucketDoesNotExist**
-
- Specified Amazon S3 bucket does not exist.
+ :type name: string
+ :param name: Specifies the name of the trail.
- **InsufficientS3BucketPolicy**
+ :type s3_bucket_name: string
+ :param s3_bucket_name: Specifies the name of the Amazon S3 bucket
+ designated for publishing log files.
- Policy on Amazon S3 bucket does not permit CloudTrail to write
- to your bucket. See the AWS CloudTrail User Guide for the
- required bucket policy.
+ :type s3_key_prefix: string
+ :param s3_key_prefix: Specifies the Amazon S3 key prefix that precedes
+ the name of the bucket you have designated for log file delivery.
- **InsufficientSnsTopicPolicy**
+ :type sns_topic_name: string
+ :param sns_topic_name: Specifies the name of the Amazon SNS topic
+ defined for notification of log file delivery.
- The policy on Amazon SNS topic does not permit CloudTrail to
- write to it. Can also occur when an Amazon SNS topic does not
- exist.
+ :type include_global_service_events: boolean
+ :param include_global_service_events: Specifies whether the trail is
+ publishing events from global services such as IAM to the log
+ files.
:type trail: dict
- :param trail: Contains the Trail structure that specifies the settings
- for each trail.
+ :param trail: Support for passing a Trail object in the CreateTrail or
+ UpdateTrail actions will end as early as February 15, 2014. Instead
+ of the Trail object and its members, use the parameters listed for
+ these actions.
"""
params = {}
+ if name is not None:
+ params['Name'] = name
+ if s3_bucket_name is not None:
+ params['S3BucketName'] = s3_bucket_name
+ if s3_key_prefix is not None:
+ params['S3KeyPrefix'] = s3_key_prefix
+ if sns_topic_name is not None:
+ params['SnsTopicName'] = sns_topic_name
+ if include_global_service_events is not None:
+ params['IncludeGlobalServiceEvents'] = include_global_service_events
if trail is not None:
params['trail'] = trail
return self.make_request(action='CreateTrail',
body=json.dumps(params))
- def delete_trail(self, name=None):
+ def delete_trail(self, name):
"""
Deletes a trail.
@@ -166,19 +161,17 @@ class CloudTrailConnection(AWSQueryConnection):
:param name: The name of a trail to be deleted.
"""
- params = {}
- if name is not None:
- params['Name'] = name
+ params = {'Name': name, }
return self.make_request(action='DeleteTrail',
body=json.dumps(params))
def describe_trails(self, trail_name_list=None):
"""
Retrieves the settings for some or all trails associated with
- an account. Returns a list of Trail structures in JSON format.
+ an account.
:type trail_name_list: list
- :param trail_name_list: The list of Trail object names.
+ :param trail_name_list: The list of trails.
"""
params = {}
@@ -187,97 +180,153 @@ class CloudTrailConnection(AWSQueryConnection):
return self.make_request(action='DescribeTrails',
body=json.dumps(params))
- def get_trail_status(self, name=None):
+ def get_trail_status(self, name):
"""
- Returns GetTrailStatusResult, which contains a JSON-formatted
- list of information about the trail specified in the request.
- JSON fields include information such as delivery errors,
- Amazon SNS and Amazon S3 errors, and times that logging
- started and stopped for each trail.
+ Returns a JSON-formatted list of information about the
+ specified trail. Fields include information on delivery
+ errors, Amazon SNS and Amazon S3 errors, and start and stop
+ logging times for each trail.
+
+ The CloudTrail API is currently undergoing revision. This
+ action currently returns both new fields and fields slated for
+ removal from the API. The following lists indicate the plans
+ for each field:
+
+ **List of Members Planned for Ongoing Support**
+
+
+ + IsLogging
+ + LatestDeliveryTime
+ + LatestNotificationTime
+ + StartLoggingTime
+ + StopLoggingTime
+ + LatestNotificationError
+ + LatestDeliveryError
+
+
+ **List of Members Scheduled for Removal**
+
+
+ + **LatestDeliveryAttemptTime**: Use LatestDeliveryTime
+ instead.
+ + **LatestNotificationAttemptTime**: Use
+ LatestNotificationTime instead.
+ + **LatestDeliveryAttemptSucceeded**: No replacement. See the
+ note following this list.
+ + **LatestNotificationAttemptSucceeded**: No replacement. See
+ the note following this list.
+ + **TimeLoggingStarted**: Use StartLoggingTime instead.
+ + **TimeLoggingStopped**: Use StopLoggingtime instead.
+
+
+ No replacements have been created for
+ LatestDeliveryAttemptSucceeded and
+ LatestNotificationAttemptSucceeded . Use LatestDeliveryError
+ and LatestNotificationError to evaluate success or failure of
+ log delivery or notification. Empty values returned for these
+ fields indicate success. An error in LatestDeliveryError
+ generally indicates either a missing bucket or insufficient
+ permissions to write to the bucket. Similarly, an error in
+ LatestNotificationError indicates either a missing topic or
+ insufficient permissions.
:type name: string
:param name: The name of the trail for which you are requesting the
current status.
"""
- params = {}
- if name is not None:
- params['Name'] = name
+ params = {'Name': name, }
return self.make_request(action='GetTrailStatus',
body=json.dumps(params))
- def start_logging(self, name=None):
+ def start_logging(self, name):
"""
- Starts the processing of recording user activity events and
- log file delivery for a trail.
+ Starts the recording of AWS API calls and log file delivery
+ for a trail.
:type name: string
- :param name: The name of the Trail for which CloudTrail logs events.
+ :param name: The name of the trail for which CloudTrail logs AWS API
+ calls.
"""
- params = {}
- if name is not None:
- params['Name'] = name
+ params = {'Name': name, }
return self.make_request(action='StartLogging',
body=json.dumps(params))
- def stop_logging(self, name=None):
+ def stop_logging(self, name):
"""
- Suspends the recording of user activity events and log file
- delivery for the specified trail. Under most circumstances,
- there is no need to use this action. You can update a trail
- without stopping it first. This action is the only way to stop
- logging activity.
+ Suspends the recording of AWS API calls and log file delivery
+ for the specified trail. Under most circumstances, there is no
+ need to use this action. You can update a trail without
+ stopping it first. This action is the only way to stop
+ recording.
:type name: string
- :param name: Communicates to CloudTrail the name of the Trail for which
- to stop logging events.
+ :param name: Communicates to CloudTrail the name of the trail for which
+ to stop logging AWS API calls.
"""
- params = {}
- if name is not None:
- params['Name'] = name
+ params = {'Name': name, }
return self.make_request(action='StopLogging',
body=json.dumps(params))
- def update_trail(self, trail=None):
+ def update_trail(self, name=None, s3_bucket_name=None,
+ s3_key_prefix=None, sns_topic_name=None,
+ include_global_service_events=None, trail=None):
"""
- From the command line, use update-subscription.
+ From the command line, use `update-subscription`.
Updates the settings that specify delivery of log files.
Changes to a trail do not require stopping the CloudTrail
- service. You can use this action to designate an existing
- bucket for log delivery, or to create a new bucket and prefix.
- If the existing bucket has previously been a target for
- CloudTrail log files, an IAM policy exists for the bucket. If
- you create a new bucket using UpdateTrail, you need to apply
- the policy to the bucket using one of the means provided by
- the Amazon S3 service.
-
- The request includes a Trail structure that specifies the
- following:
-
-
- + Trail name.
- + The name of the Amazon S3 bucket to which CloudTrail
- delivers your log files.
- + The name of the Amazon S3 key prefix that precedes each log
- file.
- + The name of the Amazon SNS topic that notifies you that a
- new file is available in your bucket.
- + Whether the log file should include events from global
- services, such as IAM or AWS STS.
-
- **CreateTrail** returns the appropriate HTTP status code if
- successful. If not, it returns either one of the common errors
- or one of the exceptions listed at the end of this page.
+ service. Use this action to designate an existing bucket for
+ log delivery. If the existing bucket has previously been a
+ target for CloudTrail log files, an IAM policy exists for the
+ bucket.
+
+ Support for passing Trail as a parameter ends as early as
+ February 25, 2014. The request and response examples in this
+ topic show the use of parameters as well as a Trail object.
+ Until Trail is removed, you can use either Trail or the
+ parameter list.
+
+ :type name: string
+ :param name: Specifies the name of the trail.
+
+ :type s3_bucket_name: string
+ :param s3_bucket_name: Specifies the name of the Amazon S3 bucket
+ designated for publishing log files.
+
+ :type s3_key_prefix: string
+ :param s3_key_prefix: Specifies the Amazon S3 key prefix that precedes
+ the name of the bucket you have designated for log file delivery.
+
+ :type sns_topic_name: string
+ :param sns_topic_name: Specifies the name of the Amazon SNS topic
+ defined for notification of log file delivery.
+
+ :type include_global_service_events: boolean
+ :param include_global_service_events: Specifies whether the trail is
+ publishing events from global services such as IAM to the log
+ files.
:type trail: dict
- :param trail: Represents the Trail structure that contains the
- CloudTrail setting for an account.
+ :param trail: Support for passing a Trail object in the CreateTrail or
+ UpdateTrail actions will end as early as February 15, 2014. Instead
+ of the Trail object and its members, use the parameters listed for
+ these actions.
"""
params = {}
+ if name is not None:
+ params['Name'] = name
+ if s3_bucket_name is not None:
+ params['S3BucketName'] = s3_bucket_name
+ if s3_key_prefix is not None:
+ params['S3KeyPrefix'] = s3_key_prefix
+ if sns_topic_name is not None:
+ params['SnsTopicName'] = sns_topic_name
+ if include_global_service_events is not None:
+ params['IncludeGlobalServiceEvents'] = include_global_service_events
if trail is not None:
params['trail'] = trail
return self.make_request(action='UpdateTrail',
diff --git a/boto/dynamodb2/fields.py b/boto/dynamodb2/fields.py
index 25abffd4..911a11b5 100644
--- a/boto/dynamodb2/fields.py
+++ b/boto/dynamodb2/fields.py
@@ -91,10 +91,10 @@ class RangeKey(BaseSchemaField):
class BaseIndexField(object):
"""
- An abstract class for defining schema fields.
+ An abstract class for defining schema indexes.
- Contains most of the core functionality for the field. Subclasses must
- define an ``attr_type`` to pass to DynamoDB.
+ Contains most of the core functionality for the index. Subclasses must
+ define a ``projection_type`` to pass to DynamoDB.
"""
def __init__(self, name, parts):
self.name = name
@@ -139,7 +139,7 @@ class BaseIndexField(object):
},
],
'Projection': {
- 'ProjectionType': 'KEYS_ONLY,
+ 'ProjectionType': 'KEYS_ONLY',
}
}
@@ -210,3 +210,125 @@ class IncludeIndex(BaseIndexField):
schema_data = super(IncludeIndex, self).schema()
schema_data['Projection']['NonKeyAttributes'] = self.includes_fields
return schema_data
+
+
+class GlobalBaseIndexField(BaseIndexField):
+ """
+ An abstract class for defining global indexes.
+
+ Contains most of the core functionality for the index. Subclasses must
+ define a ``projection_type`` to pass to DynamoDB.
+ """
+ throughput = {
+ 'read': 5,
+ 'write': 5,
+ }
+
+ def __init__(self, *args, **kwargs):
+ throughput = kwargs.pop('throughput', None)
+
+ if throughput is not None:
+ self.throughput = throughput
+
+ super(GlobalBaseIndexField, self).__init__(*args, **kwargs)
+
+ def schema(self):
+ """
+ Returns the schema structure DynamoDB expects.
+
+ Example::
+
+ >>> index.schema()
+ {
+ 'IndexName': 'LastNameIndex',
+ 'KeySchema': [
+ {
+ 'AttributeName': 'username',
+ 'KeyType': 'HASH',
+ },
+ ],
+ 'Projection': {
+ 'ProjectionType': 'KEYS_ONLY',
+ },
+ 'ProvisionedThroughput': {
+ 'ReadCapacityUnits': 5,
+ 'WriteCapacityUnits': 5
+ }
+ }
+
+ """
+ schema_data = super(GlobalBaseIndexField, self).schema()
+ schema_data['ProvisionedThroughput'] = {
+ 'ReadCapacityUnits': int(self.throughput['read']),
+ 'WriteCapacityUnits': int(self.throughput['write']),
+ }
+ return schema_data
+
+
+class GlobalAllIndex(GlobalBaseIndexField):
+ """
+ An index signifying all fields should be in the index.
+
+ Example::
+
+ >>> GlobalAllIndex('MostRecentlyJoined', parts=[
+ ... HashKey('username'),
+ ... RangeKey('date_joined')
+ ... ],
+ ... throughput={
+ ... 'read': 2,
+ ... 'write': 1,
+ ... })
+
+ """
+ projection_type = 'ALL'
+
+
+class GlobalKeysOnlyIndex(GlobalBaseIndexField):
+ """
+ An index signifying only key fields should be in the index.
+
+ Example::
+
+ >>> GlobalKeysOnlyIndex('MostRecentlyJoined', parts=[
+ ... HashKey('username'),
+ ... RangeKey('date_joined')
+ ... ],
+ ... throughput={
+ ... 'read': 2,
+ ... 'write': 1,
+ ... })
+
+ """
+ projection_type = 'KEYS_ONLY'
+
+
+class GlobalIncludeIndex(GlobalBaseIndexField, IncludeIndex):
+ """
+ An index signifying only certain fields should be in the index.
+
+ Example::
+
+ >>> GlobalIncludeIndex('GenderIndex', parts=[
+ ... HashKey('username'),
+ ... RangeKey('date_joined')
+ ... ],
+ ... includes=['gender'],
+ ... throughput={
+ ... 'read': 2,
+ ... 'write': 1,
+ ... })
+
+ """
+ projection_type = 'INCLUDE'
+
+ def __init__(self, *args, **kwargs):
+ IncludeIndex.__init__(self, *args, **kwargs)
+ GlobalBaseIndexField.__init__(self, *args, **kwargs)
+
+ def schema(self):
+ # Pick up the includes.
+ schema_data = IncludeIndex.schema(self)
+ # Also the throughput.
+ schema_data.update(GlobalBaseIndexField.schema(self))
+ return schema_data \ No newline at end of file
diff --git a/boto/dynamodb2/table.py b/boto/dynamodb2/table.py
index 930edeeb..78cc121b 100644
--- a/boto/dynamodb2/table.py
+++ b/boto/dynamodb2/table.py
@@ -1,7 +1,9 @@
import boto
from boto.dynamodb2 import exceptions
from boto.dynamodb2.fields import (HashKey, RangeKey,
- AllIndex, KeysOnlyIndex, IncludeIndex)
+ AllIndex, KeysOnlyIndex, IncludeIndex,
+ GlobalAllIndex, GlobalKeysOnlyIndex,
+ GlobalIncludeIndex)
from boto.dynamodb2.items import Item
from boto.dynamodb2.layer1 import DynamoDBConnection
from boto.dynamodb2.results import ResultSet, BatchGetResultSet
@@ -21,7 +23,7 @@ class Table(object):
max_batch_get = 100
def __init__(self, table_name, schema=None, throughput=None, indexes=None,
- connection=None):
+ global_indexes=None, connection=None):
"""
Sets up a new in-memory ``Table``.
@@ -48,6 +50,10 @@ class Table(object):
Optionally accepts a ``indexes`` parameter, which should be a list of
``BaseIndexField`` subclasses representing the desired indexes.
+ Optionally accepts a ``global_indexes`` parameter, which should be a
+ list of ``GlobalBaseIndexField`` subclasses representing the desired
+ indexes.
+
Optionally accepts a ``connection`` parameter, which should be a
``DynamoDBConnection`` instance (or subclass). This is primarily useful
for specifying alternate connection parameters.
@@ -67,13 +73,22 @@ class Table(object):
... 'write': 10,
... }, indexes=[
... KeysOnlyIndex('MostRecentlyJoined', parts=[
+ ... HashKey('username'),
... RangeKey('date_joined')
... ]),
- ... ],
- ... connection=dynamodb2.connect_to_region('us-west-2',
- ... aws_access_key_id='key',
- ... aws_secret_access_key='key',
- ... ))
+ ... ], global_indexes=[
+ ... GlobalAllIndex('UsersByZipcode', parts=[
+ ... HashKey('zipcode'),
+ ... RangeKey('username'),
+ ... ],
+ ... throughput={
+ ... 'read':10,
+ ... 'write': 10,
+ ... }),
+ ... ], connection=dynamodb2.connect_to_region('us-west-2',
+ ... aws_access_key_id='key',
+ ... aws_secret_access_key='key',
+ ... ))
"""
self.table_name = table_name
@@ -84,6 +99,7 @@ class Table(object):
}
self.schema = schema
self.indexes = indexes
+ self.global_indexes = global_indexes
if self.connection is None:
self.connection = DynamoDBConnection()
@@ -95,7 +111,7 @@ class Table(object):
@classmethod
def create(cls, table_name, schema, throughput=None, indexes=None,
- connection=None):
+ global_indexes=None, connection=None):
"""
Creates a new table in DynamoDB & returns an in-memory ``Table`` object.
@@ -127,6 +143,10 @@ class Table(object):
Optionally accepts a ``indexes`` parameter, which should be a list of
``BaseIndexField`` subclasses representing the desired indexes.
+ Optionally accepts a ``global_indexes`` parameter, which should be a
+ list of ``GlobalBaseIndexField`` subclasses representing the desired
+ indexes.
+
Optionally accepts a ``connection`` parameter, which should be a
``DynamoDBConnection`` instance (or subclass). This is primarily useful
for specifying alternate connection parameters.
@@ -142,7 +162,15 @@ class Table(object):
... }, indexes=[
... KeysOnlyIndex('MostRecentlyJoined', parts=[
... RangeKey('date_joined')
- ... ]),
+ ... ]), global_indexes=[
+ ... GlobalAllIndex('UsersByZipcode', parts=[
+ ... HashKey('zipcode'),
+ ... RangeKey('username'),
+ ... ],
+ ... throughput={
+ ... 'read':10,
+ ... 'write': 10,
+ ... }),
... ])
"""
@@ -155,13 +183,18 @@ class Table(object):
if indexes is not None:
table.indexes = indexes
+ if global_indexes is not None:
+ table.global_indexes = global_indexes
+
# Prep the schema.
raw_schema = []
attr_defs = []
+ seen_attrs = set()
for field in table.schema:
raw_schema.append(field.schema())
# Build the attributes off what we know.
+ seen_attrs.add(field.name)
attr_defs.append(field.definition())
raw_throughput = {
@@ -170,23 +203,24 @@ class Table(object):
}
kwargs = {}
- if table.indexes:
- # Prep the LSIs.
- raw_lsi = []
-
- for index_field in table.indexes:
- raw_lsi.append(index_field.schema())
- # Again, build the attributes off what we know.
- # HOWEVER, only add attributes *NOT* already seen.
- attr_define = index_field.definition()
-
- for part in attr_define:
- attr_names = [attr['AttributeName'] for attr in attr_defs]
-
- if not part['AttributeName'] in attr_names:
- attr_defs.append(part)
-
- kwargs['local_secondary_indexes'] = raw_lsi
+ kwarg_map = {
+ 'indexes': 'local_secondary_indexes',
+ 'global_indexes': 'global_secondary_indexes',
+ }
+ for index_attr in ('indexes', 'global_indexes'):
+ table_indexes = getattr(table, index_attr)
+ if table_indexes:
+ raw_indexes = []
+ for index_field in table_indexes:
+ raw_indexes.append(index_field.schema())
+ # Make sure all attributes specified in the indexes are
+ # added to the definition
+ for field in index_field.parts:
+ if field.name not in seen_attrs:
+ seen_attrs.add(field.name)
+ attr_defs.append(field.definition())
+
+ kwargs[kwarg_map[index_attr]] = raw_indexes
table.connection.create_table(
table_name=table.table_name,
@@ -294,7 +328,7 @@ class Table(object):
# This is leaky.
return result
- def update(self, throughput):
+ def update(self, throughput, global_indexes=None):
"""
Updates table attributes in DynamoDB.
@@ -316,12 +350,46 @@ class Table(object):
... })
True
+ # To also update the global index(es) throughput.
+ >>> users.update(throughput={
+ ... 'read': 20,
+ ... 'write': 10,
+ ... },
+ ... global_indexes={
+ ... 'TheIndexNameHere': {
+ ... 'read': 15,
+ ... 'write': 5,
+ ... }
+ ... })
+ True
+
"""
self.throughput = throughput
- self.connection.update_table(self.table_name, {
+ data = {
'ReadCapacityUnits': int(self.throughput['read']),
'WriteCapacityUnits': int(self.throughput['write']),
- })
+ }
+ gsi_data = None
+
+ if global_indexes:
+ gsi_data = []
+
+ for gsi_name, gsi_throughput in global_indexes.items():
+ gsi_data.append({
+ "Update": {
+ "IndexName": gsi_name,
+ "ProvisionedThroughput": {
+ "ReadCapacityUnits": int(gsi_throughput['read']),
+ "WriteCapacityUnits": int(gsi_throughput['write']),
+ },
+ },
+ })
+
+ self.connection.update_table(
+ self.table_name,
+ provisioned_throughput=data,
+ global_secondary_index_updates=gsi_data
+ )
return True
def delete(self):
diff --git a/boto/ec2/autoscale/__init__.py b/boto/ec2/autoscale/__init__.py
index 864b5dde..ad152069 100644
--- a/boto/ec2/autoscale/__init__.py
+++ b/boto/ec2/autoscale/__init__.py
@@ -163,7 +163,7 @@ class AutoScaleConnection(AWSQueryConnection):
# get availability zone information (required param)
zones = as_group.availability_zones
self.build_list_params(params, zones, 'AvailabilityZones')
- if as_group.desired_capacity:
+ if as_group.desired_capacity is not None:
params['DesiredCapacity'] = as_group.desired_capacity
if as_group.vpc_zone_identifier:
params['VPCZoneIdentifier'] = as_group.vpc_zone_identifier
diff --git a/boto/ec2/connection.py b/boto/ec2/connection.py
index abe192c5..e0d452ab 100644
--- a/boto/ec2/connection.py
+++ b/boto/ec2/connection.py
@@ -31,6 +31,7 @@ from datetime import datetime
from datetime import timedelta
import boto
+from boto.auth import detect_potential_sigv4
from boto.connection import AWSQueryConnection
from boto.resultset import ResultSet
from boto.ec2.image import Image, ImageAttribute, CopyImage
@@ -101,6 +102,7 @@ class EC2Connection(AWSQueryConnection):
if api_version:
self.APIVersion = api_version
+ @detect_potential_sigv4
def _required_auth_capability(self):
return ['ec2']
@@ -305,7 +307,7 @@ class EC2Connection(AWSQueryConnection):
:param snapshot_id: A snapshot ID for the snapshot to be used
as root device for the image. Mutually exclusive with
block_device_map, requires root_device_name
-
+
:rtype: string
:return: The new image id
"""
@@ -334,7 +336,7 @@ class EC2Connection(AWSQueryConnection):
params['DryRun'] = 'true'
if virtualization_type:
params['VirtualizationType'] = virtualization_type
-
+
rs = self.get_object('RegisterImage', params, ResultSet, verb='POST')
image_id = getattr(rs, 'imageId', None)
@@ -1062,6 +1064,7 @@ class EC2Connection(AWSQueryConnection):
* sourceDestCheck
* groupSet
* ebsOptimized
+ * sriovNetSupport
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
@@ -1171,6 +1174,7 @@ class EC2Connection(AWSQueryConnection):
* sourceDestCheck - Boolean (true)
* groupSet - Set of Security Groups or IDs
* ebsOptimized - Boolean (false)
+ * sriovNetSupport - String - ie: 'simple'
:type value: string
:param value: The new value for the attribute
diff --git a/boto/ec2/group.py b/boto/ec2/group.py
index 9e017b8a..fef54977 100644
--- a/boto/ec2/group.py
+++ b/boto/ec2/group.py
@@ -15,13 +15,12 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
-class Group:
-
+class Group(object):
def __init__(self, parent=None):
self.id = None
self.name = None
@@ -36,4 +35,4 @@ class Group:
self.name = value
else:
setattr(self, name, value)
-
+
diff --git a/boto/ec2/image.py b/boto/ec2/image.py
index f424b085..cc7e8349 100644
--- a/boto/ec2/image.py
+++ b/boto/ec2/image.py
@@ -23,8 +23,8 @@
from boto.ec2.ec2object import EC2Object, TaggedEC2Object
from boto.ec2.blockdevicemapping import BlockDeviceMapping
-class ProductCodes(list):
+class ProductCodes(list):
def startElement(self, name, attrs, connection):
pass
@@ -32,8 +32,8 @@ class ProductCodes(list):
if name == 'productCode':
self.append(value)
-class BillingProducts(list):
+class BillingProducts(list):
def startElement(self, name, attrs, connection):
pass
@@ -370,8 +370,8 @@ class Image(TaggedEC2Object):
)
return img_attrs.ramdisk
-class ImageAttribute:
+class ImageAttribute(object):
def __init__(self, parent=None):
self.name = None
self.kernel = None
diff --git a/boto/ec2/instance.py b/boto/ec2/instance.py
index 430647e8..241fa234 100644
--- a/boto/ec2/instance.py
+++ b/boto/ec2/instance.py
@@ -606,8 +606,7 @@ class Instance(TaggedEC2Object):
)
-class ConsoleOutput:
-
+class ConsoleOutput(object):
def __init__(self, parent=None):
self.parent = parent
self.instance_id = None
@@ -629,7 +628,6 @@ class ConsoleOutput:
class InstanceAttribute(dict):
-
ValidValues = ['instanceType', 'kernel', 'ramdisk', 'userData',
'disableApiTermination',
'instanceInitiatedShutdownBehavior',
@@ -668,7 +666,6 @@ class InstanceAttribute(dict):
class SubParse(dict):
-
def __init__(self, section, parent=None):
dict.__init__(self)
self.section = section
diff --git a/boto/ec2/snapshot.py b/boto/ec2/snapshot.py
index 24bffe6b..38999e25 100644
--- a/boto/ec2/snapshot.py
+++ b/boto/ec2/snapshot.py
@@ -26,8 +26,8 @@ Represents an EC2 Elastic Block Store Snapshot
from boto.ec2.ec2object import TaggedEC2Object
from boto.ec2.zone import Zone
-class Snapshot(TaggedEC2Object):
+class Snapshot(TaggedEC2Object):
AttrName = 'createVolumePermission'
def __init__(self, connection=None):
@@ -156,8 +156,7 @@ class Snapshot(TaggedEC2Object):
)
-class SnapshotAttribute:
-
+class SnapshotAttribute(object):
def __init__(self, parent=None):
self.snapshot_id = None
self.attrs = {}
diff --git a/boto/ec2/volume.py b/boto/ec2/volume.py
index 2127b260..a084f647 100644
--- a/boto/ec2/volume.py
+++ b/boto/ec2/volume.py
@@ -260,7 +260,6 @@ class AttachmentSet(object):
:ivar attach_time: Attached since
:ivar device: The device the instance has mapped
"""
-
def __init__(self):
self.id = None
self.instance_id = None
@@ -289,8 +288,7 @@ class AttachmentSet(object):
setattr(self, name, value)
-class VolumeAttribute:
-
+class VolumeAttribute(object):
def __init__(self, parent=None):
self.id = None
self._key_name = None
diff --git a/boto/elasticache/layer1.py b/boto/elasticache/layer1.py
index fadd1f7c..44cba974 100644
--- a/boto/elasticache/layer1.py
+++ b/boto/elasticache/layer1.py
@@ -60,7 +60,7 @@ class ElastiCacheConnection(AWSQueryConnection):
def _required_auth_capability(self):
- return ['sign-v2']
+ return ['hmac-v4']
def authorize_cache_security_group_ingress(self,
cache_security_group_name,
diff --git a/boto/elastictranscoder/layer1.py b/boto/elastictranscoder/layer1.py
index 8799753c..2c4962d8 100644
--- a/boto/elastictranscoder/layer1.py
+++ b/boto/elastictranscoder/layer1.py
@@ -523,26 +523,56 @@ class ElasticTranscoderConnection(AWSAuthConnection):
return self.make_request('GET', uri, expected_status=200,
params=params)
- def list_pipelines(self):
+ def list_pipelines(self, ascending=None, page_token=None):
"""
The ListPipelines operation gets a list of the pipelines
associated with the current AWS account.
-
+ :type ascending: string
+ :param ascending: To list pipelines in chronological order by the date
+ and time that they were created, enter `True`. To list pipelines in
+ reverse chronological order, enter `False`.
+
+ :type page_token: string
+ :param page_token: When Elastic Transcoder returns more than one page
+ of results, use `pageToken` in subsequent `GET` requests to get
+ each successive page of results.
+
"""
- uri = '/2012-09-25/pipelines'
- return self.make_request('GET', uri, expected_status=200)
+ uri = '/2012-09-25/pipelines'.format()
+ params = {}
+ if ascending is not None:
+ params['Ascending'] = ascending
+ if page_token is not None:
+ params['PageToken'] = page_token
+ return self.make_request('GET', uri, expected_status=200,
+ params=params)
- def list_presets(self):
+ def list_presets(self, ascending=None, page_token=None):
"""
The ListPresets operation gets a list of the default presets
included with Elastic Transcoder and the presets that you've
added in an AWS region.
-
+ :type ascending: string
+ :param ascending: To list presets in chronological order by the date
+ and time that they were created, enter `True`. To list presets in
+ reverse chronological order, enter `False`.
+
+ :type page_token: string
+ :param page_token: When Elastic Transcoder returns more than one page
+ of results, use `pageToken` in subsequent `GET` requests to get
+ each successive page of results.
+
"""
- uri = '/2012-09-25/presets'
- return self.make_request('GET', uri, expected_status=200)
+ uri = '/2012-09-25/presets'.format()
+ params = {}
+ if ascending is not None:
+ params['Ascending'] = ascending
+ if page_token is not None:
+ params['PageToken'] = page_token
+ return self.make_request('GET', uri, expected_status=200,
+ params=params)
def read_job(self, id=None):
"""
diff --git a/boto/emr/connection.py b/boto/emr/connection.py
index 7b1c434e..9ec5b569 100644
--- a/boto/emr/connection.py
+++ b/boto/emr/connection.py
@@ -267,6 +267,42 @@ class EmrConnection(AWSQueryConnection):
self.get_object('ListSteps', params, StepSummaryList)
+ def add_tags(self, resource_id, tags):
+ """
+ Create new metadata tags for the specified resource id.
+
+ :type resource_id: str
+ :param resource_id: The cluster id
+
+ :type tags: dict
+ :param tags: A dictionary containing the name/value pairs.
+ If you want to create only a tag name, the
+ value for that tag should be the empty string
+ (e.g. '') or None.
+ """
+ assert isinstance(resource_id, basestring)
+ params = {
+ 'ResourceId': resource_id,
+ }
+ params.update(self._build_tag_list(tags))
+ return self.get_status('AddTags', params, verb='POST')
+
+ def remove_tags(self, resource_id, tags):
+ """
+ Remove metadata tags for the specified resource id.
+
+ :type resource_id: str
+ :param resource_id: The cluster id
+
+ :type tags: list
+ :param tags: A list of tag names to remove.
+ """
+ params = {
+ 'ResourceId': resource_id,
+ }
+ params.update(self._build_string_list('TagKeys', tags))
+ return self.get_status('RemoveTags', params, verb='POST')
+
def terminate_jobflow(self, jobflow_id):
"""
Terminate an Elastic MapReduce job flow
@@ -623,6 +659,27 @@ class EmrConnection(AWSQueryConnection):
params['Steps.member.%s.%s' % (i+1, key)] = value
return params
+ def _build_string_list(self, field, items):
+ if not isinstance(items, types.ListType):
+ items = [items]
+
+ params = {}
+ for i, item in enumerate(items):
+ params['%s.member.%s' % (field, i + 1)] = item
+ return params
+
+ def _build_tag_list(self, tags):
+ assert isinstance(tags, dict)
+
+ params = {}
+ for i, key_value in enumerate(sorted(tags.iteritems()), start=1):
+ key, value = key_value
+ current_prefix = 'Tags.member.%s' % i
+ params['%s.Key' % current_prefix] = key
+ if value:
+ params['%s.Value' % current_prefix] = value
+ return params
+
def _build_instance_common_args(self, ec2_keyname, availability_zone,
keep_alive, hadoop_version):
"""
diff --git a/boto/emr/emrobject.py b/boto/emr/emrobject.py
index caf51f76..a397a55a 100644
--- a/boto/emr/emrobject.py
+++ b/boto/emr/emrobject.py
@@ -256,6 +256,7 @@ class Cluster(EmrObject):
self.status = None
self.ec2instanceattributes = None
self.applications = None
+ self.tags = None
def startElement(self, name, attrs, connection):
if name == 'Status':
@@ -266,6 +267,9 @@ class Cluster(EmrObject):
return self.ec2instanceattributes
elif name == 'Applications':
self.applications = ResultSet([('member', Application)])
+ elif name == 'Tags':
+ self.tags = ResultSet([('member', KeyValue)])
+ return self.tags
else:
return None
diff --git a/boto/exception.py b/boto/exception.py
index 419aac15..f0e1daaf 100644
--- a/boto/exception.py
+++ b/boto/exception.py
@@ -34,7 +34,6 @@ class BotoClientError(StandardError):
"""
General Boto Client error (error accessing AWS)
"""
-
def __init__(self, reason, *args):
StandardError.__init__(self, reason, *args)
self.reason = reason
@@ -45,30 +44,33 @@ class BotoClientError(StandardError):
def __str__(self):
return 'BotoClientError: %s' % self.reason
-class SDBPersistenceError(StandardError):
+class SDBPersistenceError(StandardError):
pass
+
class StoragePermissionsError(BotoClientError):
"""
Permissions error when accessing a bucket or key on a storage service.
"""
pass
+
class S3PermissionsError(StoragePermissionsError):
"""
Permissions error when accessing a bucket or key on S3.
"""
pass
+
class GSPermissionsError(StoragePermissionsError):
"""
Permissions error when accessing a bucket or key on GS.
"""
pass
-class BotoServerError(StandardError):
+class BotoServerError(StandardError):
def __init__(self, status, reason, body=None, *args):
StandardError.__init__(self, status, reason, body, *args)
self.status = status
@@ -134,8 +136,8 @@ class BotoServerError(StandardError):
self.message = None
self.box_usage = None
-class ConsoleOutput:
+class ConsoleOutput(object):
def __init__(self, parent=None):
self.parent = parent
self.instance_id = None
@@ -154,6 +156,7 @@ class ConsoleOutput:
else:
setattr(self, name, value)
+
class StorageCreateError(BotoServerError):
"""
Error creating a bucket or key on a storage service.
@@ -174,30 +177,35 @@ class S3CreateError(StorageCreateError):
"""
pass
+
class GSCreateError(StorageCreateError):
"""
Error creating a bucket or key on GS.
"""
pass
+
class StorageCopyError(BotoServerError):
"""
Error copying a key on a storage service.
"""
pass
+
class S3CopyError(StorageCopyError):
"""
Error copying a key on S3.
"""
pass
+
class GSCopyError(StorageCopyError):
"""
Error copying a key on GS.
"""
pass
+
class SQSError(BotoServerError):
"""
General Error on Simple Queue Service.
@@ -223,6 +231,7 @@ class SQSError(BotoServerError):
for p in ('detail', 'type'):
setattr(self, p, None)
+
class SQSDecodeError(BotoClientError):
"""
Error when decoding an SQS message.
@@ -237,6 +246,7 @@ class SQSDecodeError(BotoClientError):
def __str__(self):
return 'SQSDecodeError: %s' % self.reason
+
class StorageResponseError(BotoServerError):
"""
Error in response from a storage service.
@@ -259,23 +269,25 @@ class StorageResponseError(BotoServerError):
for p in ('resource'):
setattr(self, p, None)
+
class S3ResponseError(StorageResponseError):
"""
Error in response from S3.
"""
pass
+
class GSResponseError(StorageResponseError):
"""
Error in response from GS.
"""
pass
+
class EC2ResponseError(BotoServerError):
"""
Error in response from EC2.
"""
-
def __init__(self, status, reason, body=None):
self.errors = None
self._errorResultSet = []
@@ -304,6 +316,7 @@ class EC2ResponseError(BotoServerError):
for p in ('errors'):
setattr(self, p, None)
+
class JSONResponseError(BotoServerError):
"""
This exception expects the fully parsed and decoded JSON response
@@ -342,8 +355,8 @@ class EmrResponseError(BotoServerError):
"""
pass
-class _EC2Error:
+class _EC2Error(object):
def __init__(self, connection=None):
self.connection = connection
self.error_code = None
@@ -360,6 +373,7 @@ class _EC2Error:
else:
return None
+
class SDBResponseError(BotoServerError):
"""
Error in responses from SDB.
diff --git a/boto/mturk/connection.py b/boto/mturk/connection.py
index ad667849..c85ad6ec 100644
--- a/boto/mturk/connection.py
+++ b/boto/mturk/connection.py
@@ -875,7 +875,7 @@ class MTurkConnection(AWSQueryConnection):
return duration
-class BaseAutoResultElement:
+class BaseAutoResultElement(object):
"""
Base class to automatically add attributes when parsing XML
"""
diff --git a/boto/mturk/layoutparam.py b/boto/mturk/layoutparam.py
index 16e59328..781f981d 100644
--- a/boto/mturk/layoutparam.py
+++ b/boto/mturk/layoutparam.py
@@ -14,12 +14,12 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
-class LayoutParameters:
+class LayoutParameters(object):
def __init__(self, layoutParameters=None):
if layoutParameters == None:
@@ -46,7 +46,7 @@ class LayoutParameter(object):
def __init__(self, name, value):
self.name = name
self.value = value
-
+
def get_as_params(self):
params = {
"Name": self.name,
diff --git a/boto/mturk/notification.py b/boto/mturk/notification.py
index 02c93aab..118daaab 100644
--- a/boto/mturk/notification.py
+++ b/boto/mturk/notification.py
@@ -32,7 +32,7 @@ except ImportError:
import base64
import re
-class NotificationMessage:
+class NotificationMessage(object):
NOTIFICATION_WSDL = "http://mechanicalturk.amazonaws.com/AWSMechanicalTurk/2006-05-05/AWSMechanicalTurkRequesterNotification.wsdl"
NOTIFICATION_VERSION = '2006-05-05'
@@ -88,7 +88,7 @@ class NotificationMessage:
signature_calc = base64.b64encode(h.digest())
return self.signature == signature_calc
-class Event:
+class Event(object):
def __init__(self, d):
self.event_type = d['EventType']
self.event_time_str = d['EventTime']
diff --git a/boto/mturk/price.py b/boto/mturk/price.py
index 3c88a965..8e194e42 100644
--- a/boto/mturk/price.py
+++ b/boto/mturk/price.py
@@ -14,12 +14,12 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
-class Price:
+class Price(object):
def __init__(self, amount=0.0, currency_code='USD'):
self.amount = amount
diff --git a/boto/mturk/qualification.py b/boto/mturk/qualification.py
index 8272d6d1..c59cabd2 100644
--- a/boto/mturk/qualification.py
+++ b/boto/mturk/qualification.py
@@ -14,12 +14,12 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
-class Qualifications:
+class Qualifications(object):
def __init__(self, requirements=None):
if requirements == None:
@@ -49,7 +49,7 @@ class Requirement(object):
self.comparator = comparator
self.integer_value = integer_value
self.required_to_preview = required_to_preview
-
+
def get_as_params(self):
params = {
"QualificationTypeId": self.qualification_type_id,
@@ -105,7 +105,7 @@ class NumberHitsApprovedRequirement(Requirement):
"""
Specifies the total number of HITs submitted by a Worker that have been approved. The value is an integer greater than or equal to 0.
"""
-
+
def __init__(self, comparator, integer_value, required_to_preview=False):
Requirement.__init__(self, qualification_type_id="00000000000000000040", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview)
@@ -132,6 +132,6 @@ class AdultRequirement(Requirement):
"""
Requires workers to acknowledge that they are over 18 and that they agree to work on potentially offensive content. The value type is boolean, 1 (required), 0 (not required, the default).
"""
-
+
def __init__(self, comparator, integer_value, required_to_preview=False):
Requirement.__init__(self, qualification_type_id="00000000000000000060", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview)
diff --git a/boto/mturk/question.py b/boto/mturk/question.py
index 90ab00db..1be3e7df 100644
--- a/boto/mturk/question.py
+++ b/boto/mturk/question.py
@@ -82,12 +82,12 @@ class ExternalQuestion(ValidatingXML):
return self.template % vars(self)
-class XMLTemplate:
+class XMLTemplate(object):
def get_as_xml(self):
return self.template % vars(self)
-class SimpleField(object, XMLTemplate):
+class SimpleField(XMLTemplate):
"""
A Simple name/value pair that can be easily rendered as XML.
@@ -101,7 +101,7 @@ class SimpleField(object, XMLTemplate):
self.value = value
-class Binary(object, XMLTemplate):
+class Binary(XMLTemplate):
template = """<Binary><MimeType><Type>%(type)s</Type><SubType>%(subtype)s</SubType></MimeType><DataURL>%(url)s</DataURL><AltText>%(alt_text)s</AltText></Binary>"""
def __init__(self, type, subtype, url, alt_text):
diff --git a/boto/opsworks/layer1.py b/boto/opsworks/layer1.py
index 0d79a05b..1970edbd 100644
--- a/boto/opsworks/layer1.py
+++ b/boto/opsworks/layer1.py
@@ -20,7 +20,11 @@
# IN THE SOFTWARE.
#
-import json
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
@@ -41,6 +45,23 @@ class OpsWorksConnection(AWSQueryConnection):
lifecycle. For information about this product, go to the `AWS
OpsWorks`_ details page.
+ **SDKs and CLI**
+
+ The most common way to use the AWS OpsWorks API is by using the
+ AWS Command Line Interface (CLI) or by using one of the AWS SDKs
+ to implement applications in your preferred language. For more
+ information, see:
+
+
+ + `AWS CLI`_
+ + `AWS SDK for Java`_
+ + `AWS SDK for .NET`_
+ + `AWS SDK for PHP 2`_
+ + `AWS SDK for Ruby`_
+ + `AWS SDK for Node.js`_
+ + `AWS SDK for Python(Boto)`_
+
+
**Endpoints**
AWS OpsWorks supports only one endpoint, opsworks.us-
@@ -53,7 +74,8 @@ class OpsWorksConnection(AWSQueryConnection):
When you call CreateStack, CloneStack, or UpdateStack we recommend
you use the `ConfigurationManager` parameter to specify the Chef
version, 0.9 or 11.4. The default value is currently 0.9. However,
- we expect to change the default value to 11.4 in September 2013.
+ we expect to change the default value to 11.4 in October 2013. For
+ more information, see `Using AWS OpsWorks with Chef 11`_.
"""
APIVersion = "2013-02-18"
DefaultRegionName = "us-east-1"
@@ -85,7 +107,13 @@ class OpsWorksConnection(AWSQueryConnection):
Assigns one of the stack's registered Amazon EBS volumes to a
specified instance. The volume must first be registered with
the stack by calling RegisterVolume. For more information, see
- ``_.
+ `Resource Management`_.
+
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
:type volume_id: string
:param volume_id: The volume ID.
@@ -105,7 +133,13 @@ class OpsWorksConnection(AWSQueryConnection):
Associates one of the stack's registered Elastic IP addresses
with a specified instance. The address must first be
registered with the stack by calling RegisterElasticIp. For
- more information, see ``_.
+ more information, see `Resource Management`_.
+
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
:type elastic_ip: string
:param elastic_ip: The Elastic IP address.
@@ -131,6 +165,12 @@ class OpsWorksConnection(AWSQueryConnection):
or CLI. For more information, see ` Elastic Load Balancing
Developer Guide`_.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
+
:type elastic_load_balancer_name: string
:param elastic_load_balancer_name: The Elastic Load Balancing
instance's name.
@@ -160,6 +200,11 @@ class OpsWorksConnection(AWSQueryConnection):
Creates a clone of a specified stack. For more information,
see `Clone a Stack`_.
+ **Required Permissions**: To use this action, an IAM user must
+ have an attached policy that explicitly grants permissions.
+ For more information on user permissions, see `Managing User
+ Permissions`_.
+
:type source_stack_id: string
:param source_stack_id: The source stack ID.
@@ -233,20 +278,20 @@ class OpsWorksConnection(AWSQueryConnection):
:param hostname_theme: The stack's host name theme, with spaces are
replaced by underscores. The theme is used to generate host names
for the stack's instances. By default, `HostnameTheme` is set to
- Layer_Dependent, which creates host names by appending integers to
- the layer's short name. The other themes are:
-
- + Baked_Goods
- + Clouds
- + European_Cities
- + Fruits
- + Greek_Deities
- + Legendary_Creatures_from_Japan
- + Planets_and_Moons
- + Roman_Deities
- + Scottish_Islands
- + US_Cities
- + Wild_Cats
+ `Layer_Dependent`, which creates host names by appending integers
+ to the layer's short name. The other themes are:
+
+ + `Baked_Goods`
+ + `Clouds`
+ + `European_Cities`
+ + `Fruits`
+ + `Greek_Deities`
+ + `Legendary_Creatures_from_Japan`
+ + `Planets_and_Moons`
+ + `Roman_Deities`
+ + `Scottish_Islands`
+ + `US_Cities`
+ + `Wild_Cats`
To obtain a generated host name, call `GetHostNameSuggestion`, which
@@ -359,6 +404,12 @@ class OpsWorksConnection(AWSQueryConnection):
Creates an app for a specified stack. For more information,
see `Creating Apps`_.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
+
:type stack_id: string
:param stack_id: The stack ID.
@@ -430,6 +481,12 @@ class OpsWorksConnection(AWSQueryConnection):
For more information, see `Deploying Apps`_ and `Run Stack
Commands`_.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Deploy or Manage permissions level for the stack, or an
+ attached policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
+
:type stack_id: string
:param stack_id: The stack ID.
@@ -479,6 +536,12 @@ class OpsWorksConnection(AWSQueryConnection):
Creates an instance in a specified stack. For more
information, see `Adding an Instance to a Layer`_.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
+
:type stack_id: string
:param stack_id: The stack ID.
@@ -614,6 +677,12 @@ class OpsWorksConnection(AWSQueryConnection):
number of custom layers, so you can call **CreateLayer** as
many times as you like for that layer type.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
+
:type stack_id: string
:param stack_id: The layer stack ID.
@@ -736,6 +805,11 @@ class OpsWorksConnection(AWSQueryConnection):
Creates a new stack. For more information, see `Create a New
Stack`_.
+ **Required Permissions**: To use this action, an IAM user must
+ have an attached policy that explicitly grants permissions.
+ For more information on user permissions, see `Managing User
+ Permissions`_.
+
:type name: string
:param name: The stack name.
@@ -798,20 +872,20 @@ class OpsWorksConnection(AWSQueryConnection):
:param hostname_theme: The stack's host name theme, with spaces are
replaced by underscores. The theme is used to generate host names
for the stack's instances. By default, `HostnameTheme` is set to
- Layer_Dependent, which creates host names by appending integers to
- the layer's short name. The other themes are:
-
- + Baked_Goods
- + Clouds
- + European_Cities
- + Fruits
- + Greek_Deities
- + Legendary_Creatures_from_Japan
- + Planets_and_Moons
- + Roman_Deities
- + Scottish_Islands
- + US_Cities
- + Wild_Cats
+ `Layer_Dependent`, which creates host names by appending integers
+ to the layer's short name. The other themes are:
+
+ + `Baked_Goods`
+ + `Clouds`
+ + `European_Cities`
+ + `Fruits`
+ + `Greek_Deities`
+ + `Legendary_Creatures_from_Japan`
+ + `Planets_and_Moons`
+ + `Roman_Deities`
+ + `Scottish_Islands`
+ + `US_Cities`
+ + `Wild_Cats`
To obtain a generated host name, call `GetHostNameSuggestion`, which
@@ -902,10 +976,15 @@ class OpsWorksConnection(AWSQueryConnection):
body=json.dumps(params))
def create_user_profile(self, iam_user_arn, ssh_username=None,
- ssh_public_key=None):
+ ssh_public_key=None, allow_self_management=None):
"""
Creates a new user profile.
+ **Required Permissions**: To use this action, an IAM user must
+ have an attached policy that explicitly grants permissions.
+ For more information on user permissions, see `Managing User
+ Permissions`_.
+
:type iam_user_arn: string
:param iam_user_arn: The user's IAM ARN.
@@ -915,12 +994,19 @@ class OpsWorksConnection(AWSQueryConnection):
:type ssh_public_key: string
:param ssh_public_key: The user's public SSH key.
+ :type allow_self_management: boolean
+ :param allow_self_management: Whether users can specify their own SSH
+ public key through the My Settings page. For more information, see
+ `Managing User Permissions`_.
+
"""
params = {'IamUserArn': iam_user_arn, }
if ssh_username is not None:
params['SshUsername'] = ssh_username
if ssh_public_key is not None:
params['SshPublicKey'] = ssh_public_key
+ if allow_self_management is not None:
+ params['AllowSelfManagement'] = allow_self_management
return self.make_request(action='CreateUserProfile',
body=json.dumps(params))
@@ -928,6 +1014,12 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Deletes a specified app.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
+
:type app_id: string
:param app_id: The app ID.
@@ -943,6 +1035,12 @@ class OpsWorksConnection(AWSQueryConnection):
you can delete it. For more information, see `Deleting
Instances`_.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
+
:type instance_id: string
:param instance_id: The instance ID.
@@ -969,6 +1067,12 @@ class OpsWorksConnection(AWSQueryConnection):
all associated instances. For more information, see `How to
Delete a Layer`_.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
+
:type layer_id: string
:param layer_id: The layer ID.
@@ -983,6 +1087,12 @@ class OpsWorksConnection(AWSQueryConnection):
instances, layers, and apps. For more information, see `Shut
Down a Stack`_.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
+
:type stack_id: string
:param stack_id: The stack ID.
@@ -995,6 +1105,11 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Deletes a user profile.
+ **Required Permissions**: To use this action, an IAM user must
+ have an attached policy that explicitly grants permissions.
+ For more information on user permissions, see `Managing User
+ Permissions`_.
+
:type iam_user_arn: string
:param iam_user_arn: The user's IAM ARN.
@@ -1007,7 +1122,13 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Deregisters a specified Elastic IP address. The address can
then be registered by another stack. For more information, see
- ``_.
+ `Resource Management`_.
+
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
:type elastic_ip: string
:param elastic_ip: The Elastic IP address.
@@ -1020,7 +1141,14 @@ class OpsWorksConnection(AWSQueryConnection):
def deregister_volume(self, volume_id):
"""
Deregisters an Amazon EBS volume. The volume can then be
- registered by another stack. For more information, see ``_.
+ registered by another stack. For more information, see
+ `Resource Management`_.
+
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
:type volume_id: string
:param volume_id: The volume ID.
@@ -1036,6 +1164,12 @@ class OpsWorksConnection(AWSQueryConnection):
You must specify at least one of the parameters.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Show, Deploy, or Manage permissions level for the
+ stack, or an attached policy that explicitly grants
+ permissions. For more information on user permissions, see
+ `Managing User Permissions`_.
+
:type stack_id: string
:param stack_id: The app stack ID. If you use this parameter,
`DescribeApps` returns a description of the apps in the specified
@@ -1062,6 +1196,12 @@ class OpsWorksConnection(AWSQueryConnection):
You must specify at least one of the parameters.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Show, Deploy, or Manage permissions level for the
+ stack, or an attached policy that explicitly grants
+ permissions. For more information on user permissions, see
+ `Managing User Permissions`_.
+
:type deployment_id: string
:param deployment_id: The deployment ID. If you include this parameter,
`DescribeCommands` returns a description of the commands associated
@@ -1096,6 +1236,12 @@ class OpsWorksConnection(AWSQueryConnection):
You must specify at least one of the parameters.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Show, Deploy, or Manage permissions level for the
+ stack, or an attached policy that explicitly grants
+ permissions. For more information on user permissions, see
+ `Managing User Permissions`_.
+
:type stack_id: string
:param stack_id: The stack ID. If you include this parameter,
`DescribeDeployments` returns a description of the commands
@@ -1129,6 +1275,12 @@ class OpsWorksConnection(AWSQueryConnection):
You must specify at least one of the parameters.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Show, Deploy, or Manage permissions level for the
+ stack, or an attached policy that explicitly grants
+ permissions. For more information on user permissions, see
+ `Managing User Permissions`_.
+
:type instance_id: string
:param instance_id: The instance ID. If you include this parameter,
`DescribeElasticIps` returns a description of the Elastic IP
@@ -1162,6 +1314,12 @@ class OpsWorksConnection(AWSQueryConnection):
You must specify at least one of the parameters.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Show, Deploy, or Manage permissions level for the
+ stack, or an attached policy that explicitly grants
+ permissions. For more information on user permissions, see
+ `Managing User Permissions`_.
+
:type stack_id: string
:param stack_id: A stack ID. The action describes the stack's Elastic
Load Balancing instances.
@@ -1186,6 +1344,12 @@ class OpsWorksConnection(AWSQueryConnection):
You must specify at least one of the parameters.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Show, Deploy, or Manage permissions level for the
+ stack, or an attached policy that explicitly grants
+ permissions. For more information on user permissions, see
+ `Managing User Permissions`_.
+
:type stack_id: string
:param stack_id: A stack ID. If you use this parameter,
`DescribeInstances` returns descriptions of the instances
@@ -1220,6 +1384,12 @@ class OpsWorksConnection(AWSQueryConnection):
You must specify at least one of the parameters.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Show, Deploy, or Manage permissions level for the
+ stack, or an attached policy that explicitly grants
+ permissions. For more information on user permissions, see
+ `Managing User Permissions`_.
+
:type stack_id: string
:param stack_id: The stack ID.
@@ -1244,6 +1414,12 @@ class OpsWorksConnection(AWSQueryConnection):
You must specify at least one of the parameters.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Show, Deploy, or Manage permissions level for the
+ stack, or an attached policy that explicitly grants
+ permissions. For more information on user permissions, see
+ `Managing User Permissions`_.
+
:type layer_ids: list
:param layer_ids: An array of layer IDs.
@@ -1252,10 +1428,31 @@ class OpsWorksConnection(AWSQueryConnection):
return self.make_request(action='DescribeLoadBasedAutoScaling',
body=json.dumps(params))
- def describe_permissions(self, iam_user_arn, stack_id):
+ def describe_my_user_profile(self):
+ """
+ Describes a user's SSH information.
+
+ **Required Permissions**: To use this action, an IAM user must
+ have self-management enabled or an attached policy that
+ explicitly grants permissions. For more information on user
+ permissions, see `Managing User Permissions`_.
+
+
+ """
+ params = {}
+ return self.make_request(action='DescribeMyUserProfile',
+ body=json.dumps(params))
+
+ def describe_permissions(self, iam_user_arn=None, stack_id=None):
"""
Describes the permissions for a specified stack.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
+
:type iam_user_arn: string
:param iam_user_arn: The user's IAM ARN. For more information about IAM
ARNs, see `Using Identifiers`_.
@@ -1264,7 +1461,11 @@ class OpsWorksConnection(AWSQueryConnection):
:param stack_id: The stack ID.
"""
- params = {'IamUserArn': iam_user_arn, 'StackId': stack_id, }
+ params = {}
+ if iam_user_arn is not None:
+ params['IamUserArn'] = iam_user_arn
+ if stack_id is not None:
+ params['StackId'] = stack_id
return self.make_request(action='DescribePermissions',
body=json.dumps(params))
@@ -1274,6 +1475,12 @@ class OpsWorksConnection(AWSQueryConnection):
You must specify at least one of the parameters.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Show, Deploy, or Manage permissions level for the
+ stack, or an attached policy that explicitly grants
+ permissions. For more information on user permissions, see
+ `Managing User Permissions`_.
+
:type instance_id: string
:param instance_id: The instance ID. If you use this parameter,
`DescribeRaidArrays` returns descriptions of the RAID arrays
@@ -1299,6 +1506,12 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Describes AWS OpsWorks service errors.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Show, Deploy, or Manage permissions level for the
+ stack, or an attached policy that explicitly grants
+ permissions. For more information on user permissions, see
+ `Managing User Permissions`_.
+
:type stack_id: string
:param stack_id: The stack ID. If you use this parameter,
`DescribeServiceErrors` returns descriptions of the errors
@@ -1326,10 +1539,36 @@ class OpsWorksConnection(AWSQueryConnection):
return self.make_request(action='DescribeServiceErrors',
body=json.dumps(params))
+ def describe_stack_summary(self, stack_id):
+ """
+ Describes the number of layers and apps in a specified stack,
+ and the number of instances in each state, such as
+ `running_setup` or `online`.
+
+ **Required Permissions**: To use this action, an IAM user must
+ have a Show, Deploy, or Manage permissions level for the
+ stack, or an attached policy that explicitly grants
+ permissions. For more information on user permissions, see
+ `Managing User Permissions`_.
+
+ :type stack_id: string
+ :param stack_id: The stack ID.
+
+ """
+ params = {'StackId': stack_id, }
+ return self.make_request(action='DescribeStackSummary',
+ body=json.dumps(params))
+
def describe_stacks(self, stack_ids=None):
"""
Requests a description of one or more stacks.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Show, Deploy, or Manage permissions level for the
+ stack, or an attached policy that explicitly grants
+ permissions. For more information on user permissions, see
+ `Managing User Permissions`_.
+
:type stack_ids: list
:param stack_ids: An array of stack IDs that specify the stacks to be
described. If you omit this parameter, `DescribeStacks` returns a
@@ -1349,6 +1588,12 @@ class OpsWorksConnection(AWSQueryConnection):
You must specify at least one of the parameters.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Show, Deploy, or Manage permissions level for the
+ stack, or an attached policy that explicitly grants
+ permissions. For more information on user permissions, see
+ `Managing User Permissions`_.
+
:type instance_ids: list
:param instance_ids: An array of instance IDs.
@@ -1357,16 +1602,23 @@ class OpsWorksConnection(AWSQueryConnection):
return self.make_request(action='DescribeTimeBasedAutoScaling',
body=json.dumps(params))
- def describe_user_profiles(self, iam_user_arns):
+ def describe_user_profiles(self, iam_user_arns=None):
"""
Describe specified users.
+ **Required Permissions**: To use this action, an IAM user must
+ have an attached policy that explicitly grants permissions.
+ For more information on user permissions, see `Managing User
+ Permissions`_.
+
:type iam_user_arns: list
:param iam_user_arns: An array of IAM user ARNs that identify the users
to be described.
"""
- params = {'IamUserArns': iam_user_arns, }
+ params = {}
+ if iam_user_arns is not None:
+ params['IamUserArns'] = iam_user_arns
return self.make_request(action='DescribeUserProfiles',
body=json.dumps(params))
@@ -1377,6 +1629,12 @@ class OpsWorksConnection(AWSQueryConnection):
You must specify at least one of the parameters.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Show, Deploy, or Manage permissions level for the
+ stack, or an attached policy that explicitly grants
+ permissions. For more information on user permissions, see
+ `Managing User Permissions`_.
+
:type instance_id: string
:param instance_id: The instance ID. If you use this parameter,
`DescribeVolumes` returns descriptions of the volumes associated
@@ -1415,6 +1673,12 @@ class OpsWorksConnection(AWSQueryConnection):
Detaches a specified Elastic Load Balancing instance from its
layer.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
+
:type elastic_load_balancer_name: string
:param elastic_load_balancer_name: The Elastic Load Balancing
instance's name.
@@ -1435,7 +1699,13 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Disassociates an Elastic IP address from its instance. The
address remains registered with the stack. For more
- information, see ``_.
+ information, see `Resource Management`_.
+
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
:type elastic_ip: string
:param elastic_ip: The Elastic IP address.
@@ -1450,6 +1720,12 @@ class OpsWorksConnection(AWSQueryConnection):
Gets a generated host name for the specified layer, based on
the current host name theme.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
+
:type layer_id: string
:param layer_id: The layer ID.
@@ -1463,6 +1739,12 @@ class OpsWorksConnection(AWSQueryConnection):
Reboots a specified instance. For more information, see
`Starting, Stopping, and Rebooting Instances`_.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
+
:type instance_id: string
:param instance_id: The instance ID.
@@ -1477,7 +1759,13 @@ class OpsWorksConnection(AWSQueryConnection):
address can be registered with only one stack at a time. If
the address is already registered, you must first deregister
it by calling DeregisterElasticIp. For more information, see
- ``_.
+ `Resource Management`_.
+
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
:type elastic_ip: string
:param elastic_ip: The Elastic IP address.
@@ -1495,7 +1783,14 @@ class OpsWorksConnection(AWSQueryConnection):
Registers an Amazon EBS volume with a specified stack. A
volume can be registered with only one stack at a time. If the
volume is already registered, you must first deregister it by
- calling DeregisterVolume. For more information, see ``_.
+ calling DeregisterVolume. For more information, see `Resource
+ Management`_.
+
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
:type ec_2_volume_id: string
:param ec_2_volume_id: The Amazon EBS volume ID.
@@ -1523,6 +1818,12 @@ class OpsWorksConnection(AWSQueryConnection):
you have created enough instances to handle the maximum
anticipated load.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
+
:type layer_id: string
:param layer_id: The layer ID.
@@ -1553,11 +1854,17 @@ class OpsWorksConnection(AWSQueryConnection):
body=json.dumps(params))
def set_permission(self, stack_id, iam_user_arn, allow_ssh=None,
- allow_sudo=None):
+ allow_sudo=None, level=None):
"""
Specifies a stack's permissions. For more information, see
`Security and Permissions`_.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
+
:type stack_id: string
:param stack_id: The stack ID.
@@ -1572,12 +1879,28 @@ class OpsWorksConnection(AWSQueryConnection):
:param allow_sudo: The user is allowed to use **sudo** to elevate
privileges.
+ :type level: string
+ :param level: The user's permission level, which must be set to one of
+ the following strings. You cannot set your own permissions level.
+
+ + `deny`
+ + `show`
+ + `deploy`
+ + `manage`
+ + `iam_only`
+
+
+ For more information on the permissions associated with these levels,
+ see `Managing User Permissions`_.
+
"""
params = {'StackId': stack_id, 'IamUserArn': iam_user_arn, }
if allow_ssh is not None:
params['AllowSsh'] = allow_ssh
if allow_sudo is not None:
params['AllowSudo'] = allow_sudo
+ if level is not None:
+ params['Level'] = level
return self.make_request(action='SetPermission',
body=json.dumps(params))
@@ -1588,6 +1911,12 @@ class OpsWorksConnection(AWSQueryConnection):
specified instance. For more information, see `Managing Load
with Time-based and Load-based Instances`_.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
+
:type instance_id: string
:param instance_id: The instance ID.
@@ -1607,6 +1936,12 @@ class OpsWorksConnection(AWSQueryConnection):
Starts a specified instance. For more information, see
`Starting, Stopping, and Rebooting Instances`_.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
+
:type instance_id: string
:param instance_id: The instance ID.
@@ -1619,6 +1954,12 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Starts stack's instances.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
+
:type stack_id: string
:param stack_id: The stack ID.
@@ -1635,6 +1976,12 @@ class OpsWorksConnection(AWSQueryConnection):
without losing data. For more information, see `Starting,
Stopping, and Rebooting Instances`_.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
+
:type instance_id: string
:param instance_id: The instance ID.
@@ -1647,6 +1994,12 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Stops a specified stack.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
+
:type stack_id: string
:param stack_id: The stack ID.
@@ -1658,7 +2011,14 @@ class OpsWorksConnection(AWSQueryConnection):
def unassign_volume(self, volume_id):
"""
Unassigns an assigned Amazon EBS volume. The volume remains
- registered with the stack. For more information, see ``_.
+ registered with the stack. For more information, see `Resource
+ Management`_.
+
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
:type volume_id: string
:param volume_id: The volume ID.
@@ -1674,6 +2034,12 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Updates a specified app.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Deploy or Manage permissions level for the stack, or an
+ attached policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
+
:type app_id: string
:param app_id: The app ID.
@@ -1728,7 +2094,13 @@ class OpsWorksConnection(AWSQueryConnection):
def update_elastic_ip(self, elastic_ip, name=None):
"""
Updates a registered Elastic IP address's name. For more
- information, see ``_.
+ information, see `Resource Management`_.
+
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
:type elastic_ip: string
:param elastic_ip: The address.
@@ -1751,6 +2123,12 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Updates a specified instance.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
+
:type instance_id: string
:param instance_id: The instance ID.
@@ -1854,6 +2232,12 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Updates a specified layer.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
+
:type layer_id: string
:param layer_id: The layer ID.
@@ -1947,6 +2331,25 @@ class OpsWorksConnection(AWSQueryConnection):
return self.make_request(action='UpdateLayer',
body=json.dumps(params))
+ def update_my_user_profile(self, ssh_public_key=None):
+ """
+ Updates a user's SSH public key.
+
+ **Required Permissions**: To use this action, an IAM user must
+ have self-management enabled or an attached policy that
+ explicitly grants permissions. For more information on user
+ permissions, see `Managing User Permissions`_.
+
+ :type ssh_public_key: string
+ :param ssh_public_key: The user's SSH public key.
+
+ """
+ params = {}
+ if ssh_public_key is not None:
+ params['SshPublicKey'] = ssh_public_key
+ return self.make_request(action='UpdateMyUserProfile',
+ body=json.dumps(params))
+
def update_stack(self, stack_id, name=None, attributes=None,
service_role_arn=None,
default_instance_profile_arn=None, default_os=None,
@@ -1958,6 +2361,12 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Updates a specified stack.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
+
:type stack_id: string
:param stack_id: The stack ID.
@@ -1995,20 +2404,20 @@ class OpsWorksConnection(AWSQueryConnection):
:param hostname_theme: The stack's new host name theme, with spaces are
replaced by underscores. The theme is used to generate host names
for the stack's instances. By default, `HostnameTheme` is set to
- Layer_Dependent, which creates host names by appending integers to
- the layer's short name. The other themes are:
-
- + Baked_Goods
- + Clouds
- + European_Cities
- + Fruits
- + Greek_Deities
- + Legendary_Creatures_from_Japan
- + Planets_and_Moons
- + Roman_Deities
- + Scottish_Islands
- + US_Cities
- + Wild_Cats
+ `Layer_Dependent`, which creates host names by appending integers
+ to the layer's short name. The other themes are:
+
+ + `Baked_Goods`
+ + `Clouds`
+ + `European_Cities`
+ + `Fruits`
+ + `Greek_Deities`
+ + `Legendary_Creatures_from_Japan`
+ + `Planets_and_Moons`
+ + `Roman_Deities`
+ + `Scottish_Islands`
+ + `US_Cities`
+ + `Wild_Cats`
To obtain a generated host name, call `GetHostNameSuggestion`, which
@@ -2096,10 +2505,15 @@ class OpsWorksConnection(AWSQueryConnection):
body=json.dumps(params))
def update_user_profile(self, iam_user_arn, ssh_username=None,
- ssh_public_key=None):
+ ssh_public_key=None, allow_self_management=None):
"""
Updates a specified user profile.
+ **Required Permissions**: To use this action, an IAM user must
+ have an attached policy that explicitly grants permissions.
+ For more information on user permissions, see `Managing User
+ Permissions`_.
+
:type iam_user_arn: string
:param iam_user_arn: The user IAM ARN.
@@ -2109,19 +2523,32 @@ class OpsWorksConnection(AWSQueryConnection):
:type ssh_public_key: string
:param ssh_public_key: The user's new SSH public key.
+ :type allow_self_management: boolean
+ :param allow_self_management: Whether users can specify their own SSH
+ public key through the My Settings page. For more information, see
+ `Managing User Permissions`_.
+
"""
params = {'IamUserArn': iam_user_arn, }
if ssh_username is not None:
params['SshUsername'] = ssh_username
if ssh_public_key is not None:
params['SshPublicKey'] = ssh_public_key
+ if allow_self_management is not None:
+ params['AllowSelfManagement'] = allow_self_management
return self.make_request(action='UpdateUserProfile',
body=json.dumps(params))
def update_volume(self, volume_id, name=None, mount_point=None):
"""
Updates an Amazon EBS volume's name or mount point. For more
- information, see ``_.
+ information, see `Resource Management`_.
+
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
:type volume_id: string
:param volume_id: The volume ID.
diff --git a/boto/pyami/scriptbase.py b/boto/pyami/scriptbase.py
index 90522cad..8e8cb0c0 100644
--- a/boto/pyami/scriptbase.py
+++ b/boto/pyami/scriptbase.py
@@ -4,7 +4,7 @@ from boto.utils import ShellCommand, get_ts
import boto
import boto.utils
-class ScriptBase:
+class ScriptBase(object):
def __init__(self, config_file=None):
self.instance_id = boto.config.get('Instance', 'instance-id', 'default')
@@ -41,4 +41,4 @@ class ScriptBase:
def main(self):
pass
-
+
diff --git a/boto/rds/__init__.py b/boto/rds/__init__.py
index bfb0b221..7b2873c4 100644
--- a/boto/rds/__init__.py
+++ b/boto/rds/__init__.py
@@ -169,7 +169,7 @@ class RDSConnection(AWSQueryConnection):
iops=None,
vpc_security_groups=None,
):
- # API version: 2012-09-17
+ # API version: 2013-09-09
# Parameter notes:
# =================
# id should be db_instance_identifier according to API docs but has been left
@@ -196,20 +196,23 @@ class RDSConnection(AWSQueryConnection):
:param allocated_storage: Initially allocated storage size, in GBs.
Valid values are depending on the engine value.
- * MySQL = 5--1024
- * oracle-se1 = 10--1024
- * oracle-se = 10--1024
- * oracle-ee = 10--1024
+ * MySQL = 5--3072
+ * oracle-se1 = 10--3072
+ * oracle-se = 10--3072
+ * oracle-ee = 10--3072
* sqlserver-ee = 200--1024
* sqlserver-se = 200--1024
* sqlserver-ex = 30--1024
* sqlserver-web = 30--1024
+ * postgres = 5--3072
:type instance_class: str
:param instance_class: The compute and memory capacity of
the DBInstance. Valid values are:
+ * db.t1.micro
* db.m1.small
+ * db.m1.medium
* db.m1.large
* db.m1.xlarge
* db.m2.xlarge
@@ -227,6 +230,7 @@ class RDSConnection(AWSQueryConnection):
* sqlserver-se
* sqlserver-ex
* sqlserver-web
+ * postgres
:type master_username: str
:param master_username: Name of master user for the DBInstance.
@@ -263,7 +267,10 @@ class RDSConnection(AWSQueryConnection):
* Oracle defaults to 1521
- * SQL Server defaults to 1433 and _cannot_ be 1434 or 3389
+ * SQL Server defaults to 1433 and _cannot_ be 1434, 3389,
+ 47001, and 49152 through 49156.
+
+ * PostgreSQL defaults to 5432
:type db_name: str
:param db_name: * MySQL:
@@ -280,6 +287,15 @@ class RDSConnection(AWSQueryConnection):
* SQL Server:
Not applicable and must be None.
+ * PostgreSQL:
+ Name of a database to create when the DBInstance
+ is created. Default is to create no databases.
+
+ Must contain 1--63 alphanumeric characters. Must
+ begin with a letter or an underscore. Subsequent
+ characters can be letters, underscores, or digits (0-9)
+ and cannot be a reserved PostgreSQL word.
+
:type param_group: str or ParameterGroup object
:param param_group: Name of DBParameterGroup or ParameterGroup instance
to associate with this DBInstance. If no groups are
@@ -326,6 +342,8 @@ class RDSConnection(AWSQueryConnection):
* SQL Server format example: 10.50.2789.0.v1
+ * PostgreSQL format example: 9.3
+
:type auto_minor_version_upgrade: bool
:param auto_minor_version_upgrade: Indicates that minor engine
upgrades will be applied
diff --git a/boto/s3/acl.py b/boto/s3/acl.py
index a7bca8c9..c54ddc62 100644
--- a/boto/s3/acl.py
+++ b/boto/s3/acl.py
@@ -14,7 +14,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -28,7 +28,7 @@ CannedACLStrings = ['private', 'public-read',
'log-delivery-write']
-class Policy:
+class Policy(object):
def __init__(self, parent=None):
self.parent = parent
@@ -74,7 +74,7 @@ class Policy:
s += '</AccessControlPolicy>'
return s
-class ACL:
+class ACL(object):
def __init__(self, policy=None):
self.policy = policy
@@ -111,8 +111,8 @@ class ACL:
s += grant.to_xml()
s += '</AccessControlList>'
return s
-
-class Grant:
+
+class Grant(object):
NameSpace = 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
@@ -160,5 +160,5 @@ class Grant:
s += '<Permission>%s</Permission>' % self.permission
s += '</Grant>'
return s
-
-
+
+
diff --git a/boto/s3/bucket.py b/boto/s3/bucket.py
index 756915ca..2c9b99ba 100644
--- a/boto/s3/bucket.py
+++ b/boto/s3/bucket.py
@@ -54,7 +54,7 @@ from collections import defaultdict
# as per http://goo.gl/BDuud (02/19/2011)
-class S3WebsiteEndpointTranslate:
+class S3WebsiteEndpointTranslate(object):
trans_region = defaultdict(lambda: 's3-website-us-east-1')
trans_region['eu-west-1'] = 's3-website-eu-west-1'
@@ -211,7 +211,8 @@ class Bucket(object):
raise self.connection.provider.storage_response_error(
response.status, response.reason, '')
- def list(self, prefix='', delimiter='', marker='', headers=None):
+ def list(self, prefix='', delimiter='', marker='', headers=None,
+ encoding_type=None):
"""
List key objects within a bucket. This returns an instance of an
BucketListResultSet that automatically handles all of the result
@@ -243,13 +244,26 @@ class Bucket(object):
:type marker: string
:param marker: The "marker" of where you are in the result set
+ :param encoding_type: Requests Amazon S3 to encode the response and
+ specifies the encoding method to use.
+
+ An object key can contain any Unicode character; however, XML 1.0
+ parser cannot parse some characters, such as characters with an
+ ASCII value from 0 to 10. For characters that are not supported in
+ XML 1.0, you can add this parameter to request that Amazon S3
+ encode the keys in the response.
+
+ Valid options: ``url``
+ :type encoding_type: string
+
:rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet`
:return: an instance of a BucketListResultSet that handles paging, etc
"""
- return BucketListResultSet(self, prefix, delimiter, marker, headers)
+ return BucketListResultSet(self, prefix, delimiter, marker, headers,
+ encoding_type=encoding_type)
def list_versions(self, prefix='', delimiter='', key_marker='',
- version_id_marker='', headers=None):
+ version_id_marker='', headers=None, encoding_type=None):
"""
List version objects within a bucket. This returns an
instance of an VersionedBucketListResultSet that automatically
@@ -273,34 +287,63 @@ class Bucket(object):
for more details.
- :type marker: string
- :param marker: The "marker" of where you are in the result set
+ :type key_marker: string
+ :param key_marker: The "marker" of where you are in the result set
+
+ :param encoding_type: Requests Amazon S3 to encode the response and
+ specifies the encoding method to use.
+
+ An object key can contain any Unicode character; however, XML 1.0
+ parser cannot parse some characters, such as characters with an
+ ASCII value from 0 to 10. For characters that are not supported in
+ XML 1.0, you can add this parameter to request that Amazon S3
+ encode the keys in the response.
+
+ Valid options: ``url``
+ :type encoding_type: string
:rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet`
:return: an instance of a BucketListResultSet that handles paging, etc
"""
return VersionedBucketListResultSet(self, prefix, delimiter,
key_marker, version_id_marker,
- headers)
+ headers,
+ encoding_type=encoding_type)
def list_multipart_uploads(self, key_marker='',
upload_id_marker='',
- headers=None):
+ headers=None, encoding_type=None):
"""
List multipart upload objects within a bucket. This returns an
instance of an MultiPartUploadListResultSet that automatically
handles all of the result paging, etc. from S3. You just need
to keep iterating until there are no more results.
- :type marker: string
- :param marker: The "marker" of where you are in the result set
+ :type key_marker: string
+ :param key_marker: The "marker" of where you are in the result set
+
+ :type upload_id_marker: string
+ :param upload_id_marker: The upload identifier
+
+ :param encoding_type: Requests Amazon S3 to encode the response and
+ specifies the encoding method to use.
+
+ An object key can contain any Unicode character; however, XML 1.0
+ parser cannot parse some characters, such as characters with an
+ ASCII value from 0 to 10. For characters that are not supported in
+ XML 1.0, you can add this parameter to request that Amazon S3
+ encode the keys in the response.
+
+ Valid options: ``url``
+ :type encoding_type: string
:rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet`
:return: an instance of a BucketListResultSet that handles paging, etc
"""
return MultiPartUploadListResultSet(self, key_marker,
upload_id_marker,
- headers)
+ headers,
+ encoding_type=encoding_type)
def _get_all_query_args(self, params, initial_query_string=''):
pairs = []
@@ -381,12 +424,25 @@ class Bucket(object):
element in the CommonPrefixes collection. These rolled-up
keys are not returned elsewhere in the response.
+ :param encoding_type: Requests Amazon S3 to encode the response and
+ specifies the encoding method to use.
+
+ An object key can contain any Unicode character; however, XML 1.0
+ parser cannot parse some characters, such as characters with an
+ ASCII value from 0 to 10. For characters that are not supported in
+ XML 1.0, you can add this parameter to request that Amazon S3
+ encode the keys in the response.
+
+ Valid options: ``url``
+ :type encoding_type: string
+
:rtype: ResultSet
:return: The result from S3 listing the keys requested
"""
self.validate_kwarg_names(params, ['maxkeys', 'max_keys', 'prefix',
- 'marker', 'delimiter'])
+ 'marker', 'delimiter',
+ 'encoding_type'])
return self._get_all([('Contents', self.key_class),
('CommonPrefixes', Prefix)],
'', headers, **params)
@@ -421,6 +477,18 @@ class Bucket(object):
element in the CommonPrefixes collection. These rolled-up
keys are not returned elsewhere in the response.
+ :param encoding_type: Requests Amazon S3 to encode the response and
+ specifies the encoding method to use.
+
+ An object key can contain any Unicode character; however, XML 1.0
+ parser cannot parse some characters, such as characters with an
+ ASCII value from 0 to 10. For characters that are not supported in
+ XML 1.0, you can add this parameter to request that Amazon S3
+ encode the keys in the response.
+
+ Valid options: ``url``
+ :type encoding_type: string
+
:rtype: ResultSet
:return: The result from S3 listing the keys requested
"""
@@ -440,7 +508,7 @@ class Bucket(object):
"""
self.validate_kwarg_names(
params, ['maxkeys', 'max_keys', 'prefix', 'key_marker',
- 'version_id_marker', 'delimiter'])
+ 'version_id_marker', 'delimiter', 'encoding_type'])
def get_all_multipart_uploads(self, headers=None, **params):
"""
@@ -476,12 +544,24 @@ class Bucket(object):
list only if they have an upload ID lexicographically
greater than the specified upload_id_marker.
+ :param encoding_type: Requests Amazon S3 to encode the response and
+ specifies the encoding method to use.
+
+ An object key can contain any Unicode character; however, XML 1.0
+ parser cannot parse some characters, such as characters with an
+ ASCII value from 0 to 10. For characters that are not supported in
+ XML 1.0, you can add this parameter to request that Amazon S3
+ encode the keys in the response.
+
+ Valid options: ``url``
+ :type encoding_type: string
+
:rtype: ResultSet
:return: The result from S3 listing the uploads requested
"""
self.validate_kwarg_names(params, ['max_uploads', 'key_marker',
- 'upload_id_marker'])
+ 'upload_id_marker', 'encoding_type'])
return self._get_all([('Upload', MultiPartUpload),
('CommonPrefixes', Prefix)],
'uploads', headers, **params)
@@ -1549,6 +1629,15 @@ class Bucket(object):
"""
Start a multipart upload operation.
+ .. note::
+
+ After you initiate a multipart upload and upload one or more
+ parts, you must either complete or abort multipart upload in order
+ to stop getting charged for storage of the uploaded parts. Only
+ after you either complete or abort multipart upload, Amazon S3
+ frees up the parts storage and stops charging you for the parts
+ storage.
+
:type key_name: string
:param key_name: The name of the key that will ultimately
result from this multipart upload operation. This will be
@@ -1649,6 +1738,11 @@ class Bucket(object):
response.status, response.reason, body)
def cancel_multipart_upload(self, key_name, upload_id, headers=None):
+ """
+ To verify that all parts have been removed, so you don't get charged
+ for the part storage, you should call the List Parts operation and
+ ensure the parts list is empty.
+ """
query_args = 'uploadId=%s' % upload_id
response = self.connection.make_request('DELETE', self.name, key_name,
query_args=query_args,
diff --git a/boto/s3/bucketlistresultset.py b/boto/s3/bucketlistresultset.py
index e11eb493..f0bc0602 100644
--- a/boto/s3/bucketlistresultset.py
+++ b/boto/s3/bucketlistresultset.py
@@ -14,12 +14,13 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
-def bucket_lister(bucket, prefix='', delimiter='', marker='', headers=None):
+def bucket_lister(bucket, prefix='', delimiter='', marker='', headers=None,
+ encoding_type=None):
"""
A generator function for listing keys in a bucket.
"""
@@ -27,14 +28,15 @@ def bucket_lister(bucket, prefix='', delimiter='', marker='', headers=None):
k = None
while more_results:
rs = bucket.get_all_keys(prefix=prefix, marker=marker,
- delimiter=delimiter, headers=headers)
+ delimiter=delimiter, headers=headers,
+ encoding_type=encoding_type)
for k in rs:
yield k
if k:
marker = rs.next_marker or k.name
more_results= rs.is_truncated
-
-class BucketListResultSet:
+
+class BucketListResultSet(object):
"""
A resultset for listing keys within a bucket. Uses the bucket_lister
generator function and implements the iterator interface. This
@@ -43,20 +45,24 @@ class BucketListResultSet:
keys in a reasonably efficient manner.
"""
- def __init__(self, bucket=None, prefix='', delimiter='', marker='', headers=None):
+ def __init__(self, bucket=None, prefix='', delimiter='', marker='',
+ headers=None, encoding_type=None):
self.bucket = bucket
self.prefix = prefix
self.delimiter = delimiter
self.marker = marker
self.headers = headers
+ self.encoding_type = encoding_type
def __iter__(self):
return bucket_lister(self.bucket, prefix=self.prefix,
delimiter=self.delimiter, marker=self.marker,
- headers=self.headers)
+ headers=self.headers,
+ encoding_type=self.encoding_type)
def versioned_bucket_lister(bucket, prefix='', delimiter='',
- key_marker='', version_id_marker='', headers=None):
+ key_marker='', version_id_marker='', headers=None,
+ encoding_type=None):
"""
A generator function for listing versions in a bucket.
"""
@@ -66,14 +72,14 @@ def versioned_bucket_lister(bucket, prefix='', delimiter='',
rs = bucket.get_all_versions(prefix=prefix, key_marker=key_marker,
version_id_marker=version_id_marker,
delimiter=delimiter, headers=headers,
- max_keys=999)
+ max_keys=999, encoding_type=encoding_type)
for k in rs:
yield k
key_marker = rs.next_key_marker
version_id_marker = rs.next_version_id_marker
more_results= rs.is_truncated
-
-class VersionedBucketListResultSet:
+
+class VersionedBucketListResultSet(object):
"""
A resultset for listing versions within a bucket. Uses the bucket_lister
generator function and implements the iterator interface. This
@@ -83,24 +89,26 @@ class VersionedBucketListResultSet:
"""
def __init__(self, bucket=None, prefix='', delimiter='', key_marker='',
- version_id_marker='', headers=None):
+ version_id_marker='', headers=None, encoding_type=None):
self.bucket = bucket
self.prefix = prefix
self.delimiter = delimiter
self.key_marker = key_marker
self.version_id_marker = version_id_marker
self.headers = headers
+ self.encoding_type = encoding_type
def __iter__(self):
return versioned_bucket_lister(self.bucket, prefix=self.prefix,
delimiter=self.delimiter,
key_marker=self.key_marker,
version_id_marker=self.version_id_marker,
- headers=self.headers)
+ headers=self.headers,
+ encoding_type=self.encoding_type)
def multipart_upload_lister(bucket, key_marker='',
upload_id_marker='',
- headers=None):
+ headers=None, encoding_type=None):
"""
A generator function for listing multipart uploads in a bucket.
"""
@@ -109,14 +117,15 @@ def multipart_upload_lister(bucket, key_marker='',
while more_results:
rs = bucket.get_all_multipart_uploads(key_marker=key_marker,
upload_id_marker=upload_id_marker,
- headers=headers)
+ headers=headers,
+ encoding_type=encoding_type)
for k in rs:
yield k
key_marker = rs.next_key_marker
upload_id_marker = rs.next_upload_id_marker
more_results= rs.is_truncated
-
-class MultiPartUploadListResultSet:
+
+class MultiPartUploadListResultSet(object):
"""
A resultset for listing multipart uploads within a bucket.
Uses the multipart_upload_lister generator function and
@@ -126,14 +135,16 @@ class MultiPartUploadListResultSet:
keys in a reasonably efficient manner.
"""
def __init__(self, bucket=None, key_marker='',
- upload_id_marker='', headers=None):
+ upload_id_marker='', headers=None, encoding_type=None):
self.bucket = bucket
self.key_marker = key_marker
self.upload_id_marker = upload_id_marker
self.headers = headers
+ self.encoding_type = encoding_type
def __iter__(self):
return multipart_upload_lister(self.bucket,
key_marker=self.key_marker,
upload_id_marker=self.upload_id_marker,
- headers=self.headers)
+ headers=self.headers,
+ encoding_type=self.encoding_type)
diff --git a/boto/s3/bucketlogging.py b/boto/s3/bucketlogging.py
index 9e3c050d..ab368392 100644
--- a/boto/s3/bucketlogging.py
+++ b/boto/s3/bucketlogging.py
@@ -14,7 +14,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -22,7 +22,7 @@
import xml.sax.saxutils
from acl import Grant
-class BucketLogging:
+class BucketLogging(object):
def __init__(self, target=None, prefix=None, grants=None):
self.target = target
@@ -68,7 +68,7 @@ class BucketLogging:
# caller is responsible to encode to utf-8
s = u'<?xml version="1.0" encoding="UTF-8"?>'
s += u'<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01">'
- if self.target is not None:
+ if self.target is not None:
s += u'<LoggingEnabled>'
s += u'<TargetBucket>%s</TargetBucket>' % self.target
prefix = self.prefix or ''
diff --git a/boto/s3/connection.py b/boto/s3/connection.py
index 583fa168..d670cfc6 100644
--- a/boto/s3/connection.py
+++ b/boto/s3/connection.py
@@ -27,6 +27,7 @@ import urllib
import base64
import time
+from boto.auth import detect_potential_s3sigv4
import boto.utils
from boto.connection import AWSAuthConnection
from boto import handler
@@ -134,7 +135,7 @@ class ProtocolIndependentOrdinaryCallingFormat(OrdinaryCallingFormat):
return url_base
-class Location:
+class Location(object):
DEFAULT = '' # US Classic Region
EU = 'EU'
@@ -173,6 +174,7 @@ class S3Connection(AWSAuthConnection):
suppress_consec_slashes=suppress_consec_slashes,
validate_certs=validate_certs)
+ @detect_potential_s3sigv4
def _required_auth_capability(self):
if self.anon:
return ['anon']
diff --git a/boto/s3/deletemarker.py b/boto/s3/deletemarker.py
index 5db4343a..d8e7cc8b 100644
--- a/boto/s3/deletemarker.py
+++ b/boto/s3/deletemarker.py
@@ -14,14 +14,14 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.s3.user import User
-class DeleteMarker:
+class DeleteMarker(object):
def __init__(self, bucket=None, name=None):
self.bucket = bucket
self.name = name
diff --git a/boto/s3/key.py b/boto/s3/key.py
index 493e2e88..0849584d 100644
--- a/boto/s3/key.py
+++ b/boto/s3/key.py
@@ -23,6 +23,7 @@
from __future__ import with_statement
import errno
+import hashlib
import mimetypes
import os
import re
@@ -40,7 +41,7 @@ from boto.provider import Provider
from boto.s3.keyfile import KeyFile
from boto.s3.user import User
from boto import UserAgent
-from boto.utils import compute_md5
+from boto.utils import compute_md5, compute_hash
from boto.utils import find_matching_headers
from boto.utils import merge_headers_by_name
try:
@@ -894,6 +895,12 @@ class Key(object):
# headers['Trailer'] = "Content-MD5"
else:
headers['Content-Length'] = str(self.size)
+ # This is terrible. We need a SHA256 of the body for SigV4, but to do
+ # the chunked ``sender`` behavior above, the ``fp`` isn't available to
+ # the auth mechanism (because closures). Detect if it's SigV4 & embellish
+ # while we can before the auth calculations occur.
+ if 'hmac-v4-s3' in self.bucket.connection._required_auth_capability():
+ headers['_sha256'] = compute_hash(fp, hash_algorithm=hashlib.sha256)[0]
headers['Expect'] = '100-Continue'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
resp = self.bucket.connection.make_request(
@@ -1305,7 +1312,7 @@ class Key(object):
reduced_redundancy,
encrypt_key=encrypt_key)
- def set_contents_from_string(self, s, headers=None, replace=True,
+ def set_contents_from_string(self, string_data, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False):
@@ -1362,9 +1369,9 @@ class Key(object):
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
"""
- if isinstance(s, unicode):
- s = s.encode("utf-8")
- fp = StringIO.StringIO(s)
+ if isinstance(string_data, unicode):
+ string_data = string_data.encode("utf-8")
+ fp = StringIO.StringIO(string_data)
r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, reduced_redundancy,
encrypt_key=encrypt_key)
diff --git a/boto/s3/multipart.py b/boto/s3/multipart.py
index fae3389e..ba89d735 100644
--- a/boto/s3/multipart.py
+++ b/boto/s3/multipart.py
@@ -199,7 +199,8 @@ class MultiPartUpload(object):
else:
setattr(self, name, value)
- def get_all_parts(self, max_parts=None, part_number_marker=None):
+ def get_all_parts(self, max_parts=None, part_number_marker=None,
+ encoding_type=None):
"""
Return the uploaded parts of this MultiPart Upload. This is
a lower-level method that requires you to manually page through
@@ -213,6 +214,8 @@ class MultiPartUpload(object):
query_args += '&max-parts=%d' % max_parts
if part_number_marker:
query_args += '&part-number-marker=%s' % part_number_marker
+ if encoding_type:
+ query_args += '&encoding-type=%s' % encoding_type
response = self.bucket.connection.make_request('GET', self.bucket.name,
self.key_name,
query_args=query_args)
@@ -227,6 +230,14 @@ class MultiPartUpload(object):
"""
Upload another part of this MultiPart Upload.
+ .. note::
+
+ After you initiate multipart upload and upload one or more parts,
+ you must either complete or abort multipart upload in order to stop
+ getting charged for storage of the uploaded parts. Only after you
+ either complete or abort multipart upload, Amazon S3 frees up the
+ parts storage and stops charging you for the parts storage.
+
:type fp: file
:param fp: The file object you want to upload.
diff --git a/boto/s3/user.py b/boto/s3/user.py
index f45f0381..f2cbbb7f 100644
--- a/boto/s3/user.py
+++ b/boto/s3/user.py
@@ -14,12 +14,12 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
-class User:
+class User(object):
def __init__(self, parent=None, id='', display_name=''):
if parent:
parent.owner = self
@@ -46,4 +46,4 @@ class User:
s += '<ID>%s</ID>' % self.id
s += '<DisplayName>%s</DisplayName>' % self.display_name
s += '</%s>' % element_name
- return s
+ return s
diff --git a/boto/sdb/db/manager/xmlmanager.py b/boto/sdb/db/manager/xmlmanager.py
index 04210db8..c4cc5d3e 100644
--- a/boto/sdb/db/manager/xmlmanager.py
+++ b/boto/sdb/db/manager/xmlmanager.py
@@ -14,7 +14,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -27,7 +27,7 @@ from xml.dom.minidom import getDOMImplementation, parse, parseString, Node
ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
-class XMLConverter:
+class XMLConverter(object):
"""
Responsible for converting base Python types to format compatible with underlying
database. For SimpleDB, that means everything needs to be converted to a string
@@ -179,7 +179,7 @@ class XMLConverter:
class XMLManager(object):
-
+
def __init__(self, cls, db_name, db_user, db_passwd,
db_host, db_port, db_table, ddl_dir, enable_ssl):
self.cls = cls
@@ -260,7 +260,7 @@ class XMLManager(object):
def get_doc(self):
return self.doc
-
+
def encode_value(self, prop, value):
return self.converter.encode_prop(prop, value)
@@ -324,8 +324,8 @@ class XMLManager(object):
if value != None:
props[prop.name] = value
return (cls, props, id)
-
-
+
+
def get_object(self, cls, id):
if not self.connection:
self._connect()
@@ -352,7 +352,7 @@ class XMLManager(object):
query = str(self._build_query(cls, filters, limit, order_by))
if query:
url = "/%s?%s" % (self.db_name, urlencode({"query": query}))
- else:
+ else:
url = "/%s" % self.db_name
resp = self._make_request('GET', url)
if resp.status == 200:
@@ -471,7 +471,7 @@ class XMLManager(object):
else:
doc = parse(fp)
return self.get_object_from_doc(cls, id, doc)
-
+
def unmarshal_props(self, fp, cls=None, id=None):
"""
Same as unmarshalling an object, except it returns
@@ -499,7 +499,7 @@ class XMLManager(object):
return a[name]
else:
return None
-
+
def get_raw_item(self, obj):
return self.domain.get_item(obj.id)
diff --git a/boto/sdb/domain.py b/boto/sdb/domain.py
index d4faf046..137709dc 100644
--- a/boto/sdb/domain.py
+++ b/boto/sdb/domain.py
@@ -14,7 +14,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -24,7 +24,7 @@ Represents an SDB Domain
"""
from boto.sdb.queryresultset import SelectResultSet
-class Domain:
+class Domain(object):
def __init__(self, connection=None, name=None):
self.connection = connection
@@ -64,19 +64,19 @@ class Domain:
:type expected_value: list
:param expected_value: If supplied, this is a list or tuple consisting
- of a single attribute name and expected value. The list can be
+ of a single attribute name and expected value. The list can be
of the form:
* ['name', 'value']
- In which case the call will first verify that the attribute
+ In which case the call will first verify that the attribute
"name" of this item has a value of "value". If it does, the delete
- will proceed, otherwise a ConditionalCheckFailed error will be
+ will proceed, otherwise a ConditionalCheckFailed error will be
returned. The list can also be of the form:
-
+
* ['name', True|False]
-
- which will simply check for the existence (True) or non-existence
+
+ which will simply check for the existence (True) or non-existence
(False) of the attribute.
:type replace: bool
@@ -144,22 +144,22 @@ class Domain:
a dict or Item containing the attribute names and keys and list
of values to delete as the value. If no value is supplied,
all attribute name/values for the item will be deleted.
-
+
:type expected_value: list
:param expected_value: If supplied, this is a list or tuple consisting
- of a single attribute name and expected value. The list can be of
+ of a single attribute name and expected value. The list can be of
the form:
* ['name', 'value']
In which case the call will first verify that the attribute "name"
of this item has a value of "value". If it does, the delete
- will proceed, otherwise a ConditionalCheckFailed error will be
+ will proceed, otherwise a ConditionalCheckFailed error will be
returned. The list can also be of the form:
* ['name', True|False]
- which will simply check for the existence (True) or
+ which will simply check for the existence (True) or
non-existence (False) of the attribute.
:rtype: bool
@@ -171,7 +171,7 @@ class Domain:
def batch_delete_attributes(self, items):
"""
Delete multiple items in this domain.
-
+
:type items: dict or dict-like object
:param items: A dictionary-like object. The keys of the dictionary are
the item names and the values are either:
@@ -182,7 +182,7 @@ class Domain:
will only be deleted if they match the name/value
pairs passed in.
* None which means that all attributes associated
- with the item should be deleted.
+ with the item should be deleted.
:rtype: bool
:return: True if successful
@@ -209,12 +209,12 @@ class Domain:
def get_item(self, item_name, consistent_read=False):
"""
Retrieves an item from the domain, along with all of its attributes.
-
+
:param string item_name: The name of the item to retrieve.
:rtype: :class:`boto.sdb.item.Item` or ``None``
- :keyword bool consistent_read: When set to true, ensures that the most
+ :keyword bool consistent_read: When set to true, ensures that the most
recent data is returned.
- :return: The requested item, or ``None`` if there was no match found
+ :return: The requested item, or ``None`` if there was no match found
"""
item = self.get_attributes(item_name, consistent_read=consistent_read)
if item:
@@ -279,7 +279,7 @@ class Domain:
return self.connection.delete_domain(self)
-class DomainMetaData:
+class DomainMetaData(object):
def __init__(self, domain=None):
self.domain = domain
diff --git a/boto/sdb/queryresultset.py b/boto/sdb/queryresultset.py
index 10bafd1c..f943949f 100644
--- a/boto/sdb/queryresultset.py
+++ b/boto/sdb/queryresultset.py
@@ -14,7 +14,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -34,8 +34,8 @@ def query_lister(domain, query='', max_items=None, attr_names=None):
num_results += 1
next_token = rs.next_token
more_results = next_token != None
-
-class QueryResultSet:
+
+class QueryResultSet(object):
def __init__(self, domain=None, query='', max_items=None, attr_names=None):
self.max_items = max_items
@@ -60,7 +60,7 @@ def select_lister(domain, query='', max_items=None):
num_results += 1
next_token = rs.next_token
more_results = next_token != None
-
+
class SelectResultSet(object):
def __init__(self, domain=None, query='', max_items=None,
diff --git a/boto/services/result.py b/boto/services/result.py
index 48549764..5f6d800d 100644
--- a/boto/services/result.py
+++ b/boto/services/result.py
@@ -15,7 +15,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -25,8 +25,8 @@ from datetime import datetime, timedelta
from boto.utils import parse_ts
import boto
-class ResultProcessor:
-
+class ResultProcessor(object):
+
LogFileName = 'log.csv'
def __init__(self, batch_name, sd, mimetype_files=None):
@@ -133,4 +133,4 @@ class ResultProcessor:
print 'Elapsed Time: %d' % self.elapsed_time.seconds
tput = 1.0 / ((self.elapsed_time.seconds/60.0) / self.num_files)
print 'Throughput: %f transactions / minute' % tput
-
+
diff --git a/boto/services/submit.py b/boto/services/submit.py
index 89c439c5..2bc72241 100644
--- a/boto/services/submit.py
+++ b/boto/services/submit.py
@@ -14,7 +14,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -23,7 +23,7 @@ import time
import os
-class Submitter:
+class Submitter(object):
def __init__(self, sd):
self.sd = sd
diff --git a/boto/sqs/bigmessage.py b/boto/sqs/bigmessage.py
new file mode 100644
index 00000000..be04db1a
--- /dev/null
+++ b/boto/sqs/bigmessage.py
@@ -0,0 +1,119 @@
+# Copyright (c) 2013 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import uuid
+
+import boto
+from boto.sqs.message import RawMessage
+from boto.exception import SQSDecodeError
+
+
+class BigMessage(RawMessage):
+ """
+ The BigMessage class provides large payloads (up to 5GB)
+ by storing the payload itself in S3 and then placing a reference
+ to the S3 object in the actual SQS message payload.
+
+ To create a BigMessage, you should create a BigMessage object
+ and pass in a file-like object as the ``body`` param and also
+ pass in the an S3 URL specifying the bucket in which to store
+ the message body::
+
+ import boto.sqs
+ from boto.sqs.bigmessage import BigMessage
+
+ sqs = boto.sqs.connect_to_region('us-west-2')
+ queue = sqs.get_queue('myqueue')
+ fp = open('/path/to/bigmessage/data')
+ msg = BigMessage(queue, fp, 's3://mybucket')
+ queue.write(msg)
+
+ Passing in a fully-qualified S3 URL (e.g. s3://mybucket/foo)
+ is interpreted to mean that the body of the message is already
+ stored in S3 and the that S3 URL is then used directly with no
+ content uploaded by BigMessage.
+ """
+
+ def __init__(self, queue=None, body=None, s3_url=None):
+ self.s3_url = s3_url
+ RawMessage.__init__(self, queue, body)
+
+ def _get_bucket_key(self, s3_url):
+ bucket_name = key_name = None
+ if s3_url:
+ if s3_url.startswith('s3://'):
+ # We need to split out the bucket from the key (if
+ # supplied). We also have to be aware that someone
+ # may provide a trailing '/' character as in:
+ # s3://foo/ and we want to handle that.
+ s3_components = s3_url[5:].split('/', 1)
+ bucket_name = s3_components[0]
+ if len(s3_components) > 1:
+ if s3_components[1]:
+ key_name = s3_components[1]
+ else:
+ msg = 's3_url parameter should start with s3://'
+ raise SQSDecodeError(msg, self)
+ return bucket_name, key_name
+
+ def encode(self, value):
+ """
+ :type value: file-like object
+ :param value: A file-like object containing the content
+ of the message. The actual content will be stored
+ in S3 and a link to the S3 object will be stored in
+ the message body.
+ """
+ bucket_name, key_name = self._get_bucket_key(self.s3_url)
+ if bucket_name and key_name:
+ return self.s3_url
+ key_name = uuid.uuid4()
+ s3_conn = boto.connect_s3()
+ s3_bucket = s3_conn.get_bucket(bucket_name)
+ key = s3_bucket.new_key(key_name)
+ key.set_contents_from_file(value)
+ self.s3_url = 's3://%s/%s' % (bucket_name, key_name)
+ return self.s3_url
+
+ def _get_s3_object(self, s3_url):
+ bucket_name, key_name = self._get_bucket_key(s3_url)
+ if bucket_name and key_name:
+ s3_conn = boto.connect_s3()
+ s3_bucket = s3_conn.get_bucket(bucket_name)
+ key = s3_bucket.get_key(key_name)
+ return key
+ else:
+ msg = 'Unable to decode S3 URL: %s' % s3_url
+ raise SQSDecodeError(msg, self)
+
+ def decode(self, value):
+ self.s3_url = value
+ key = self._get_s3_object(value)
+ return key.get_contents_as_string()
+
+ def delete(self):
+ # Delete the object in S3 first, then delete the SQS message
+ if self.s3_url:
+ key = self._get_s3_object(self.s3_url)
+ key.delete()
+ RawMessage.delete(self)
+
diff --git a/boto/sqs/message.py b/boto/sqs/message.py
index f7aa6980..0afc15a9 100644
--- a/boto/sqs/message.py
+++ b/boto/sqs/message.py
@@ -14,7 +14,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -28,7 +28,7 @@ Message are here:
http://docs.amazonwebservices.com/AWSSimpleQueueService/2008-01-01/SQSDeveloperGuide/Query_QuerySendMessage.html
So, at it's simplest level a Message just needs to allow a developer to store bytes in it and get the bytes
-back out. However, to allow messages to have richer semantics, the Message class must support the
+back out. However, to allow messages to have richer semantics, the Message class must support the
following interfaces:
The constructor for the Message class must accept a keyword parameter "queue" which is an instance of a
@@ -69,14 +69,14 @@ from boto.sqs.attributes import Attributes
from boto.exception import SQSDecodeError
import boto
-class RawMessage:
+class RawMessage(object):
"""
Base class for SQS messages. RawMessage does not encode the message
in any way. Whatever you store in the body of the message is what
will be written to SQS and whatever is returned from SQS is stored
directly into the body of the message.
"""
-
+
def __init__(self, queue=None, body=''):
self.queue = queue
self.set_body(body)
@@ -115,14 +115,14 @@ class RawMessage:
def decode(self, value):
"""Transform seralized byte array into any object."""
return value
-
+
def set_body(self, body):
"""Override the current body for this object, using decoded format."""
self._body = body
def get_body(self):
return self._body
-
+
def get_body_encoded(self):
"""
This method is really a semi-private method used by the Queue.write
@@ -140,7 +140,7 @@ class RawMessage:
self.queue.connection.change_message_visibility(self.queue,
self.receipt_handle,
visibility_timeout)
-
+
class Message(RawMessage):
"""
The default Message class used for SQS queues. This class automatically
@@ -152,7 +152,7 @@ class Message(RawMessage):
for details on why this is a good idea. The encode/decode is meant to
be transparent to the end-user.
"""
-
+
def encode(self, value):
return base64.b64encode(value)
@@ -256,4 +256,4 @@ class EncodedMHMessage(MHMessage):
def encode(self, value):
value = MHMessage.encode(self, value)
return base64.b64encode(value)
-
+
diff --git a/boto/sqs/queue.py b/boto/sqs/queue.py
index 603faaae..054b839e 100644
--- a/boto/sqs/queue.py
+++ b/boto/sqs/queue.py
@@ -27,7 +27,7 @@ import urlparse
from boto.sqs.message import Message
-class Queue:
+class Queue(object):
def __init__(self, connection=None, url=None, message_class=Message):
self.connection = connection
@@ -238,7 +238,7 @@ class Queue:
"""
return self.connection.send_message_batch(self, messages)
- def new_message(self, body=''):
+ def new_message(self, body='', **kwargs):
"""
Create new message of appropriate class.
@@ -248,7 +248,7 @@ class Queue:
:rtype: :class:`boto.sqs.message.Message`
:return: A new Message object
"""
- m = self.message_class(self, body)
+ m = self.message_class(self, body, **kwargs)
m.queue = self
return m
diff --git a/boto/support/layer1.py b/boto/support/layer1.py
index 5e73db26..c5180636 100644
--- a/boto/support/layer1.py
+++ b/boto/support/layer1.py
@@ -20,7 +20,11 @@
# IN THE SOFTWARE.
#
-import json
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
@@ -33,10 +37,9 @@ class SupportConnection(AWSQueryConnection):
AWS Support
The AWS Support API reference is intended for programmers who need
detailed information about the AWS Support actions and data types.
- This service enables you to manage with your AWS Support cases
- programmatically. It is built on the AWS Query API programming
- model and provides HTTP methods that take parameters and return
- results in JSON format.
+ This service enables you to manage your AWS Support cases
+ programmatically. It uses HTTP methods that return results in JSON
+ format.
The AWS Support service also exposes a set of `Trusted Advisor`_
features. You can retrieve a list of checks you can run on your
@@ -55,34 +58,35 @@ class SupportConnection(AWSQueryConnection):
+ **Case Creation, case details, and case resolution**. The
actions `CreateCase`_, `DescribeCases`_, and `ResolveCase`_ enable
you to create AWS Support cases, retrieve them, and resolve them.
- + **Case communication**. The actions
- `DescribeCaseCommunications`_ and `AddCommunicationToCase`_ enable
- you to retrieve and add communication to AWS Support cases.
+ + **Case communication**. The actions `DescribeCommunications`_
+ and `AddCommunicationToCase`_ enable you to retrieve and add
+ communication to AWS Support cases.
The following list describes the actions available from the AWS
Support service for Trusted Advisor:
- + `DescribeTrustedAdviserChecks`_ returns the list of checks that you can run against your AWS
- resources.
+ + `DescribeTrustedAdvisorChecks`_ returns the list of checks that
+ you can run against your AWS resources.
+ Using the CheckId for a specific check returned by
- DescribeTrustedAdviserChecks, you can call
+ DescribeTrustedAdvisorChecks, you can call
`DescribeTrustedAdvisorCheckResult`_ and obtain a new result for the check you specified.
+ Using `DescribeTrustedAdvisorCheckSummaries`_, you can get
summaries for a set of Trusted Advisor checks.
+ `RefreshTrustedAdvisorCheck`_ enables you to request that
Trusted Advisor run the check again.
- + ``_ gets statuses on the checks you are running.
+ + `DescribeTrustedAdvisorCheckRefreshStatuses`_ gets statuses on
+ the checks you are running.
For authentication of requests, the AWS Support uses `Signature
Version 4 Signing Process`_.
- See the AWS Support Developer Guide for information about how to
- use this service to manage create and manage your support cases,
- and how to call Trusted Advisor for results of checks on your
- resources.
+ See the AWS Support `Developer Guide`_ for information about how
+ to use this service to manage create and manage your support
+ cases, and how to call Trusted Advisor for results of checks on
+ your resources.
"""
APIVersion = "2013-04-15"
DefaultRegionName = "us-east-1"
@@ -127,13 +131,18 @@ class SupportConnection(AWSQueryConnection):
Support `Your Support Cases`_ web form.
:type case_id: string
- :param case_id:
+ :param case_id: String that indicates the AWS Support caseID requested
+ or returned in the call. The caseID is an alphanumeric string
+ formatted as shown in this example CaseId:
+ case-12345678910-2013-c4c1d2bf33c5cf47
:type communication_body: string
- :param communication_body:
+ :param communication_body: Represents the body of an email
+ communication added to the support case.
:type cc_email_addresses: list
- :param cc_email_addresses:
+ :param cc_email_addresses: Represents any email addresses contained in
+ the CC line of an email added to the support case.
"""
params = {'communicationBody': communication_body, }
@@ -144,8 +153,8 @@ class SupportConnection(AWSQueryConnection):
return self.make_request(action='AddCommunicationToCase',
body=json.dumps(params))
- def create_case(self, subject, service_code, category_code,
- communication_body, severity_code=None,
+ def create_case(self, subject, communication_body, service_code=None,
+ severity_code=None, category_code=None,
cc_email_addresses=None, language=None, issue_type=None):
"""
Creates a new case in the AWS Support Center. This action is
@@ -179,6 +188,10 @@ class SupportConnection(AWSQueryConnection):
passing the AWS Credentials in the HTTP POST method or in a
method or function call from one of the programming languages
supported by an `AWS SDK`_.
+ #. **IssueType**. Indicates the type of issue for the case.
+ You can specify either "customer-service" or "technical." If
+ you do not indicate a value, this parameter defaults to
+ "technical."
The AWS Support API does not currently support the ability to
@@ -190,38 +203,55 @@ class SupportConnection(AWSQueryConnection):
retrieve existing AWS Support support cases.
:type subject: string
- :param subject:
+ :param subject: Title of the AWS Support case.
:type service_code: string
- :param service_code:
+ :param service_code: Code for the AWS service returned by the call to
+ `DescribeServices`_.
:type severity_code: string
:param severity_code:
+ Code for the severity level returned by the call to
+ `DescribeSeverityLevels`_.
+
+ The availability of severity levels depends on each customer's support
+ subscription. In other words, your subscription may not necessarily
+ require the urgent level of response time.
:type category_code: string
- :param category_code:
+ :param category_code: Specifies the category of problem for the AWS
+ Support case.
:type communication_body: string
- :param communication_body:
+ :param communication_body: Parameter that represents the communication
+ body text when you create an AWS Support case by calling
+ `CreateCase`_.
:type cc_email_addresses: list
- :param cc_email_addresses:
+ :param cc_email_addresses: List of email addresses that AWS Support
+ copies on case correspondence.
:type language: string
- :param language:
+ :param language: Specifies the ISO 639-1 code for the language in which
+ AWS provides support. AWS Support currently supports English and
+ Japanese, for which the codes are en and ja , respectively.
+ Language parameters must be passed explicitly for operations that
+ take them.
:type issue_type: string
- :param issue_type:
+ :param issue_type: Field passed as a parameter in a `CreateCase`_ call.
"""
params = {
'subject': subject,
- 'serviceCode': service_code,
- 'categoryCode': category_code,
'communicationBody': communication_body,
}
+ if service_code is not None:
+ params['serviceCode'] = service_code
if severity_code is not None:
params['severityCode'] = severity_code
+ if category_code is not None:
+ params['categoryCode'] = category_code
if cc_email_addresses is not None:
params['ccEmailAddresses'] = cc_email_addresses
if language is not None:
@@ -247,28 +277,40 @@ class SupportConnection(AWSQueryConnection):
to paginate the returned records represented by CaseDetails .
:type case_id_list: list
- :param case_id_list:
+ :param case_id_list: A list of Strings comprising ID numbers for
+ support cases you want returned. The maximum number of cases is
+ 100.
:type display_id: string
- :param display_id:
+ :param display_id: String that corresponds to the ID value displayed
+ for a case in the AWS Support Center user interface.
:type after_time: string
- :param after_time:
+ :param after_time: Start date for a filtered date search on support
+ case communications.
:type before_time: string
- :param before_time:
+ :param before_time: End date for a filtered date search on support case
+ communications.
:type include_resolved_cases: boolean
- :param include_resolved_cases:
+ :param include_resolved_cases: Boolean that indicates whether or not
+ resolved support cases should be listed in the `DescribeCases`_
+ search.
:type next_token: string
- :param next_token:
+ :param next_token: Defines a resumption point for pagination.
:type max_results: integer
- :param max_results:
+ :param max_results: Integer that sets the maximum number of results to
+ return before paginating.
:type language: string
- :param language:
+ :param language: Specifies the ISO 639-1 code for the language in which
+ AWS provides support. AWS Support currently supports English and
+ Japanese, for which the codes are en and ja , respectively.
+ Language parameters must be passed explicitly for operations that
+ take them.
"""
params = {}
@@ -306,19 +348,25 @@ class SupportConnection(AWSQueryConnection):
specify the resumption of pagination.
:type case_id: string
- :param case_id:
+ :param case_id: String that indicates the AWS Support caseID requested
+ or returned in the call. The caseID is an alphanumeric string
+ formatted as shown in this example CaseId:
+ case-12345678910-2013-c4c1d2bf33c5cf47
:type before_time: string
- :param before_time:
+ :param before_time: End date for a filtered date search on support case
+ communications.
:type after_time: string
- :param after_time:
+ :param after_time: Start date for a filtered date search on support
+ case communications.
:type next_token: string
- :param next_token:
+ :param next_token: Defines a resumption point for pagination.
:type max_results: integer
- :param max_results:
+ :param max_results: Integer that sets the maximum number of results to
+ return before paginating.
"""
params = {'caseId': case_id, }
@@ -351,10 +399,15 @@ class SupportConnection(AWSQueryConnection):
category codes.
:type service_code_list: list
- :param service_code_list:
+ :param service_code_list: List in JSON format of service codes
+ available for AWS services.
:type language: string
- :param language:
+ :param language: Specifies the ISO 639-1 code for the language in which
+ AWS provides support. AWS Support currently supports English and
+ Japanese, for which the codes are en and ja , respectively.
+ Language parameters must be passed explicitly for operations that
+ take them.
"""
params = {}
@@ -373,7 +426,11 @@ class SupportConnection(AWSQueryConnection):
any `CreateCase`_ request.
:type language: string
- :param language:
+ :param language: Specifies the ISO 639-1 code for the language in which
+ AWS provides support. AWS Support currently supports English and
+ Japanese, for which the codes are en and ja , respectively.
+ Language parameters must be passed explicitly for operations that
+ take them.
"""
params = {}
@@ -382,29 +439,15 @@ class SupportConnection(AWSQueryConnection):
return self.make_request(action='DescribeSeverityLevels',
body=json.dumps(params))
- def resolve_case(self, case_id=None):
- """
- Takes a CaseId and returns the initial state of the case along
- with the state of the case after the call to `ResolveCase`_
- completed.
-
- :type case_id: string
- :param case_id:
-
- """
- params = {}
- if case_id is not None:
- params['caseId'] = case_id
- return self.make_request(action='ResolveCase',
- body=json.dumps(params))
-
def describe_trusted_advisor_check_refresh_statuses(self, check_ids):
"""
Returns the status of all refresh requests Trusted Advisor
checks called using `RefreshTrustedAdvisorCheck`_.
:type check_ids: list
- :param check_ids:
+ :param check_ids: List of the CheckId values for the Trusted Advisor
+ checks for which you want to refresh the status. You obtain the
+ CheckId values by calling `DescribeTrustedAdvisorChecks`_.
"""
params = {'checkIds': check_ids, }
@@ -443,7 +486,11 @@ class SupportConnection(AWSQueryConnection):
:param check_id:
:type language: string
- :param language:
+ :param language: Specifies the ISO 639-1 code for the language in which
+ AWS provides support. AWS Support currently supports English and
+ Japanese, for which the codes are en and ja , respectively.
+ Language parameters must be passed explicitly for operations that
+ take them.
"""
params = {'checkId': check_id, }
@@ -464,7 +511,7 @@ class SupportConnection(AWSQueryConnection):
`TrustedAdvisorCheckSummary`_ objects.
:type check_ids: list
- :param check_ids:
+ :param check_ids: Unique identifier for a Trusted Advisor check.
"""
params = {'checkIds': check_ids, }
@@ -480,7 +527,11 @@ class SupportConnection(AWSQueryConnection):
objects.
:type language: string
- :param language:
+ :param language: Specifies the ISO 639-1 code for the language in which
+ AWS provides support. AWS Support currently supports English and
+ Japanese, for which the codes are en and ja , respectively.
+ Language parameters must be passed explicitly for operations that
+ take them.
"""
params = {'language': language, }
@@ -503,6 +554,25 @@ class SupportConnection(AWSQueryConnection):
return self.make_request(action='RefreshTrustedAdvisorCheck',
body=json.dumps(params))
+ def resolve_case(self, case_id=None):
+ """
+ Takes a CaseId and returns the initial state of the case along
+ with the state of the case after the call to `ResolveCase`_
+ completed.
+
+ :type case_id: string
+ :param case_id: String that indicates the AWS Support caseID requested
+ or returned in the call. The caseID is an alphanumeric string
+ formatted as shown in this example CaseId:
+ case-12345678910-2013-c4c1d2bf33c5cf47
+
+ """
+ params = {}
+ if case_id is not None:
+ params['caseId'] = case_id
+ return self.make_request(action='ResolveCase',
+ body=json.dumps(params))
+
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
diff --git a/boto/swf/layer1_decisions.py b/boto/swf/layer1_decisions.py
index 6c273aa1..7649da17 100644
--- a/boto/swf/layer1_decisions.py
+++ b/boto/swf/layer1_decisions.py
@@ -3,7 +3,7 @@ Helper class for creating decision responses.
"""
-class Layer1Decisions:
+class Layer1Decisions(object):
"""
Use this object to build a list of decisions for a decision response.
Each method call will add append a new decision. Retrieve the list
diff --git a/docs/source/index.rst b/docs/source/index.rst
index d61b0a73..3e1904ce 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -24,6 +24,7 @@ Currently Supported Services
* :doc:`Elastic Compute Cloud (EC2) <ec2_tut>` -- (:doc:`API Reference <ref/ec2>`)
* :doc:`Elastic MapReduce (EMR) <emr_tut>` -- (:doc:`API Reference <ref/emr>`)
* :doc:`Auto Scaling <autoscale_tut>` -- (:doc:`API Reference <ref/autoscale>`)
+ * Kinesis -- (:doc:`API Reference <ref/kinesis>`)
* **Content Delivery**
@@ -115,6 +116,7 @@ Release Notes
.. toctree::
:titlesonly:
+ releasenotes/v2.21.0
releasenotes/v2.20.1
releasenotes/v2.20.0
releasenotes/v2.19.0
diff --git a/docs/source/ref/index.rst b/docs/source/ref/index.rst
index 3def15d7..a477ac06 100644
--- a/docs/source/ref/index.rst
+++ b/docs/source/ref/index.rst
@@ -22,6 +22,7 @@ API Reference
glacier
gs
iam
+ kinesis
manage
mturk
mws
diff --git a/docs/source/ref/kinesis.rst b/docs/source/ref/kinesis.rst
new file mode 100644
index 00000000..288ee62e
--- /dev/null
+++ b/docs/source/ref/kinesis.rst
@@ -0,0 +1,26 @@
+.. ref-kinesis
+
+=======
+Kinesis
+=======
+
+boto.kinesis
+------------
+
+.. automodule:: boto.kinesis
+ :members:
+ :undoc-members:
+
+boto.kinesis.layer1
+-------------------
+
+.. automodule:: boto.kinesis.layer1
+ :members:
+ :undoc-members:
+
+boto.kinesis.exceptions
+-----------------------
+
+.. automodule:: boto.kinesis.exceptions
+ :members:
+ :undoc-members:
diff --git a/docs/source/releasenotes/v2.21.0.rst b/docs/source/releasenotes/v2.21.0.rst
new file mode 100644
index 00000000..c5cf622b
--- /dev/null
+++ b/docs/source/releasenotes/v2.21.0.rst
@@ -0,0 +1,32 @@
+boto v2.21.0
+============
+
+:date: 2013/12/19
+
+This release adds support for the latest AWS OpsWorks, AWS Elastic Beanstalk, Amazon DynamoDB, Amazon Elastic MapReduce (EMR), Amazon Simple Storage Service (S3), Amazon Elastic Transcoder, AWS CloudTrail, and AWS Support APIs. It also includes documentation and other fixes.
+
+
+Features
+--------
+* Add support for Elastic Transcoder pagination and new codecs (:sha:`dcb1c5a`)
+* Add support for new CloudTrail calling format (:sha:`aeafe9b`)
+* Update to the latest Support API (:sha:`45e1884`)
+* Add support for arbitrarily large SQS messages stored in S3 via BigMessage. (:issue:`1917`, :sha:`e6cd665`)
+* Add support for ``encoding_type`` to S3 (:sha:`6b2d967`)
+* Add support for Elastic MapReduce tags (:issue:`1928`, :issue:`1920`, :sha:`b9749c6`, :sha:`8e4c595`)
+* Add high level support for global secondary indexes in DynamoDB (:issue:`1924`, :issue:`1913`, :sha:`32dac5b`)
+* Add support for Elastic Beanstalk worker environments. (:issue:`1911`, :sha:`bbd4fbf`)
+* Add support for OpsWorks IAM user permissions per stack (:sha:`ac6e4e7`)
+* Add support for SigV4 to S3 (:sha:`deb9e18`)
+* Add support for SigV4 to EC2 (:sha:`bdebfe0`)
+* Add support for SigV4 to ElastiCache (:sha:`b892b45`)
+
+
+Bugfixes
+--------
+* Add documentation describing account usage for multipart uploads in S3 (:sha:`af03d8d`)
+* Update DesiredCapacity if AutoScalingGroup.desired_capacity is not None. (:issue:`1906`, :issue:`1906`, :issue:`1757`, :sha:`b6670ce`)
+* Documentation: add Kinesis API reference (:issue:`1921`, :sha:`c169836`)
+* Documentation: sriovNetSupport instance attribute (:issue:`1915`, :sha:`e1bafcc`)
+* Update RDS documentation for API version: 2013-09-09 (:issue:`1914`, :sha:`fcf702a`)
+* Switch all classes to new style classes which results in memory use improvements (:sha:`ca36fa2`)
diff --git a/tests/integration/dynamodb2/test_highlevel.py b/tests/integration/dynamodb2/test_highlevel.py
index 46848fae..87951d66 100644
--- a/tests/integration/dynamodb2/test_highlevel.py
+++ b/tests/integration/dynamodb2/test_highlevel.py
@@ -29,7 +29,8 @@ import time
from tests.unit import unittest
from boto.dynamodb2 import exceptions
-from boto.dynamodb2.fields import HashKey, RangeKey, KeysOnlyIndex
+from boto.dynamodb2.fields import (HashKey, RangeKey, KeysOnlyIndex,
+ GlobalKeysOnlyIndex)
from boto.dynamodb2.items import Item
from boto.dynamodb2.table import Table
from boto.dynamodb2.types import NUMBER
@@ -343,3 +344,39 @@ class DynamoDBv2Test(unittest.TestCase):
# Post-__exit__, they should all be gone.
self.assertEqual(len(batch._unprocessed), 0)
+
+ def test_gsi(self):
+ users = Table.create('gsi_users', schema=[
+ HashKey('user_id'),
+ ], throughput={
+ 'read': 5,
+ 'write': 3,
+ },
+ global_indexes=[
+ GlobalKeysOnlyIndex('StuffIndex', parts=[
+ HashKey('user_id')
+ ], throughput={
+ 'read': 2,
+ 'write': 1,
+ }),
+ ])
+ self.addCleanup(users.delete)
+
+ # Wait for it.
+ time.sleep(60)
+
+ users.update(
+ throughput={
+ 'read': 3,
+ 'write': 4
+ },
+ global_indexes={
+ 'StuffIndex': {
+ 'read': 1,
+ 'write': 2
+ }
+ }
+ )
+
+ # Wait again for the changes to finish propagating.
+ time.sleep(120)
diff --git a/tests/integration/ec2/autoscale/test_connection.py b/tests/integration/ec2/autoscale/test_connection.py
index 094adb14..5a3ea57e 100644
--- a/tests/integration/ec2/autoscale/test_connection.py
+++ b/tests/integration/ec2/autoscale/test_connection.py
@@ -53,47 +53,47 @@ class AutoscaleConnectionTest(unittest.TestCase):
groups = c.get_all_groups()
for group in groups:
- self.assertTrue(type(group), AutoScalingGroup)
+ self.assertIsInstance(group, AutoScalingGroup)
# get activities
activities = group.get_activities()
for activity in activities:
- self.assertEqual(type(activity), Activity)
+ self.assertIsInstance(activity, Activity)
# get launch configs
configs = c.get_all_launch_configurations()
for config in configs:
- self.assertTrue(type(config), LaunchConfiguration)
+ self.assertIsInstance(config, LaunchConfiguration)
# get policies
policies = c.get_all_policies()
for policy in policies:
- self.assertTrue(type(policy), ScalingPolicy)
+ self.assertIsInstance(policy, ScalingPolicy)
# get scheduled actions
actions = c.get_all_scheduled_actions()
for action in actions:
- self.assertTrue(type(action), ScheduledUpdateGroupAction)
+ self.assertIsInstance(action, ScheduledUpdateGroupAction)
# get instances
instances = c.get_all_autoscaling_instances()
for instance in instances:
- self.assertTrue(type(instance), Instance)
+ self.assertIsInstance(instance, Instance)
# get all scaling process types
ptypes = c.get_all_scaling_process_types()
for ptype in ptypes:
- self.assertTrue(type(ptype), ProcessType)
+            self.assertIsInstance(ptype, ProcessType)
# get adjustment types
adjustments = c.get_all_adjustment_types()
for adjustment in adjustments:
- self.assertTrue(type(adjustment), AdjustmentType)
+ self.assertIsInstance(adjustment, AdjustmentType)
# get metrics collection types
types = c.get_all_metric_collection_types()
- self.assertTrue(type(types), MetricCollectionTypes)
+ self.assertIsInstance(types, MetricCollectionTypes)
# create the simplest possible AutoScale group
# first create the launch configuration
diff --git a/tests/integration/opsworks/test_layer1.py b/tests/integration/opsworks/test_layer1.py
index a9887cde..a2503952 100644
--- a/tests/integration/opsworks/test_layer1.py
+++ b/tests/integration/opsworks/test_layer1.py
@@ -22,8 +22,8 @@
import unittest
import time
+from boto.exception import JSONResponseError
from boto.opsworks.layer1 import OpsWorksConnection
-from boto.opsworks.exceptions import ValidationException
class TestOpsWorksConnection(unittest.TestCase):
@@ -35,6 +35,6 @@ class TestOpsWorksConnection(unittest.TestCase):
self.assertIn('Stacks', response)
def test_validation_errors(self):
- with self.assertRaises(ValidationException):
+ with self.assertRaises(JSONResponseError):
self.api.create_stack('testbotostack', 'us-east-1',
'badarn', 'badarn2')
diff --git a/tests/integration/sqs/test_bigmessage.py b/tests/integration/sqs/test_bigmessage.py
new file mode 100644
index 00000000..03de4e1a
--- /dev/null
+++ b/tests/integration/sqs/test_bigmessage.py
@@ -0,0 +1,80 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Some integration tests for SQS BigMessage (large message bodies stored in S3)
+"""
+from __future__ import with_statement
+
+import time
+from threading import Timer
+from tests.unit import unittest
+import StringIO
+
+import boto
+from boto.sqs.bigmessage import BigMessage
+from boto.exception import SQSError
+
+
+class TestBigMessage(unittest.TestCase):
+
+ sqs = True
+
+ def test_1_basic(self):
+ c = boto.connect_sqs()
+
+ # create a queue so we can test BigMessage
+ queue_name = 'test%d' % int(time.time())
+ timeout = 60
+ queue = c.create_queue(queue_name, timeout)
+ self.addCleanup(c.delete_queue, queue, True)
+ queue.set_message_class(BigMessage)
+
+ # create a bucket with the same name to store the message in
+ s3 = boto.connect_s3()
+ bucket = s3.create_bucket(queue_name)
+ self.addCleanup(s3.delete_bucket, queue_name)
+ time.sleep(30)
+
+ # now add a message
+ msg_body = 'This is a test of the big message'
+ fp = StringIO.StringIO(msg_body)
+ s3_url = 's3://%s' % queue_name
+ message = queue.new_message(fp, s3_url=s3_url)
+
+ queue.write(message)
+ time.sleep(30)
+
+ s3_object_name = message.s3_url.split('/')[-1]
+
+ # Make sure msg body is in bucket
+ self.assertTrue(bucket.lookup(s3_object_name))
+
+ m = queue.read()
+ self.assertEqual(m.get_body(), msg_body)
+
+ m.delete()
+ time.sleep(30)
+
+ # Make sure msg is deleted from bucket
+ self.assertIsNone(bucket.lookup(s3_object_name))
diff --git a/tests/unit/auth/test_sigv4.py b/tests/unit/auth/test_sigv4.py
index 670ce664..2c98e9bd 100644
--- a/tests/unit/auth/test_sigv4.py
+++ b/tests/unit/auth/test_sigv4.py
@@ -19,10 +19,12 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
+import copy
from mock import Mock
from tests.unit import unittest
from boto.auth import HmacAuthV4Handler
+from boto.auth import S3HmacAuthV4Handler
from boto.connection import HTTPRequest
@@ -208,3 +210,205 @@ class TestSigV4Handler(unittest.TestCase):
auth.service_name = 'sqs'
scope = auth.credential_scope(self.request)
self.assertEqual(scope, '20121121/us-west-2/sqs/aws4_request')
+
+
+class TestS3HmacAuthV4Handler(unittest.TestCase):
+ def setUp(self):
+ self.provider = Mock()
+ self.provider.access_key = 'access_key'
+ self.provider.secret_key = 'secret_key'
+ self.provider.security_token = 'sekret_tokens'
+ self.request = HTTPRequest(
+ 'GET', 'https', 's3-us-west-2.amazonaws.com', 443,
+ '/awesome-bucket/?max-keys=0', None, {},
+ {}, ''
+ )
+ self.awesome_bucket_request = HTTPRequest(
+ method='GET',
+ protocol='https',
+ host='awesome-bucket.s3-us-west-2.amazonaws.com',
+ port=443,
+ path='/',
+ auth_path=None,
+ params={
+ 'max-keys': 0,
+ },
+ headers={
+ 'User-Agent': 'Boto',
+ 'X-AMZ-Content-sha256': 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855',
+ 'X-AMZ-Date': '20130605T193245Z',
+ },
+ body=''
+ )
+ self.auth = S3HmacAuthV4Handler(
+ host='awesome-bucket.s3-us-west-2.amazonaws.com',
+ config=Mock(),
+ provider=self.provider,
+ region_name='s3-us-west-2'
+ )
+
+ def test_clean_region_name(self):
+ # Untouched.
+ cleaned = self.auth.clean_region_name('us-west-2')
+ self.assertEqual(cleaned, 'us-west-2')
+
+ # Stripped of the ``s3-`` prefix.
+ cleaned = self.auth.clean_region_name('s3-us-west-2')
+ self.assertEqual(cleaned, 'us-west-2')
+
+ # Untouched (classic).
+ cleaned = self.auth.clean_region_name('s3.amazonaws.com')
+ self.assertEqual(cleaned, 's3.amazonaws.com')
+
+ # Untouched.
+ cleaned = self.auth.clean_region_name('something-s3-us-west-2')
+ self.assertEqual(cleaned, 'something-s3-us-west-2')
+
+ def test_region_stripping(self):
+ auth = S3HmacAuthV4Handler(
+ host='s3-us-west-2.amazonaws.com',
+ config=Mock(),
+ provider=self.provider
+ )
+ self.assertEqual(auth.region_name, None)
+
+ # What we wish we got.
+ auth = S3HmacAuthV4Handler(
+ host='s3-us-west-2.amazonaws.com',
+ config=Mock(),
+ provider=self.provider,
+ region_name='us-west-2'
+ )
+ self.assertEqual(auth.region_name, 'us-west-2')
+
+ # What we actually get (i.e. ``s3-us-west-2``).
+ self.assertEqual(self.auth.region_name, 'us-west-2')
+
+ def test_determine_region_name(self):
+ name = self.auth.determine_region_name('s3-us-west-2.amazonaws.com')
+ self.assertEqual(name, 'us-west-2')
+
+ def test_canonical_uri(self):
+ request = HTTPRequest(
+ 'GET', 'https', 's3-us-west-2.amazonaws.com', 443,
+ 'x/./././x .html', None, {},
+ {}, ''
+ )
+ canonical_uri = self.auth.canonical_uri(request)
+ # S3 doesn't canonicalize the way other SigV4 services do.
+ # This just urlencoded, no normalization of the path.
+ self.assertEqual(canonical_uri, 'x/./././x%20.html')
+
+ def test_determine_service_name(self):
+ # What we wish we got.
+ name = self.auth.determine_service_name(
+ 's3.us-west-2.amazonaws.com'
+ )
+ self.assertEqual(name, 's3')
+
+ # What we actually get.
+ name = self.auth.determine_service_name(
+ 's3-us-west-2.amazonaws.com'
+ )
+ self.assertEqual(name, 's3')
+
+ # What we wish we got with virtual hosting.
+ name = self.auth.determine_service_name(
+ 'bucket.s3.us-west-2.amazonaws.com'
+ )
+ self.assertEqual(name, 's3')
+
+ # What we actually get with virtual hosting.
+ name = self.auth.determine_service_name(
+ 'bucket.s3-us-west-2.amazonaws.com'
+ )
+ self.assertEqual(name, 's3')
+
+ def test_add_auth(self):
+ # The side-effects sideshow.
+ self.assertFalse('x-amz-content-sha256' in self.request.headers)
+ self.auth.add_auth(self.request)
+ self.assertTrue('x-amz-content-sha256' in self.request.headers)
+ the_sha = self.request.headers['x-amz-content-sha256']
+ self.assertEqual(
+ the_sha,
+ 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
+ )
+
+ def test_host_header(self):
+ host = self.auth.host_header(
+ self.awesome_bucket_request.host,
+ self.awesome_bucket_request
+ )
+ self.assertEqual(host, 'awesome-bucket.s3-us-west-2.amazonaws.com')
+
+ def test_canonical_query_string(self):
+ qs = self.auth.canonical_query_string(self.awesome_bucket_request)
+ self.assertEqual(qs, 'max-keys=0')
+
+ def test_mangle_path_and_params(self):
+ request = HTTPRequest(
+ method='GET',
+ protocol='https',
+ host='awesome-bucket.s3-us-west-2.amazonaws.com',
+ port=443,
+ # LOOK AT THIS PATH. JUST LOOK AT IT.
+ path='/?delete&max-keys=0',
+ auth_path=None,
+ params={
+ 'key': 'why hello there',
+ # This gets overwritten, to make sure back-compat is maintained.
+ 'max-keys': 1,
+ },
+ headers={
+ 'User-Agent': 'Boto',
+ 'X-AMZ-Content-sha256': 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855',
+ 'X-AMZ-Date': '20130605T193245Z',
+ },
+ body=''
+ )
+
+ mod_req = self.auth.mangle_path_and_params(request)
+ self.assertEqual(mod_req.path, '/?delete&max-keys=0')
+ self.assertEqual(mod_req.auth_path, '/')
+ self.assertEqual(mod_req.params, {
+ 'max-keys': '0',
+ 'key': 'why hello there',
+ 'delete': ''
+ })
+
+ def test_canonical_request(self):
+ expected = """GET
+/
+max-keys=0
+host:awesome-bucket.s3-us-west-2.amazonaws.com
+user-agent:Boto
+x-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
+x-amz-date:20130605T193245Z
+
+host;user-agent;x-amz-content-sha256;x-amz-date
+e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"""
+
+ authed_req = self.auth.canonical_request(self.awesome_bucket_request)
+ self.assertEqual(authed_req, expected)
+
+ # Now the way ``boto.s3`` actually sends data.
+ request = copy.copy(self.awesome_bucket_request)
+ request.path = request.auth_path = '/?max-keys=0'
+ request.params = {}
+ expected = """GET
+/
+max-keys=0
+host:awesome-bucket.s3-us-west-2.amazonaws.com
+user-agent:Boto
+x-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
+x-amz-date:20130605T193245Z
+
+host;user-agent;x-amz-content-sha256;x-amz-date
+e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"""
+
+ # Pre-mangle it. In practice, this happens as part of ``add_auth``,
+ # but that's a side-effect that's hard to test.
+ request = self.auth.mangle_path_and_params(request)
+ authed_req = self.auth.canonical_request(request)
+ self.assertEqual(authed_req, expected)
diff --git a/tests/unit/beanstalk/test_layer1.py b/tests/unit/beanstalk/test_layer1.py
index 2ecec0d2..fad51e65 100644
--- a/tests/unit/beanstalk/test_layer1.py
+++ b/tests/unit/beanstalk/test_layer1.py
@@ -117,3 +117,33 @@ class TestCreateEnvironment(AWSMockServiceTestCase):
'OptionSettings.member.2.OptionName': 'ENVVAR',
'OptionSettings.member.2.Value': 'VALUE1',
})
+
+ def test_create_environment_with_tier(self):
+ self.set_http_response(status_code=200)
+ api_response = self.service_connection.create_environment(
+ 'application1', 'environment1', 'version1',
+ '32bit Amazon Linux running Tomcat 7',
+ option_settings=[
+ ('aws:autoscaling:launchconfiguration', 'Ec2KeyName',
+ 'mykeypair'),
+ ('aws:elasticbeanstalk:application:environment', 'ENVVAR',
+ 'VALUE1')],
+ tier_name='Worker', tier_type='SQS/HTTP', tier_version='1.0')
+ self.assert_request_parameters({
+ 'Action': 'CreateEnvironment',
+ 'ApplicationName': 'application1',
+ 'EnvironmentName': 'environment1',
+ 'TemplateName': '32bit Amazon Linux running Tomcat 7',
+ 'ContentType': 'JSON',
+ 'Version': '2010-12-01',
+ 'VersionLabel': 'version1',
+ 'OptionSettings.member.1.Namespace': 'aws:autoscaling:launchconfiguration',
+ 'OptionSettings.member.1.OptionName': 'Ec2KeyName',
+ 'OptionSettings.member.1.Value': 'mykeypair',
+ 'OptionSettings.member.2.Namespace': 'aws:elasticbeanstalk:application:environment',
+ 'OptionSettings.member.2.OptionName': 'ENVVAR',
+ 'OptionSettings.member.2.Value': 'VALUE1',
+ 'Tier.member.Name': 'Worker',
+ 'Tier.member.Type': 'SQS/HTTP',
+ 'Tier.member.Version': '1.0',
+ })
diff --git a/tests/unit/dynamodb2/test_table.py b/tests/unit/dynamodb2/test_table.py
index 16b62fd8..cd740285 100644
--- a/tests/unit/dynamodb2/test_table.py
+++ b/tests/unit/dynamodb2/test_table.py
@@ -2,7 +2,9 @@ import mock
import unittest
from boto.dynamodb2 import exceptions
from boto.dynamodb2.fields import (HashKey, RangeKey,
- AllIndex, KeysOnlyIndex, IncludeIndex)
+ AllIndex, KeysOnlyIndex, IncludeIndex,
+ GlobalAllIndex, GlobalKeysOnlyIndex,
+ GlobalIncludeIndex)
from boto.dynamodb2.items import Item
from boto.dynamodb2.layer1 import DynamoDBConnection
from boto.dynamodb2.results import ResultSet, BatchGetResultSet
@@ -169,6 +171,133 @@ class IndexFieldTestCase(unittest.TestCase):
}
})
+ def test_global_all_index(self):
+ all_index = GlobalAllIndex('AllKeys', parts=[
+ HashKey('username'),
+ RangeKey('date_joined')
+ ],
+ throughput={
+ 'read': 6,
+ 'write': 2,
+ })
+ self.assertEqual(all_index.name, 'AllKeys')
+ self.assertEqual([part.attr_type for part in all_index.parts], [
+ 'HASH',
+ 'RANGE'
+ ])
+ self.assertEqual(all_index.projection_type, 'ALL')
+
+ self.assertEqual(all_index.definition(), [
+ {'AttributeName': 'username', 'AttributeType': 'S'},
+ {'AttributeName': 'date_joined', 'AttributeType': 'S'}
+ ])
+ self.assertEqual(all_index.schema(), {
+ 'IndexName': 'AllKeys',
+ 'KeySchema': [
+ {
+ 'AttributeName': 'username',
+ 'KeyType': 'HASH'
+ },
+ {
+ 'AttributeName': 'date_joined',
+ 'KeyType': 'RANGE'
+ }
+ ],
+ 'Projection': {
+ 'ProjectionType': 'ALL'
+ },
+ 'ProvisionedThroughput': {
+ 'ReadCapacityUnits': 6,
+ 'WriteCapacityUnits': 2
+ }
+ })
+
+ def test_global_keys_only_index(self):
+ keys_only = GlobalKeysOnlyIndex('KeysOnly', parts=[
+ HashKey('username'),
+ RangeKey('date_joined')
+ ],
+ throughput={
+ 'read': 3,
+ 'write': 4,
+ })
+ self.assertEqual(keys_only.name, 'KeysOnly')
+ self.assertEqual([part.attr_type for part in keys_only.parts], [
+ 'HASH',
+ 'RANGE'
+ ])
+ self.assertEqual(keys_only.projection_type, 'KEYS_ONLY')
+
+ self.assertEqual(keys_only.definition(), [
+ {'AttributeName': 'username', 'AttributeType': 'S'},
+ {'AttributeName': 'date_joined', 'AttributeType': 'S'}
+ ])
+ self.assertEqual(keys_only.schema(), {
+ 'IndexName': 'KeysOnly',
+ 'KeySchema': [
+ {
+ 'AttributeName': 'username',
+ 'KeyType': 'HASH'
+ },
+ {
+ 'AttributeName': 'date_joined',
+ 'KeyType': 'RANGE'
+ }
+ ],
+ 'Projection': {
+ 'ProjectionType': 'KEYS_ONLY'
+ },
+ 'ProvisionedThroughput': {
+ 'ReadCapacityUnits': 3,
+ 'WriteCapacityUnits': 4
+ }
+ })
+
+ def test_global_include_index(self):
+ # Lean on the default throughput
+ include_index = GlobalIncludeIndex('IncludeKeys', parts=[
+ HashKey('username'),
+ RangeKey('date_joined')
+ ], includes=[
+ 'gender',
+ 'friend_count'
+ ])
+ self.assertEqual(include_index.name, 'IncludeKeys')
+ self.assertEqual([part.attr_type for part in include_index.parts], [
+ 'HASH',
+ 'RANGE'
+ ])
+ self.assertEqual(include_index.projection_type, 'INCLUDE')
+
+ self.assertEqual(include_index.definition(), [
+ {'AttributeName': 'username', 'AttributeType': 'S'},
+ {'AttributeName': 'date_joined', 'AttributeType': 'S'}
+ ])
+ self.assertEqual(include_index.schema(), {
+ 'IndexName': 'IncludeKeys',
+ 'KeySchema': [
+ {
+ 'AttributeName': 'username',
+ 'KeyType': 'HASH'
+ },
+ {
+ 'AttributeName': 'date_joined',
+ 'KeyType': 'RANGE'
+ }
+ ],
+ 'Projection': {
+ 'ProjectionType': 'INCLUDE',
+ 'NonKeyAttributes': [
+ 'gender',
+ 'friend_count',
+ ]
+ },
+ 'ProvisionedThroughput': {
+ 'ReadCapacityUnits': 5,
+ 'WriteCapacityUnits': 5
+ }
+ })
+
class ItemTestCase(unittest.TestCase):
def setUp(self):
@@ -476,7 +605,7 @@ class ItemTestCase(unittest.TestCase):
'date_joined'
]))
- def test_prepare_partial(self):
+ def test_prepare_partial_empty_set(self):
self.johndoe.mark_clean()
# Change some data.
self.johndoe['first_name'] = 'Johann'
@@ -1132,6 +1261,13 @@ class TableTestCase(unittest.TestCase):
KeysOnlyIndex('FriendCountIndex', parts=[
RangeKey('friend_count')
]),
+ ], global_indexes=[
+ GlobalKeysOnlyIndex('FullFriendCountIndex', parts=[
+ RangeKey('friend_count')
+ ], throughput={
+ 'read': 10,
+ 'write': 8,
+ }),
], connection=conn)
self.assertTrue(retval)
@@ -1165,6 +1301,24 @@ class TableTestCase(unittest.TestCase):
'WriteCapacityUnits': 10,
'ReadCapacityUnits': 20
},
+ global_secondary_indexes=[
+ {
+ 'KeySchema': [
+ {
+ 'KeyType': 'RANGE',
+ 'AttributeName': 'friend_count'
+ }
+ ],
+ 'IndexName': 'FullFriendCountIndex',
+ 'Projection': {
+ 'ProjectionType': 'KEYS_ONLY'
+ },
+ 'ProvisionedThroughput': {
+ 'WriteCapacityUnits': 8,
+ 'ReadCapacityUnits': 10
+ }
+ }
+ ],
local_secondary_indexes=[
{
'KeySchema': [
@@ -1252,10 +1406,65 @@ class TableTestCase(unittest.TestCase):
self.assertEqual(self.users.throughput['read'], 7)
self.assertEqual(self.users.throughput['write'], 2)
- mock_update.assert_called_once_with('users', {
- 'WriteCapacityUnits': 2,
- 'ReadCapacityUnits': 7
- })
+ mock_update.assert_called_once_with(
+ 'users',
+ global_secondary_index_updates=None,
+ provisioned_throughput={
+ 'WriteCapacityUnits': 2,
+ 'ReadCapacityUnits': 7
+ }
+ )
+
+ with mock.patch.object(
+ self.users.connection,
+ 'update_table',
+ return_value={}) as mock_update:
+ self.assertEqual(self.users.throughput['read'], 7)
+ self.assertEqual(self.users.throughput['write'], 2)
+ self.users.update(throughput={
+ 'read': 9,
+ 'write': 5,
+ },
+ global_indexes={
+ 'WhateverIndex': {
+ 'read': 6,
+ 'write': 1
+ },
+ 'AnotherIndex': {
+ 'read': 1,
+ 'write': 2
+ }
+ })
+ self.assertEqual(self.users.throughput['read'], 9)
+ self.assertEqual(self.users.throughput['write'], 5)
+
+ mock_update.assert_called_once_with(
+ 'users',
+ global_secondary_index_updates=[
+ {
+ 'Update': {
+ 'IndexName': 'AnotherIndex',
+ 'ProvisionedThroughput': {
+ 'WriteCapacityUnits': 2,
+ 'ReadCapacityUnits': 1
+ }
+ }
+ },
+ {
+ 'Update': {
+ 'IndexName': 'WhateverIndex',
+ 'ProvisionedThroughput': {
+ 'WriteCapacityUnits': 1,
+ 'ReadCapacityUnits': 6
+ }
+ }
+ }
+ ],
+ provisioned_throughput={
+ 'WriteCapacityUnits': 5,
+ 'ReadCapacityUnits': 9,
+ }
+ )
def test_delete(self):
with mock.patch.object(
diff --git a/tests/unit/ec2/test_connection.py b/tests/unit/ec2/test_connection.py
index 0ae34013..87a2d167 100644
--- a/tests/unit/ec2/test_connection.py
+++ b/tests/unit/ec2/test_connection.py
@@ -1312,6 +1312,30 @@ class TestDescribeTags(TestEC2ConnectionBase):
'SignatureVersion', 'Timestamp', 'Version'])
+class TestSignatureAlteration(TestEC2ConnectionBase):
+ def test_unchanged(self):
+ self.assertEqual(
+ self.service_connection._required_auth_capability(),
+ ['ec2']
+ )
+
+ def test_switched(self):
+ region = RegionInfo(
+ name='cn-north-1',
+ endpoint='ec2.cn-north-1.amazonaws.com.cn',
+ connection_cls=EC2Connection
+ )
+
+ conn = self.connection_class(
+ aws_access_key_id='less',
+ aws_secret_access_key='more',
+ region=region
+ )
+ self.assertEqual(
+ conn._required_auth_capability(),
+ ['hmac-v4']
+ )
+
if __name__ == '__main__':
unittest.main()
diff --git a/tests/unit/elasticache/test_api_interface.py b/tests/unit/elasticache/test_api_interface.py
index 51432d38..650522c2 100644
--- a/tests/unit/elasticache/test_api_interface.py
+++ b/tests/unit/elasticache/test_api_interface.py
@@ -15,6 +15,6 @@ class TestAPIInterface(AWSMockServiceTestCase):
'Action': 'CreateCacheCluster',
'CacheClusterId': name,
}, ignore_params_values=[
- 'Version', 'AWSAccessKeyId', 'SignatureMethod', 'SignatureVersion',
- 'Timestamp', 'ContentType',
+ 'Version',
+ 'ContentType',
])
diff --git a/tests/unit/emr/test_connection.py b/tests/unit/emr/test_connection.py
index eeff8842..189e6741 100644
--- a/tests/unit/emr/test_connection.py
+++ b/tests/unit/emr/test_connection.py
@@ -291,3 +291,122 @@ class TestAddJobFlowSteps(AWSMockServiceTestCase):
self.assertTrue(isinstance(response, JobFlowStepList))
self.assertEqual(response.stepids[0].value, 'Foo')
self.assertEqual(response.stepids[1].value, 'Bar')
+
+
+class TestBuildTagList(AWSMockServiceTestCase):
+ connection_class = EmrConnection
+
+ def test_key_without_value_encoding(self):
+ input_dict = {
+ 'KeyWithNoValue': '',
+ 'AnotherKeyWithNoValue': None
+ }
+ res = self.service_connection._build_tag_list(input_dict)
+ # Keys are outputted in ascending key order.
+ expected = {
+ 'Tags.member.1.Key': 'AnotherKeyWithNoValue',
+ 'Tags.member.2.Key': 'KeyWithNoValue'
+ }
+ self.assertEqual(expected, res)
+
+ def test_key_full_key_value_encoding(self):
+ input_dict = {
+ 'FirstKey': 'One',
+ 'SecondKey': 'Two'
+ }
+ res = self.service_connection._build_tag_list(input_dict)
+ # Keys are outputted in ascending key order.
+ expected = {
+ 'Tags.member.1.Key': 'FirstKey',
+ 'Tags.member.1.Value': 'One',
+ 'Tags.member.2.Key': 'SecondKey',
+ 'Tags.member.2.Value': 'Two'
+ }
+ self.assertEqual(expected, res)
+
+
+class TestAddTag(AWSMockServiceTestCase):
+ connection_class = EmrConnection
+
+ def default_body(self):
+ return """<AddTagsResponse
+ xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31">
+ <AddTagsResult/>
+ <ResponseMetadata>
+ <RequestId>88888888-8888-8888-8888-888888888888</RequestId>
+ </ResponseMetadata>
+ </AddTagsResponse>
+ """
+
+ def test_add_mix_of_tags_with_without_values(self):
+ input_tags = {
+ 'FirstKey': 'One',
+ 'SecondKey': 'Two',
+ 'ZzzNoValue': ''
+ }
+ self.set_http_response(200)
+
+ with self.assertRaises(TypeError):
+ self.service_connection.add_tags()
+
+ with self.assertRaises(TypeError):
+ self.service_connection.add_tags('j-123')
+
+ with self.assertRaises(AssertionError):
+ self.service_connection.add_tags('j-123', [])
+
+ response = self.service_connection.add_tags('j-123', input_tags)
+
+ self.assertTrue(response)
+ self.assert_request_parameters({
+ 'Action': 'AddTags',
+ 'ResourceId': 'j-123',
+ 'Tags.member.1.Key': 'FirstKey',
+ 'Tags.member.1.Value': 'One',
+ 'Tags.member.2.Key': 'SecondKey',
+ 'Tags.member.2.Value': 'Two',
+ 'Tags.member.3.Key': 'ZzzNoValue',
+ 'Version': '2009-03-31'
+ })
+
+
+class TestRemoveTag(AWSMockServiceTestCase):
+ connection_class = EmrConnection
+
+ def default_body(self):
+ return """<RemoveTagsResponse
+ xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31">
+ <RemoveTagsResult/>
+ <ResponseMetadata>
+ <RequestId>88888888-8888-8888-8888-888888888888</RequestId>
+ </ResponseMetadata>
+ </RemoveTagsResponse>
+ """
+
+ def test_remove_tags(self):
+ input_tags = {
+ 'FirstKey': 'One',
+ 'SecondKey': 'Two',
+ 'ZzzNoValue': ''
+ }
+ self.set_http_response(200)
+
+ with self.assertRaises(TypeError):
+ self.service_connection.add_tags()
+
+ with self.assertRaises(TypeError):
+ self.service_connection.add_tags('j-123')
+
+ with self.assertRaises(AssertionError):
+ self.service_connection.add_tags('j-123', [])
+
+ response = self.service_connection.remove_tags('j-123', ['FirstKey', 'SecondKey'])
+
+ self.assertTrue(response)
+ self.assert_request_parameters({
+ 'Action': 'RemoveTags',
+ 'ResourceId': 'j-123',
+ 'TagKeys.member.1': 'FirstKey',
+ 'TagKeys.member.2': 'SecondKey',
+ 'Version': '2009-03-31'
+ })
diff --git a/tests/unit/s3/test_bucket.py b/tests/unit/s3/test_bucket.py
index e82b3787..5bbb2121 100644
--- a/tests/unit/s3/test_bucket.py
+++ b/tests/unit/s3/test_bucket.py
@@ -6,6 +6,10 @@ from tests.unit import AWSMockServiceTestCase
from boto.s3.connection import S3Connection
from boto.s3.bucket import Bucket
+from boto.s3.deletemarker import DeleteMarker
+from boto.s3.key import Key
+from boto.s3.multipart import MultiPartUpload
+from boto.s3.prefix import Prefix
class TestS3Bucket(AWSMockServiceTestCase):
@@ -125,3 +129,50 @@ class TestS3Bucket(AWSMockServiceTestCase):
# Will throw because of empty response.
pass
self.assertFalse(mock_get_all_keys.called)
+
+ @patch.object(Bucket, '_get_all')
+ def test_bucket_encoding(self, mock_get_all):
+ self.set_http_response(status_code=200)
+ bucket = self.service_connection.get_bucket('mybucket')
+
+ # First, without the encoding.
+ mock_get_all.reset_mock()
+ bucket.get_all_keys()
+ mock_get_all.assert_called_with(
+ [
+ ('Contents', Key),
+ ('CommonPrefixes', Prefix)
+ ], '', None
+ )
+
+ # Now the variants with the encoding.
+ mock_get_all.reset_mock()
+ bucket.get_all_keys(encoding_type='url')
+ mock_get_all.assert_called_with(
+ [
+ ('Contents', Key),
+ ('CommonPrefixes', Prefix)
+ ], '', None,
+ encoding_type='url'
+ )
+
+ mock_get_all.reset_mock()
+ bucket.get_all_versions(encoding_type='url')
+ mock_get_all.assert_called_with(
+ [
+ ('Version', Key),
+ ('CommonPrefixes', Prefix),
+ ('DeleteMarker', DeleteMarker),
+ ], 'versions', None,
+ encoding_type='url'
+ )
+
+ mock_get_all.reset_mock()
+ bucket.get_all_multipart_uploads(encoding_type='url')
+ mock_get_all.assert_called_with(
+ [
+ ('Upload', MultiPartUpload),
+ ('CommonPrefixes', Prefix)
+ ], 'uploads', None,
+ encoding_type='url'
+ )
diff --git a/tests/unit/s3/test_connection.py b/tests/unit/s3/test_connection.py
new file mode 100644
index 00000000..3644fd19
--- /dev/null
+++ b/tests/unit/s3/test_connection.py
@@ -0,0 +1,50 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from tests.unit import unittest
+from tests.unit import AWSMockServiceTestCase
+
+from boto.s3.connection import S3Connection
+
+
+class TestSignatureAlteration(AWSMockServiceTestCase):
+ connection_class = S3Connection
+
+ def test_unchanged(self):
+ self.assertEqual(
+ self.service_connection._required_auth_capability(),
+ ['s3']
+ )
+
+ def test_switched(self):
+ conn = self.connection_class(
+ aws_access_key_id='less',
+ aws_secret_access_key='more',
+ host='s3.cn-north-1.amazonaws.com.cn'
+ )
+ self.assertEqual(
+ conn._required_auth_capability(),
+ ['hmac-v4-s3']
+ )
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/unit/sqs/test_message.py b/tests/unit/sqs/test_message.py
index e91fd233..daae9971 100644
--- a/tests/unit/sqs/test_message.py
+++ b/tests/unit/sqs/test_message.py
@@ -23,6 +23,7 @@ from tests.unit import unittest
from boto.sqs.message import MHMessage
from boto.sqs.message import RawMessage
+from boto.sqs.bigmessage import BigMessage
from boto.exception import SQSDecodeError
@@ -62,5 +63,32 @@ class TestEncodeMessage(unittest.TestCase):
self.assertEquals(message.id, sample_value)
self.assertEquals(message.receipt_handle, sample_value)
+
+class TestBigMessage(unittest.TestCase):
+
+ def test_s3url_parsing(self):
+ msg = BigMessage()
+ # Try just a bucket name
+ bucket, key = msg._get_bucket_key('s3://foo')
+ self.assertEquals(bucket, 'foo')
+ self.assertEquals(key, None)
+ # Try just a bucket name with trailing "/"
+ bucket, key = msg._get_bucket_key('s3://foo/')
+ self.assertEquals(bucket, 'foo')
+ self.assertEquals(key, None)
+ # Try a bucket and a key
+ bucket, key = msg._get_bucket_key('s3://foo/bar')
+ self.assertEquals(bucket, 'foo')
+ self.assertEquals(key, 'bar')
+ # Try a bucket and a key with "/"
+ bucket, key = msg._get_bucket_key('s3://foo/bar/fie/baz')
+ self.assertEquals(bucket, 'foo')
+ self.assertEquals(key, 'bar/fie/baz')
+ # Try it with no s3:// prefix
+ with self.assertRaises(SQSDecodeError) as context:
+ bucket, key = msg._get_bucket_key('foo/bar')
+
+
+
if __name__ == '__main__':
unittest.main()