summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--README.rst12
-rw-r--r--boto/__init__.py133
-rw-r--r--boto/awslambda/__init__.py40
-rw-r--r--boto/awslambda/exceptions.py38
-rw-r--r--boto/awslambda/layer1.py517
-rw-r--r--boto/cloudhsm/__init__.py41
-rw-r--r--boto/cloudhsm/exceptions.py35
-rw-r--r--boto/cloudhsm/layer1.py448
-rw-r--r--boto/codedeploy/__init__.py40
-rw-r--r--boto/codedeploy/exceptions.py199
-rw-r--r--boto/codedeploy/layer1.py899
-rw-r--r--boto/cognito/identity/exceptions.py4
-rw-r--r--boto/cognito/identity/layer1.py286
-rw-r--r--boto/cognito/sync/exceptions.py4
-rw-r--r--boto/cognito/sync/layer1.py209
-rw-r--r--boto/configservice/__init__.py41
-rw-r--r--boto/configservice/exceptions.py103
-rw-r--r--boto/configservice/layer1.py381
-rw-r--r--boto/dynamodb2/layer1.py551
-rw-r--r--boto/ec2containerservice/__init__.py41
-rw-r--r--boto/ec2containerservice/exceptions.py31
-rw-r--r--boto/ec2containerservice/layer1.py748
-rw-r--r--boto/endpoints.json36
-rw-r--r--boto/kinesis/layer1.py571
-rw-r--r--boto/kms/__init__.py41
-rw-r--r--boto/kms/exceptions.py72
-rw-r--r--boto/kms/layer1.py821
-rw-r--r--boto/opsworks/layer1.py616
-rw-r--r--boto/sqs/message.py5
-rw-r--r--docs/source/index.rst9
-rw-r--r--docs/source/ref/awslamba.rst26
-rw-r--r--docs/source/ref/cloudhsm.rst26
-rw-r--r--docs/source/ref/codedeploy.rst26
-rw-r--r--docs/source/ref/configservice.rst26
-rw-r--r--docs/source/ref/ec2containerservice.rst26
-rw-r--r--docs/source/ref/kms.rst26
-rw-r--r--docs/source/releasenotes/v2.36.0.rst27
-rw-r--r--setup.py4
-rw-r--r--tests/integration/awslambda/__init__.py21
-rw-r--r--tests/integration/awslambda/test_awslambda.py38
-rw-r--r--tests/integration/cloudhsm/__init__.py21
-rw-r--r--tests/integration/cloudhsm/test_cloudhsm.py44
-rw-r--r--tests/integration/codedeploy/__init__.py21
-rw-r--r--tests/integration/codedeploy/test_codedeploy.py41
-rw-r--r--tests/integration/configservice/__init__.py21
-rw-r--r--tests/integration/configservice/test_configservice.py39
-rw-r--r--tests/integration/ec2containerservice/__init__.py21
-rw-r--r--tests/integration/ec2containerservice/test_ec2containerservice.py40
-rw-r--r--tests/integration/kinesis/test_kinesis.py34
-rw-r--r--tests/integration/kms/test_kms.py41
-rw-r--r--tests/unit/awslambda/__init__.py21
-rw-r--r--tests/unit/awslambda/test_awslambda.py117
-rw-r--r--tests/unit/kinesis/__init__.py0
-rw-r--r--tests/unit/kinesis/test_kinesis.py74
-rw-r--r--tests/unit/kms/__init__.py21
-rw-r--r--tests/unit/kms/test_kms.py63
-rw-r--r--tests/unit/sqs/test_message.py15
57 files changed, 7228 insertions, 624 deletions
diff --git a/README.rst b/README.rst
index 1ca6992d..01823a31 100644
--- a/README.rst
+++ b/README.rst
@@ -1,9 +1,9 @@
####
boto
####
-boto 2.35.2
+boto 2.36.0
-Released: 19-Jan-2015
+Released: 27-Jan-2015
.. image:: https://travis-ci.org/boto/boto.svg?branch=develop
:target: https://travis-ci.org/boto/boto
@@ -34,6 +34,8 @@ At the moment, boto supports:
* Amazon Elastic Map Reduce (EMR) (Python 3)
* AutoScaling (Python 3)
* Amazon Kinesis (Python 3)
+ * AWS Lambda (Python 3)
+ * Amazon EC2 Container Service (Python 3)
* Content Delivery
@@ -54,10 +56,14 @@ At the moment, boto supports:
* AWS Data Pipeline (Python 3)
* AWS Opsworks (Python 3)
* AWS CloudTrail (Python 3)
+ * AWS CodeDeploy (Python 3)
-* Identity & Access
+* Administration & Security
* AWS Identity and Access Management (IAM) (Python 3)
+ * AWS Key Management Service (KMS) (Python 3)
+ * AWS Config (Python 3)
+ * AWS CloudHSM (Python 3)
* Application Services
diff --git a/boto/__init__.py b/boto/__init__.py
index b076f2f1..19beaa11 100644
--- a/boto/__init__.py
+++ b/boto/__init__.py
@@ -38,7 +38,7 @@ import logging.config
from boto.compat import urlparse
from boto.exception import InvalidUriError
-__version__ = '2.35.2'
+__version__ = '2.36.0'
Version = __version__ # for backware compatibility
# http://bugs.python.org/issue7980
@@ -952,6 +952,137 @@ def connect_cognito_sync(aws_access_key_id=None,
)
+def connect_kms(aws_access_key_id=None,
+ aws_secret_access_key=None,
+ **kwargs):
+ """
+ Connect to AWS Key Management Service
+
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.kms.layer1.KMSConnection`
+ :return: A connection to the AWS Key Management Service
+ """
+ from boto.kms.layer1 import KMSConnection
+ return KMSConnection(
+ aws_access_key_id=aws_access_key_id,
+ aws_secret_access_key=aws_secret_access_key,
+ **kwargs
+ )
+
+
+def connect_awslambda(aws_access_key_id=None,
+ aws_secret_access_key=None,
+ **kwargs):
+ """
+ Connect to AWS Lambda
+
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.awslambda.layer1.AWSLambdaConnection`
+ :return: A connection to the AWS Lambda service
+ """
+ from boto.awslambda.layer1 import AWSLambdaConnection
+ return AWSLambdaConnection(
+ aws_access_key_id=aws_access_key_id,
+ aws_secret_access_key=aws_secret_access_key,
+ **kwargs
+ )
+
+
+def connect_codedeploy(aws_access_key_id=None,
+ aws_secret_access_key=None,
+ **kwargs):
+ """
+ Connect to AWS CodeDeploy
+
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.codedeploy.layer1.CodeDeployConnection`
+ :return: A connection to the AWS CodeDeploy service
+ """
+ from boto.codedeploy.layer1 import CodeDeployConnection
+ return CodeDeployConnection(
+ aws_access_key_id=aws_access_key_id,
+ aws_secret_access_key=aws_secret_access_key,
+ **kwargs
+ )
+
+
+def connect_configservice(aws_access_key_id=None,
+ aws_secret_access_key=None,
+ **kwargs):
+ """
+ Connect to AWS Config
+
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.configservice.layer1.ConfigServiceConnection`
+ :return: A connection to the AWS Config service
+ """
+ from boto.configservice.layer1 import ConfigServiceConnection
+ return ConfigServiceConnection(
+ aws_access_key_id=aws_access_key_id,
+ aws_secret_access_key=aws_secret_access_key,
+ **kwargs
+ )
+
+
+def connect_cloudhsm(aws_access_key_id=None,
+ aws_secret_access_key=None,
+ **kwargs):
+ """
+ Connect to AWS CloudHSM
+
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.cloudhsm.layer1.CloudHSMConnection`
+ :return: A connection to the AWS CloudHSM service
+ """
+ from boto.cloudhsm.layer1 import CloudHSMConnection
+ return CloudHSMConnection(
+ aws_access_key_id=aws_access_key_id,
+ aws_secret_access_key=aws_secret_access_key,
+ **kwargs
+ )
+
+
+def connect_ec2containerservice(aws_access_key_id=None,
+ aws_secret_access_key=None,
+ **kwargs):
+ """
+ Connect to Amazon EC2 Container Service
+    :rtype: :class:`boto.ec2containerservice.layer1.EC2ContainerServiceConnection`
+ :return: A connection to the Amazon EC2 Container Service
+ """
+ from boto.ec2containerservice.layer1 import EC2ContainerServiceConnection
+ return EC2ContainerServiceConnection(
+ aws_access_key_id=aws_access_key_id,
+ aws_secret_access_key=aws_secret_access_key,
+ **kwargs
+ )
+
+
def storage_uri(uri_str, default_scheme='file', debug=0, validate=True,
bucket_storage_uri_class=BucketStorageUri,
suppress_consec_slashes=True, is_latest=False):
diff --git a/boto/awslambda/__init__.py b/boto/awslambda/__init__.py
new file mode 100644
index 00000000..1e1f782c
--- /dev/null
+++ b/boto/awslambda/__init__.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.regioninfo import RegionInfo, get_regions
+
+
+def regions():
+ """
+ Get all available regions for the AWS Lambda service.
+ :rtype: list
+ :return: A list of :class:`boto.regioninfo.RegionInfo`
+ """
+ from boto.awslambda.layer1 import AWSLambdaConnection
+ return get_regions('awslambda',
+ connection_cls=AWSLambdaConnection)
+
+
+def connect_to_region(region_name, **kw_params):
+ for region in regions():
+ if region.name == region_name:
+ return region.connect(**kw_params)
+ return None
diff --git a/boto/awslambda/exceptions.py b/boto/awslambda/exceptions.py
new file mode 100644
index 00000000..7e151511
--- /dev/null
+++ b/boto/awslambda/exceptions.py
@@ -0,0 +1,38 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.exception import BotoServerError
+
+
+class InvalidRequestContentException(BotoServerError):
+ pass
+
+
+class ResourceNotFoundException(BotoServerError):
+ pass
+
+
+class InvalidParameterValueException(BotoServerError):
+ pass
+
+
+class ServiceException(BotoServerError):
+ pass
diff --git a/boto/awslambda/layer1.py b/boto/awslambda/layer1.py
new file mode 100644
index 00000000..01603f6f
--- /dev/null
+++ b/boto/awslambda/layer1.py
@@ -0,0 +1,517 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import os
+
+from boto.compat import json
+from boto.exception import JSONResponseError
+from boto.connection import AWSAuthConnection
+from boto.regioninfo import RegionInfo
+from boto.awslambda import exceptions
+
+
+class AWSLambdaConnection(AWSAuthConnection):
+ """
+ AWS Lambda
+ **Overview**
+
+ This is the AWS Lambda API Reference. The AWS Lambda Developer
+ Guide provides additional information. For the service overview,
+ go to `What is AWS Lambda`_, and for information about how the
+    service works, go to `AWS Lambda: How it Works`_ in the AWS Lambda
+ Developer Guide.
+ """
+ APIVersion = "2014-11-11"
+ DefaultRegionName = "us-east-1"
+ DefaultRegionEndpoint = "lambda.us-east-1.amazonaws.com"
+ ResponseError = JSONResponseError
+
+ _faults = {
+ "InvalidRequestContentException": exceptions.InvalidRequestContentException,
+ "ResourceNotFoundException": exceptions.ResourceNotFoundException,
+ "InvalidParameterValueException": exceptions.InvalidParameterValueException,
+ "ServiceException": exceptions.ServiceException,
+ }
+
+
+ def __init__(self, **kwargs):
+ region = kwargs.get('region')
+ if not region:
+ region = RegionInfo(self, self.DefaultRegionName,
+ self.DefaultRegionEndpoint)
+ else:
+ del kwargs['region']
+ kwargs['host'] = region.endpoint
+ super(AWSLambdaConnection, self).__init__(**kwargs)
+ self.region = region
+
+ def _required_auth_capability(self):
+ return ['hmac-v4']
+
+ def add_event_source(self, event_source, function_name, role,
+ batch_size=None, parameters=None):
+ """
+ Identifies an Amazon Kinesis stream as the event source for an
+ AWS Lambda function. AWS Lambda invokes the specified function
+ when records are posted to the stream.
+
+ This is the pull model, where AWS Lambda invokes the function.
+        For more information, go to `AWS Lambda: How it Works`_ in the
+ AWS Lambda Developer Guide.
+
+ This association between an Amazon Kinesis stream and an AWS
+ Lambda function is called the event source mapping. You
+ provide the configuration information (for example, which
+ stream to read from and which AWS Lambda function to invoke)
+ for the event source mapping in the request body.
+
+ This operation requires permission for the `iam:PassRole`
+ action for the IAM role. It also requires permission for the
+ `lambda:AddEventSource` action.
+
+ :type event_source: string
+ :param event_source: The Amazon Resource Name (ARN) of the Amazon
+ Kinesis stream that is the event source. Any record added to this
+ stream causes AWS Lambda to invoke your Lambda function. AWS Lambda
+ POSTs the Amazon Kinesis event, containing records, to your Lambda
+ function as JSON.
+
+ :type function_name: string
+ :param function_name: The Lambda function to invoke when AWS Lambda
+ detects an event on the stream.
+
+ :type role: string
+ :param role: The ARN of the IAM role (invocation role) that AWS Lambda
+ can assume to read from the stream and invoke the function.
+
+ :type batch_size: integer
+ :param batch_size: The largest number of records that AWS Lambda will
+ give to your function in a single event. The default is 100
+ records.
+
+ :type parameters: map
+ :param parameters: A map (key-value pairs) defining the configuration
+ for AWS Lambda to use when reading the event source. Currently, AWS
+ Lambda supports only the `InitialPositionInStream` key. The valid
+ values are: "TRIM_HORIZON" and "LATEST". The default value is
+ "TRIM_HORIZON". For more information, go to `ShardIteratorType`_ in
+ the Amazon Kinesis Service API Reference.
+
+ """
+
+ uri = '/2014-11-13/event-source-mappings/'
+ params = {
+ 'EventSource': event_source,
+ 'FunctionName': function_name,
+ 'Role': role,
+ }
+ headers = {}
+ query_params = {}
+ if batch_size is not None:
+ params['BatchSize'] = batch_size
+ if parameters is not None:
+ params['Parameters'] = parameters
+ return self.make_request('POST', uri, expected_status=200,
+ data=json.dumps(params), headers=headers,
+ params=query_params)
+
+ def delete_function(self, function_name):
+ """
+ Deletes the specified Lambda function code and configuration.
+
+ This operation requires permission for the
+ `lambda:DeleteFunction` action.
+
+ :type function_name: string
+ :param function_name: The Lambda function to delete.
+
+ """
+
+ uri = '/2014-11-13/functions/{0}'.format(function_name)
+ return self.make_request('DELETE', uri, expected_status=204)
+
+ def get_event_source(self, uuid):
+ """
+ Returns configuration information for the specified event
+ source mapping (see AddEventSource).
+
+ This operation requires permission for the
+ `lambda:GetEventSource` action.
+
+ :type uuid: string
+ :param uuid: The AWS Lambda assigned ID of the event source mapping.
+
+ """
+
+ uri = '/2014-11-13/event-source-mappings/{0}'.format(uuid)
+ return self.make_request('GET', uri, expected_status=200)
+
+ def get_function(self, function_name):
+ """
+ Returns the configuration information of the Lambda function
+ and a presigned URL link to the .zip file you uploaded with
+ UploadFunction so you can download the .zip file. Note that
+ the URL is valid for up to 10 minutes. The configuration
+ information is the same information you provided as parameters
+ when uploading the function.
+
+ This operation requires permission for the
+ `lambda:GetFunction` action.
+
+ :type function_name: string
+ :param function_name: The Lambda function name.
+
+ """
+
+ uri = '/2014-11-13/functions/{0}'.format(function_name)
+ return self.make_request('GET', uri, expected_status=200)
+
+ def get_function_configuration(self, function_name):
+ """
+ Returns the configuration information of the Lambda function.
+ This the same information you provided as parameters when
+ uploading the function by using UploadFunction.
+
+ This operation requires permission for the
+ `lambda:GetFunctionConfiguration` operation.
+
+ :type function_name: string
+ :param function_name: The name of the Lambda function for which you
+ want to retrieve the configuration information.
+
+ """
+
+ uri = '/2014-11-13/functions/{0}/configuration'.format(function_name)
+ return self.make_request('GET', uri, expected_status=200)
+
+ def invoke_async(self, function_name, invoke_args):
+ """
+ Submits an invocation request to AWS Lambda. Upon receiving
+ the request, Lambda executes the specified function
+ asynchronously. To see the logs generated by the Lambda
+ function execution, see the CloudWatch logs console.
+
+ This operation requires permission for the
+ `lambda:InvokeAsync` action.
+
+ :type function_name: string
+ :param function_name: The Lambda function name.
+
+ :type invoke_args: blob
+ :param invoke_args: JSON that you want to provide to your Lambda
+ function as input.
+
+ """
+ uri = '/2014-11-13/functions/{0}/invoke-async/'.format(function_name)
+ headers = {}
+ query_params = {}
+ try:
+ content_length = str(len(invoke_args))
+ except (TypeError, AttributeError):
+ # If a file like object is provided and seekable, try to retrieve
+ # the file size via fstat.
+ try:
+ invoke_args.tell()
+ except (AttributeError, OSError, IOError):
+ raise TypeError(
+ "File-like object passed to parameter "
+ "``invoke_args`` must be seekable."
+ )
+ content_length = str(os.fstat(invoke_args.fileno()).st_size)
+ headers['Content-Length'] = content_length
+ return self.make_request('POST', uri, expected_status=202,
+ data=invoke_args, headers=headers,
+ params=query_params)
+
+ def list_event_sources(self, event_source_arn=None, function_name=None,
+ marker=None, max_items=None):
+ """
+ Returns a list of event source mappings. For each mapping, the
+ API returns configuration information (see AddEventSource).
+ You can optionally specify filters to retrieve specific event
+ source mappings.
+
+ This operation requires permission for the
+ `lambda:ListEventSources` action.
+
+ :type event_source_arn: string
+ :param event_source_arn: The Amazon Resource Name (ARN) of the Amazon
+ Kinesis stream.
+
+ :type function_name: string
+ :param function_name: The name of the AWS Lambda function.
+
+ :type marker: string
+ :param marker: Optional string. An opaque pagination token returned
+ from a previous `ListEventSources` operation. If present, specifies
+ to continue the list from where the returning call left off.
+
+ :type max_items: integer
+ :param max_items: Optional integer. Specifies the maximum number of
+ event sources to return in response. This value must be greater
+ than 0.
+
+ """
+
+ uri = '/2014-11-13/event-source-mappings/'
+ params = {}
+ headers = {}
+ query_params = {}
+ if event_source_arn is not None:
+ query_params['EventSource'] = event_source_arn
+ if function_name is not None:
+ query_params['FunctionName'] = function_name
+ if marker is not None:
+ query_params['Marker'] = marker
+ if max_items is not None:
+ query_params['MaxItems'] = max_items
+ return self.make_request('GET', uri, expected_status=200,
+ data=json.dumps(params), headers=headers,
+ params=query_params)
+
+ def list_functions(self, marker=None, max_items=None):
+ """
+ Returns a list of your Lambda functions. For each function,
+ the response includes the function configuration information.
+ You must use GetFunction to retrieve the code for your
+ function.
+
+ This operation requires permission for the
+ `lambda:ListFunctions` action.
+
+ :type marker: string
+ :param marker: Optional string. An opaque pagination token returned
+ from a previous `ListFunctions` operation. If present, indicates
+ where to continue the listing.
+
+ :type max_items: integer
+ :param max_items: Optional integer. Specifies the maximum number of AWS
+ Lambda functions to return in response. This parameter value must
+ be greater than 0.
+
+ """
+
+ uri = '/2014-11-13/functions/'
+ params = {}
+ headers = {}
+ query_params = {}
+ if marker is not None:
+ query_params['Marker'] = marker
+ if max_items is not None:
+ query_params['MaxItems'] = max_items
+ return self.make_request('GET', uri, expected_status=200,
+ data=json.dumps(params), headers=headers,
+ params=query_params)
+
+ def remove_event_source(self, uuid):
+ """
+ Removes an event source mapping. This means AWS Lambda will no
+ longer invoke the function for events in the associated
+ source.
+
+ This operation requires permission for the
+ `lambda:RemoveEventSource` action.
+
+ :type uuid: string
+ :param uuid: The event source mapping ID.
+
+ """
+
+ uri = '/2014-11-13/event-source-mappings/{0}'.format(uuid)
+ return self.make_request('DELETE', uri, expected_status=204)
+
+ def update_function_configuration(self, function_name, role=None,
+ handler=None, description=None,
+ timeout=None, memory_size=None):
+ """
+ Updates the configuration parameters for the specified Lambda
+ function by using the values provided in the request. You
+ provide only the parameters you want to change. This operation
+ must only be used on an existing Lambda function and cannot be
+ used to update the function's code.
+
+ This operation requires permission for the
+ `lambda:UpdateFunctionConfiguration` action.
+
+ :type function_name: string
+ :param function_name: The name of the Lambda function.
+
+ :type role: string
+ :param role: The Amazon Resource Name (ARN) of the IAM role that Lambda
+ will assume when it executes your function.
+
+ :type handler: string
+ :param handler: The function that Lambda calls to begin executing your
+ function. For Node.js, it is the module-name.export value in your
+ function.
+
+ :type description: string
+ :param description: A short user-defined function description. Lambda
+ does not use this value. Assign a meaningful description as you see
+ fit.
+
+ :type timeout: integer
+ :param timeout: The function execution time at which Lambda should
+ terminate the function. Because the execution time has cost
+ implications, we recommend you set this value based on your
+ expected execution time. The default is 3 seconds.
+
+ :type memory_size: integer
+ :param memory_size: The amount of memory, in MB, your Lambda function
+ is given. Lambda uses this memory size to infer the amount of CPU
+ allocated to your function. Your function use-case determines your
+ CPU and memory requirements. For example, a database operation
+ might need less memory compared to an image processing function.
+ The default value is 128 MB. The value must be a multiple of 64 MB.
+
+ """
+
+ uri = '/2014-11-13/functions/{0}/configuration'.format(function_name)
+ params = {}
+ headers = {}
+ query_params = {}
+ if role is not None:
+ query_params['Role'] = role
+ if handler is not None:
+ query_params['Handler'] = handler
+ if description is not None:
+ query_params['Description'] = description
+ if timeout is not None:
+ query_params['Timeout'] = timeout
+ if memory_size is not None:
+ query_params['MemorySize'] = memory_size
+ return self.make_request('PUT', uri, expected_status=200,
+ data=json.dumps(params), headers=headers,
+ params=query_params)
+
+ def upload_function(self, function_name, function_zip, runtime, role,
+ handler, mode, description=None, timeout=None,
+ memory_size=None):
+ """
+ Creates a new Lambda function or updates an existing function.
+ The function metadata is created from the request parameters,
+ and the code for the function is provided by a .zip file in
+ the request body. If the function name already exists, the
+ existing Lambda function is updated with the new code and
+ metadata.
+
+ This operation requires permission for the
+ `lambda:UploadFunction` action.
+
+ :type function_name: string
+ :param function_name: The name you want to assign to the function you
+ are uploading. The function names appear in the console and are
+ returned in the ListFunctions API. Function names are used to
+ specify functions to other AWS Lambda APIs, such as InvokeAsync.
+
+ :type function_zip: blob
+ :param function_zip: A .zip file containing your packaged source code.
+        For more information about creating a .zip file, go to `AWS Lambda:
+ How it Works`_ in the AWS Lambda Developer Guide.
+
+ :type runtime: string
+ :param runtime: The runtime environment for the Lambda function you are
+ uploading. Currently, Lambda supports only "nodejs" as the runtime.
+
+ :type role: string
+ :param role: The Amazon Resource Name (ARN) of the IAM role that Lambda
+ assumes when it executes your function to access any other Amazon
+ Web Services (AWS) resources.
+
+ :type handler: string
+ :param handler: The function that Lambda calls to begin execution. For
+ Node.js, it is the module-name . export value in your function.
+
+ :type mode: string
+ :param mode: How the Lambda function will be invoked. Lambda supports
+ only the "event" mode.
+
+ :type description: string
+ :param description: A short, user-defined function description. Lambda
+ does not use this value. Assign a meaningful description as you see
+ fit.
+
+ :type timeout: integer
+ :param timeout: The function execution time at which Lambda should
+ terminate the function. Because the execution time has cost
+ implications, we recommend you set this value based on your
+ expected execution time. The default is 3 seconds.
+
+ :type memory_size: integer
+ :param memory_size: The amount of memory, in MB, your Lambda function
+ is given. Lambda uses this memory size to infer the amount of CPU
+ allocated to your function. Your function use-case determines your
+ CPU and memory requirements. For example, database operation might
+ need less memory compared to image processing function. The default
+ value is 128 MB. The value must be a multiple of 64 MB.
+
+ """
+ uri = '/2014-11-13/functions/{0}'.format(function_name)
+ headers = {}
+ query_params = {}
+ if runtime is not None:
+ query_params['Runtime'] = runtime
+ if role is not None:
+ query_params['Role'] = role
+ if handler is not None:
+ query_params['Handler'] = handler
+ if mode is not None:
+ query_params['Mode'] = mode
+ if description is not None:
+ query_params['Description'] = description
+ if timeout is not None:
+ query_params['Timeout'] = timeout
+ if memory_size is not None:
+ query_params['MemorySize'] = memory_size
+
+ try:
+ content_length = str(len(function_zip))
+ except (TypeError, AttributeError):
+ # If a file like object is provided and seekable, try to retrieve
+ # the file size via fstat.
+ try:
+ function_zip.tell()
+ except (AttributeError, OSError, IOError):
+ raise TypeError(
+ "File-like object passed to parameter "
+ "``function_zip`` must be seekable."
+ )
+ content_length = str(os.fstat(function_zip.fileno()).st_size)
+ headers['Content-Length'] = content_length
+ return self.make_request('PUT', uri, expected_status=201,
+ data=function_zip, headers=headers,
+ params=query_params)
+
+ def make_request(self, verb, resource, headers=None, data='',
+ expected_status=None, params=None):
+ if headers is None:
+ headers = {}
+ response = AWSAuthConnection.make_request(
+ self, verb, resource, headers=headers, data=data, params=params)
+ body = response.read().decode('utf-8')
+ if body:
+ body = json.loads(body)
+ if response.status == expected_status:
+ return body
+ else:
+ error_type = response.getheader('x-amzn-ErrorType').split(':')[0]
+ error_class = self._faults.get(error_type, self.ResponseError)
+ raise error_class(response.status, response.reason, body)
diff --git a/boto/cloudhsm/__init__.py b/boto/cloudhsm/__init__.py
new file mode 100644
index 00000000..2d075c48
--- /dev/null
+++ b/boto/cloudhsm/__init__.py
@@ -0,0 +1,41 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.regioninfo import RegionInfo, get_regions
+
+
+def regions():
+ """
+ Get all available regions for the AWS CloudHSM service.
+
+ :rtype: list
+ :return: A list of :class:`boto.regioninfo.RegionInfo`
+ """
+    from boto.cloudhsm.layer1 import CloudHSMConnection
+ return get_regions('cloudhsm', connection_cls=CloudHSMConnection)
+
+
+def connect_to_region(region_name, **kw_params):
+ for region in regions():
+ if region.name == region_name:
+ return region.connect(**kw_params)
+ return None
diff --git a/boto/cloudhsm/exceptions.py b/boto/cloudhsm/exceptions.py
new file mode 100644
index 00000000..1e14abe1
--- /dev/null
+++ b/boto/cloudhsm/exceptions.py
@@ -0,0 +1,35 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from boto.exception import BotoServerError
+
+
+class InvalidRequestException(BotoServerError):
+ pass
+
+
+class CloudHsmServiceException(BotoServerError):
+ pass
+
+
+class CloudHsmInternalException(BotoServerError):
+ pass
diff --git a/boto/cloudhsm/layer1.py b/boto/cloudhsm/layer1.py
new file mode 100644
index 00000000..e0877736
--- /dev/null
+++ b/boto/cloudhsm/layer1.py
@@ -0,0 +1,448 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import boto
+from boto.compat import json
+from boto.connection import AWSQueryConnection
+from boto.regioninfo import RegionInfo
+from boto.exception import JSONResponseError
+from boto.cloudhsm import exceptions
+
+
+class CloudHSMConnection(AWSQueryConnection):
+ """
+ AWS CloudHSM Service
+ """
+ APIVersion = "2014-05-30"
+ DefaultRegionName = "us-east-1"
+ DefaultRegionEndpoint = "cloudhsm.us-east-1.amazonaws.com"
+ ServiceName = "CloudHSM"
+ TargetPrefix = "CloudHsmFrontendService"
+ ResponseError = JSONResponseError
+
+ _faults = {
+ "InvalidRequestException": exceptions.InvalidRequestException,
+ "CloudHsmServiceException": exceptions.CloudHsmServiceException,
+ "CloudHsmInternalException": exceptions.CloudHsmInternalException,
+ }
+
+
+ def __init__(self, **kwargs):
+ region = kwargs.pop('region', None)
+ if not region:
+ region = RegionInfo(self, self.DefaultRegionName,
+ self.DefaultRegionEndpoint)
+
+ if 'host' not in kwargs or kwargs['host'] is None:
+ kwargs['host'] = region.endpoint
+
+ super(CloudHSMConnection, self).__init__(**kwargs)
+ self.region = region
+
+ def _required_auth_capability(self):
+ return ['hmac-v4']
+
+ def create_hapg(self, label):
+ """
+ Creates a high-availability partition group. A high-
+ availability partition group is a group of partitions that
+ spans multiple physical HSMs.
+
+ :type label: string
+ :param label: The label of the new high-availability partition group.
+
+ """
+ params = {'Label': label, }
+ return self.make_request(action='CreateHapg',
+ body=json.dumps(params))
+
+ def create_hsm(self, subnet_id, ssh_key, iam_role_arn, subscription_type,
+ eni_ip=None, external_id=None, client_token=None,
+ syslog_ip=None):
+ """
+ Creates an uninitialized HSM instance. Running this command
+ provisions an HSM appliance and will result in charges to your
+ AWS account for the HSM.
+
+ :type subnet_id: string
+ :param subnet_id: The identifier of the subnet in your VPC in which to
+ place the HSM.
+
+ :type ssh_key: string
+ :param ssh_key: The SSH public key to install on the HSM.
+
+ :type eni_ip: string
+ :param eni_ip: The IP address to assign to the HSM's ENI.
+
+ :type iam_role_arn: string
+ :param iam_role_arn: The ARN of an IAM role to enable the AWS CloudHSM
+ service to allocate an ENI on your behalf.
+
+ :type external_id: string
+ :param external_id: The external ID from **IamRoleArn**, if present.
+
+ :type subscription_type: string
+ :param subscription_type: The subscription type.
+
+ :type client_token: string
+ :param client_token: A user-defined token to ensure idempotence.
+ Subsequent calls to this action with the same token will be
+ ignored.
+
+ :type syslog_ip: string
+ :param syslog_ip: The IP address for the syslog monitoring server.
+
+ """
+ params = {
+ 'SubnetId': subnet_id,
+ 'SshKey': ssh_key,
+ 'IamRoleArn': iam_role_arn,
+ 'SubscriptionType': subscription_type,
+ }
+ if eni_ip is not None:
+ params['EniIp'] = eni_ip
+ if external_id is not None:
+ params['ExternalId'] = external_id
+ if client_token is not None:
+ params['ClientToken'] = client_token
+ if syslog_ip is not None:
+ params['SyslogIp'] = syslog_ip
+ return self.make_request(action='CreateHsm',
+ body=json.dumps(params))
+
+ def create_luna_client(self, certificate, label=None):
+ """
+ Creates an HSM client.
+
+ :type label: string
+ :param label: The label for the client.
+
+ :type certificate: string
+ :param certificate: The contents of a Base64-Encoded X.509 v3
+ certificate to be installed on the HSMs used by this client.
+
+ """
+ params = {'Certificate': certificate, }
+ if label is not None:
+ params['Label'] = label
+ return self.make_request(action='CreateLunaClient',
+ body=json.dumps(params))
+
+ def delete_hapg(self, hapg_arn):
+ """
+ Deletes a high-availability partition group.
+
+ :type hapg_arn: string
+ :param hapg_arn: The ARN of the high-availability partition group to
+ delete.
+
+ """
+ params = {'HapgArn': hapg_arn, }
+ return self.make_request(action='DeleteHapg',
+ body=json.dumps(params))
+
+ def delete_hsm(self, hsm_arn):
+ """
+ Deletes an HSM. Once complete, this operation cannot be undone
+ and your key material cannot be recovered.
+
+ :type hsm_arn: string
+ :param hsm_arn: The ARN of the HSM to delete.
+
+ """
+ params = {'HsmArn': hsm_arn, }
+ return self.make_request(action='DeleteHsm',
+ body=json.dumps(params))
+
+ def delete_luna_client(self, client_arn):
+ """
+ Deletes a client.
+
+ :type client_arn: string
+ :param client_arn: The ARN of the client to delete.
+
+ """
+ params = {'ClientArn': client_arn, }
+ return self.make_request(action='DeleteLunaClient',
+ body=json.dumps(params))
+
+ def describe_hapg(self, hapg_arn):
+ """
+ Retrieves information about a high-availability partition
+ group.
+
+ :type hapg_arn: string
+ :param hapg_arn: The ARN of the high-availability partition group to
+ describe.
+
+ """
+ params = {'HapgArn': hapg_arn, }
+ return self.make_request(action='DescribeHapg',
+ body=json.dumps(params))
+
+ def describe_hsm(self, hsm_arn=None, hsm_serial_number=None):
+ """
+ Retrieves information about an HSM. You can identify the HSM
+ by its ARN or its serial number.
+
+ :type hsm_arn: string
+ :param hsm_arn: The ARN of the HSM. Either the HsmArn or the
+ SerialNumber parameter must be specified.
+
+ :type hsm_serial_number: string
+ :param hsm_serial_number: The serial number of the HSM. Either the
+ HsmArn or the HsmSerialNumber parameter must be specified.
+
+ """
+ params = {}
+ if hsm_arn is not None:
+ params['HsmArn'] = hsm_arn
+ if hsm_serial_number is not None:
+ params['HsmSerialNumber'] = hsm_serial_number
+ return self.make_request(action='DescribeHsm',
+ body=json.dumps(params))
+
+ def describe_luna_client(self, client_arn=None,
+ certificate_fingerprint=None):
+ """
+ Retrieves information about an HSM client.
+
+ :type client_arn: string
+ :param client_arn: The ARN of the client.
+
+ :type certificate_fingerprint: string
+ :param certificate_fingerprint: The certificate fingerprint.
+
+ """
+ params = {}
+ if client_arn is not None:
+ params['ClientArn'] = client_arn
+ if certificate_fingerprint is not None:
+ params['CertificateFingerprint'] = certificate_fingerprint
+ return self.make_request(action='DescribeLunaClient',
+ body=json.dumps(params))
+
+ def get_config(self, client_arn, client_version, hapg_list):
+ """
+ Gets the configuration files necessary to connect to all high
+ availability partition groups the client is associated with.
+
+ :type client_arn: string
+ :param client_arn: The ARN of the client.
+
+ :type client_version: string
+ :param client_version: The client version.
+
+ :type hapg_list: list
+ :param hapg_list: A list of ARNs that identify the high-availability
+ partition groups that are associated with the client.
+
+ """
+ params = {
+ 'ClientArn': client_arn,
+ 'ClientVersion': client_version,
+ 'HapgList': hapg_list,
+ }
+ return self.make_request(action='GetConfig',
+ body=json.dumps(params))
+
+ def list_available_zones(self):
+ """
+ Lists the Availability Zones that have available AWS CloudHSM
+ capacity.
+
+
+ """
+ params = {}
+ return self.make_request(action='ListAvailableZones',
+ body=json.dumps(params))
+
+ def list_hapgs(self, next_token=None):
+ """
+ Lists the high-availability partition groups for the account.
+
+ This operation supports pagination with the use of the
+ NextToken member. If more results are available, the NextToken
+ member of the response contains a token that you pass in the
+ next call to ListHapgs to retrieve the next set of items.
+
+ :type next_token: string
+ :param next_token: The NextToken value from a previous call to
+ ListHapgs. Pass null if this is the first call.
+
+ """
+ params = {}
+ if next_token is not None:
+ params['NextToken'] = next_token
+ return self.make_request(action='ListHapgs',
+ body=json.dumps(params))
+
+ def list_hsms(self, next_token=None):
+ """
+ Retrieves the identifiers of all of the HSMs provisioned for
+ the current customer.
+
+ This operation supports pagination with the use of the
+ NextToken member. If more results are available, the NextToken
+ member of the response contains a token that you pass in the
+ next call to ListHsms to retrieve the next set of items.
+
+ :type next_token: string
+ :param next_token: The NextToken value from a previous call to
+ ListHsms. Pass null if this is the first call.
+
+ """
+ params = {}
+ if next_token is not None:
+ params['NextToken'] = next_token
+ return self.make_request(action='ListHsms',
+ body=json.dumps(params))
+
+ def list_luna_clients(self, next_token=None):
+ """
+ Lists all of the clients.
+
+ This operation supports pagination with the use of the
+ NextToken member. If more results are available, the NextToken
+ member of the response contains a token that you pass in the
+ next call to ListLunaClients to retrieve the next set of
+ items.
+
+ :type next_token: string
+ :param next_token: The NextToken value from a previous call to
+ ListLunaClients. Pass null if this is the first call.
+
+ """
+ params = {}
+ if next_token is not None:
+ params['NextToken'] = next_token
+ return self.make_request(action='ListLunaClients',
+ body=json.dumps(params))
+
+ def modify_hapg(self, hapg_arn, label=None, partition_serial_list=None):
+ """
+ Modifies an existing high-availability partition group.
+
+ :type hapg_arn: string
+ :param hapg_arn: The ARN of the high-availability partition group to
+ modify.
+
+ :type label: string
+ :param label: The new label for the high-availability partition group.
+
+ :type partition_serial_list: list
+ :param partition_serial_list: The list of partition serial numbers to
+ make members of the high-availability partition group.
+
+ """
+ params = {'HapgArn': hapg_arn, }
+ if label is not None:
+ params['Label'] = label
+ if partition_serial_list is not None:
+ params['PartitionSerialList'] = partition_serial_list
+ return self.make_request(action='ModifyHapg',
+ body=json.dumps(params))
+
+ def modify_hsm(self, hsm_arn, subnet_id=None, eni_ip=None,
+ iam_role_arn=None, external_id=None, syslog_ip=None):
+ """
+ Modifies an HSM.
+
+ :type hsm_arn: string
+ :param hsm_arn: The ARN of the HSM to modify.
+
+ :type subnet_id: string
+ :param subnet_id: The new identifier of the subnet that the HSM is in.
+
+ :type eni_ip: string
+ :param eni_ip: The new IP address for the elastic network interface
+ attached to the HSM.
+
+ :type iam_role_arn: string
+ :param iam_role_arn: The new IAM role ARN.
+
+ :type external_id: string
+ :param external_id: The new external ID.
+
+ :type syslog_ip: string
+ :param syslog_ip: The new IP address for the syslog monitoring server.
+
+ """
+ params = {'HsmArn': hsm_arn, }
+ if subnet_id is not None:
+ params['SubnetId'] = subnet_id
+ if eni_ip is not None:
+ params['EniIp'] = eni_ip
+ if iam_role_arn is not None:
+ params['IamRoleArn'] = iam_role_arn
+ if external_id is not None:
+ params['ExternalId'] = external_id
+ if syslog_ip is not None:
+ params['SyslogIp'] = syslog_ip
+ return self.make_request(action='ModifyHsm',
+ body=json.dumps(params))
+
+ def modify_luna_client(self, client_arn, certificate):
+ """
+ Modifies the certificate used by the client.
+
+ This action can potentially start a workflow to install the
+ new certificate on the client's HSMs.
+
+ :type client_arn: string
+ :param client_arn: The ARN of the client.
+
+ :type certificate: string
+ :param certificate: The new certificate for the client.
+
+ """
+ params = {
+ 'ClientArn': client_arn,
+ 'Certificate': certificate,
+ }
+ return self.make_request(action='ModifyLunaClient',
+ body=json.dumps(params))
+
+ def make_request(self, action, body):
+ headers = {
+ 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
+ 'Host': self.region.endpoint,
+ 'Content-Type': 'application/x-amz-json-1.1',
+ 'Content-Length': str(len(body)),
+ }
+ http_request = self.build_base_http_request(
+ method='POST', path='/', auth_path='/', params={},
+ headers=headers, data=body)
+ response = self._mexe(http_request, sender=None,
+ override_num_retries=10)
+ response_body = response.read().decode('utf-8')
+ boto.log.debug(response_body)
+ if response.status == 200:
+ if response_body:
+ return json.loads(response_body)
+ else:
+ json_body = json.loads(response_body)
+ fault_name = json_body.get('__type', None)
+ exception_class = self._faults.get(fault_name, self.ResponseError)
+ raise exception_class(response.status, response.reason,
+ body=json_body)
+
diff --git a/boto/codedeploy/__init__.py b/boto/codedeploy/__init__.py
new file mode 100644
index 00000000..24e1b505
--- /dev/null
+++ b/boto/codedeploy/__init__.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.regioninfo import RegionInfo, get_regions
+
+
+def regions():
+ """
+ Get all available regions for the AWS CodeDeploy service.
+
+ :rtype: list
+ :return: A list of :class:`boto.regioninfo.RegionInfo`
+ """
+ from boto.codedeploy.layer1 import CodeDeployConnection
+ return get_regions('codedeploy', connection_cls=CodeDeployConnection)
+
+
+def connect_to_region(region_name, **kw_params):
+ for region in regions():
+ if region.name == region_name:
+ return region.connect(**kw_params)
+ return None
diff --git a/boto/codedeploy/exceptions.py b/boto/codedeploy/exceptions.py
new file mode 100644
index 00000000..f23db8f0
--- /dev/null
+++ b/boto/codedeploy/exceptions.py
@@ -0,0 +1,199 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from boto.exception import BotoServerError
+
+
+class InvalidDeploymentIdException(BotoServerError):
+ pass
+
+
+class InvalidDeploymentGroupNameException(BotoServerError):
+ pass
+
+
+class DeploymentConfigAlreadyExistsException(BotoServerError):
+ pass
+
+
+class InvalidRoleException(BotoServerError):
+ pass
+
+
+class RoleRequiredException(BotoServerError):
+ pass
+
+
+class DeploymentGroupAlreadyExistsException(BotoServerError):
+ pass
+
+
+class DeploymentConfigLimitExceededException(BotoServerError):
+ pass
+
+
+class InvalidNextTokenException(BotoServerError):
+ pass
+
+
+class InvalidDeploymentConfigNameException(BotoServerError):
+ pass
+
+
+class InvalidSortByException(BotoServerError):
+ pass
+
+
+class InstanceDoesNotExistException(BotoServerError):
+ pass
+
+
+class InvalidMinimumHealthyHostValueException(BotoServerError):
+ pass
+
+
+class ApplicationLimitExceededException(BotoServerError):
+ pass
+
+
+class ApplicationNameRequiredException(BotoServerError):
+ pass
+
+
+class InvalidEC2TagException(BotoServerError):
+ pass
+
+
+class DeploymentDoesNotExistException(BotoServerError):
+ pass
+
+
+class DeploymentLimitExceededException(BotoServerError):
+ pass
+
+
+class InvalidInstanceStatusException(BotoServerError):
+ pass
+
+
+class RevisionRequiredException(BotoServerError):
+ pass
+
+
+class InvalidBucketNameFilterException(BotoServerError):
+ pass
+
+
+class DeploymentGroupLimitExceededException(BotoServerError):
+ pass
+
+
+class DeploymentGroupDoesNotExistException(BotoServerError):
+ pass
+
+
+class DeploymentConfigNameRequiredException(BotoServerError):
+ pass
+
+
+class DeploymentAlreadyCompletedException(BotoServerError):
+ pass
+
+
+class RevisionDoesNotExistException(BotoServerError):
+ pass
+
+
+class DeploymentGroupNameRequiredException(BotoServerError):
+ pass
+
+
+class DeploymentIdRequiredException(BotoServerError):
+ pass
+
+
+class DeploymentConfigDoesNotExistException(BotoServerError):
+ pass
+
+
+class BucketNameFilterRequiredException(BotoServerError):
+ pass
+
+
+class InvalidTimeRangeException(BotoServerError):
+ pass
+
+
+class ApplicationDoesNotExistException(BotoServerError):
+ pass
+
+
+class InvalidRevisionException(BotoServerError):
+ pass
+
+
+class InvalidSortOrderException(BotoServerError):
+ pass
+
+
+class InvalidOperationException(BotoServerError):
+ pass
+
+
+class InvalidAutoScalingGroupException(BotoServerError):
+ pass
+
+
+class InvalidApplicationNameException(BotoServerError):
+ pass
+
+
+class DescriptionTooLongException(BotoServerError):
+ pass
+
+
+class ApplicationAlreadyExistsException(BotoServerError):
+ pass
+
+
+class InvalidDeployedStateFilterException(BotoServerError):
+ pass
+
+
+class DeploymentNotStartedException(BotoServerError):
+ pass
+
+
+class DeploymentConfigInUseException(BotoServerError):
+ pass
+
+
+class InstanceIdRequiredException(BotoServerError):
+ pass
+
+
+class InvalidKeyPrefixFilterException(BotoServerError):
+ pass
+
+
+class InvalidDeploymentStatusException(BotoServerError):
+ pass
diff --git a/boto/codedeploy/layer1.py b/boto/codedeploy/layer1.py
new file mode 100644
index 00000000..6c61a083
--- /dev/null
+++ b/boto/codedeploy/layer1.py
@@ -0,0 +1,899 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import boto
+from boto.compat import json
+from boto.connection import AWSQueryConnection
+from boto.regioninfo import RegionInfo
+from boto.exception import JSONResponseError
+from boto.codedeploy import exceptions
+
+
+class CodeDeployConnection(AWSQueryConnection):
+ """
+ AWS CodeDeploy **Overview**
+ This is the AWS CodeDeploy API Reference. This guide provides
+ descriptions of the AWS CodeDeploy APIs. For additional
+ information, see the `AWS CodeDeploy User Guide`_.
+ **Using the APIs**
+ You can use the AWS CodeDeploy APIs to work with the following
+ items:
+
+
+ + Applications , which are unique identifiers that AWS CodeDeploy
+ uses to ensure that the correct combinations of revisions,
+ deployment configurations, and deployment groups are being
+ referenced during deployments. You can work with applications by
+ calling CreateApplication, DeleteApplication, GetApplication,
+ ListApplications, BatchGetApplications, and UpdateApplication to
+ create, delete, and get information about applications, and to
+ change information about an application, respectively.
+ + Deployment configurations , which are sets of deployment rules
+ and deployment success and failure conditions that AWS CodeDeploy
+ uses during deployments. You can work with deployment
+ configurations by calling CreateDeploymentConfig,
+ DeleteDeploymentConfig, GetDeploymentConfig, and
+ ListDeploymentConfigs to create, delete, and get information about
+ deployment configurations, respectively.
+ + Deployment groups , which represent groups of Amazon EC2
+ instances to which application revisions can be deployed. You can
+ work with deployment groups by calling CreateDeploymentGroup,
+ DeleteDeploymentGroup, GetDeploymentGroup, ListDeploymentGroups,
+ and UpdateDeploymentGroup to create, delete, and get information
+ about single and multiple deployment groups, and to change
+ information about a deployment group, respectively.
+ + Deployment instances (also known simply as instances ), which
+ represent Amazon EC2 instances to which application revisions are
+ deployed. Deployment instances are identified by their Amazon EC2
+ tags or Auto Scaling group names. Deployment instances belong to
+ deployment groups. You can work with deployment instances by
+ calling GetDeploymentInstance and ListDeploymentInstances to get
+ information about single and multiple deployment instances,
+ respectively.
+ + Deployments , which represent the process of deploying revisions
+ to deployment groups. You can work with deployments by calling
+ CreateDeployment, GetDeployment, ListDeployments,
+ BatchGetDeployments, and StopDeployment to create and get
+ information about deployments, and to stop a deployment,
+ respectively.
+ + Application revisions (also known simply as revisions ), which
+ are archive files that are stored in Amazon S3 buckets or GitHub
+ repositories. These revisions contain source content (such as
+ source code, web pages, executable files, any deployment scripts,
+ and similar) along with an Application Specification file (AppSpec
+ file). (The AppSpec file is unique to AWS CodeDeploy; it defines a
+ series of deployment actions that you want AWS CodeDeploy to
+ execute.) An application revision is uniquely identified by its
+ Amazon S3 object key and its ETag, version, or both. Application
+ revisions are deployed to deployment groups. You can work with
+ application revisions by calling GetApplicationRevision,
+ ListApplicationRevisions, and RegisterApplicationRevision to get
+ information about application revisions and to inform AWS
+ CodeDeploy about an application revision, respectively.
+ """
+ APIVersion = "2014-10-06"
+ DefaultRegionName = "us-east-1"
+ DefaultRegionEndpoint = "codedeploy.us-east-1.amazonaws.com"
+ ServiceName = "codedeploy"
+ TargetPrefix = "CodeDeploy_20141006"
+ ResponseError = JSONResponseError
+
+ _faults = {
+ "InvalidDeploymentIdException": exceptions.InvalidDeploymentIdException,
+ "InvalidDeploymentGroupNameException": exceptions.InvalidDeploymentGroupNameException,
+ "DeploymentConfigAlreadyExistsException": exceptions.DeploymentConfigAlreadyExistsException,
+ "InvalidRoleException": exceptions.InvalidRoleException,
+ "RoleRequiredException": exceptions.RoleRequiredException,
+ "DeploymentGroupAlreadyExistsException": exceptions.DeploymentGroupAlreadyExistsException,
+ "DeploymentConfigLimitExceededException": exceptions.DeploymentConfigLimitExceededException,
+ "InvalidNextTokenException": exceptions.InvalidNextTokenException,
+ "InvalidDeploymentConfigNameException": exceptions.InvalidDeploymentConfigNameException,
+ "InvalidSortByException": exceptions.InvalidSortByException,
+ "InstanceDoesNotExistException": exceptions.InstanceDoesNotExistException,
+ "InvalidMinimumHealthyHostValueException": exceptions.InvalidMinimumHealthyHostValueException,
+ "ApplicationLimitExceededException": exceptions.ApplicationLimitExceededException,
+ "ApplicationNameRequiredException": exceptions.ApplicationNameRequiredException,
+ "InvalidEC2TagException": exceptions.InvalidEC2TagException,
+ "DeploymentDoesNotExistException": exceptions.DeploymentDoesNotExistException,
+ "DeploymentLimitExceededException": exceptions.DeploymentLimitExceededException,
+ "InvalidInstanceStatusException": exceptions.InvalidInstanceStatusException,
+ "RevisionRequiredException": exceptions.RevisionRequiredException,
+ "InvalidBucketNameFilterException": exceptions.InvalidBucketNameFilterException,
+ "DeploymentGroupLimitExceededException": exceptions.DeploymentGroupLimitExceededException,
+ "DeploymentGroupDoesNotExistException": exceptions.DeploymentGroupDoesNotExistException,
+ "DeploymentConfigNameRequiredException": exceptions.DeploymentConfigNameRequiredException,
+ "DeploymentAlreadyCompletedException": exceptions.DeploymentAlreadyCompletedException,
+ "RevisionDoesNotExistException": exceptions.RevisionDoesNotExistException,
+ "DeploymentGroupNameRequiredException": exceptions.DeploymentGroupNameRequiredException,
+ "DeploymentIdRequiredException": exceptions.DeploymentIdRequiredException,
+ "DeploymentConfigDoesNotExistException": exceptions.DeploymentConfigDoesNotExistException,
+ "BucketNameFilterRequiredException": exceptions.BucketNameFilterRequiredException,
+ "InvalidTimeRangeException": exceptions.InvalidTimeRangeException,
+ "ApplicationDoesNotExistException": exceptions.ApplicationDoesNotExistException,
+ "InvalidRevisionException": exceptions.InvalidRevisionException,
+ "InvalidSortOrderException": exceptions.InvalidSortOrderException,
+ "InvalidOperationException": exceptions.InvalidOperationException,
+ "InvalidAutoScalingGroupException": exceptions.InvalidAutoScalingGroupException,
+ "InvalidApplicationNameException": exceptions.InvalidApplicationNameException,
+ "DescriptionTooLongException": exceptions.DescriptionTooLongException,
+ "ApplicationAlreadyExistsException": exceptions.ApplicationAlreadyExistsException,
+ "InvalidDeployedStateFilterException": exceptions.InvalidDeployedStateFilterException,
+ "DeploymentNotStartedException": exceptions.DeploymentNotStartedException,
+ "DeploymentConfigInUseException": exceptions.DeploymentConfigInUseException,
+ "InstanceIdRequiredException": exceptions.InstanceIdRequiredException,
+ "InvalidKeyPrefixFilterException": exceptions.InvalidKeyPrefixFilterException,
+ "InvalidDeploymentStatusException": exceptions.InvalidDeploymentStatusException,
+ }
+
+
+ def __init__(self, **kwargs):
+ region = kwargs.pop('region', None)
+ if not region:
+ region = RegionInfo(self, self.DefaultRegionName,
+ self.DefaultRegionEndpoint)
+
+ if 'host' not in kwargs or kwargs['host'] is None:
+ kwargs['host'] = region.endpoint
+
+ super(CodeDeployConnection, self).__init__(**kwargs)
+ self.region = region
+
+ def _required_auth_capability(self):
+ return ['hmac-v4']
+
+ def batch_get_applications(self, application_names=None):
+ """
+ Gets information about one or more applications.
+
+ :type application_names: list
+ :param application_names: A list of application names, with multiple
+ application names separated by spaces.
+
+ """
+ params = {}
+ if application_names is not None:
+ params['applicationNames'] = application_names
+ return self.make_request(action='BatchGetApplications',
+ body=json.dumps(params))
+
+ def batch_get_deployments(self, deployment_ids=None):
+ """
+ Gets information about one or more deployments.
+
+ :type deployment_ids: list
+ :param deployment_ids: A list of deployment IDs, with multiple
+ deployment IDs separated by spaces.
+
+ """
+ params = {}
+ if deployment_ids is not None:
+ params['deploymentIds'] = deployment_ids
+ return self.make_request(action='BatchGetDeployments',
+ body=json.dumps(params))
+
+ def create_application(self, application_name):
+ """
+ Creates a new application.
+
+ :type application_name: string
+ :param application_name: The name of the application. This name must be
+ unique within the AWS user account.
+
+ """
+ params = {'applicationName': application_name, }
+ return self.make_request(action='CreateApplication',
+ body=json.dumps(params))
+
def create_deployment(self, application_name, deployment_group_name=None,
                      revision=None, deployment_config_name=None,
                      description=None,
                      ignore_application_stop_failures=None):
    """
    Deploy an application revision to the specified deployment group.

    :type application_name: string
    :param application_name: Name of an existing CodeDeploy
        application within the AWS user account.

    :type deployment_group_name: string
    :param deployment_group_name: The deployment group's name.

    :type revision: dict
    :param revision: The revision to deploy: its type and location.

    :type deployment_config_name: string
    :param deployment_config_name: Name of an existing deployment
        configuration. When omitted, the deployment group's configured
        value applies, falling back to CodeDeployDefault.OneAtATime.

    :type description: string
    :param description: A comment about the deployment.

    :type ignore_application_stop_failures: boolean
    :param ignore_application_stop_failures: When true, a failure of
        the ApplicationStop lifecycle event on an instance does not
        mark the deployment to that instance as failed; the deployment
        continues to the BeforeInstall event.
    """
    payload = {'applicationName': application_name}
    optional = (
        ('deploymentGroupName', deployment_group_name),
        ('revision', revision),
        ('deploymentConfigName', deployment_config_name),
        ('description', description),
        ('ignoreApplicationStopFailures', ignore_application_stop_failures),
    )
    payload.update((key, value) for key, value in optional
                   if value is not None)
    return self.make_request(action='CreateDeployment',
                             body=json.dumps(payload))
+
def create_deployment_config(self, deployment_config_name,
                             minimum_healthy_hosts=None):
    """
    Create a new deployment configuration.

    :type deployment_config_name: string
    :param deployment_config_name: Name for the new deployment
        configuration.

    :type minimum_healthy_hosts: dict
    :param minimum_healthy_hosts: The minimum number of healthy
        instances required at any time during the deployment, as a
        dict with a ``type`` (HOST_COUNT for an absolute count,
        FLEET_PERCENT for a percentage of the fleet, with fractional
        instances rounded up) and an integer ``value`` (e.g. type
        FLEET_PERCENT, value 95 for 95% healthy).
    """
    payload = {'deploymentConfigName': deployment_config_name}
    if minimum_healthy_hosts is not None:
        payload['minimumHealthyHosts'] = minimum_healthy_hosts
    return self.make_request(action='CreateDeploymentConfig',
                             body=json.dumps(payload))
+
def create_deployment_group(self, application_name,
                            deployment_group_name,
                            deployment_config_name=None,
                            ec_2_tag_filters=None,
                            auto_scaling_groups=None,
                            service_role_arn=None):
    """
    Create a new deployment group that application revisions can be
    deployed to.

    :type application_name: string
    :param application_name: Name of an existing CodeDeploy
        application within the AWS user account.

    :type deployment_group_name: string
    :param deployment_group_name: Name for the deployment group.

    :type deployment_config_name: string
    :param deployment_config_name: Optional deployment configuration:
        one of the predefined values
        (CodeDeployDefault.AllAtOnce, CodeDeployDefault.HalfAtATime,
        or CodeDeployDefault.OneAtATime — the default when nothing is
        specified for the deployment or the deployment group) or a
        custom configuration created via the create deployment
        configuration operation.

    :type ec_2_tag_filters: list
    :param ec_2_tag_filters: The Amazon EC2 tags to filter on.

    :type auto_scaling_groups: list
    :param auto_scaling_groups: Associated Auto Scaling groups.

    :type service_role_arn: string
    :param service_role_arn: A service role ARN that allows AWS
        CodeDeploy to act on the user's behalf when interacting with
        AWS services.
    """
    payload = {
        'applicationName': application_name,
        'deploymentGroupName': deployment_group_name,
    }
    optional = (
        ('deploymentConfigName', deployment_config_name),
        ('ec2TagFilters', ec_2_tag_filters),
        ('autoScalingGroups', auto_scaling_groups),
        ('serviceRoleArn', service_role_arn),
    )
    payload.update((key, value) for key, value in optional
                   if value is not None)
    return self.make_request(action='CreateDeploymentGroup',
                             body=json.dumps(payload))
+
def delete_application(self, application_name):
    """
    Delete an application.

    :type application_name: string
    :param application_name: Name of an existing CodeDeploy
        application within the AWS user account.
    """
    body = json.dumps({'applicationName': application_name})
    return self.make_request(action='DeleteApplication', body=body)
+
def delete_deployment_config(self, deployment_config_name):
    """
    Delete a deployment configuration.

    A configuration that is currently in use cannot be deleted, and
    the predefined configurations can never be deleted.

    :type deployment_config_name: string
    :param deployment_config_name: Name of an existing deployment
        configuration within the AWS user account.
    """
    body = json.dumps({'deploymentConfigName': deployment_config_name})
    return self.make_request(action='DeleteDeploymentConfig', body=body)
+
def delete_deployment_group(self, application_name,
                            deployment_group_name):
    """
    Delete a deployment group.

    :type application_name: string
    :param application_name: Name of an existing CodeDeploy
        application within the AWS user account.

    :type deployment_group_name: string
    :param deployment_group_name: Name of an existing deployment
        group for the specified application.
    """
    body = json.dumps({
        'applicationName': application_name,
        'deploymentGroupName': deployment_group_name,
    })
    return self.make_request(action='DeleteDeploymentGroup', body=body)
+
def get_application(self, application_name):
    """
    Get information about an application.

    :type application_name: string
    :param application_name: Name of an existing CodeDeploy
        application within the AWS user account.
    """
    body = json.dumps({'applicationName': application_name})
    return self.make_request(action='GetApplication', body=body)
+
def get_application_revision(self, application_name, revision):
    """
    Get information about an application revision.

    :type application_name: string
    :param application_name: Name of the application the revision
        belongs to.

    :type revision: dict
    :param revision: The revision to look up, including its type and
        location.
    """
    body = json.dumps({
        'applicationName': application_name,
        'revision': revision,
    })
    return self.make_request(action='GetApplicationRevision', body=body)
+
def get_deployment(self, deployment_id):
    """
    Get information about a deployment.

    :type deployment_id: string
    :param deployment_id: An existing deployment ID within the AWS
        user account.
    """
    body = json.dumps({'deploymentId': deployment_id})
    return self.make_request(action='GetDeployment', body=body)
+
def get_deployment_config(self, deployment_config_name):
    """
    Get information about a deployment configuration.

    :type deployment_config_name: string
    :param deployment_config_name: Name of an existing deployment
        configuration within the AWS user account.
    """
    body = json.dumps({'deploymentConfigName': deployment_config_name})
    return self.make_request(action='GetDeploymentConfig', body=body)
+
def get_deployment_group(self, application_name, deployment_group_name):
    """
    Get information about a deployment group.

    :type application_name: string
    :param application_name: Name of an existing CodeDeploy
        application within the AWS user account.

    :type deployment_group_name: string
    :param deployment_group_name: Name of an existing deployment
        group for the specified application.
    """
    body = json.dumps({
        'applicationName': application_name,
        'deploymentGroupName': deployment_group_name,
    })
    return self.make_request(action='GetDeploymentGroup', body=body)
+
def get_deployment_instance(self, deployment_id, instance_id):
    """
    Get information about an Amazon EC2 instance participating in a
    deployment.

    :type deployment_id: string
    :param deployment_id: The unique ID of a deployment.

    :type instance_id: string
    :param instance_id: The unique ID of an EC2 instance in the
        deployment's deployment group.
    """
    body = json.dumps({
        'deploymentId': deployment_id,
        'instanceId': instance_id,
    })
    return self.make_request(action='GetDeploymentInstance', body=body)
+
def list_application_revisions(self, application_name, sort_by=None,
                               sort_order=None, s_3_bucket=None,
                               s_3_key_prefix=None, deployed=None,
                               next_token=None):
    """
    List information about revisions for an application.

    :type application_name: string
    :param application_name: Name of an existing CodeDeploy
        application within the AWS user account.

    :type sort_by: string
    :param sort_by: Column to sort by: ``registerTime``,
        ``firstUsedTime`` (when the revisions were first used in a
        deployment), or ``lastUsedTime``. When omitted or null the
        order is arbitrary.

    :type sort_order: string
    :param sort_order: ``ascending`` (the default when unspecified)
        or ``descending``; null yields an arbitrary order.

    :type s_3_bucket: string
    :param s_3_bucket: Restrict the search to a specific Amazon S3
        bucket; when null, all of the user's buckets are searched.

    :type s_3_key_prefix: string
    :param s_3_key_prefix: Restrict the search to S3 objects with
        this key prefix.

    :type deployed: string
    :param deployed: Filter by whether a revision is the target
        revision of a deployment group: ``include``, ``exclude``, or
        ``ignore`` (list all revisions regardless).

    :type next_token: string
    :param next_token: Pagination token returned by a previous list
        application revisions call.
    """
    payload = {'applicationName': application_name}
    optional = (
        ('sortBy', sort_by),
        ('sortOrder', sort_order),
        ('s3Bucket', s_3_bucket),
        ('s3KeyPrefix', s_3_key_prefix),
        ('deployed', deployed),
        ('nextToken', next_token),
    )
    payload.update((key, value) for key, value in optional
                   if value is not None)
    return self.make_request(action='ListApplicationRevisions',
                             body=json.dumps(payload))
+
def list_applications(self, next_token=None):
    """
    List the applications registered within the AWS user account.

    :type next_token: string
    :param next_token: Pagination token returned by a previous list
        applications call.
    """
    payload = {} if next_token is None else {'nextToken': next_token}
    return self.make_request(action='ListApplications',
                             body=json.dumps(payload))
+
def list_deployment_configs(self, next_token=None):
    """
    List the deployment configurations within the AWS user account.

    :type next_token: string
    :param next_token: Pagination token returned by a previous list
        deployment configurations call.
    """
    payload = {} if next_token is None else {'nextToken': next_token}
    return self.make_request(action='ListDeploymentConfigs',
                             body=json.dumps(payload))
+
def list_deployment_groups(self, application_name, next_token=None):
    """
    List the deployment groups of an application registered within
    the AWS user account.

    :type application_name: string
    :param application_name: Name of an existing CodeDeploy
        application within the AWS user account.

    :type next_token: string
    :param next_token: Pagination token returned by a previous list
        deployment groups call.
    """
    payload = {'applicationName': application_name}
    if next_token is not None:
        payload['nextToken'] = next_token
    return self.make_request(action='ListDeploymentGroups',
                             body=json.dumps(payload))
+
def list_deployment_instances(self, deployment_id, next_token=None,
                              instance_status_filter=None):
    """
    List the Amazon EC2 instances for a deployment within the AWS
    user account.

    :type deployment_id: string
    :param deployment_id: The unique ID of a deployment.

    :type next_token: string
    :param next_token: Pagination token returned by a previous list
        deployment instances call.

    :type instance_status_filter: list
    :param instance_status_filter: Restrict the listing to instances
        with the given deployment statuses: Pending, InProgress,
        Succeeded, Failed, Skipped, or Unknown.
    """
    payload = {'deploymentId': deployment_id}
    optional = (
        ('nextToken', next_token),
        ('instanceStatusFilter', instance_status_filter),
    )
    payload.update((key, value) for key, value in optional
                   if value is not None)
    return self.make_request(action='ListDeploymentInstances',
                             body=json.dumps(payload))
+
def list_deployments(self, application_name=None,
                     deployment_group_name=None,
                     include_only_statuses=None, create_time_range=None,
                     next_token=None):
    """
    List the deployments under a deployment group for an application
    registered within the AWS user account.

    :type application_name: string
    :param application_name: Name of an existing CodeDeploy
        application within the AWS user account.

    :type deployment_group_name: string
    :param deployment_group_name: Name of an existing deployment
        group for the specified application.

    :type include_only_statuses: list
    :param include_only_statuses: Restrict the listing to deployments
        with the given statuses: Created, Queued, In Progress,
        Succeeded, Failed, or Aborted.

    :type create_time_range: dict
    :param create_time_range: A creation start/end time range used to
        return a subset of the deployments.

    :type next_token: string
    :param next_token: Pagination token returned by a previous list
        deployments call.
    """
    optional = (
        ('applicationName', application_name),
        ('deploymentGroupName', deployment_group_name),
        ('includeOnlyStatuses', include_only_statuses),
        ('createTimeRange', create_time_range),
        ('nextToken', next_token),
    )
    payload = dict((key, value) for key, value in optional
                   if value is not None)
    return self.make_request(action='ListDeployments',
                             body=json.dumps(payload))
+
def register_application_revision(self, application_name, revision,
                                  description=None):
    """
    Register a revision for the specified application with AWS
    CodeDeploy.

    :type application_name: string
    :param application_name: Name of an existing CodeDeploy
        application within the AWS user account.

    :type revision: dict
    :param revision: The revision to register, including its type
        and location.

    :type description: string
    :param description: A comment about the revision.
    """
    payload = {
        'applicationName': application_name,
        'revision': revision,
    }
    if description is not None:
        payload['description'] = description
    return self.make_request(action='RegisterApplicationRevision',
                             body=json.dumps(payload))
+
def stop_deployment(self, deployment_id):
    """
    Attempt to stop an ongoing deployment.

    :type deployment_id: string
    :param deployment_id: The unique ID of a deployment.
    """
    body = json.dumps({'deploymentId': deployment_id})
    return self.make_request(action='StopDeployment', body=body)
+
def update_application(self, application_name=None,
                       new_application_name=None):
    """
    Change an existing application's name.

    :type application_name: string
    :param application_name: The current name of the application.

    :type new_application_name: string
    :param new_application_name: The new name to give the
        application.
    """
    optional = (
        ('applicationName', application_name),
        ('newApplicationName', new_application_name),
    )
    payload = dict((key, value) for key, value in optional
                   if value is not None)
    return self.make_request(action='UpdateApplication',
                             body=json.dumps(payload))
+
def update_deployment_group(self, application_name,
                            current_deployment_group_name,
                            new_deployment_group_name=None,
                            deployment_config_name=None,
                            ec_2_tag_filters=None,
                            auto_scaling_groups=None,
                            service_role_arn=None):
    """
    Change information about an existing deployment group.

    :type application_name: string
    :param application_name: The application name corresponding to
        the deployment group to update.

    :type current_deployment_group_name: string
    :param current_deployment_group_name: The current name of the
        existing deployment group.

    :type new_deployment_group_name: string
    :param new_deployment_group_name: A replacement name for the
        deployment group, if it should change.

    :type deployment_config_name: string
    :param deployment_config_name: A replacement deployment
        configuration name, if it should change.

    :type ec_2_tag_filters: list
    :param ec_2_tag_filters: A replacement set of Amazon EC2 tag
        filters, if they should change.

    :type auto_scaling_groups: list
    :param auto_scaling_groups: A replacement list of Auto Scaling
        groups for the deployment group, if they should change.

    :type service_role_arn: string
    :param service_role_arn: A replacement service role ARN, if it
        should change.
    """
    payload = {
        'applicationName': application_name,
        'currentDeploymentGroupName': current_deployment_group_name,
    }
    optional = (
        ('newDeploymentGroupName', new_deployment_group_name),
        ('deploymentConfigName', deployment_config_name),
        ('ec2TagFilters', ec_2_tag_filters),
        ('autoScalingGroups', auto_scaling_groups),
        ('serviceRoleArn', service_role_arn),
    )
    payload.update((key, value) for key, value in optional
                   if value is not None)
    return self.make_request(action='UpdateDeploymentGroup',
                             body=json.dumps(payload))
+
def make_request(self, action, body):
    """
    POST *body* to the service endpoint as an x-amz-json-1.1 request
    and return the decoded JSON response.

    On a non-200 status the response body is parsed for a ``__type``
    fault name, which is mapped through ``self._faults`` to a
    specific exception class (falling back to ``self.ResponseError``)
    and raised. A successful response with an empty body yields None.

    :type action: string
    :param action: Operation name, joined with ``self.TargetPrefix``
        to form the ``X-Amz-Target`` header.

    :type body: string
    :param body: JSON-serialized request parameters.
    """
    headers = {
        'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
        'Host': self.region.endpoint,
        'Content-Type': 'application/x-amz-json-1.1',
        'Content-Length': str(len(body)),
    }
    request = self.build_base_http_request(
        method='POST', path='/', auth_path='/', params={},
        headers=headers, data=body)
    response = self._mexe(request, sender=None,
                          override_num_retries=10)
    response_body = response.read().decode('utf-8')
    boto.log.debug(response_body)
    if response.status != 200:
        fault_body = json.loads(response_body)
        fault_name = fault_body.get('__type', None)
        exception_class = self._faults.get(fault_name,
                                           self.ResponseError)
        raise exception_class(response.status, response.reason,
                              body=fault_body)
    if response_body:
        return json.loads(response_body)
+
diff --git a/boto/cognito/identity/exceptions.py b/boto/cognito/identity/exceptions.py
index 179089df..b5c1236d 100644
--- a/boto/cognito/identity/exceptions.py
+++ b/boto/cognito/identity/exceptions.py
@@ -20,6 +20,10 @@ class ResourceConflictException(BotoServerError):
pass
+class DeveloperUserAlreadyRegisteredException(BotoServerError):
+ pass
+
+
class TooManyRequestsException(BotoServerError):
pass
diff --git a/boto/cognito/identity/layer1.py b/boto/cognito/identity/layer1.py
index 0a9c8e4e..a7363d5b 100644
--- a/boto/cognito/identity/layer1.py
+++ b/boto/cognito/identity/layer1.py
@@ -31,19 +31,30 @@ from boto.cognito.identity import exceptions
class CognitoIdentityConnection(AWSQueryConnection):
"""
Amazon Cognito
- Amazon Cognito is a web service that facilitates the delivery of
- scoped, temporary credentials to mobile devices or other untrusted
- environments. Amazon Cognito uniquely identifies a device or user
- and supplies the user with a consistent identity throughout the
- lifetime of an application.
-
- Amazon Cognito lets users authenticate with third-party identity
- providers (Facebook, Google, or Login with Amazon). As a
- developer, you decide which identity providers to trust. You can
- also choose to support unauthenticated access from your
- application. Your users are provided with Cognito tokens that
- uniquely identify their device and any information provided about
- third-party logins.
+ Amazon Cognito is a web service that delivers scoped temporary
+ credentials to mobile devices and other untrusted environments.
+ Amazon Cognito uniquely identifies a device and supplies the user
+ with a consistent identity over the lifetime of an application.
+
+ Using Amazon Cognito, you can enable authentication with one or
+ more third-party identity providers (Facebook, Google, or Login
+ with Amazon), and you can also choose to support unauthenticated
+ access from your app. Cognito delivers a unique identifier for
+ each user and acts as an OpenID token provider trusted by AWS
+ Security Token Service (STS) to access temporary, limited-
+ privilege AWS credentials.
+
+ To provide end-user credentials, first make an unsigned call to
+ GetId. If the end user is authenticated with one of the supported
+ identity providers, set the `Logins` map with the identity
+ provider token. `GetId` returns a unique identifier for the user.
+
+ Next, make an unsigned call to GetOpenIdToken, which returns the
+ OpenID token necessary to call STS and retrieve AWS credentials.
+ This call expects the same `Logins` map as the `GetId` call, as
+ well as the `IdentityID` originally returned by `GetId`. The token
+ returned by `GetOpenIdToken` can be passed to the STS operation
+ `AssumeRoleWithWebIdentity`_ to retrieve AWS credentials.
"""
APIVersion = "2014-06-30"
DefaultRegionName = "us-east-1"
@@ -55,6 +66,7 @@ class CognitoIdentityConnection(AWSQueryConnection):
_faults = {
"LimitExceededException": exceptions.LimitExceededException,
"ResourceConflictException": exceptions.ResourceConflictException,
+ "DeveloperUserAlreadyRegisteredException": exceptions.DeveloperUserAlreadyRegisteredException,
"TooManyRequestsException": exceptions.TooManyRequestsException,
"InvalidParameterException": exceptions.InvalidParameterException,
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
@@ -80,11 +92,13 @@ class CognitoIdentityConnection(AWSQueryConnection):
def create_identity_pool(self, identity_pool_name,
allow_unauthenticated_identities,
- supported_login_providers=None):
+ supported_login_providers=None,
+ developer_provider_name=None,
+ open_id_connect_provider_ar_ns=None):
"""
Creates a new identity pool. The identity pool is a store of
user identity information that is specific to your AWS
- account.
+ account. The limit on identity pools is 60 per account.
:type identity_pool_name: string
:param identity_pool_name: A string that you provide.
@@ -97,6 +111,19 @@ class CognitoIdentityConnection(AWSQueryConnection):
:param supported_login_providers: Optional key:value pairs mapping
provider names to provider app IDs.
+ :type developer_provider_name: string
+ :param developer_provider_name: The "domain" by which Cognito will
+ refer to your users. This name acts as a placeholder that allows
+ your backend and the Cognito service to communicate about the
+ developer provider. For the `DeveloperProviderName`, you can use
+ letters as well as period ( `.`), underscore ( `_`), and dash (
+ `-`).
+ Once you have set a developer provider name, you cannot change it.
+ Please take care in setting this parameter.
+
+ :type open_id_connect_provider_ar_ns: list
+ :param open_id_connect_provider_ar_ns:
+
"""
params = {
'IdentityPoolName': identity_pool_name,
@@ -104,6 +131,10 @@ class CognitoIdentityConnection(AWSQueryConnection):
}
if supported_login_providers is not None:
params['SupportedLoginProviders'] = supported_login_providers
+ if developer_provider_name is not None:
+ params['DeveloperProviderName'] = developer_provider_name
+ if open_id_connect_provider_ar_ns is not None:
+ params['OpenIdConnectProviderARNs'] = open_id_connect_provider_ar_ns
return self.make_request(action='CreateIdentityPool',
body=json.dumps(params))
@@ -146,8 +177,13 @@ class CognitoIdentityConnection(AWSQueryConnection):
:param identity_pool_id: An identity pool ID in the format REGION:GUID.
:type logins: map
- :param logins: A set of optional name/value pairs that map provider
+ :param logins: A set of optional name-value pairs that map provider
names to provider tokens.
+ The available provider names for `Logins` are as follows:
+
+ + Facebook: `graph.facebook.com`
+ + Google: `accounts.google.com`
+ + Amazon: `www.amazon.com`
"""
params = {
@@ -162,15 +198,17 @@ class CognitoIdentityConnection(AWSQueryConnection):
def get_open_id_token(self, identity_id, logins=None):
"""
Gets an OpenID token, using a known Cognito ID. This known
- Cognito ID is returned from GetId. You can optionally add
+ Cognito ID is returned by GetId. You can optionally add
additional logins for the identity. Supplying multiple logins
creates an implicit link.
+ The OpenId token is valid for 15 minutes.
+
:type identity_id: string
:param identity_id: A unique identifier in the format REGION:GUID.
:type logins: map
- :param logins: A set of optional name/value pairs that map provider
+ :param logins: A set of optional name-value pairs that map provider
names to provider tokens.
"""
@@ -180,6 +218,69 @@ class CognitoIdentityConnection(AWSQueryConnection):
return self.make_request(action='GetOpenIdToken',
body=json.dumps(params))
+ def get_open_id_token_for_developer_identity(self, identity_pool_id,
+ logins, identity_id=None,
+ token_duration=None):
+ """
+ Registers (or retrieves) a Cognito `IdentityId` and an OpenID
+ Connect token for a user authenticated by your backend
+ authentication process. Supplying multiple logins will create
+ an implicit linked account. You can only specify one developer
+ provider as part of the `Logins` map, which is linked to the
+ identity pool. The developer provider is the "domain" by which
+ Cognito will refer to your users.
+
+ You can use `GetOpenIdTokenForDeveloperIdentity` to create a
+ new identity and to link new logins (that is, user credentials
+ issued by a public provider or developer provider) to an
+ existing identity. When you want to create a new identity, the
+ `IdentityId` should be null. When you want to associate a new
+ login with an existing authenticated/unauthenticated identity,
+ you can do so by providing the existing `IdentityId`. This API
+ will create the identity in the specified `IdentityPoolId`.
+
+ :type identity_pool_id: string
+ :param identity_pool_id: An identity pool ID in the format REGION:GUID.
+
+ :type identity_id: string
+ :param identity_id: A unique identifier in the format REGION:GUID.
+
+ :type logins: map
+ :param logins: A set of optional name-value pairs that map provider
+ names to provider tokens. Each name-value pair represents a user
+ from a public provider or developer provider. If the user is from a
+ developer provider, the name-value pair will follow the syntax
+ `"developer_provider_name": "developer_user_identifier"`. The
+ developer provider is the "domain" by which Cognito will refer to
+ your users; you provided this domain while creating/updating the
+ identity pool. The developer user identifier is an identifier from
+ your backend that uniquely identifies a user. When you create an
+ identity pool, you can specify the supported logins.
+
+ :type token_duration: long
+ :param token_duration: The expiration time of the token, in seconds.
+ You can specify a custom expiration time for the token so that you
+ can cache it. If you don't provide an expiration time, the token is
+ valid for 15 minutes. You can exchange the token with Amazon STS
+ for temporary AWS credentials, which are valid for a maximum of one
+ hour. The maximum token duration you can set is 24 hours. You
+ should take care in setting the expiration time for a token, as
+ there are significant security implications: an attacker could use
+ a leaked token to access your AWS resources for the token's
+ duration.
+
+ """
+ params = {
+ 'IdentityPoolId': identity_pool_id,
+ 'Logins': logins,
+ }
+ if identity_id is not None:
+ params['IdentityId'] = identity_id
+ if token_duration is not None:
+ params['TokenDuration'] = token_duration
+ return self.make_request(action='GetOpenIdTokenForDeveloperIdentity',
+ body=json.dumps(params))
+
def list_identities(self, identity_pool_id, max_results, next_token=None):
"""
Lists the identities in a pool.
@@ -221,6 +322,138 @@ class CognitoIdentityConnection(AWSQueryConnection):
return self.make_request(action='ListIdentityPools',
body=json.dumps(params))
+ def lookup_developer_identity(self, identity_pool_id, identity_id=None,
+ developer_user_identifier=None,
+ max_results=None, next_token=None):
+ """
+ Retrieves the `IdentityID` associated with a
+ `DeveloperUserIdentifier` or the list of
+ `DeveloperUserIdentifier`s associated with an `IdentityId` for
+ an existing identity. Either `IdentityID` or
+ `DeveloperUserIdentifier` must not be null. If you supply only
+ one of these values, the other value will be searched in the
+ database and returned as a part of the response. If you supply
+ both, `DeveloperUserIdentifier` will be matched against
+ `IdentityID`. If the values are verified against the database,
+ the response returns both values and is the same as the
+ request. Otherwise a `ResourceConflictException` is thrown.
+
+ :type identity_pool_id: string
+ :param identity_pool_id: An identity pool ID in the format REGION:GUID.
+
+ :type identity_id: string
+ :param identity_id: A unique identifier in the format REGION:GUID.
+
+ :type developer_user_identifier: string
+ :param developer_user_identifier: A unique ID used by your backend
+ authentication process to identify a user. Typically, a developer
+ identity provider would issue many developer user identifiers, in
+ keeping with the number of users.
+
+ :type max_results: integer
+ :param max_results: The maximum number of identities to return.
+
+ :type next_token: string
+ :param next_token: A pagination token. The first call you make will
+ have `NextToken` set to null. After that the service will return
+ `NextToken` values as needed. For example, let's say you make a
+ request with `MaxResults` set to 10, and there are 20 matches in
+ the database. The service will return a pagination token as a part
+ of the response. This token can be used to call the API again and
+ get results starting from the 11th match.
+
+ """
+ params = {'IdentityPoolId': identity_pool_id, }
+ if identity_id is not None:
+ params['IdentityId'] = identity_id
+ if developer_user_identifier is not None:
+ params['DeveloperUserIdentifier'] = developer_user_identifier
+ if max_results is not None:
+ params['MaxResults'] = max_results
+ if next_token is not None:
+ params['NextToken'] = next_token
+ return self.make_request(action='LookupDeveloperIdentity',
+ body=json.dumps(params))
+
+ def merge_developer_identities(self, source_user_identifier,
+ destination_user_identifier,
+ developer_provider_name, identity_pool_id):
+ """
+ Merges two users having different `IdentityId`s, existing in
+ the same identity pool, and identified by the same developer
+ provider. You can use this action to request that discrete
+ users be merged and identified as a single user in the Cognito
+ environment. Cognito associates the given source user (
+ `SourceUserIdentifier`) with the `IdentityId` of the
+ `DestinationUserIdentifier`. Only developer-authenticated
+ users can be merged. If the users to be merged are associated
+ with the same public provider, but as two different users, an
+ exception will be thrown.
+
+ :type source_user_identifier: string
+ :param source_user_identifier: User identifier for the source user. The
+ value should be a `DeveloperUserIdentifier`.
+
+ :type destination_user_identifier: string
+ :param destination_user_identifier: User identifier for the destination
+ user. The value should be a `DeveloperUserIdentifier`.
+
+ :type developer_provider_name: string
+ :param developer_provider_name: The "domain" by which Cognito will
+ refer to your users. This is a (pseudo) domain name that you
+ provide while creating an identity pool. This name acts as a
+ placeholder that allows your backend and the Cognito service to
+ communicate about the developer provider. For the
+ `DeveloperProviderName`, you can use letters as well as period (.),
+ underscore (_), and dash (-).
+
+ :type identity_pool_id: string
+ :param identity_pool_id: An identity pool ID in the format REGION:GUID.
+
+ """
+ params = {
+ 'SourceUserIdentifier': source_user_identifier,
+ 'DestinationUserIdentifier': destination_user_identifier,
+ 'DeveloperProviderName': developer_provider_name,
+ 'IdentityPoolId': identity_pool_id,
+ }
+ return self.make_request(action='MergeDeveloperIdentities',
+ body=json.dumps(params))
+
+ def unlink_developer_identity(self, identity_id, identity_pool_id,
+ developer_provider_name,
+ developer_user_identifier):
+ """
+ Unlinks a `DeveloperUserIdentifier` from an existing identity.
+ Unlinked developer users will be considered new identities
+ next time they are seen. If, for a given Cognito identity, you
+ remove all federated identities as well as the developer user
+ identifier, the Cognito identity becomes inaccessible.
+
+ :type identity_id: string
+ :param identity_id: A unique identifier in the format REGION:GUID.
+
+ :type identity_pool_id: string
+ :param identity_pool_id: An identity pool ID in the format REGION:GUID.
+
+ :type developer_provider_name: string
+ :param developer_provider_name: The "domain" by which Cognito will
+ refer to your users.
+
+ :type developer_user_identifier: string
+ :param developer_user_identifier: A unique ID used by your backend
+ authentication process to identify a user.
+
+ """
+ params = {
+ 'IdentityId': identity_id,
+ 'IdentityPoolId': identity_pool_id,
+ 'DeveloperProviderName': developer_provider_name,
+ 'DeveloperUserIdentifier': developer_user_identifier,
+ }
+ return self.make_request(action='UnlinkDeveloperIdentity',
+ body=json.dumps(params))
+
def unlink_identity(self, identity_id, logins, logins_to_remove):
"""
Unlinks a federated identity from an existing account.
@@ -232,7 +465,7 @@ class CognitoIdentityConnection(AWSQueryConnection):
:param identity_id: A unique identifier in the format REGION:GUID.
:type logins: map
- :param logins: A set of optional name/value pairs that map provider
+ :param logins: A set of optional name-value pairs that map provider
names to provider tokens.
:type logins_to_remove: list
@@ -249,7 +482,9 @@ class CognitoIdentityConnection(AWSQueryConnection):
def update_identity_pool(self, identity_pool_id, identity_pool_name,
allow_unauthenticated_identities,
- supported_login_providers=None):
+ supported_login_providers=None,
+ developer_provider_name=None,
+ open_id_connect_provider_ar_ns=None):
"""
Updates a user pool.
@@ -267,6 +502,13 @@ class CognitoIdentityConnection(AWSQueryConnection):
:param supported_login_providers: Optional key:value pairs mapping
provider names to provider app IDs.
+ :type developer_provider_name: string
+ :param developer_provider_name: The "domain" by which Cognito will
+ refer to your users.
+
+ :type open_id_connect_provider_ar_ns: list
+    :param open_id_connect_provider_ar_ns: A list of Amazon Resource Names
+        (ARNs) of the OpenID Connect providers to associate with the
+        identity pool (sent as `OpenIdConnectProviderARNs`).
+
"""
params = {
'IdentityPoolId': identity_pool_id,
@@ -275,6 +517,10 @@ class CognitoIdentityConnection(AWSQueryConnection):
}
if supported_login_providers is not None:
params['SupportedLoginProviders'] = supported_login_providers
+ if developer_provider_name is not None:
+ params['DeveloperProviderName'] = developer_provider_name
+ if open_id_connect_provider_ar_ns is not None:
+ params['OpenIdConnectProviderARNs'] = open_id_connect_provider_ar_ns
return self.make_request(action='UpdateIdentityPool',
body=json.dumps(params))
diff --git a/boto/cognito/sync/exceptions.py b/boto/cognito/sync/exceptions.py
index d64fb278..3e83c3ca 100644
--- a/boto/cognito/sync/exceptions.py
+++ b/boto/cognito/sync/exceptions.py
@@ -30,6 +30,10 @@ class ResourceConflictException(BotoServerError):
pass
+class InvalidConfigurationException(BotoServerError):
+ pass
+
+
class TooManyRequestsException(BotoServerError):
pass
diff --git a/boto/cognito/sync/layer1.py b/boto/cognito/sync/layer1.py
index 545af5eb..59e9d953 100644
--- a/boto/cognito/sync/layer1.py
+++ b/boto/cognito/sync/layer1.py
@@ -39,6 +39,11 @@ class CognitoSyncConnection(AWSAuthConnection):
user ID and credentials. User data is persisted in a dataset that
can store up to 1 MB of key-value pairs, and you can have up to 20
datasets per user identity.
+
+ With Amazon Cognito Sync, the data stored for each identity is
+ accessible only to credentials assigned to that identity. In order
+ to use the Cognito Sync service, you need to make API calls using
+ credentials retrieved with `Amazon Cognito Identity service`_.
"""
APIVersion = "2014-06-30"
DefaultRegionName = "us-east-1"
@@ -48,6 +53,7 @@ class CognitoSyncConnection(AWSAuthConnection):
_faults = {
"LimitExceededException": exceptions.LimitExceededException,
"ResourceConflictException": exceptions.ResourceConflictException,
+ "InvalidConfigurationException": exceptions.InvalidConfigurationException,
"TooManyRequestsException": exceptions.TooManyRequestsException,
"InvalidParameterException": exceptions.InvalidParameterException,
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
@@ -94,6 +100,7 @@ class CognitoSyncConnection(AWSAuthConnection):
(dot).
"""
+
uri = '/identitypools/{0}/identities/{1}/datasets/{2}'.format(
identity_pool_id, identity_id, dataset_name)
return self.make_request('DELETE', uri, expected_status=200)
@@ -101,6 +108,11 @@ class CognitoSyncConnection(AWSAuthConnection):
def describe_dataset(self, identity_pool_id, identity_id, dataset_name):
"""
Gets metadata about a dataset by identity and dataset name.
+ The credentials used to make this API call need to have access
+ to the identity data. With Amazon Cognito Sync, each identity
+ has access only to its own data. You should use Amazon Cognito
+ Identity service to retrieve the credentials necessary to make
+ this API call.
:type identity_pool_id: string
:param identity_pool_id: A name-spaced GUID (for example, us-
@@ -118,6 +130,7 @@ class CognitoSyncConnection(AWSAuthConnection):
(dot).
"""
+
uri = '/identitypools/{0}/identities/{1}/datasets/{2}'.format(
identity_pool_id, identity_id, dataset_name)
return self.make_request('GET', uri, expected_status=200)
@@ -133,6 +146,7 @@ class CognitoSyncConnection(AWSAuthConnection):
Cognito. GUID generation is unique within a region.
"""
+
uri = '/identitypools/{0}'.format(identity_pool_id)
return self.make_request('GET', uri, expected_status=200)
@@ -152,14 +166,34 @@ class CognitoSyncConnection(AWSAuthConnection):
Cognito. GUID generation is unique within a region.
"""
+
uri = '/identitypools/{0}/identities/{1}'.format(
identity_pool_id, identity_id)
return self.make_request('GET', uri, expected_status=200)
+ def get_identity_pool_configuration(self, identity_pool_id):
+ """
+ Gets the configuration settings of an identity pool.
+
+ :type identity_pool_id: string
+ :param identity_pool_id: A name-spaced GUID (for example, us-
+ east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
+ Cognito. This is the ID of the pool for which to return a
+ configuration.
+
+ """
+
+ uri = '/identitypools/{0}/configuration'.format(identity_pool_id)
+ return self.make_request('GET', uri, expected_status=200)
+
def list_datasets(self, identity_pool_id, identity_id, next_token=None,
max_results=None):
"""
- Lists datasets for an identity.
+ Lists datasets for an identity. The credentials used to make
+ this API call need to have access to the identity data. With
+ Amazon Cognito Sync, each identity has access only to its own
+ data. You should use Amazon Cognito Identity service to
+ retrieve the credentials necessary to make this API call.
:type identity_pool_id: string
:param identity_pool_id: A name-spaced GUID (for example, us-
@@ -179,12 +213,19 @@ class CognitoSyncConnection(AWSAuthConnection):
:param max_results: The maximum number of results to be returned.
"""
+
uri = '/identitypools/{0}/identities/{1}/datasets'.format(
identity_pool_id, identity_id)
params = {}
headers = {}
+ query_params = {}
+ if next_token is not None:
+ query_params['nextToken'] = next_token
+ if max_results is not None:
+ query_params['maxResults'] = max_results
return self.make_request('GET', uri, expected_status=200,
- data=json.dumps(params), headers=headers)
+ data=json.dumps(params), headers=headers,
+ params=query_params)
def list_identity_pool_usage(self, next_token=None, max_results=None):
"""
@@ -198,18 +239,29 @@ class CognitoSyncConnection(AWSAuthConnection):
:param max_results: The maximum number of results to be returned.
"""
+
uri = '/identitypools'
params = {}
headers = {}
+ query_params = {}
+ if next_token is not None:
+ query_params['nextToken'] = next_token
+ if max_results is not None:
+ query_params['maxResults'] = max_results
return self.make_request('GET', uri, expected_status=200,
- data=json.dumps(params), headers=headers)
+ data=json.dumps(params), headers=headers,
+ params=query_params)
def list_records(self, identity_pool_id, identity_id, dataset_name,
last_sync_count=None, next_token=None, max_results=None,
sync_session_token=None):
"""
Gets paginated records, optionally changed after a particular
- sync count for a dataset and identity.
+ sync count for a dataset and identity. The credentials used to
+ make this API call need to have access to the identity data.
+ With Amazon Cognito Sync, each identity has access only to its
+ own data. You should use Amazon Cognito Identity service to
+ retrieve the credentials necessary to make this API call.
:type identity_pool_id: string
:param identity_pool_id: A name-spaced GUID (for example, us-
@@ -241,19 +293,142 @@ class CognitoSyncConnection(AWSAuthConnection):
ID, and expiration.
"""
+
uri = '/identitypools/{0}/identities/{1}/datasets/{2}/records'.format(
identity_pool_id, identity_id, dataset_name)
params = {}
headers = {}
+ query_params = {}
+ if last_sync_count is not None:
+ query_params['lastSyncCount'] = last_sync_count
+ if next_token is not None:
+ query_params['nextToken'] = next_token
+ if max_results is not None:
+ query_params['maxResults'] = max_results
+ if sync_session_token is not None:
+ query_params['syncSessionToken'] = sync_session_token
return self.make_request('GET', uri, expected_status=200,
- data=json.dumps(params), headers=headers)
+ data=json.dumps(params), headers=headers,
+ params=query_params)
+
+ def register_device(self, identity_pool_id, identity_id, platform, token):
+ """
+ Registers a device to receive push sync notifications.
+
+ :type identity_pool_id: string
+ :param identity_pool_id: A name-spaced GUID (for example, us-
+ east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
+ Cognito. Here, the ID of the pool that the identity belongs to.
+
+ :type identity_id: string
+ :param identity_id: The unique ID for this identity.
+
+ :type platform: string
+ :param platform: The SNS platform type (e.g. GCM, SDM, APNS,
+ APNS_SANDBOX).
+
+ :type token: string
+ :param token: The push token.
+
+ """
+
+ uri = '/identitypools/{0}/identity/{1}/device'.format(
+ identity_pool_id, identity_id)
+ params = {'Platform': platform, 'Token': token, }
+ headers = {}
+ query_params = {}
+ return self.make_request('POST', uri, expected_status=200,
+ data=json.dumps(params), headers=headers,
+ params=query_params)
+
+ def set_identity_pool_configuration(self, identity_pool_id,
+ push_sync=None):
+ """
+ Sets the necessary configuration for push sync.
+
+ :type identity_pool_id: string
+ :param identity_pool_id: A name-spaced GUID (for example, us-
+ east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
+ Cognito. This is the ID of the pool to modify.
+
+ :type push_sync: dict
+ :param push_sync: Configuration options to be applied to the identity
+ pool.
+
+ """
+
+ uri = '/identitypools/{0}/configuration'.format(identity_pool_id)
+ params = {}
+ headers = {}
+ query_params = {}
+ if push_sync is not None:
+ params['PushSync'] = push_sync
+ return self.make_request('POST', uri, expected_status=200,
+ data=json.dumps(params), headers=headers,
+ params=query_params)
+
+ def subscribe_to_dataset(self, identity_pool_id, identity_id,
+ dataset_name, device_id):
+ """
+ Subscribes to receive notifications when a dataset is modified
+ by another device.
+
+ :type identity_pool_id: string
+ :param identity_pool_id: A name-spaced GUID (for example, us-
+ east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
+ Cognito. The ID of the pool to which the identity belongs.
+
+ :type identity_id: string
+ :param identity_id: Unique ID for this identity.
+
+ :type dataset_name: string
+        :param dataset_name: The name of the dataset to subscribe to.
+
+ :type device_id: string
+ :param device_id: The unique ID generated for this device by Cognito.
+
+ """
+
+ uri = '/identitypools/{0}/identities/{1}/datasets/{2}/subscriptions/{3}'.format(
+ identity_pool_id, identity_id, dataset_name, device_id)
+ return self.make_request('POST', uri, expected_status=200)
+
+ def unsubscribe_from_dataset(self, identity_pool_id, identity_id,
+ dataset_name, device_id):
+ """
+        Unsubscribes from receiving notifications when a dataset is
+        modified by another device.
+
+ :type identity_pool_id: string
+ :param identity_pool_id: A name-spaced GUID (for example, us-
+ east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
+ Cognito. The ID of the pool to which this identity belongs.
+
+ :type identity_id: string
+ :param identity_id: Unique ID for this identity.
+
+ :type dataset_name: string
+        :param dataset_name: The name of the dataset from which to unsubscribe.
+
+ :type device_id: string
+ :param device_id: The unique ID generated for this device by Cognito.
+
+ """
+
+ uri = '/identitypools/{0}/identities/{1}/datasets/{2}/subscriptions/{3}'.format(
+ identity_pool_id, identity_id, dataset_name, device_id)
+ return self.make_request('DELETE', uri, expected_status=200)
def update_records(self, identity_pool_id, identity_id, dataset_name,
- sync_session_token, record_patches=None,
- client_context=None):
+ sync_session_token, device_id=None,
+ record_patches=None, client_context=None):
"""
Posts updates to records and add and delete records for a
- dataset and user.
+ dataset and user. The credentials used to make this API call
+ need to have access to the identity data. With Amazon Cognito
+ Sync, each identity has access only to its own data. You
+ should use Amazon Cognito Identity service to retrieve the
+ credentials necessary to make this API call.
:type identity_pool_id: string
:param identity_pool_id: A name-spaced GUID (for example, us-
@@ -270,27 +445,39 @@ class CognitoSyncConnection(AWSAuthConnection):
characters are a-z, A-Z, 0-9, '_' (underscore), '-' (dash), and '.'
(dot).
+ :type device_id: string
+ :param device_id: The unique ID generated for this device by Cognito.
+
:type record_patches: list
- :param record_patches:
+ :param record_patches: A list of patch operations.
:type sync_session_token: string
:param sync_session_token: The SyncSessionToken returned by a previous
call to ListRecords for this dataset and identity.
:type client_context: string
- :param client_context:
+ :param client_context: Intended to supply a device ID that will
+ populate the `lastModifiedBy` field referenced in other methods.
+ The `ClientContext` field is not yet implemented.
"""
+
uri = '/identitypools/{0}/identities/{1}/datasets/{2}'.format(
identity_pool_id, identity_id, dataset_name)
params = {'SyncSessionToken': sync_session_token, }
headers = {}
+ query_params = {}
+ if device_id is not None:
+ params['DeviceId'] = device_id
if record_patches is not None:
params['RecordPatches'] = record_patches
        if client_context is not None:
            headers['x-amz-Client-Context'] = client_context
return self.make_request('POST', uri, expected_status=200,
- data=json.dumps(params), headers=headers)
+ data=json.dumps(params), headers=headers,
+ params=query_params)
def make_request(self, verb, resource, headers=None, data='',
expected_status=None, params=None):
diff --git a/boto/configservice/__init__.py b/boto/configservice/__init__.py
new file mode 100644
index 00000000..dc2e26a9
--- /dev/null
+++ b/boto/configservice/__init__.py
@@ -0,0 +1,41 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.regioninfo import RegionInfo, get_regions
+
+
+def regions():
+ """
+ Get all available regions for the AWS Config service.
+
+ :rtype: list
+ :return: A list of :class:`boto.regioninfo.RegionInfo`
+ """
+    from boto.configservice.layer1 import ConfigServiceConnection
+ return get_regions('configservice', connection_cls=ConfigServiceConnection)
+
+
+def connect_to_region(region_name, **kw_params):
+ for region in regions():
+ if region.name == region_name:
+ return region.connect(**kw_params)
+ return None
diff --git a/boto/configservice/exceptions.py b/boto/configservice/exceptions.py
new file mode 100644
index 00000000..58aa550f
--- /dev/null
+++ b/boto/configservice/exceptions.py
@@ -0,0 +1,103 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from boto.exception import BotoServerError
+
+
+class InvalidLimitException(BotoServerError):
+ pass
+
+
+class NoSuchBucketException(BotoServerError):
+ pass
+
+
+class InvalidSNSTopicARNException(BotoServerError):
+ pass
+
+
+class ResourceNotDiscoveredException(BotoServerError):
+ pass
+
+
+class MaxNumberOfDeliveryChannelsExceededException(BotoServerError):
+ pass
+
+
+class LastDeliveryChannelDeleteFailedException(BotoServerError):
+ pass
+
+
+class InsufficientDeliveryPolicyException(BotoServerError):
+ pass
+
+
+class InvalidRoleException(BotoServerError):
+ pass
+
+
+class InvalidTimeRangeException(BotoServerError):
+ pass
+
+
+class NoSuchDeliveryChannelException(BotoServerError):
+ pass
+
+
+class NoSuchConfigurationRecorderException(BotoServerError):
+ pass
+
+
+class InvalidS3KeyPrefixException(BotoServerError):
+ pass
+
+
+class InvalidDeliveryChannelNameException(BotoServerError):
+ pass
+
+
+class NoRunningConfigurationRecorderException(BotoServerError):
+ pass
+
+
+class ValidationException(BotoServerError):
+ pass
+
+
+class NoAvailableConfigurationRecorderException(BotoServerError):
+ pass
+
+
+class InvalidNextTokenException(BotoServerError):
+ pass
+
+
+class InvalidConfigurationRecorderNameException(BotoServerError):
+ pass
+
+
+class NoAvailableDeliveryChannelException(BotoServerError):
+ pass
+
+
+class MaxNumberOfConfigurationRecordersExceededException(BotoServerError):
+ pass
diff --git a/boto/configservice/layer1.py b/boto/configservice/layer1.py
new file mode 100644
index 00000000..fe598d98
--- /dev/null
+++ b/boto/configservice/layer1.py
@@ -0,0 +1,381 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import boto
+from boto.compat import json
+from boto.connection import AWSQueryConnection
+from boto.regioninfo import RegionInfo
+from boto.exception import JSONResponseError
+from boto.configservice import exceptions
+
+
+class ConfigServiceConnection(AWSQueryConnection):
+ """
+ AWS Config
+ AWS Config provides a way to keep track of the configurations of
+ all the AWS resources associated with your AWS account. You can
+ use AWS Config to get the current and historical configurations of
+ each AWS resource and also to get information about the
+ relationship between the resources. An AWS resource can be an
+    Amazon Elastic Compute Cloud (Amazon EC2) instance, an Elastic Block
+    Store (EBS) volume, an Elastic Network Interface (ENI), or a security
+ group. For a complete list of resources currently supported by AWS
+ Config, see `Supported AWS Resources`_.
+
+ You can access and manage AWS Config through the AWS Management
+ Console, the AWS Command Line Interface (AWS CLI), the AWS Config
+ API, or the AWS SDKs for AWS Config
+
+ This reference guide contains documentation for the AWS Config API
+ and the AWS CLI commands that you can use to manage AWS Config.
+
+ The AWS Config API uses the Signature Version 4 protocol for
+ signing requests. For more information about how to sign a request
+ with this protocol, see `Signature Version 4 Signing Process`_.
+
+ For detailed information about AWS Config features and their
+ associated actions or commands, as well as how to work with AWS
+ Management Console, see `What Is AWS Config?`_ in the AWS Config
+ Developer Guide .
+ """
+ APIVersion = "2014-11-12"
+ DefaultRegionName = "us-east-1"
+ DefaultRegionEndpoint = "config.us-east-1.amazonaws.com"
+ ServiceName = "ConfigService"
+ TargetPrefix = "StarlingDoveService"
+ ResponseError = JSONResponseError
+
+ _faults = {
+ "InvalidLimitException": exceptions.InvalidLimitException,
+ "NoSuchBucketException": exceptions.NoSuchBucketException,
+ "InvalidSNSTopicARNException": exceptions.InvalidSNSTopicARNException,
+ "ResourceNotDiscoveredException": exceptions.ResourceNotDiscoveredException,
+ "MaxNumberOfDeliveryChannelsExceededException": exceptions.MaxNumberOfDeliveryChannelsExceededException,
+ "LastDeliveryChannelDeleteFailedException": exceptions.LastDeliveryChannelDeleteFailedException,
+ "InsufficientDeliveryPolicyException": exceptions.InsufficientDeliveryPolicyException,
+ "InvalidRoleException": exceptions.InvalidRoleException,
+ "InvalidTimeRangeException": exceptions.InvalidTimeRangeException,
+ "NoSuchDeliveryChannelException": exceptions.NoSuchDeliveryChannelException,
+ "NoSuchConfigurationRecorderException": exceptions.NoSuchConfigurationRecorderException,
+ "InvalidS3KeyPrefixException": exceptions.InvalidS3KeyPrefixException,
+ "InvalidDeliveryChannelNameException": exceptions.InvalidDeliveryChannelNameException,
+ "NoRunningConfigurationRecorderException": exceptions.NoRunningConfigurationRecorderException,
+ "ValidationException": exceptions.ValidationException,
+ "NoAvailableConfigurationRecorderException": exceptions.NoAvailableConfigurationRecorderException,
+ "InvalidNextTokenException": exceptions.InvalidNextTokenException,
+ "InvalidConfigurationRecorderNameException": exceptions.InvalidConfigurationRecorderNameException,
+ "NoAvailableDeliveryChannelException": exceptions.NoAvailableDeliveryChannelException,
+ "MaxNumberOfConfigurationRecordersExceededException": exceptions.MaxNumberOfConfigurationRecordersExceededException,
+ }
+
+
+ def __init__(self, **kwargs):
+ region = kwargs.pop('region', None)
+ if not region:
+ region = RegionInfo(self, self.DefaultRegionName,
+ self.DefaultRegionEndpoint)
+
+ if 'host' not in kwargs or kwargs['host'] is None:
+ kwargs['host'] = region.endpoint
+
+ super(ConfigServiceConnection, self).__init__(**kwargs)
+ self.region = region
+
+ def _required_auth_capability(self):
+ return ['hmac-v4']
+
+ def delete_delivery_channel(self, delivery_channel_name):
+ """
+ Deletes the specified delivery channel.
+
+ The delivery channel cannot be deleted if it is the only
+ delivery channel and the configuration recorder is still
+ running. To delete the delivery channel, stop the running
+ configuration recorder using the StopConfigurationRecorder
+ action.
+
+ :type delivery_channel_name: string
+ :param delivery_channel_name: The name of the delivery channel to
+ delete.
+
+ """
+ params = {'DeliveryChannelName': delivery_channel_name, }
+ return self.make_request(action='DeleteDeliveryChannel',
+ body=json.dumps(params))
+
+ def deliver_config_snapshot(self, delivery_channel_name):
+ """
+ Schedules delivery of a configuration snapshot to the Amazon
+ S3 bucket in the specified delivery channel. After the
+        delivery has started, AWS Config sends the following notifications
+        using an Amazon SNS topic that you have specified.
+
+
+ + Notification of starting the delivery.
+ + Notification of delivery completed, if the delivery was
+ successfully completed.
+ + Notification of delivery failure, if the delivery failed to
+ complete.
+
+ :type delivery_channel_name: string
+ :param delivery_channel_name: The name of the delivery channel through
+ which the snapshot is delivered.
+
+ """
+ params = {'deliveryChannelName': delivery_channel_name, }
+ return self.make_request(action='DeliverConfigSnapshot',
+ body=json.dumps(params))
+
+ def describe_configuration_recorder_status(self,
+ configuration_recorder_names=None):
+ """
+ Returns the current status of the specified configuration
+        recorder. If a configuration recorder is not specified, this
+        action returns the status of all configuration recorders
+        associated with the account.
+
+ :type configuration_recorder_names: list
+ :param configuration_recorder_names: The name(s) of the configuration
+ recorder. If the name is not specified, the action returns the
+ current status of all the configuration recorders associated with
+ the account.
+
+ """
+ params = {}
+ if configuration_recorder_names is not None:
+ params['ConfigurationRecorderNames'] = configuration_recorder_names
+ return self.make_request(action='DescribeConfigurationRecorderStatus',
+ body=json.dumps(params))
+
+ def describe_configuration_recorders(self,
+ configuration_recorder_names=None):
+ """
+ Returns the name of one or more specified configuration
+ recorders. If the recorder name is not specified, this action
+ returns the names of all the configuration recorders
+ associated with the account.
+
+ :type configuration_recorder_names: list
+ :param configuration_recorder_names: A list of configuration recorder
+ names.
+
+ """
+ params = {}
+ if configuration_recorder_names is not None:
+ params['ConfigurationRecorderNames'] = configuration_recorder_names
+ return self.make_request(action='DescribeConfigurationRecorders',
+ body=json.dumps(params))
+
+ def describe_delivery_channel_status(self, delivery_channel_names=None):
+ """
+ Returns the current status of the specified delivery channel.
+ If a delivery channel is not specified, this action returns
+ the current status of all delivery channels associated with
+ the account.
+
+ :type delivery_channel_names: list
+ :param delivery_channel_names: A list of delivery channel names.
+
+ """
+ params = {}
+ if delivery_channel_names is not None:
+ params['DeliveryChannelNames'] = delivery_channel_names
+ return self.make_request(action='DescribeDeliveryChannelStatus',
+ body=json.dumps(params))
+
+ def describe_delivery_channels(self, delivery_channel_names=None):
+ """
+ Returns details about the specified delivery channel. If a
+ delivery channel is not specified, this action returns the
+ details of all delivery channels associated with the account.
+
+ :type delivery_channel_names: list
+ :param delivery_channel_names: A list of delivery channel names.
+
+ """
+ params = {}
+ if delivery_channel_names is not None:
+ params['DeliveryChannelNames'] = delivery_channel_names
+ return self.make_request(action='DescribeDeliveryChannels',
+ body=json.dumps(params))
+
+ def get_resource_config_history(self, resource_type, resource_id,
+ later_time=None, earlier_time=None,
+ chronological_order=None, limit=None,
+ next_token=None):
+ """
+ Returns a list of configuration items for the specified
+ resource. The list contains details about each state of the
+ resource during the specified time interval. You can specify a
+ `limit` on the number of results returned on the page. If a
+ limit is specified, a `nextToken` is returned as part of the
+ result that you can use to continue this request.
+
+ :type resource_type: string
+ :param resource_type: The resource type.
+
+ :type resource_id: string
+        :param resource_id: The ID of the resource (for example, `sg-xxxxxx`).
+
+ :type later_time: timestamp
+ :param later_time: The time stamp that indicates a later time. If not
+ specified, current time is taken.
+
+ :type earlier_time: timestamp
+ :param earlier_time: The time stamp that indicates an earlier time. If
+ not specified, the action returns paginated results that contain
+ configuration items that start from when the first configuration
+ item was recorded.
+
+ :type chronological_order: string
+ :param chronological_order: The chronological order for configuration
+ items listed. By default the results are listed in reverse
+ chronological order.
+
+ :type limit: integer
+ :param limit: The maximum number of configuration items returned in
+ each page. The default is 10. You cannot specify a limit greater
+ than 100.
+
+ :type next_token: string
+ :param next_token: An optional parameter used for pagination of the
+ results.
+
+ """
+ params = {
+ 'resourceType': resource_type,
+ 'resourceId': resource_id,
+ }
+ if later_time is not None:
+ params['laterTime'] = later_time
+ if earlier_time is not None:
+ params['earlierTime'] = earlier_time
+ if chronological_order is not None:
+ params['chronologicalOrder'] = chronological_order
+ if limit is not None:
+ params['limit'] = limit
+ if next_token is not None:
+ params['nextToken'] = next_token
+ return self.make_request(action='GetResourceConfigHistory',
+ body=json.dumps(params))
+
+ def put_configuration_recorder(self, configuration_recorder):
+ """
+ Creates a new configuration recorder to record the resource
+ configurations.
+
+ You can use this action to change the role ( `roleARN`) of an
+ existing recorder. To change the role, call the action on the
+ existing configuration recorder and specify a role.
+
+ :type configuration_recorder: dict
+ :param configuration_recorder: The configuration recorder object that
+ records each configuration change made to the resources.
+
+ """
+ params = {'ConfigurationRecorder': configuration_recorder, }
+ return self.make_request(action='PutConfigurationRecorder',
+ body=json.dumps(params))
+
+ def put_delivery_channel(self, delivery_channel):
+ """
+ Creates a new delivery channel object to deliver the
+ configuration information to an Amazon S3 bucket, and to an
+ Amazon SNS topic.
+
+ You can use this action to change the Amazon S3 bucket or an
+ Amazon SNS topic of the existing delivery channel. To change
+ the Amazon S3 bucket or an Amazon SNS topic, call this action
+ and specify the changed values for the S3 bucket and the SNS
+ topic. If you specify a different value for either the S3
+ bucket or the SNS topic, this action will keep the existing
+ value for the parameter that is not changed.
+
+ :type delivery_channel: dict
+ :param delivery_channel: The configuration delivery channel object that
+ delivers the configuration information to an Amazon S3 bucket, and
+ to an Amazon SNS topic.
+
+ """
+ params = {'DeliveryChannel': delivery_channel, }
+ return self.make_request(action='PutDeliveryChannel',
+ body=json.dumps(params))
+
+ def start_configuration_recorder(self, configuration_recorder_name):
+ """
+ Starts recording configurations of all the resources
+ associated with the account.
+
+ You must have created at least one delivery channel to
+ successfully start the configuration recorder.
+
+ :type configuration_recorder_name: string
+ :param configuration_recorder_name: The name of the recorder object
+ that records each configuration change made to the resources.
+
+ """
+ params = {
+ 'ConfigurationRecorderName': configuration_recorder_name,
+ }
+ return self.make_request(action='StartConfigurationRecorder',
+ body=json.dumps(params))
+
+ def stop_configuration_recorder(self, configuration_recorder_name):
+ """
+ Stops recording configurations of all the resources associated
+ with the account.
+
+ :type configuration_recorder_name: string
+ :param configuration_recorder_name: The name of the recorder object
+ that records each configuration change made to the resources.
+
+ """
+ params = {
+ 'ConfigurationRecorderName': configuration_recorder_name,
+ }
+ return self.make_request(action='StopConfigurationRecorder',
+ body=json.dumps(params))
+
+ def make_request(self, action, body):
+ headers = {
+ 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
+ 'Host': self.region.endpoint,
+ 'Content-Type': 'application/x-amz-json-1.1',
+ 'Content-Length': str(len(body)),
+ }
+ http_request = self.build_base_http_request(
+ method='POST', path='/', auth_path='/', params={},
+ headers=headers, data=body)
+ response = self._mexe(http_request, sender=None,
+ override_num_retries=10)
+ response_body = response.read().decode('utf-8')
+ boto.log.debug(response_body)
+ if response.status == 200:
+ if response_body:
+ return json.loads(response_body)
+ else:
+ json_body = json.loads(response_body)
+ fault_name = json_body.get('__type', None)
+ exception_class = self._faults.get(fault_name, self.ResponseError)
+ raise exception_class(response.status, response.reason,
+ body=json_body)
+
diff --git a/boto/dynamodb2/layer1.py b/boto/dynamodb2/layer1.py
index fd98d2f9..bd1eb1ea 100644
--- a/boto/dynamodb2/layer1.py
+++ b/boto/dynamodb2/layer1.py
@@ -31,7 +31,9 @@ from boto.dynamodb2 import exceptions
class DynamoDBConnection(AWSQueryConnection):
"""
- Amazon DynamoDB **Overview**
+ Amazon DynamoDB
+ **Overview**
+
This is the Amazon DynamoDB API Reference. This guide provides
descriptions and samples of the low-level DynamoDB API. For
information about DynamoDB application development, go to the
@@ -57,7 +59,6 @@ class DynamoDBConnection(AWSQueryConnection):
**Managing Tables**
-
+ CreateTable - Creates a table with user-specified provisioned
throughput settings. You must designate one attribute as the hash
primary key for the table; you can optionally designate a second
@@ -75,14 +76,12 @@ class DynamoDBConnection(AWSQueryConnection):
+ DeleteTable - Deletes a table and all of its indexes.
-
For conceptual information about managing tables, go to `Working
with Tables`_ in the Amazon DynamoDB Developer Guide .
**Reading Data**
-
+ GetItem - Returns a set of attributes for the item that has a
given primary key. By default, GetItem performs an eventually
consistent read; however, applications can specify a strongly
@@ -106,7 +105,6 @@ class DynamoDBConnection(AWSQueryConnection):
case that requires predictable performance.
-
For conceptual information about reading data, go to `Working with
Items`_ and `Query and Scan Operations`_ in the Amazon DynamoDB
Developer Guide .
@@ -114,7 +112,6 @@ class DynamoDBConnection(AWSQueryConnection):
**Modifying Data**
-
+ PutItem - Creates a new item, or replaces an existing item with
a new item (including all the attributes). By default, if an item
in the table already exists with the same primary key, the new
@@ -136,7 +133,6 @@ class DynamoDBConnection(AWSQueryConnection):
MB.
-
For conceptual information about modifying data, go to `Working
with Items`_ and `Query and Scan Operations`_ in the Amazon
DynamoDB Developer Guide .
@@ -294,6 +290,11 @@ class DynamoDBConnection(AWSQueryConnection):
delete requests. Individual items to be written can be as
large as 400 KB.
+
+ BatchWriteItem cannot update items. To update items, use the
+ UpdateItem API.
+
+
The individual PutItem and DeleteItem operations specified in
BatchWriteItem are atomic; however BatchWriteItem as a whole
is not. If any requested operations fail because the table's
@@ -433,10 +434,11 @@ class DynamoDBConnection(AWSQueryConnection):
DynamoDB sets the TableStatus to `ACTIVE`. You can perform
read and write operations only on an `ACTIVE` table.
- If you want to create multiple tables with secondary indexes
- on them, you must create them sequentially. Only one table
- with secondary indexes can be in the `CREATING` state at any
- given time.
+ You can optionally define secondary indexes on the new table,
+ as part of the CreateTable operation. If you want to create
+ multiple tables with secondary indexes on them, you must
+ create the tables sequentially. Only one table with secondary
+ indexes can be in the `CREATING` state at any given time.
You can use the DescribeTable API to check the table status.
@@ -633,8 +635,8 @@ class DynamoDBConnection(AWSQueryConnection):
ComparisonOperator being used. For type Number, value comparisons
are numeric. String value comparisons for greater than, equals, or
less than are based on ASCII character code values. For example,
- `a` is greater than `A`, and `aa` is greater than `B`. For a list
- of code values, see
+ `a` is greater than `A`, and `a` is greater than `B`. For a list of
+ code values, see
`http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_.
For type Binary, DynamoDB treats each byte of the binary data as
unsigned when it compares binary values, for example when
@@ -687,9 +689,19 @@ class DynamoDBConnection(AWSQueryConnection):
match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. > <li>
+ `NOT_NULL` : The attribute exists. `NOT_NULL` is supported for all
- datatypes, including lists and maps.
+ datatypes, including lists and maps. This operator tests for the
+ existence of an attribute, not its data type. If the data type of
+ attribute " `a`" is null, and you evaluate it using `NOT_NULL`, the
+ result is a Boolean true . This result is because the attribute "
+ `a`" exists; its data type is not relevant to the `NOT_NULL`
+ comparison operator.
+ `NULL` : The attribute does not exist. `NULL` is supported for all
- datatypes, including lists and maps.
+ datatypes, including lists and maps. This operator tests for the
+ nonexistence of an attribute, not its data type. If the data type
+ of attribute " `a`" is null, and you evaluate it using `NULL`, the
+ result is a Boolean false . This is because the attribute " `a`"
+ exists; its data type is not relevant to the `NULL` comparison
+ operator.
+ `CONTAINS` : Checks for a subsequence, or value in a set.
AttributeValueList can contain only one AttributeValue element of
type String, Number, or Binary (not a set type). If the target
@@ -760,7 +772,7 @@ class DynamoDBConnection(AWSQueryConnection):
the assumption is valid and the condition evaluates to true. If the
value is found, despite the assumption that it does not exist, the
condition evaluates to false.
-
+ Note that the default value for Exists is `True`.
The Value and Exists parameters are incompatible with
@@ -817,24 +829,25 @@ class DynamoDBConnection(AWSQueryConnection):
returned.
:type condition_expression: string
- :param condition_expression:
- A condition that must be satisfied in order for a conditional
- DeleteItem to succeed.
-
+ :param condition_expression: A condition that must be satisfied in
+ order for a conditional DeleteItem to succeed.
An expression can contain any of the following:
- + Boolean functions: `ATTRIBUTE_EXIST | CONTAINS | BEGINS_WITH`
+ + Boolean functions: `attribute_exists | attribute_not_exists |
+ contains | begins_with` These function names are case-sensitive.
+ Comparison operators: ` = | <> | < | > | <=
| >= | BETWEEN | IN`
- + Logical operators: `NOT | AND | OR`
+ + Logical operators: `AND | OR | NOT`
- :type expression_attribute_names: map
- :param expression_attribute_names:
- One or more substitution tokens for simplifying complex expressions.
- The following are some use cases for an ExpressionAttributeNames
- value:
+ For more information on condition expressions, go to `Specifying
+ Conditions`_ in the Amazon DynamoDB Developer Guide .
+
+ :type expression_attribute_names: map
+ :param expression_attribute_names: One or more substitution tokens for
+ simplifying complex expressions. The following are some use cases
+ for using ExpressionAttributeNames :
+ To shorten an attribute name that is very long or unwieldy in an
expression.
@@ -856,37 +869,39 @@ class DynamoDBConnection(AWSQueryConnection):
ExpressionAttributeNames :
- + `{"n":"order.customerInfo.LastName"}`
+ + `{"#name":"order.customerInfo.LastName"}`
The expression can now be simplified as follows:
- + `#n = "Smith" OR #n = "Jones"`
-
- :type expression_attribute_values: map
- :param expression_attribute_values:
- One or more values that can be substituted in an expression.
+ + `#name = "Smith" OR #name = "Jones"`
- Use the **:** character in an expression to dereference an attribute
- value. For example, consider the following expression:
+ For more information on expression attribute names, go to `Accessing
+ Item Attributes`_ in the Amazon DynamoDB Developer Guide .
- + `ProductStatus IN ("Available","Backordered","Discontinued")`
-
-
- Now suppose that you specified the following for
- ExpressionAttributeValues :
+ :type expression_attribute_values: map
+ :param expression_attribute_values: One or more values that can be
+ substituted in an expression.
+ Use the **:** (colon) character in an expression to dereference an
+ attribute value. For example, suppose that you wanted to check
+ whether the value of the ProductStatus attribute was one of the
+ following:
+ `Available | Backordered | Discontinued`
- + `{ "a":{"S":"Available"}, "b":{"S":"Backordered"},
- "d":{"S":"Discontinued"} }`
+ You would first need to specify ExpressionAttributeValues as follows:
+ `{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
+ ":disc":{"S":"Discontinued"} }`
- The expression can now be simplified as follows:
+ You could then use these values in an expression, such as this:
+ `ProductStatus IN (:avail, :back, :disc)`
- + `ProductStatus IN (:a,:b,:c)`
+ For more information on expression attribute values, go to `Specifying
+ Conditions`_ in the Amazon DynamoDB Developer Guide .
"""
params = {'TableName': table_name, 'Key': key, }
@@ -921,6 +936,12 @@ class DynamoDBConnection(AWSQueryConnection):
table is already in the `DELETING` state, no error is
returned.
+
+ DynamoDB might continue to accept data read and write
+ operations, such as GetItem and PutItem , on a table in the
+ `DELETING` state until the table deletion is complete.
+
+
When you delete a table, any indexes on that table are also
deleted.
@@ -940,6 +961,14 @@ class DynamoDBConnection(AWSQueryConnection):
status of the table, when it was created, the primary key
schema, and any indexes on the table.
+
+ If you issue a DescribeTable request immediately after a
+ CreateTable request, DynamoDB might return a
+ ResourceNotFoundException. This is because DescribeTable uses
+ an eventually consistent query, and the metadata for your
+ table might not be available at that moment. Wait for a few
+ seconds, and then try the DescribeTable request again.
+
:type table_name: string
:param table_name: The name of the table to describe.
@@ -1006,20 +1035,21 @@ class DynamoDBConnection(AWSQueryConnection):
included in the response.
:type projection_expression: string
- :param projection_expression: One or more attributes to retrieve from
- the table. These attributes can include scalars, sets, or elements
- of a JSON document. The attributes in the expression must be
- separated by commas.
+ :param projection_expression: A string that identifies one or more
+ attributes to retrieve from the table. These attributes can include
+ scalars, sets, or elements of a JSON document. The attributes in
+ the expression must be separated by commas.
If no attribute names are specified, then all attributes will be
returned. If any of the requested attributes are not found, they
will not appear in the result.
- :type expression_attribute_names: map
- :param expression_attribute_names:
- One or more substitution tokens for simplifying complex expressions.
- The following are some use cases for an ExpressionAttributeNames
- value:
+ For more information on projection expressions, go to `Accessing Item
+ Attributes`_ in the Amazon DynamoDB Developer Guide .
+ :type expression_attribute_names: map
+ :param expression_attribute_names: One or more substitution tokens for
+ simplifying complex expressions. The following are some use cases
+ for using ExpressionAttributeNames :
+ To shorten an attribute name that is very long or unwieldy in an
expression.
@@ -1041,13 +1071,17 @@ class DynamoDBConnection(AWSQueryConnection):
ExpressionAttributeNames :
- + `{"n":"order.customerInfo.LastName"}`
+ + `{"#name":"order.customerInfo.LastName"}`
The expression can now be simplified as follows:
- + `#n = "Smith" OR #n = "Jones"`
+ + `#name = "Smith" OR #name = "Jones"`
+
+
+ For more information on expression attribute names, go to `Accessing
+ Item Attributes`_ in the Amazon DynamoDB Developer Guide .
"""
params = {'TableName': table_name, 'Key': key, }
@@ -1120,6 +1154,12 @@ class DynamoDBConnection(AWSQueryConnection):
item (after the update). For more information, see the
ReturnValues description below.
+
+ To prevent a new item from replacing an existing item, use a
+ conditional put operation with ComparisonOperator set to
+ `NULL` for the primary key attribute, or attributes.
+
+
For more information about using this API, see `Working with
Items`_ in the Amazon DynamoDB Developer Guide .
@@ -1179,8 +1219,8 @@ class DynamoDBConnection(AWSQueryConnection):
ComparisonOperator being used. For type Number, value comparisons
are numeric. String value comparisons for greater than, equals, or
less than are based on ASCII character code values. For example,
- `a` is greater than `A`, and `aa` is greater than `B`. For a list
- of code values, see
+ `a` is greater than `A`, and `a` is greater than `B`. For a list of
+ code values, see
`http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_.
For type Binary, DynamoDB treats each byte of the binary data as
unsigned when it compares binary values, for example when
@@ -1233,9 +1273,19 @@ class DynamoDBConnection(AWSQueryConnection):
match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. > <li>
+ `NOT_NULL` : The attribute exists. `NOT_NULL` is supported for all
- datatypes, including lists and maps.
+ datatypes, including lists and maps. This operator tests for the
+ existence of an attribute, not its data type. If the data type of
+ attribute " `a`" is null, and you evaluate it using `NOT_NULL`, the
+ result is a Boolean true . This result is because the attribute "
+ `a`" exists; its data type is not relevant to the `NOT_NULL`
+ comparison operator.
+ `NULL` : The attribute does not exist. `NULL` is supported for all
- datatypes, including lists and maps.
+ datatypes, including lists and maps. This operator tests for the
+ nonexistence of an attribute, not its data type. If the data type
+ of attribute " `a`" is null, and you evaluate it using `NULL`, the
+ result is a Boolean false . This is because the attribute " `a`"
+ exists; its data type is not relevant to the `NULL` comparison
+ operator.
+ `CONTAINS` : Checks for a subsequence, or value in a set.
AttributeValueList can contain only one AttributeValue element of
type String, Number, or Binary (not a set type). If the target
@@ -1306,7 +1356,7 @@ class DynamoDBConnection(AWSQueryConnection):
the assumption is valid and the condition evaluates to true. If the
value is found, despite the assumption that it does not exist, the
condition evaluates to false.
-
+ Note that the default value for Exists is `True`.
The Value and Exists parameters are incompatible with
@@ -1364,24 +1414,25 @@ class DynamoDBConnection(AWSQueryConnection):
The operation will succeed only if the entire map evaluates to true.
:type condition_expression: string
- :param condition_expression:
- A condition that must be satisfied in order for a conditional PutItem
- operation to succeed.
-
+ :param condition_expression: A condition that must be satisfied in
+ order for a conditional PutItem operation to succeed.
An expression can contain any of the following:
- + Boolean functions: `ATTRIBUTE_EXIST | CONTAINS | BEGINS_WITH`
+ + Boolean functions: `attribute_exists | attribute_not_exists |
+ contains | begins_with` These function names are case-sensitive.
+ Comparison operators: ` = | <> | < | > | <=
| >= | BETWEEN | IN`
- + Logical operators: `NOT | AND | OR`
+ + Logical operators: `AND | OR | NOT`
- :type expression_attribute_names: map
- :param expression_attribute_names:
- One or more substitution tokens for simplifying complex expressions.
- The following are some use cases for an ExpressionAttributeNames
- value:
+ For more information on condition expressions, go to `Specifying
+ Conditions`_ in the Amazon DynamoDB Developer Guide .
+
+ :type expression_attribute_names: map
+ :param expression_attribute_names: One or more substitution tokens for
+ simplifying complex expressions. The following are some use cases
+ for using ExpressionAttributeNames :
+ To shorten an attribute name that is very long or unwieldy in an
expression.
@@ -1403,37 +1454,39 @@ class DynamoDBConnection(AWSQueryConnection):
ExpressionAttributeNames :
- + `{"n":"order.customerInfo.LastName"}`
+ + `{"#name":"order.customerInfo.LastName"}`
The expression can now be simplified as follows:
- + `#n = "Smith" OR #n = "Jones"`
-
- :type expression_attribute_values: map
- :param expression_attribute_values:
- One or more values that can be substituted in an expression.
-
- Use the **:** character in an expression to dereference an attribute
- value. For example, consider the following expression:
+ + `#name = "Smith" OR #name = "Jones"`
- + `ProductStatus IN ("Available","Backordered","Discontinued")`
+ For more information on expression attribute names, go to `Accessing
+ Item Attributes`_ in the Amazon DynamoDB Developer Guide .
+ :type expression_attribute_values: map
+ :param expression_attribute_values: One or more values that can be
+ substituted in an expression.
+ Use the **:** (colon) character in an expression to dereference an
+ attribute value. For example, suppose that you wanted to check
+ whether the value of the ProductStatus attribute was one of the
+ following:
- Now suppose that you specified the following for
- ExpressionAttributeValues :
-
+ `Available | Backordered | Discontinued`
- + `{ "a":{"S":"Available"}, "b":{"S":"Backordered"},
- "d":{"S":"Discontinued"} }`
+ You would first need to specify ExpressionAttributeValues as follows:
+ `{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
+ ":disc":{"S":"Discontinued"} }`
- The expression can now be simplified as follows:
+ You could then use these values in an expression, such as this:
+ `ProductStatus IN (:avail, :back, :disc)`
- + `ProductStatus IN (:a,:b,:c)`
+ For more information on expression attribute values, go to `Specifying
+ Conditions`_ in the Amazon DynamoDB Developer Guide .
"""
params = {'TableName': table_name, 'Item': item, }
@@ -1601,7 +1654,9 @@ class DynamoDBConnection(AWSQueryConnection):
query on a table, you can have conditions only on the table primary
key attributes. You must specify the hash key attribute name and
value as an `EQ` condition. You can optionally specify a second
- condition, referring to the range key attribute.
+ condition, referring to the range key attribute. If you do not
+ specify a range key condition, all items under the hash key will be
+ fetched and processed. Any filters will be applied after this.
For a query on an index, you can have conditions only on the index key
attributes. You must specify the index hash attribute name and
value as an EQ condition. You can optionally specify a second
@@ -1616,8 +1671,8 @@ class DynamoDBConnection(AWSQueryConnection):
ComparisonOperator being used. For type Number, value comparisons
are numeric. String value comparisons for greater than, equals, or
less than are based on ASCII character code values. For example,
- `a` is greater than `A`, and `aa` is greater than `B`. For a list
- of code values, see
+ `a` is greater than `A`, and `a` is greater than `B`. For a list of
+ code values, see
`http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_.
For Binary, DynamoDB treats each byte of the binary data as
unsigned when it compares binary values, for example when
@@ -1687,9 +1742,10 @@ class DynamoDBConnection(AWSQueryConnection):
This parameter does not support lists or maps.
- A condition that evaluates the query results and returns only the
- desired values.
-
+ A condition that evaluates the query results after the items are read
+ and returns only the desired values.
+ Query filters are applied after the items are read, so they do not
+ limit the capacity used.
If you specify more than one condition in the QueryFilter map, then by
default all of the conditions must evaluate to true. In other
words, the conditions are ANDed together. (You can use the
@@ -1697,6 +1753,11 @@ class DynamoDBConnection(AWSQueryConnection):
do this, then at least one of the conditions must evaluate to true,
rather than all of them.)
+
+ QueryFilter does not allow key attributes. You cannot define a filter
+ condition on a hash key or range key.
+
+
Each QueryFilter element consists of an attribute name to compare,
along with the following:
@@ -1706,7 +1767,7 @@ class DynamoDBConnection(AWSQueryConnection):
operator specified in ComparisonOperator . For type Number, value
comparisons are numeric. String value comparisons for greater than,
equals, or less than are based on ASCII character code values. For
- example, `a` is greater than `A`, and `aa` is greater than `B`. For
+ example, `a` is greater than `A`, and `a` is greater than `B`. For
a list of code values, see
`http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_.
For type Binary, DynamoDB treats each byte of the binary data as
@@ -1723,11 +1784,6 @@ class DynamoDBConnection(AWSQueryConnection):
:type conditional_operator: string
:param conditional_operator:
- There is a newer parameter available. Use ConditionExpression instead.
- Note that if you use ConditionalOperator and ConditionExpression at
- the same time, DynamoDB will return a ValidationException
- exception.
-
This parameter does not support lists or maps.
A logical operator to apply to the conditions in the QueryFilter map:
@@ -1769,26 +1825,32 @@ class DynamoDBConnection(AWSQueryConnection):
included in the response.
:type projection_expression: string
- :param projection_expression: One or more attributes to retrieve from
- the table. These attributes can include scalars, sets, or elements
- of a JSON document. The attributes in the expression must be
- separated by commas.
+ :param projection_expression: A string that identifies one or more
+ attributes to retrieve from the table. These attributes can include
+ scalars, sets, or elements of a JSON document. The attributes in
+ the expression must be separated by commas.
If no attribute names are specified, then all attributes will be
returned. If any of the requested attributes are not found, they
will not appear in the result.
+ For more information on projection expressions, go to `Accessing Item
+ Attributes`_ in the Amazon DynamoDB Developer Guide .
+
:type filter_expression: string
:param filter_expression: A condition that evaluates the query results
- and returns only the desired values.
+ after the items are read and returns only the desired values.
The condition you specify is applied to the items queried; any items
that do not match the expression are not returned.
+ Filter expressions are applied after the items are read, so they do not
+ limit the capacity used.
+ A FilterExpression has the same syntax as a ConditionExpression . For
+ more information on expression syntax, go to `Specifying
+ Conditions`_ in the Amazon DynamoDB Developer Guide .
:type expression_attribute_names: map
- :param expression_attribute_names:
- One or more substitution tokens for simplifying complex expressions.
- The following are some use cases for an ExpressionAttributeNames
- value:
-
+ :param expression_attribute_names: One or more substitution tokens for
+ simplifying complex expressions. The following are some use cases
+ for using ExpressionAttributeNames :
+ To shorten an attribute name that is very long or unwieldy in an
expression.
@@ -1810,37 +1872,39 @@ class DynamoDBConnection(AWSQueryConnection):
ExpressionAttributeNames :
- + `{"n":"order.customerInfo.LastName"}`
+ + `{"#name":"order.customerInfo.LastName"}`
The expression can now be simplified as follows:
- + `#n = "Smith" OR #n = "Jones"`
-
- :type expression_attribute_values: map
- :param expression_attribute_values:
- One or more values that can be substituted in an expression.
-
- Use the **:** character in an expression to dereference an attribute
- value. For example, consider the following expression:
-
+ + `#name = "Smith" OR #name = "Jones"`
- + `ProductStatus IN ("Available","Backordered","Discontinued")`
+ For more information on expression attribute names, go to `Accessing
+ Item Attributes`_ in the Amazon DynamoDB Developer Guide .
- Now suppose that you specified the following for
- ExpressionAttributeValues :
+ :type expression_attribute_values: map
+ :param expression_attribute_values: One or more values that can be
+ substituted in an expression.
+ Use the **:** (colon) character in an expression to dereference an
+ attribute value. For example, suppose that you wanted to check
+ whether the value of the ProductStatus attribute was one of the
+ following:
+ `Available | Backordered | Discontinued`
- + `{ "a":{"S":"Available"}, "b":{"S":"Backordered"},
- "d":{"S":"Discontinued"} }`
+ You would first need to specify ExpressionAttributeValues as follows:
+ `{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
+ ":disc":{"S":"Discontinued"} }`
- The expression can now be simplified as follows:
+ You could then use these values in an expression, such as this:
+ `ProductStatus IN (:avail, :back, :disc)`
- + `ProductStatus IN (:a,:b,:c)`
+ For more information on expression attribute values, go to `Specifying
+ Conditions`_ in the Amazon DynamoDB Developer Guide .
"""
params = {
@@ -1988,7 +2052,7 @@ class DynamoDBConnection(AWSQueryConnection):
operator specified in ComparisonOperator . For type Number, value
comparisons are numeric. String value comparisons for greater than,
equals, or less than are based on ASCII character code values. For
- example, `a` is greater than `A`, and `aa` is greater than `B`. For
+ example, `a` is greater than `A`, and `a` is greater than `B`. For
a list of code values, see
`http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_.
For Binary, DynamoDB treats each byte of the binary data as
@@ -2074,14 +2138,17 @@ class DynamoDBConnection(AWSQueryConnection):
If you specify Segment , you must also specify TotalSegments .
:type projection_expression: string
- :param projection_expression: One or more attributes to retrieve from
- the table. These attributes can include scalars, sets, or elements
- of a JSON document. The attributes in the expression must be
- separated by commas.
+ :param projection_expression: A string that identifies one or more
+ attributes to retrieve from the table. These attributes can include
+ scalars, sets, or elements of a JSON document. The attributes in
+ the expression must be separated by commas.
If no attribute names are specified, then all attributes will be
returned. If any of the requested attributes are not found, they
will not appear in the result.
+ For more information on projection expressions, go to `Accessing Item
+ Attributes`_ in the Amazon DynamoDB Developer Guide .
+
:type filter_expression: string
:param filter_expression: A condition that evaluates the scan results
and returns only the desired values.
@@ -2089,11 +2156,9 @@ class DynamoDBConnection(AWSQueryConnection):
that do not match the expression are not returned.
:type expression_attribute_names: map
- :param expression_attribute_names:
- One or more substitution tokens for simplifying complex expressions.
- The following are some use cases for an ExpressionAttributeNames
- value:
-
+ :param expression_attribute_names: One or more substitution tokens for
+ simplifying complex expressions. The following are some use cases
+ for using ExpressionAttributeNames :
+ To shorten an attribute name that is very long or unwieldy in an
expression.
@@ -2115,37 +2180,39 @@ class DynamoDBConnection(AWSQueryConnection):
ExpressionAttributeNames :
- + `{"n":"order.customerInfo.LastName"}`
+ + `{"#name":"order.customerInfo.LastName"}`
The expression can now be simplified as follows:
- + `#n = "Smith" OR #n = "Jones"`
-
- :type expression_attribute_values: map
- :param expression_attribute_values:
- One or more values that can be substituted in an expression.
-
- Use the **:** character in an expression to dereference an attribute
- value. For example, consider the following expression:
-
+ + `#name = "Smith" OR #name = "Jones"`
- + `ProductStatus IN ("Available","Backordered","Discontinued")`
+ For more information on expression attribute names, go to `Accessing
+ Item Attributes`_ in the Amazon DynamoDB Developer Guide .
- Now suppose that you specified the following for
- ExpressionAttributeValues :
+ :type expression_attribute_values: map
+ :param expression_attribute_values: One or more values that can be
+ substituted in an expression.
+ Use the **:** (colon) character in an expression to dereference an
+ attribute value. For example, suppose that you wanted to check
+ whether the value of the ProductStatus attribute was one of the
+ following:
+ `Available | Backordered | Discontinued`
- + `{ "a":{"S":"Available"}, "b":{"S":"Backordered"},
- "d":{"S":"Discontinued"} }`
+ You would first need to specify ExpressionAttributeValues as follows:
+ `{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
+ ":disc":{"S":"Discontinued"} }`
- The expression can now be simplified as follows:
+ You could then use these values in an expression, such as this:
+ `ProductStatus IN (:avail, :back, :disc)`
- + `ProductStatus IN (:a,:b,:c)`
+ For more information on expression attribute values, go to `Specifying
+ Conditions`_ in the Amazon DynamoDB Developer Guide .
"""
params = {'TableName': table_name, }
@@ -2255,7 +2322,17 @@ class DynamoDBConnection(AWSQueryConnection):
+ If the existing attribute is a number, and if Value is also a number,
then Value is mathematically added to the existing attribute. If
Value is a negative number, then it is subtracted from the existing
- attribute.
+ attribute. If you use `ADD` to increment or decrement a number
+ value for an item that doesn't exist before the update, DynamoDB
+ uses 0 as the initial value. Similarly, if you use `ADD` for an
+ existing item to increment or decrement an attribute value that
+ doesn't exist before the update, DynamoDB uses `0` as the initial
+ value. For example, suppose that the item you want to update
+ doesn't have an attribute named itemcount , but you decide to `ADD`
+ the number `3` to this attribute anyway. DynamoDB will create the
+ itemcount attribute, set its initial value to `0`, and finally add
+ `3` to it. The result will be a new itemcount attribute, with a
+ value of `3`.
+ If the existing data type is a set, and if Value is also a set, then
Value is appended to the existing set. For example, if the
attribute value is the set `[1,2]`, and the `ADD` action specified
@@ -2271,8 +2348,10 @@ class DynamoDBConnection(AWSQueryConnection):
+ `PUT` - Causes DynamoDB to create a new item with the specified
primary key, and then adds the attribute.
- + `DELETE` - Causes nothing to happen; there is no attribute to delete.
- + `ADD` - Causes DynamoDB to creat an item with the supplied primary
+ + `DELETE` - Nothing happens, because attributes cannot be deleted from
+ a nonexistent item. The operation succeeds, but DynamoDB does not
+ create a new item.
+ + `ADD` - Causes DynamoDB to create an item with the supplied primary
key and number (or set of numbers) for the attribute value. The
only data types allowed are Number and Number Set.
@@ -2317,8 +2396,8 @@ class DynamoDBConnection(AWSQueryConnection):
ComparisonOperator being used. For type Number, value comparisons
are numeric. String value comparisons for greater than, equals, or
less than are based on ASCII character code values. For example,
- `a` is greater than `A`, and `aa` is greater than `B`. For a list
- of code values, see
+ `a` is greater than `A`, and `a` is greater than `B`. For a list of
+ code values, see
`http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_.
For type Binary, DynamoDB treats each byte of the binary data as
unsigned when it compares binary values, for example when
@@ -2371,9 +2450,19 @@ class DynamoDBConnection(AWSQueryConnection):
match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. > <li>
+ `NOT_NULL` : The attribute exists. `NOT_NULL` is supported for all
- datatypes, including lists and maps.
+ datatypes, including lists and maps. This operator tests for the
+ existence of an attribute, not its data type. If the data type of
+ attribute " `a`" is null, and you evaluate it using `NOT_NULL`, the
+ result is a Boolean true . This result is because the attribute "
+ `a`" exists; its data type is not relevant to the `NOT_NULL`
+ comparison operator.
+ `NULL` : The attribute does not exist. `NULL` is supported for all
- datatypes, including lists and maps.
+ datatypes, including lists and maps. This operator tests for the
+ nonexistence of an attribute, not its data type. If the data type
+ of attribute " `a`" is null, and you evaluate it using `NULL`, the
+ result is a Boolean false . This is because the attribute " `a`"
+ exists; its data type is not relevant to the `NULL` comparison
+ operator.
+ `CONTAINS` : Checks for a subsequence, or value in a set.
AttributeValueList can contain only one AttributeValue element of
type String, Number, or Binary (not a set type). If the target
@@ -2444,7 +2533,7 @@ class DynamoDBConnection(AWSQueryConnection):
the assumption is valid and the condition evaluates to true. If the
value is found, despite the assumption that it does not exist, the
condition evaluates to false.
-
+ Note that the default value for Exists is `True`.
The Value and Exists parameters are incompatible with
@@ -2508,10 +2597,9 @@ class DynamoDBConnection(AWSQueryConnection):
returned.
:type update_expression: string
- :param update_expression:
- An expression that defines one or more attributes to be updated, the
- action to be performed on them, and new value(s) for them.
-
+ :param update_expression: An expression that defines one or more
+ attributes to be updated, the action to be performed on them, and
+ new value(s) for them.
The following action values are available for UpdateExpression .
@@ -2537,7 +2625,17 @@ class DynamoDBConnection(AWSQueryConnection):
+ If the existing attribute is a number, and if Value is also a number,
then Value is mathematically added to the existing attribute. If
Value is a negative number, then it is subtracted from the existing
- attribute.
+ attribute. If you use `ADD` to increment or decrement a number
+ value for an item that doesn't exist before the update, DynamoDB
+ uses `0` as the initial value. Similarly, if you use `ADD` for an
+ existing item to increment or decrement an attribute value that
+ doesn't exist before the update, DynamoDB uses `0` as the initial
+ value. For example, suppose that the item you want to update
+ doesn't have an attribute named itemcount , but you decide to `ADD`
+ the number `3` to this attribute anyway. DynamoDB will create the
+ itemcount attribute, set its initial value to `0`, and finally add
+ `3` to it. The result will be a new itemcount attribute in the
+ item, with a value of `3`.
+ If the existing data type is a set and if Value is also a set, then
Value is added to the existing set. For example, if the attribute
value is the set `[1,2]`, and the `ADD` action specified `[3]`,
@@ -2563,33 +2661,29 @@ class DynamoDBConnection(AWSQueryConnection):
following: `SET a=:value1, b=:value2 DELETE :value3, :value4,
:value5`
- An expression can contain any of the following:
-
-
- + Boolean functions: `ATTRIBUTE_EXIST | CONTAINS | BEGINS_WITH`
- + Comparison operators: ` = | <> | < | > | <=
- | >= | BETWEEN | IN`
- + Logical operators: `NOT | AND | OR`
+ For more information on update expressions, go to `Modifying Items and
+ Attributes`_ in the Amazon DynamoDB Developer Guide .
:type condition_expression: string
- :param condition_expression:
- A condition that must be satisfied in order for a conditional update to
- succeed.
-
+ :param condition_expression: A condition that must be satisfied in
+ order for a conditional update to succeed.
An expression can contain any of the following:
- + Boolean functions: `ATTRIBUTE_EXIST | CONTAINS | BEGINS_WITH`
+ + Boolean functions: `attribute_exists | attribute_not_exists |
+ contains | begins_with` These function names are case-sensitive.
+ Comparison operators: ` = | <> | < | > | <=
| >= | BETWEEN | IN`
- + Logical operators: `NOT | AND | OR`
+ + Logical operators: `AND | OR | NOT`
- :type expression_attribute_names: map
- :param expression_attribute_names:
- One or more substitution tokens for simplifying complex expressions.
- The following are some use cases for an ExpressionAttributeNames
- value:
+ For more information on condition expressions, go to `Specifying
+ Conditions`_ in the Amazon DynamoDB Developer Guide .
+
+ :type expression_attribute_names: map
+ :param expression_attribute_names: One or more substitution tokens for
+ simplifying complex expressions. The following are some use cases
+ for using ExpressionAttributeNames :
+ To shorten an attribute name that is very long or unwieldy in an
expression.
@@ -2611,37 +2705,39 @@ class DynamoDBConnection(AWSQueryConnection):
ExpressionAttributeNames :
- + `{"n":"order.customerInfo.LastName"}`
+ + `{"#name":"order.customerInfo.LastName"}`
The expression can now be simplified as follows:
- + `#n = "Smith" OR #n = "Jones"`
-
- :type expression_attribute_values: map
- :param expression_attribute_values:
- One or more values that can be substituted in an expression.
+ + `#name = "Smith" OR #name = "Jones"`
- Use the **:** character in an expression to dereference an attribute
- value. For example, consider the following expression:
+ For more information on expression attribute names, go to `Accessing
+ Item Attributes`_ in the Amazon DynamoDB Developer Guide .
- + `ProductStatus IN ("Available","Backordered","Discontinued")`
-
-
- Now suppose that you specified the following for
- ExpressionAttributeValues :
+ :type expression_attribute_values: map
+ :param expression_attribute_values: One or more values that can be
+ substituted in an expression.
+ Use the **:** (colon) character in an expression to dereference an
+ attribute value. For example, suppose that you wanted to check
+ whether the value of the ProductStatus attribute was one of the
+ following:
+ `Available | Backordered | Discontinued`
- + `{ "a":{"S":"Available"}, "b":{"S":"Backordered"},
- "d":{"S":"Discontinued"} }`
+ You would first need to specify ExpressionAttributeValues as follows:
+ `{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
+ ":disc":{"S":"Discontinued"} }`
- The expression can now be simplified as follows:
+ You could then use these values in an expression, such as this:
+ `ProductStatus IN (:avail, :back, :disc)`
- + `ProductStatus IN (:a,:b,:c)`
+ For more information on expression attribute values, go to `Specifying
+ Conditions`_ in the Amazon DynamoDB Developer Guide .
"""
params = {'TableName': table_name, 'Key': key, }
@@ -2669,28 +2765,35 @@ class DynamoDBConnection(AWSQueryConnection):
body=json.dumps(params))
def update_table(self, table_name, provisioned_throughput=None,
- global_secondary_index_updates=None):
+ global_secondary_index_updates=None,
+ attribute_definitions=None):
"""
- Updates the provisioned throughput for the given table.
- Setting the throughput for a table helps you manage
- performance and is part of the provisioned throughput feature
- of DynamoDB.
+ Updates the provisioned throughput for the given table, or
+ manages the global secondary indexes on the table.
+
+ You can increase or decrease the table's provisioned
+ throughput values within the maximums and minimums listed in
+ the `Limits`_ section in the Amazon DynamoDB Developer Guide .
- The provisioned throughput values can be upgraded or
- downgraded based on the maximums and minimums listed in the
- `Limits`_ section in the Amazon DynamoDB Developer Guide .
+ In addition, you can use UpdateTable to add, modify or delete
+ global secondary indexes on the table. For more information,
+ see `Managing Global Secondary Indexes`_ in the Amazon
+ DynamoDB Developer Guide .
- The table must be in the `ACTIVE` state for this operation to
+ The table must be in the `ACTIVE` state for UpdateTable to
succeed. UpdateTable is an asynchronous operation; while
executing the operation, the table is in the `UPDATING` state.
While the table is in the `UPDATING` state, the table still
- has the provisioned throughput from before the call. The new
- provisioned throughput setting is in effect only when the
- table returns to the `ACTIVE` state after the UpdateTable
- operation.
+ has the provisioned throughput from before the call. The
+ table's new provisioned throughput settings go into effect
+ when the table returns to the `ACTIVE` state; at that point,
+ the UpdateTable operation is complete.
- You cannot add, modify or delete indexes using UpdateTable .
- Indexes can only be defined at table creation time.
+ :type attribute_definitions: list
+ :param attribute_definitions: An array of attributes that describe the
+ key schema for the table and indexes. If you are adding a new
+ global secondary index to the table, AttributeDefinitions must
+ include the key element(s) of the new index.
:type table_name: string
:param table_name: The name of the table to be updated.
@@ -2703,12 +2806,20 @@ class DynamoDBConnection(AWSQueryConnection):
`Limits`_ in the Amazon DynamoDB Developer Guide .
:type global_secondary_index_updates: list
- :param global_secondary_index_updates: An array of one or more global
- secondary indexes on the table, together with provisioned
- throughput settings for each index.
+ :param global_secondary_index_updates:
+ An array of one or more global secondary indexes for the table. For
+ each index in the array, you can specify one action:
+
+
+ + Create - add a new global secondary index to the table.
+ + Update - modify the provisioned throughput settings of an existing
+ global secondary index.
+ + Delete - remove a global secondary index from the table.
"""
params = {'TableName': table_name, }
+ if attribute_definitions is not None:
+ params['AttributeDefinitions'] = attribute_definitions
if provisioned_throughput is not None:
params['ProvisionedThroughput'] = provisioned_throughput
if global_secondary_index_updates is not None:
diff --git a/boto/ec2containerservice/__init__.py b/boto/ec2containerservice/__init__.py
new file mode 100644
index 00000000..a8946a0e
--- /dev/null
+++ b/boto/ec2containerservice/__init__.py
@@ -0,0 +1,41 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.regioninfo import RegionInfo, get_regions
+
+
+def regions():
+ """
+ Get all available regions for the Amazon EC2 Container Service.
+
+ :rtype: list
+ :return: A list of :class:`boto.regioninfo.RegionInfo`
+ """
+    from boto.ec2containerservice.layer1 import EC2ContainerServiceConnection
+    return get_regions('ec2containerservice',
+                       connection_cls=EC2ContainerServiceConnection)
+
+
+def connect_to_region(region_name, **kw_params):
+ for region in regions():
+ if region.name == region_name:
+ return region.connect(**kw_params)
+ return None
diff --git a/boto/ec2containerservice/exceptions.py b/boto/ec2containerservice/exceptions.py
new file mode 100644
index 00000000..4ad32aea
--- /dev/null
+++ b/boto/ec2containerservice/exceptions.py
@@ -0,0 +1,31 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from boto.exception import BotoServerError
+
+
+class ServerException(BotoServerError):
+ pass
+
+
+class ClientException(BotoServerError):
+ pass
diff --git a/boto/ec2containerservice/layer1.py b/boto/ec2containerservice/layer1.py
new file mode 100644
index 00000000..4168bdd0
--- /dev/null
+++ b/boto/ec2containerservice/layer1.py
@@ -0,0 +1,748 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import boto
+from boto.compat import json
+from boto.connection import AWSQueryConnection
+from boto.regioninfo import RegionInfo
+from boto.exception import JSONResponseError
+from boto.ec2containerservice import exceptions
+
+
+class EC2ContainerServiceConnection(AWSQueryConnection):
+ """
+ Amazon EC2 Container Service (Amazon ECS) is a highly scalable,
+ fast, container management service that makes it easy to run,
+ stop, and manage Docker containers on a cluster of Amazon EC2
+ instances. Amazon ECS lets you launch and stop container-enabled
+ applications with simple API calls, allows you to get the state of
+ your cluster from a centralized service, and gives you access to
+ many familiar Amazon EC2 features like security groups, Amazon EBS
+ volumes, and IAM roles.
+
+ You can use Amazon ECS to schedule the placement of containers
+ across your cluster based on your resource needs, isolation
+ policies, and availability requirements. Amazon EC2 Container
+ Service eliminates the need for you to operate your own cluster
+ management and configuration management systems or worry about
+ scaling your management infrastructure.
+ """
+ APIVersion = "2014-11-13"
+ DefaultRegionName = "us-east-1"
+ DefaultRegionEndpoint = "ecs.us-east-1.amazonaws.com"
+ ResponseError = JSONResponseError
+
+ _faults = {
+ "ServerException": exceptions.ServerException,
+ "ClientException": exceptions.ClientException,
+ }
+
+
+ def __init__(self, **kwargs):
+ region = kwargs.pop('region', None)
+ if not region:
+ region = RegionInfo(self, self.DefaultRegionName,
+ self.DefaultRegionEndpoint)
+
+ if 'host' not in kwargs or kwargs['host'] is None:
+ kwargs['host'] = region.endpoint
+
+ super(EC2ContainerServiceConnection, self).__init__(**kwargs)
+ self.region = region
+
+ def _required_auth_capability(self):
+ return ['hmac-v4']
+
+ def create_cluster(self, cluster_name=None):
+ """
+ Creates a new Amazon ECS cluster. By default, your account
+ will receive a `default` cluster when you launch your first
+ container instance. However, you can create your own cluster
+ with a unique name with the `CreateCluster` action.
+
+ During the preview, each account is limited to two clusters.
+
+ :type cluster_name: string
+ :param cluster_name: The name of your cluster. If you do not specify a
+ name for your cluster, you will create a cluster named `default`.
+
+ """
+ params = {}
+ if cluster_name is not None:
+ params['clusterName'] = cluster_name
+ return self._make_request(
+ action='CreateCluster',
+ verb='POST',
+ path='/', params=params)
+
+ def delete_cluster(self, cluster):
+ """
+ Deletes the specified cluster. You must deregister all
+ container instances from this cluster before you may delete
+ it. You can list the container instances in a cluster with
+ ListContainerInstances and deregister them with
+ DeregisterContainerInstance.
+
+ :type cluster: string
+ :param cluster: The cluster you want to delete.
+
+ """
+ params = {'cluster': cluster, }
+ return self._make_request(
+ action='DeleteCluster',
+ verb='POST',
+ path='/', params=params)
+
+ def deregister_container_instance(self, container_instance, cluster=None,
+ force=None):
+ """
+ Deregisters an Amazon ECS container instance from the
+ specified cluster. This instance will no longer be available
+ to run tasks.
+
+ :type cluster: string
+ :param cluster: The short name or full Amazon Resource Name (ARN) of
+ the cluster that hosts the container instance you want to
+ deregister. If you do not specify a cluster, the default cluster is
+ assumed.
+
+ :type container_instance: string
+ :param container_instance: The container instance UUID or full Amazon
+ Resource Name (ARN) of the container instance you want to
+ deregister. The ARN contains the `arn:aws:ecs` namespace, followed
+ by the region of the container instance, the AWS account ID of the
+ container instance owner, the `container-instance` namespace, and
+ then the container instance UUID. For example, arn:aws:ecs: region
+ : aws_account_id :container-instance/ container_instance_UUID .
+
+ :type force: boolean
+ :param force: Force the deregistration of the container instance. You
+ can use the `force` parameter if you have several tasks running on
+ a container instance and you don't want to run `StopTask` for each
+ task before deregistering the container instance.
+
+ """
+ params = {'containerInstance': container_instance, }
+ if cluster is not None:
+ params['cluster'] = cluster
+ if force is not None:
+ params['force'] = str(
+ force).lower()
+ return self._make_request(
+ action='DeregisterContainerInstance',
+ verb='POST',
+ path='/', params=params)
+
+ def deregister_task_definition(self, task_definition):
+ """
+ Deregisters the specified task definition. You will no longer
+ be able to run tasks from this definition after
+ deregistration.
+
+ :type task_definition: string
+ :param task_definition: The `family` and `revision` (
+ `family:revision`) or full Amazon Resource Name (ARN) of the task
+ definition that you want to deregister.
+
+ """
+ params = {'taskDefinition': task_definition, }
+ return self._make_request(
+ action='DeregisterTaskDefinition',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_clusters(self, clusters=None):
+ """
+ Describes one or more of your clusters.
+
+ :type clusters: list
+ :param clusters: A space-separated list of cluster names or full
+ cluster Amazon Resource Name (ARN) entries. If you do not specify a
+ cluster, the default cluster is assumed.
+
+ """
+ params = {}
+ if clusters is not None:
+ self.build_list_params(params,
+ clusters,
+ 'clusters.member')
+ return self._make_request(
+ action='DescribeClusters',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_container_instances(self, container_instances, cluster=None):
+ """
+ Describes Amazon EC2 Container Service container instances.
+ Returns metadata about registered and remaining resources on
+ each container instance requested.
+
+ :type cluster: string
+ :param cluster: The short name or full Amazon Resource Name (ARN) of
+ the cluster that hosts the container instances you want to
+ describe. If you do not specify a cluster, the default cluster is
+ assumed.
+
+ :type container_instances: list
+ :param container_instances: A space-separated list of container
+ instance UUIDs or full Amazon Resource Name (ARN) entries.
+
+ """
+ params = {}
+ self.build_list_params(params,
+ container_instances,
+ 'containerInstances.member')
+ if cluster is not None:
+ params['cluster'] = cluster
+ return self._make_request(
+ action='DescribeContainerInstances',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_task_definition(self, task_definition):
+ """
+ Describes a task definition.
+
+ :type task_definition: string
+ :param task_definition: The `family` and `revision` (
+ `family:revision`) or full Amazon Resource Name (ARN) of the task
+ definition that you want to describe.
+
+ """
+ params = {'taskDefinition': task_definition, }
+ return self._make_request(
+ action='DescribeTaskDefinition',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_tasks(self, tasks, cluster=None):
+ """
+ Describes a specified task or tasks.
+
+ :type cluster: string
+ :param cluster: The short name or full Amazon Resource Name (ARN) of
+ the cluster that hosts the task you want to describe. If you do not
+ specify a cluster, the default cluster is assumed.
+
+ :type tasks: list
+ :param tasks: A space-separated list of task UUIDs or full Amazon
+ Resource Name (ARN) entries.
+
+ """
+ params = {}
+ self.build_list_params(params,
+ tasks,
+ 'tasks.member')
+ if cluster is not None:
+ params['cluster'] = cluster
+ return self._make_request(
+ action='DescribeTasks',
+ verb='POST',
+ path='/', params=params)
+
+ def discover_poll_endpoint(self, container_instance=None):
+ """
+ This action is only used by the Amazon EC2 Container Service
+ agent, and it is not intended for use outside of the agent.
+
+
+ Returns an endpoint for the Amazon EC2 Container Service agent
+ to poll for updates.
+
+ :type container_instance: string
+ :param container_instance: The container instance UUID or full Amazon
+ Resource Name (ARN) of the container instance. The ARN contains the
+ `arn:aws:ecs` namespace, followed by the region of the container
+ instance, the AWS account ID of the container instance owner, the
+ `container-instance` namespace, and then the container instance
+ UUID. For example, arn:aws:ecs: region : aws_account_id :container-
+ instance/ container_instance_UUID .
+
+ """
+ params = {}
+ if container_instance is not None:
+ params['containerInstance'] = container_instance
+ return self._make_request(
+ action='DiscoverPollEndpoint',
+ verb='POST',
+ path='/', params=params)
+
+ def list_clusters(self, next_token=None, max_results=None):
+ """
+ Returns a list of existing clusters.
+
+ :type next_token: string
+ :param next_token: The `nextToken` value returned from a previous
+ paginated `ListClusters` request where `maxResults` was used and
+ the results exceeded the value of that parameter. Pagination
+ continues from the end of the previous results that returned the
+ `nextToken` value. This value is `null` when there are no more
+ results to return.
+
+ :type max_results: integer
+ :param max_results: The maximum number of cluster results returned by
+ `ListClusters` in paginated output. When this parameter is used,
+ `ListClusters` only returns `maxResults` results in a single page
+ along with a `nextToken` response element. The remaining results of
+ the initial request can be seen by sending another `ListClusters`
+ request with the returned `nextToken` value. This value can be
+ between 1 and 100. If this parameter is not used, then
+ `ListClusters` returns up to 100 results and a `nextToken` value if
+ applicable.
+
+ """
+ params = {}
+ if next_token is not None:
+ params['nextToken'] = next_token
+ if max_results is not None:
+ params['maxResults'] = max_results
+ return self._make_request(
+ action='ListClusters',
+ verb='POST',
+ path='/', params=params)
+
+ def list_container_instances(self, cluster=None, next_token=None,
+ max_results=None):
+ """
+ Returns a list of container instances in a specified cluster.
+
+ :type cluster: string
+ :param cluster: The short name or full Amazon Resource Name (ARN) of
+ the cluster that hosts the container instances you want to list. If
+ you do not specify a cluster, the default cluster is assumed.
+
+ :type next_token: string
+ :param next_token: The `nextToken` value returned from a previous
+ paginated `ListContainerInstances` request where `maxResults` was
+ used and the results exceeded the value of that parameter.
+ Pagination continues from the end of the previous results that
+ returned the `nextToken` value. This value is `null` when there are
+ no more results to return.
+
+ :type max_results: integer
+ :param max_results: The maximum number of container instance results
+ returned by `ListContainerInstances` in paginated output. When this
+ parameter is used, `ListContainerInstances` only returns
+ `maxResults` results in a single page along with a `nextToken`
+ response element. The remaining results of the initial request can
+ be seen by sending another `ListContainerInstances` request with
+ the returned `nextToken` value. This value can be between 1 and
+ 100. If this parameter is not used, then `ListContainerInstances`
+ returns up to 100 results and a `nextToken` value if applicable.
+
+ """
+ params = {}
+ if cluster is not None:
+ params['cluster'] = cluster
+ if next_token is not None:
+ params['nextToken'] = next_token
+ if max_results is not None:
+ params['maxResults'] = max_results
+ return self._make_request(
+ action='ListContainerInstances',
+ verb='POST',
+ path='/', params=params)
+
+ def list_task_definitions(self, family_prefix=None, next_token=None,
+ max_results=None):
+ """
+ Returns a list of task definitions that are registered to your
+ account. You can filter the results by family name with the
+ `familyPrefix` parameter.
+
+ :type family_prefix: string
+ :param family_prefix: The name of the family that you want to filter
+ the `ListTaskDefinitions` results with. Specifying a `familyPrefix`
+ will limit the listed task definitions to definitions that belong
+ to that family.
+
+ :type next_token: string
+ :param next_token: The `nextToken` value returned from a previous
+ paginated `ListTaskDefinitions` request where `maxResults` was used
+ and the results exceeded the value of that parameter. Pagination
+ continues from the end of the previous results that returned the
+ `nextToken` value. This value is `null` when there are no more
+ results to return.
+
+ :type max_results: integer
+ :param max_results: The maximum number of task definition results
+ returned by `ListTaskDefinitions` in paginated output. When this
+ parameter is used, `ListTaskDefinitions` only returns `maxResults`
+ results in a single page along with a `nextToken` response element.
+ The remaining results of the initial request can be seen by sending
+ another `ListTaskDefinitions` request with the returned `nextToken`
+ value. This value can be between 1 and 100. If this parameter is
+ not used, then `ListTaskDefinitions` returns up to 100 results and
+ a `nextToken` value if applicable.
+
+ """
+ params = {}
+ if family_prefix is not None:
+ params['familyPrefix'] = family_prefix
+ if next_token is not None:
+ params['nextToken'] = next_token
+ if max_results is not None:
+ params['maxResults'] = max_results
+ return self._make_request(
+ action='ListTaskDefinitions',
+ verb='POST',
+ path='/', params=params)
+
+ def list_tasks(self, cluster=None, container_instance=None, family=None,
+ next_token=None, max_results=None):
+ """
+ Returns a list of tasks for a specified cluster. You can
+ filter the results by family name or by a particular container
+ instance with the `family` and `containerInstance` parameters.
+
+ :type cluster: string
+ :param cluster: The short name or full Amazon Resource Name (ARN) of
+ the cluster that hosts the tasks you want to list. If you do not
+ specify a cluster, the default cluster is assumed.
+
+ :type container_instance: string
+ :param container_instance: The container instance UUID or full Amazon
+ Resource Name (ARN) of the container instance that you want to
+ filter the `ListTasks` results with. Specifying a
+ `containerInstance` will limit the results to tasks that belong to
+ that container instance.
+
+ :type family: string
+ :param family: The name of the family that you want to filter the
+ `ListTasks` results with. Specifying a `family` will limit the
+ results to tasks that belong to that family.
+
+ :type next_token: string
+ :param next_token: The `nextToken` value returned from a previous
+ paginated `ListTasks` request where `maxResults` was used and the
+ results exceeded the value of that parameter. Pagination continues
+ from the end of the previous results that returned the `nextToken`
+ value. This value is `null` when there are no more results to
+ return.
+
+ :type max_results: integer
+ :param max_results: The maximum number of task results returned by
+ `ListTasks` in paginated output. When this parameter is used,
+ `ListTasks` only returns `maxResults` results in a single page
+ along with a `nextToken` response element. The remaining results of
+ the initial request can be seen by sending another `ListTasks`
+ request with the returned `nextToken` value. This value can be
+ between 1 and 100. If this parameter is not used, then `ListTasks`
+ returns up to 100 results and a `nextToken` value if applicable.
+
+ """
+ params = {}
+ if cluster is not None:
+ params['cluster'] = cluster
+ if container_instance is not None:
+ params['containerInstance'] = container_instance
+ if family is not None:
+ params['family'] = family
+ if next_token is not None:
+ params['nextToken'] = next_token
+ if max_results is not None:
+ params['maxResults'] = max_results
+ return self._make_request(
+ action='ListTasks',
+ verb='POST',
+ path='/', params=params)
+
+ def register_container_instance(self, cluster=None,
+ instance_identity_document=None,
+ instance_identity_document_signature=None,
+ total_resources=None):
+ """
+ This action is only used by the Amazon EC2 Container Service
+ agent, and it is not intended for use outside of the agent.
+
+
+ Registers an Amazon EC2 instance into the specified cluster.
+ This instance will become available to place containers on.
+
+ :type cluster: string
+ :param cluster: The short name or full Amazon Resource Name (ARN) of
+ the cluster that you want to register your container instance with.
+ If you do not specify a cluster, the default cluster is assumed.
+
+ :type instance_identity_document: string
+ :param instance_identity_document:
+
+ :type instance_identity_document_signature: string
+ :param instance_identity_document_signature:
+
+ :type total_resources: list
+ :param total_resources:
+
+ """
+ params = {}
+ if cluster is not None:
+ params['cluster'] = cluster
+ if instance_identity_document is not None:
+ params['instanceIdentityDocument'] = instance_identity_document
+ if instance_identity_document_signature is not None:
+ params['instanceIdentityDocumentSignature'] = instance_identity_document_signature
+ if total_resources is not None:
+ self.build_complex_list_params(
+ params, total_resources,
+ 'totalResources.member',
+ ('name', 'type', 'doubleValue', 'longValue', 'integerValue', 'stringSetValue'))
+ return self._make_request(
+ action='RegisterContainerInstance',
+ verb='POST',
+ path='/', params=params)
+
+ def register_task_definition(self, family, container_definitions):
+ """
+ Registers a new task definition from the supplied `family` and
+ `containerDefinitions`.
+
+ :type family: string
+ :param family: You can specify a `family` for a task definition, which
+ allows you to track multiple versions of the same task definition.
+ You can think of the `family` as a name for your task definition.
+
+ :type container_definitions: list
+ :param container_definitions: A list of container definitions in JSON
+ format that describe the different containers that make up your
+ task.
+
+ """
+ params = {'family': family, }
+ self.build_complex_list_params(
+ params, container_definitions,
+ 'containerDefinitions.member',
+ ('name', 'image', 'cpu', 'memory', 'links', 'portMappings', 'essential', 'entryPoint', 'command', 'environment'))
+ return self._make_request(
+ action='RegisterTaskDefinition',
+ verb='POST',
+ path='/', params=params)
+
+ def run_task(self, task_definition, cluster=None, overrides=None,
+ count=None):
+ """
+ Start a task using random placement and the default Amazon ECS
+ scheduler. If you want to use your own scheduler or place a
+ task on a specific container instance, use `StartTask`
+ instead.
+
+ :type cluster: string
+ :param cluster: The short name or full Amazon Resource Name (ARN) of
+ the cluster that you want to run your task on. If you do not
+ specify a cluster, the default cluster is assumed.
+
+ :type task_definition: string
+ :param task_definition: The `family` and `revision` (
+ `family:revision`) or full Amazon Resource Name (ARN) of the task
+ definition that you want to run.
+
+ :type overrides: dict
+ :param overrides:
+
+ :type count: integer
+ :param count: The number of instances of the specified task that you
+ would like to place on your cluster.
+
+ """
+ params = {'taskDefinition': task_definition, }
+ if cluster is not None:
+ params['cluster'] = cluster
+ if overrides is not None:
+ params['overrides'] = overrides
+ if count is not None:
+ params['count'] = count
+ return self._make_request(
+ action='RunTask',
+ verb='POST',
+ path='/', params=params)
+
+ def start_task(self, task_definition, container_instances, cluster=None,
+ overrides=None):
+ """
+ Starts a new task from the specified task definition on the
+ specified container instance or instances. If you want to use
+ the default Amazon ECS scheduler to place your task, use
+ `RunTask` instead.
+
+ :type cluster: string
+ :param cluster: The short name or full Amazon Resource Name (ARN) of
+ the cluster that you want to start your task on. If you do not
+ specify a cluster, the default cluster is assumed.
+
+ :type task_definition: string
+ :param task_definition: The `family` and `revision` (
+ `family:revision`) or full Amazon Resource Name (ARN) of the task
+ definition that you want to start.
+
+ :type overrides: dict
+ :param overrides:
+
+ :type container_instances: list
+ :param container_instances: The container instance UUIDs or full Amazon
+ Resource Name (ARN) entries for the container instances on which
+ you would like to place your task.
+
+ """
+ params = {'taskDefinition': task_definition, }
+ self.build_list_params(params,
+ container_instances,
+ 'containerInstances.member')
+ if cluster is not None:
+ params['cluster'] = cluster
+ if overrides is not None:
+ params['overrides'] = overrides
+ return self._make_request(
+ action='StartTask',
+ verb='POST',
+ path='/', params=params)
+
+ def stop_task(self, task, cluster=None):
+ """
+ Stops a running task.
+
+ :type cluster: string
+ :param cluster: The short name or full Amazon Resource Name (ARN) of
+ the cluster that hosts the task you want to stop. If you do not
+ specify a cluster, the default cluster is assumed.
+
+ :type task: string
+ :param task: The task UUID or full Amazon Resource Name (ARN) entry of
+ the task you would like to stop.
+
+ """
+ params = {'task': task, }
+ if cluster is not None:
+ params['cluster'] = cluster
+ return self._make_request(
+ action='StopTask',
+ verb='POST',
+ path='/', params=params)
+
+ def submit_container_state_change(self, cluster=None, task=None,
+ container_name=None, status=None,
+ exit_code=None, reason=None,
+ network_bindings=None):
+ """
+ This action is only used by the Amazon EC2 Container Service
+ agent, and it is not intended for use outside of the agent.
+
+
+ Sent to acknowledge that a container changed states.
+
+ :type cluster: string
+ :param cluster: The short name or full Amazon Resource Name (ARN) of
+ the cluster that hosts the container.
+
+ :type task: string
+ :param task: The task UUID or full Amazon Resource Name (ARN) of the
+ task that hosts the container.
+
+ :type container_name: string
+ :param container_name: The name of the container.
+
+ :type status: string
+ :param status: The status of the state change request.
+
+ :type exit_code: integer
+ :param exit_code: The exit code returned for the state change request.
+
+ :type reason: string
+ :param reason: The reason for the state change request.
+
+ :type network_bindings: list
+ :param network_bindings: The network bindings of the container.
+
+ """
+ params = {}
+ if cluster is not None:
+ params['cluster'] = cluster
+ if task is not None:
+ params['task'] = task
+ if container_name is not None:
+ params['containerName'] = container_name
+ if status is not None:
+ params['status'] = status
+ if exit_code is not None:
+ params['exitCode'] = exit_code
+ if reason is not None:
+ params['reason'] = reason
+ if network_bindings is not None:
+ self.build_complex_list_params(
+ params, network_bindings,
+ 'networkBindings.member',
+ ('bindIP', 'containerPort', 'hostPort'))
+ return self._make_request(
+ action='SubmitContainerStateChange',
+ verb='POST',
+ path='/', params=params)
+
+ def submit_task_state_change(self, cluster=None, task=None, status=None,
+ reason=None):
+ """
+ This action is only used by the Amazon EC2 Container Service
+ agent, and it is not intended for use outside of the agent.
+
+
+ Sent to acknowledge that a task changed states.
+
+ :type cluster: string
+ :param cluster: The short name or full Amazon Resource Name (ARN) of
+ the cluster that hosts the task.
+
+ :type task: string
+ :param task: The task UUID or full Amazon Resource Name (ARN) of the
+ task in the state change request.
+
+ :type status: string
+ :param status: The status of the state change request.
+
+ :type reason: string
+ :param reason: The reason for the state change request.
+
+ """
+ params = {}
+ if cluster is not None:
+ params['cluster'] = cluster
+ if task is not None:
+ params['task'] = task
+ if status is not None:
+ params['status'] = status
+ if reason is not None:
+ params['reason'] = reason
+ return self._make_request(
+ action='SubmitTaskStateChange',
+ verb='POST',
+ path='/', params=params)
+
+ def _make_request(self, action, verb, path, params):
+ params['ContentType'] = 'JSON'
+ response = self.make_request(action=action, verb='POST',
+ path='/', params=params)
+ body = response.read().decode('utf-8')
+ boto.log.debug(body)
+ if response.status == 200:
+ return json.loads(body)
+ else:
+ json_body = json.loads(body)
+ fault_name = json_body.get('Error', {}).get('Code', None)
+ exception_class = self._faults.get(fault_name, self.ResponseError)
+ raise exception_class(response.status, response.reason,
+ body=json_body)
diff --git a/boto/endpoints.json b/boto/endpoints.json
index d9b4f6bb..535d7685 100644
--- a/boto/endpoints.json
+++ b/boto/endpoints.json
@@ -12,6 +12,11 @@
"us-west-2": "autoscaling.us-west-2.amazonaws.com",
"eu-central-1": "autoscaling.eu-central-1.amazonaws.com"
},
+ "awslambda": {
+ "us-east-1": "lambda.us-east-1.amazonaws.com",
+ "us-west-2": "lambda.us-west-2.amazonaws.com",
+ "eu-west-1": "lambda.eu-west-1.amazonaws.com"
+ },
"cloudformation": {
"ap-northeast-1": "cloudformation.ap-northeast-1.amazonaws.com",
"ap-southeast-1": "cloudformation.ap-southeast-1.amazonaws.com",
@@ -36,6 +41,13 @@
"us-west-2": "cloudfront.amazonaws.com",
"eu-central-1": "cloudfront.amazonaws.com"
},
+ "cloudhsm": {
+ "us-east-1": "cloudhsm.us-east-1.amazonaws.com",
+ "us-west-2": "cloudhsm.us-west-2.amazonaws.com",
+ "eu-west-1": "cloudhsm.eu-west-1.amazonaws.com",
+ "eu-central-1": "cloudhsm.eu-central-1.amazonaws.com",
+ "ap-southeast-2": "cloudhsm.ap-southeast-2.amazonaws.com"
+ },
"cloudsearch": {
"ap-southeast-1": "cloudsearch.ap-southeast-1.amazonaws.com",
"ap-southeast-2": "cloudsearch.ap-southeast-2.amazonaws.com",
@@ -82,12 +94,19 @@
"us-west-2": "monitoring.us-west-2.amazonaws.com",
"eu-central-1": "monitoring.eu-central-1.amazonaws.com"
},
+ "codedeploy": {
+ "us-east-1": "codedeploy.us-east-1.amazonaws.com",
+ "us-west-2": "codedeploy.us-west-2.amazonaws.com"
+ },
"cognito-identity": {
"us-east-1": "cognito-identity.us-east-1.amazonaws.com"
},
"cognito-sync": {
"us-east-1": "cognito-sync.us-east-1.amazonaws.com"
},
+ "configservice": {
+ "us-east-1": "config.us-east-1.amazonaws.com"
+ },
"datapipeline": {
"us-east-1": "datapipeline.us-east-1.amazonaws.com",
"us-west-2": "datapipeline.us-west-2.amazonaws.com",
@@ -133,6 +152,9 @@
"us-west-2": "ec2.us-west-2.amazonaws.com",
"eu-central-1": "ec2.eu-central-1.amazonaws.com"
},
+ "ec2containerservice": {
+ "us-east-1": "ecs.us-east-1.amazonaws.com"
+ },
"elasticache": {
"ap-northeast-1": "elasticache.ap-northeast-1.amazonaws.com",
"ap-southeast-1": "elasticache.ap-southeast-1.amazonaws.com",
@@ -232,6 +254,17 @@
"ap-northeast-1": "kinesis.ap-northeast-1.amazonaws.com",
"eu-central-1": "kinesis.eu-central-1.amazonaws.com"
},
+ "kms": {
+ "us-east-1": "kms.us-east-1.amazonaws.com",
+ "us-west-1": "kms.us-west-1.amazonaws.com",
+ "us-west-2": "kms.us-west-2.amazonaws.com",
+ "eu-west-1": "kms.eu-west-1.amazonaws.com",
+ "eu-central-1": "kms.eu-central-1.amazonaws.com",
+ "ap-southeast-2": "kms.ap-southeast-2.amazonaws.com",
+ "ap-southeast-1": "kms.ap-southeast-1.amazonaws.com",
+ "ap-northeast-1": "kms.ap-northeast-1.amazonaws.com",
+ "sa-east-1": "kms.sa-east-1.amazonaws.com"
+ },
"logs": {
"us-east-1": "logs.us-east-1.amazonaws.com",
"us-west-2": "logs.us-west-2.amazonaws.com",
@@ -354,7 +387,8 @@
"us-east-1": "sts.amazonaws.com",
"us-gov-west-1": "sts.us-gov-west-1.amazonaws.com",
"us-west-1": "sts.amazonaws.com",
- "us-west-2": "sts.amazonaws.com"
+ "us-west-2": "sts.amazonaws.com",
+ "eu-central-1": "sts.amazonaws.com"
},
"support": {
"us-east-1": "support.us-east-1.amazonaws.com",
diff --git a/boto/kinesis/layer1.py b/boto/kinesis/layer1.py
index d514b064..f1910ff4 100644
--- a/boto/kinesis/layer1.py
+++ b/boto/kinesis/layer1.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
@@ -28,6 +28,7 @@ from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.kinesis import exceptions
from boto.compat import json
+from boto.compat import six
class KinesisConnection(AWSQueryConnection):
@@ -67,23 +68,43 @@ class KinesisConnection(AWSQueryConnection):
def _required_auth_capability(self):
return ['hmac-v4']
+ def add_tags_to_stream(self, stream_name, tags):
+ """
+ Adds or updates tags for the specified Amazon Kinesis stream.
+ Each stream can have up to 10 tags.
+
+ If tags have already been assigned to the stream,
+ `AddTagsToStream` overwrites any existing tags that correspond
+ to the specified tag keys.
+
+ :type stream_name: string
+ :param stream_name: The name of the stream.
+
+ :type tags: map
+ :param tags: The set of key-value pairs to use to create the tags.
+
+ """
+ params = {'StreamName': stream_name, 'Tags': tags, }
+ return self.make_request(action='AddTagsToStream',
+ body=json.dumps(params))
+
def create_stream(self, stream_name, shard_count):
"""
- This operation adds a new Amazon Kinesis stream to your AWS
- account. A stream captures and transports data records that
- are continuously emitted from different data sources or
- producers . Scale-out within an Amazon Kinesis stream is
- explicitly supported by means of shards, which are uniquely
- identified groups of data records in an Amazon Kinesis stream.
+ Creates an Amazon Kinesis stream. A stream captures and
+ transports data records that are continuously emitted from
+ different data sources or producers. Scale-out within an
+ Amazon Kinesis stream is explicitly supported by means of
+ shards, which are uniquely identified groups of data records
+ in an Amazon Kinesis stream.
You specify and control the number of shards that a stream is
- composed of. Each shard can support up to 5 read transactions
- per second up to a maximum total of 2 MB of data read per
- second. Each shard can support up to 1000 write transactions
- per second up to a maximum total of 1 MB data written per
- second. You can add shards to a stream if the amount of data
- input increases and you can remove shards if the amount of
- data input decreases.
+ composed of. Each open shard can support up to 5 read
+ transactions per second, up to a maximum total of 2 MB of data
+ read per second. Each shard can support up to 1000 records
+ written per second, up to a maximum total of 1 MB data written
+ per second. You can add shards to a stream if the amount of
+ data input increases and you can remove shards if the amount
+ of data input decreases.
The stream name identifies the stream. The name is scoped to
the AWS account used by the application. It is also scoped by
@@ -93,27 +114,26 @@ class KinesisConnection(AWSQueryConnection):
`CreateStream` is an asynchronous operation. Upon receiving a
`CreateStream` request, Amazon Kinesis immediately returns and
- sets the stream status to CREATING. After the stream is
- created, Amazon Kinesis sets the stream status to ACTIVE. You
- should perform read and write operations only on an ACTIVE
- stream.
+ sets the stream status to `CREATING`. After the stream is
+ created, Amazon Kinesis sets the stream status to `ACTIVE`.
+ You should perform read and write operations only on an
+ `ACTIVE` stream.
You receive a `LimitExceededException` when making a
`CreateStream` request if you try to do one of the following:
- + Have more than five streams in the CREATING state at any
+ + Have more than five streams in the `CREATING` state at any
point in time.
+ Create more shards than are authorized for your account.
- **Note:** The default limit for an AWS account is two shards
- per stream. If you need to create a stream with more than two
- shards, contact AWS Support to increase the limit on your
- account.
+ The default limit for an AWS account is 10 shards per stream.
+ If you need to create a stream with more than 10 shards,
+ `contact AWS Support`_ to increase the limit on your account.
- You can use the `DescribeStream` operation to check the stream
- status, which is returned in `StreamStatus`.
+ You can use `DescribeStream` to check the stream status, which
+ is returned in `StreamStatus`.
`CreateStream` has a limit of 5 transactions per second per
account.
@@ -130,9 +150,9 @@ class KinesisConnection(AWSQueryConnection):
:param shard_count: The number of shards that the stream will use. The
throughput of the stream is a function of the number of shards;
more shards are required for greater provisioned throughput.
- **Note:** The default limit for an AWS account is two shards per
- stream. If you need to create a stream with more than two shards,
- contact AWS Support to increase the limit on your account.
+ **Note:** The default limit for an AWS account is 10 shards per stream.
+ If you need to create a stream with more than 10 shards, `contact
+ AWS Support`_ to increase the limit on your account.
"""
params = {
@@ -144,23 +164,23 @@ class KinesisConnection(AWSQueryConnection):
def delete_stream(self, stream_name):
"""
- This operation deletes a stream and all of its shards and
- data. You must shut down any applications that are operating
- on the stream before you delete the stream. If an application
- attempts to operate on a deleted stream, it will receive the
- exception `ResourceNotFoundException`.
+ Deletes a stream and all its shards and data. You must shut
+ down any applications that are operating on the stream before
+ you delete the stream. If an application attempts to operate
+ on a deleted stream, it will receive the exception
+ `ResourceNotFoundException`.
- If the stream is in the ACTIVE state, you can delete it. After
- a `DeleteStream` request, the specified stream is in the
- DELETING state until Amazon Kinesis completes the deletion.
+ If the stream is in the `ACTIVE` state, you can delete it.
+ After a `DeleteStream` request, the specified stream is in the
+ `DELETING` state until Amazon Kinesis completes the deletion.
**Note:** Amazon Kinesis might continue to accept data read
- and write operations, such as PutRecord and GetRecords, on a
- stream in the DELETING state until the stream deletion is
- complete.
+ and write operations, such as PutRecord, PutRecords, and
+ GetRecords, on a stream in the `DELETING` state until the
+ stream deletion is complete.
When you delete a stream, any shards in that stream are also
- deleted.
+ deleted, and any tags are dissociated from the stream.
You can use the DescribeStream operation to check the state of
the stream, which is returned in `StreamStatus`.
@@ -179,17 +199,17 @@ class KinesisConnection(AWSQueryConnection):
def describe_stream(self, stream_name, limit=None,
exclusive_start_shard_id=None):
"""
- This operation returns the following information about the
- stream: the current status of the stream, the stream Amazon
- Resource Name (ARN), and an array of shard objects that
- comprise the stream. For each shard object there is
- information about the hash key and sequence number ranges that
- the shard spans, and the IDs of any earlier shards that played
- in a role in a MergeShards or SplitShard operation that
- created the shard. A sequence number is the identifier
- associated with every record ingested in the Amazon Kinesis
- stream. The sequence number is assigned by the Amazon Kinesis
- service when a record is put into the stream.
+ Describes the specified stream.
+
+ The information about the stream includes its current status,
+ its Amazon Resource Name (ARN), and an array of shard objects.
+ For each shard object, there is information about the hash key
+ and sequence number ranges that the shard spans, and the IDs
+ of any earlier shards that played a role in creating the
+ shard. A sequence number is the identifier associated with
+ every record ingested in the Amazon Kinesis stream. The
+ sequence number is assigned when a record is put into the
+ stream.
You can limit the number of returned shards using the `Limit`
parameter. The number of shards in a stream may be too large
@@ -198,11 +218,11 @@ class KinesisConnection(AWSQueryConnection):
output. `HasMoreShards` is set to `True` when there is more
data available.
- If there are more shards available, you can request more
- shards by using the shard ID of the last shard returned by the
- `DescribeStream` request, in the `ExclusiveStartShardId`
- parameter in a subsequent request to `DescribeStream`.
- `DescribeStream` is a paginated operation.
+ `DescribeStream` is a paginated operation. If there are more
+ shards available, you can request them using the shard ID of
+ the last shard returned. Specify this ID in the
+ `ExclusiveStartShardId` parameter in a subsequent request to
+ `DescribeStream`.
`DescribeStream` has a limit of 10 transactions per second per
account.
@@ -215,7 +235,7 @@ class KinesisConnection(AWSQueryConnection):
:type exclusive_start_shard_id: string
:param exclusive_start_shard_id: The shard ID of the shard to start
- with for the stream description.
+ with.
"""
params = {'StreamName': stream_name, }
@@ -228,52 +248,72 @@ class KinesisConnection(AWSQueryConnection):
def get_records(self, shard_iterator, limit=None, b64_decode=True):
"""
- This operation returns one or more data records from a shard.
- A `GetRecords` operation request can retrieve up to 10 MB of
- data.
-
- You specify a shard iterator for the shard that you want to
- read data from in the `ShardIterator` parameter. The shard
- iterator specifies the position in the shard from which you
- want to start reading data records sequentially. A shard
- iterator specifies this position using the sequence number of
- a data record in the shard. For more information about the
- shard iterator, see GetShardIterator.
-
- `GetRecords` may return a partial result if the response size
- limit is exceeded. You will get an error, but not a partial
- result if the shard's provisioned throughput is exceeded, the
- shard iterator has expired, or an internal processing failure
- has occurred. Clients can request a smaller amount of data by
- specifying a maximum number of returned records using the
- `Limit` parameter. The `Limit` parameter can be set to an
- integer value of up to 10,000. If you set the value to an
- integer greater than 10,000, you will receive
- `InvalidArgumentException`.
-
- A new shard iterator is returned by every `GetRecords` request
- in `NextShardIterator`, which you use in the `ShardIterator`
- parameter of the next `GetRecords` request. When you
- repeatedly read from an Amazon Kinesis stream use a
- GetShardIterator request to get the first shard iterator to
- use in your first `GetRecords` request and then use the shard
- iterator returned in `NextShardIterator` for subsequent reads.
+ Gets data records from a shard.
- `GetRecords` can return `null` for the `NextShardIterator` to
- reflect that the shard has been closed and that the requested
- shard iterator would never have returned more data.
-
- If no items can be processed because of insufficient
- provisioned throughput on the shard involved in the request,
- `GetRecords` throws `ProvisionedThroughputExceededException`.
+ Specify a shard iterator using the `ShardIterator` parameter.
+ The shard iterator specifies the position in the shard from
+ which you want to start reading data records sequentially. If
+ there are no records available in the portion of the shard
+ that the iterator points to, `GetRecords` returns an empty
+ list. Note that it might take multiple calls to get to a
+ portion of the shard that contains records.
+
+ You can scale by provisioning multiple shards. Your
+ application should have one thread per shard, each reading
+ continuously from its stream. To read from a stream
+ continually, call `GetRecords` in a loop. Use GetShardIterator
+ to get the shard iterator to specify in the first `GetRecords`
+ call. `GetRecords` returns a new shard iterator in
+ `NextShardIterator`. Specify the shard iterator returned in
+ `NextShardIterator` in subsequent calls to `GetRecords`. Note
+ that if the shard has been closed, the shard iterator can't
+ return more data and `GetRecords` returns `null` in
+ `NextShardIterator`. You can terminate the loop when the shard
+ is closed, or when the shard iterator reaches the record with
+ the sequence number or other attribute that marks it as the
+ last record to process.
+
+ Each data record can be up to 50 KB in size, and each shard
+ can read up to 2 MB per second. You can ensure that your calls
+ don't exceed the maximum supported size or throughput by using
+ the `Limit` parameter to specify the maximum number of records
+ that `GetRecords` can return. Consider your average record
+ size when determining this limit. For example, if your average
+ record size is 40 KB, you can limit the data returned to about
+ 1 MB per call by specifying 25 as the limit.
+
+ The size of the data returned by `GetRecords` will vary
+ depending on the utilization of the shard. The maximum size of
+ data that `GetRecords` can return is 10 MB. If a call returns
+ 10 MB of data, subsequent calls made within the next 5 seconds
+ throw `ProvisionedThroughputExceededException`. If there is
+ insufficient provisioned throughput on the shard, subsequent
+ calls made within the next 1 second throw
+ `ProvisionedThroughputExceededException`. Note that
+ `GetRecords` won't return any data when it throws an
+ exception. For this reason, we recommend that you wait one
+ second between calls to `GetRecords`; however, it's possible
+ that the application will get exceptions for longer than 1
+ second.
+
+ To detect whether the application is falling behind in
+ processing, add a timestamp to your records and note how long
+ it takes to process them. You can also monitor how much data
+ is in a stream using the CloudWatch metrics for write
+ operations ( `PutRecord` and `PutRecords`). For more
+ information, see `Monitoring Amazon Kinesis with Amazon
+ CloudWatch`_ in the Amazon Kinesis Developer Guide .
:type shard_iterator: string
:param shard_iterator: The position in the shard from which you want to
- start sequentially reading data records.
+ start sequentially reading data records. A shard iterator specifies
+ this position using the sequence number of a data record in the
+ shard.
:type limit: integer
- :param limit: The maximum number of records to return, which can be set
- to a value of up to 10,000.
+ :param limit: The maximum number of records to return. Specify a value
+ of up to 10,000. If you specify a value that is greater than
+ 10,000, `GetRecords` throws `InvalidArgumentException`.
:type b64_decode: boolean
:param b64_decode: Decode the Base64-encoded ``Data`` field of records.
@@ -297,32 +337,31 @@ class KinesisConnection(AWSQueryConnection):
def get_shard_iterator(self, stream_name, shard_id, shard_iterator_type,
starting_sequence_number=None):
"""
- This operation returns a shard iterator in `ShardIterator`.
- The shard iterator specifies the position in the shard from
- which you want to start reading data records sequentially. A
- shard iterator specifies this position using the sequence
- number of a data record in a shard. A sequence number is the
- identifier associated with every record ingested in the Amazon
- Kinesis stream. The sequence number is assigned by the Amazon
- Kinesis service when a record is put into the stream.
-
- You must specify the shard iterator type in the
- `GetShardIterator` request. For example, you can set the
- `ShardIteratorType` parameter to read exactly from the
+ Gets a shard iterator. A shard iterator expires five minutes
+ after it is returned to the requester.
+
+ A shard iterator specifies the position in the shard from
+ which to start reading data records sequentially. A shard
+ iterator specifies this position using the sequence number of
+ a data record in a shard. A sequence number is the identifier
+ associated with every record ingested in the Amazon Kinesis
+ stream. The sequence number is assigned when a record is put
+ into the stream.
+
+ You must specify the shard iterator type. For example, you can
+ set the `ShardIteratorType` parameter to read exactly from the
position denoted by a specific sequence number by using the
- AT_SEQUENCE_NUMBER shard iterator type, or right after the
- sequence number by using the AFTER_SEQUENCE_NUMBER shard
+ `AT_SEQUENCE_NUMBER` shard iterator type, or right after the
+ sequence number by using the `AFTER_SEQUENCE_NUMBER` shard
iterator type, using sequence numbers returned by earlier
- PutRecord, GetRecords or DescribeStream requests. You can
- specify the shard iterator type TRIM_HORIZON in the request to
- cause `ShardIterator` to point to the last untrimmed record in
- the shard in the system, which is the oldest data record in
- the shard. Or you can point to just after the most recent
- record in the shard, by using the shard iterator type LATEST,
- so that you always read the most recent data in the shard.
-
- **Note:** Each shard iterator expires five minutes after it is
- returned to the requester.
+ calls to PutRecord, PutRecords, GetRecords, or DescribeStream.
+ You can specify the shard iterator type `TRIM_HORIZON` in the
+ request to cause `ShardIterator` to point to the last
+ untrimmed record in the shard in the system, which is the
+ oldest data record in the shard. Or you can point to just
+ after the most recent record in the shard, by using the shard
+ iterator type `LATEST`, so that you always read the most
+ recent data in the shard.
When you repeatedly read from an Amazon Kinesis stream use a
GetShardIterator request to get the first shard iterator to
@@ -333,18 +372,16 @@ class KinesisConnection(AWSQueryConnection):
`NextShardIterator`, which you use in the `ShardIterator`
parameter of the next `GetRecords` request.
- If a `GetShardIterator` request is made too often, you will
- receive a `ProvisionedThroughputExceededException`. For more
- information about throughput limits, see the `Amazon Kinesis
- Developer Guide`_.
+ If a `GetShardIterator` request is made too often, you receive
+ a `ProvisionedThroughputExceededException`. For more
+ information about throughput limits, see GetRecords.
- `GetShardIterator` can return `null` for its `ShardIterator`
- to indicate that the shard has been closed and that the
- requested iterator will return no more data. A shard can be
- closed by a SplitShard or MergeShards operation.
+ If the shard is closed, the iterator can't return more data,
+ and `GetShardIterator` returns `null` for its `ShardIterator`.
+ A shard can be closed using SplitShard or MergeShards.
`GetShardIterator` has a limit of 5 transactions per second
- per account per shard.
+ per account per open shard.
:type stream_name: string
:param stream_name: The name of the stream.
@@ -386,10 +423,7 @@ class KinesisConnection(AWSQueryConnection):
def list_streams(self, limit=None, exclusive_start_stream_name=None):
"""
- This operation returns an array of the names of all the
- streams that are associated with the AWS account making the
- `ListStreams` request. A given AWS account can have many
- streams active at one time.
+ Lists your streams.
The number of streams may be too large to return from a single
call to `ListStreams`. You can limit the number of returned
@@ -426,46 +460,74 @@ class KinesisConnection(AWSQueryConnection):
return self.make_request(action='ListStreams',
body=json.dumps(params))
+ def list_tags_for_stream(self, stream_name, exclusive_start_tag_key=None,
+ limit=None):
+ """
+ Lists the tags for the specified Amazon Kinesis stream.
+
+ :type stream_name: string
+ :param stream_name: The name of the stream.
+
+ :type exclusive_start_tag_key: string
+ :param exclusive_start_tag_key: The key to use as the starting point
+ for the list of tags. If this parameter is set, `ListTagsForStream`
+ gets all tags that occur after `ExclusiveStartTagKey`.
+
+ :type limit: integer
+ :param limit: The number of tags to return. If this number is less than
+ the total number of tags associated with the stream, `HasMoreTags`
+ is set to `True`. To list additional tags, set
+ `ExclusiveStartTagKey` to the last key in the response.
+
+ """
+ params = {'StreamName': stream_name, }
+ if exclusive_start_tag_key is not None:
+ params['ExclusiveStartTagKey'] = exclusive_start_tag_key
+ if limit is not None:
+ params['Limit'] = limit
+ return self.make_request(action='ListTagsForStream',
+ body=json.dumps(params))
+
def merge_shards(self, stream_name, shard_to_merge,
adjacent_shard_to_merge):
"""
- This operation merges two adjacent shards in a stream and
- combines them into a single shard to reduce the stream's
- capacity to ingest and transport data. Two shards are
- considered adjacent if the union of the hash key ranges for
- the two shards form a contiguous set with no gaps. For
- example, if you have two shards, one with a hash key range of
- 276...381 and the other with a hash key range of 382...454,
- then you could merge these two shards into a single shard that
- would have a hash key range of 276...454. After the merge, the
- single child shard receives data for all hash key values
- covered by the two parent shards.
+ Merges two adjacent shards in a stream and combines them into
+ a single shard to reduce the stream's capacity to ingest and
+ transport data. Two shards are considered adjacent if the
+ union of the hash key ranges for the two shards form a
+ contiguous set with no gaps. For example, if you have two
+ shards, one with a hash key range of 276...381 and the other
+ with a hash key range of 382...454, then you could merge these
+ two shards into a single shard that would have a hash key
+ range of 276...454. After the merge, the single child shard
+ receives data for all hash key values covered by the two
+ parent shards.
`MergeShards` is called when there is a need to reduce the
overall capacity of a stream because of excess capacity that
- is not being used. The operation requires that you specify the
- shard to be merged and the adjacent shard for a given stream.
- For more information about merging shards, see the `Amazon
- Kinesis Developer Guide`_.
-
- If the stream is in the ACTIVE state, you can call
- `MergeShards`. If a stream is in CREATING or UPDATING or
- DELETING states, then Amazon Kinesis returns a
+ is not being used. You must specify the shard to be merged and
+ the adjacent shard for a stream. For more information about
+ merging shards, see `Merge Two Shards`_ in the Amazon Kinesis
+ Developer Guide .
+
+ If the stream is in the `ACTIVE` state, you can call
+ `MergeShards`. If a stream is in the `CREATING`, `UPDATING`,
+ or `DELETING` state, `MergeShards` returns a
`ResourceInUseException`. If the specified stream does not
- exist, Amazon Kinesis returns a `ResourceNotFoundException`.
+ exist, `MergeShards` returns a `ResourceNotFoundException`.
- You can use the DescribeStream operation to check the state of
- the stream, which is returned in `StreamStatus`.
+ You can use DescribeStream to check the state of the stream,
+ which is returned in `StreamStatus`.
`MergeShards` is an asynchronous operation. Upon receiving a
`MergeShards` request, Amazon Kinesis immediately returns a
- response and sets the `StreamStatus` to UPDATING. After the
+ response and sets the `StreamStatus` to `UPDATING`. After the
operation is completed, Amazon Kinesis sets the `StreamStatus`
- to ACTIVE. Read and write operations continue to work while
- the stream is in the UPDATING state.
+ to `ACTIVE`. Read and write operations continue to work while
+ the stream is in the `UPDATING` state.
- You use the DescribeStream operation to determine the shard
- IDs that are specified in the `MergeShards` request.
+ You use DescribeStream to determine the shard IDs that are
+ specified in the `MergeShards` request.
If you try to operate on too many streams in parallel using
CreateStream, DeleteStream, `MergeShards` or SplitShard, you
@@ -591,66 +653,171 @@ class KinesisConnection(AWSQueryConnection):
if sequence_number_for_ordering is not None:
params['SequenceNumberForOrdering'] = sequence_number_for_ordering
if b64_encode:
- params['Data'] = base64.b64encode(
- params['Data'].encode('utf-8')).decode('utf-8')
+ if not isinstance(params['Data'], six.binary_type):
+ params['Data'] = params['Data'].encode('utf-8')
+ params['Data'] = base64.b64encode(params['Data']).decode('utf-8')
return self.make_request(action='PutRecord',
body=json.dumps(params))
+ def put_records(self, records, stream_name, b64_encode=True):
+ """
+ Puts (writes) multiple data records from a producer into an
+ Amazon Kinesis stream in a single call (also referred to as a
+ `PutRecords` request). Use this operation to send data from a
+ data producer into the Amazon Kinesis stream for real-time
+ ingestion and processing. Each shard can support up to 1000
+ records written per second, up to a maximum total of 1 MB data
+ written per second.
+
+ You must specify the name of the stream that captures, stores,
+ and transports the data; and an array of request `Records`,
+ with each record in the array requiring a partition key and
+ data blob.
+
+ The data blob can be any type of data; for example, a segment
+ from a log file, geographic/location data, website clickstream
+ data, and so on.
+
+ The partition key is used by Amazon Kinesis as input to a hash
+ function that maps the partition key and associated data to a
+ specific shard. An MD5 hash function is used to map partition
+ keys to 128-bit integer values and to map associated data
+ records to shards. As a result of this hashing mechanism, all
+ data records with the same partition key map to the same shard
+ within the stream. For more information, see `Partition Key`_
+ in the Amazon Kinesis Developer Guide .
+
+ Each record in the `Records` array may include an optional
+ parameter, `ExplicitHashKey`, which overrides the partition
+ key to shard mapping. This parameter allows a data producer to
+ determine explicitly the shard where the record is stored. For
+ more information, see `Adding Multiple Records with
+ PutRecords`_ in the Amazon Kinesis Developer Guide .
+
+ The `PutRecords` response includes an array of response
+ `Records`. Each record in the response array directly
+ correlates with a record in the request array using natural
+ ordering, from the top to the bottom of the request and
+ response. The response `Records` array always includes the
+ same number of records as the request array.
+
+ The response `Records` array includes both successfully and
+ unsuccessfully processed records. Amazon Kinesis attempts to
+ process all records in each `PutRecords` request. A single
+ record failure does not stop the processing of subsequent
+ records.
+
+ A successfully-processed record includes `ShardId` and
+ `SequenceNumber` values. The `ShardId` parameter identifies
+ the shard in the stream where the record is stored. The
+ `SequenceNumber` parameter is an identifier assigned to the
+ put record, unique to all records in the stream.
+
+ An unsuccessfully-processed record includes `ErrorCode` and
+ `ErrorMessage` values. `ErrorCode` reflects the type of error
+ and can be one of the following values:
+ `ProvisionedThroughputExceededException` or `InternalFailure`.
+ `ErrorMessage` provides more detailed information about the
+ `ProvisionedThroughputExceededException` exception including
+ the account ID, stream name, and shard ID of the record that
+ was throttled.
+
+ Data records are accessible for only 24 hours from the time
+ that they are added to an Amazon Kinesis stream.
+
+ :type records: list
+ :param records: The records associated with the request.
+
+ :type stream_name: string
+ :param stream_name: The stream name associated with the request.
+
+ :type b64_encode: boolean
+ :param b64_encode: Whether to Base64 encode `data`. Can be set to
+ ``False`` if `data` is already encoded to prevent double encoding.
+
+ """
+ params = {'Records': records, 'StreamName': stream_name, }
+ if b64_encode:
+ for i in range(len(params['Records'])):
+ data = params['Records'][i]['Data']
+ if not isinstance(data, six.binary_type):
+ data = data.encode('utf-8')
+ params['Records'][i]['Data'] = base64.b64encode(
+ data).decode('utf-8')
+ return self.make_request(action='PutRecords',
+ body=json.dumps(params))
+
+ def remove_tags_from_stream(self, stream_name, tag_keys):
+ """
+ Deletes tags from the specified Amazon Kinesis stream.
+
+ If you specify a tag that does not exist, it is ignored.
+
+ :type stream_name: string
+ :param stream_name: The name of the stream.
+
+ :type tag_keys: list
+ :param tag_keys: A list of tag keys. Each corresponding tag is removed
+ from the stream.
+
+ """
+ params = {'StreamName': stream_name, 'TagKeys': tag_keys, }
+ return self.make_request(action='RemoveTagsFromStream',
+ body=json.dumps(params))
+
def split_shard(self, stream_name, shard_to_split, new_starting_hash_key):
"""
- This operation splits a shard into two new shards in the
- stream, to increase the stream's capacity to ingest and
- transport data. `SplitShard` is called when there is a need to
- increase the overall capacity of stream because of an expected
- increase in the volume of data records being ingested.
+ Splits a shard into two new shards in the stream, to increase
+ the stream's capacity to ingest and transport data.
+ `SplitShard` is called when there is a need to increase the
+ overall capacity of stream because of an expected increase in
+ the volume of data records being ingested.
- `SplitShard` can also be used when a given shard appears to be
+ You can also use `SplitShard` when a shard appears to be
approaching its maximum utilization, for example, when the set
of producers sending data into the specific shard are suddenly
sending more than previously anticipated. You can also call
- the `SplitShard` operation to increase stream capacity, so
- that more Amazon Kinesis applications can simultaneously read
- data from the stream for real-time processing.
-
- The `SplitShard` operation requires that you specify the shard
- to be split and the new hash key, which is the position in the
- shard where the shard gets split in two. In many cases, the
- new hash key might simply be the average of the beginning and
- ending hash key, but it can be any hash key value in the range
- being mapped into the shard. For more information about
- splitting shards, see the `Amazon Kinesis Developer Guide`_.
-
- You can use the DescribeStream operation to determine the
- shard ID and hash key values for the `ShardToSplit` and
- `NewStartingHashKey` parameters that are specified in the
- `SplitShard` request.
+ `SplitShard` to increase stream capacity, so that more Amazon
+ Kinesis applications can simultaneously read data from the
+ stream for real-time processing.
+
+ You must specify the shard to be split and the new hash key,
+ which is the position in the shard where the shard gets split
+ in two. In many cases, the new hash key might simply be the
+ average of the beginning and ending hash key, but it can be
+ any hash key value in the range being mapped into the shard.
+ For more information about splitting shards, see `Split a
+ Shard`_ in the Amazon Kinesis Developer Guide .
+
+ You can use DescribeStream to determine the shard ID and hash
+ key values for the `ShardToSplit` and `NewStartingHashKey`
+ parameters that are specified in the `SplitShard` request.
`SplitShard` is an asynchronous operation. Upon receiving a
`SplitShard` request, Amazon Kinesis immediately returns a
- response and sets the stream status to UPDATING. After the
+ response and sets the stream status to `UPDATING`. After the
operation is completed, Amazon Kinesis sets the stream status
- to ACTIVE. Read and write operations continue to work while
- the stream is in the UPDATING state.
+ to `ACTIVE`. Read and write operations continue to work while
+ the stream is in the `UPDATING` state.
You can use `DescribeStream` to check the status of the
stream, which is returned in `StreamStatus`. If the stream is
- in the ACTIVE state, you can call `SplitShard`. If a stream is
- in CREATING or UPDATING or DELETING states, then Amazon
- Kinesis returns a `ResourceInUseException`.
-
- If the specified stream does not exist, Amazon Kinesis returns
- a `ResourceNotFoundException`. If you try to create more
- shards than are authorized for your account, you receive a
- `LimitExceededException`.
-
- **Note:** The default limit for an AWS account is two shards
- per stream. If you need to create a stream with more than two
- shards, contact AWS Support to increase the limit on your
- account.
+ in the `ACTIVE` state, you can call `SplitShard`. If a stream
+ is in `CREATING` or `UPDATING` or `DELETING` states,
+ `DescribeStream` returns a `ResourceInUseException`.
+
+ If the specified stream does not exist, `DescribeStream`
+ returns a `ResourceNotFoundException`. If you try to create
+ more shards than are authorized for your account, you receive
+ a `LimitExceededException`.
+
+ The default limit for an AWS account is 10 shards per stream.
+ If you need to create a stream with more than 10 shards,
+ `contact AWS Support`_ to increase the limit on your account.
If you try to operate on too many streams in parallel using
CreateStream, DeleteStream, MergeShards or SplitShard, you
- will receive a `LimitExceededException`.
+ receive a `LimitExceededException`.
`SplitShard` has limit of 5 transactions per second per
account.
diff --git a/boto/kms/__init__.py b/boto/kms/__init__.py
new file mode 100644
index 00000000..f6ded152
--- /dev/null
+++ b/boto/kms/__init__.py
@@ -0,0 +1,41 @@
+# Copyright (c) 2014 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.regioninfo import RegionInfo, get_regions
+
+
+def regions():
+ """
+ Get all available regions for the AWS Key Management Service.
+
+ :rtype: list
+ :return: A list of :class:`boto.regioninfo.RegionInfo`
+ """
+ from boto.kms.layer1 import KMSConnection
+ return get_regions('kms', connection_cls=KMSConnection)
+
+
+def connect_to_region(region_name, **kw_params):
+ for region in regions():
+ if region.name == region_name:
+ return region.connect(**kw_params)
+ return None
diff --git a/boto/kms/exceptions.py b/boto/kms/exceptions.py
new file mode 100644
index 00000000..8b422560
--- /dev/null
+++ b/boto/kms/exceptions.py
@@ -0,0 +1,72 @@
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.exception import BotoServerError
+
+
+class InvalidGrantTokenException(BotoServerError):
+ pass
+
+
+class DisabledException(BotoServerError):
+ pass
+
+
+class LimitExceededException(BotoServerError):
+ pass
+
+
+class DependencyTimeoutException(BotoServerError):
+ pass
+
+
+class InvalidMarkerException(BotoServerError):
+ pass
+
+
+class AlreadyExistsException(BotoServerError):
+ pass
+
+
+class InvalidCiphertextException(BotoServerError):
+ pass
+
+
+class KeyUnavailableException(BotoServerError):
+ pass
+
+
+class InvalidAliasNameException(BotoServerError):
+ pass
+
+
+class UnsupportedOperationException(BotoServerError):
+ pass
+
+
+class InvalidArnException(BotoServerError):
+ pass
+
+
+class KMSInternalException(BotoServerError):
+ pass
+
+
+class InvalidKeyUsageException(BotoServerError):
+ pass
+
+
+class MalformedPolicyDocumentException(BotoServerError):
+ pass
+
+
+class NotFoundException(BotoServerError):
+ pass
diff --git a/boto/kms/layer1.py b/boto/kms/layer1.py
new file mode 100644
index 00000000..f44cd048
--- /dev/null
+++ b/boto/kms/layer1.py
@@ -0,0 +1,821 @@
+# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import boto
+from boto.compat import json
+from boto.connection import AWSQueryConnection
+from boto.regioninfo import RegionInfo
+from boto.exception import JSONResponseError
+from boto.kms import exceptions
+from boto.compat import six
+import base64
+
+
+class KMSConnection(AWSQueryConnection):
+ """
+ AWS Key Management Service
+ AWS Key Management Service (KMS) is an encryption and key
+ management web service. This guide describes the KMS actions that
+ you can call programmatically. For general information about KMS,
+ see the `AWS Key Management Service`_ page. For detailed usage
+ information, see the `AWS Key Management Service Developer Guide`_.
+ address here).
+
+ AWS provides SDKs that consist of libraries and sample code for
+ various programming languages and platforms (Java, Ruby, .Net,
+ iOS, Android, etc.). The SDKs provide a convenient way to create
+ programmatic access to KMS and AWS. For example, the SDKs take
+ care of tasks such as signing requests (see below), managing
+ errors, and retrying requests automatically. For more information
+ about the AWS SDKs, including how to download and install them,
+ see `Tools for Amazon Web Services`_.
+
+ We recommend that you use the AWS SDKs to make programmatic API
+ calls to KMS. However, you can also use the KMS Query API to make
+ direct calls to the KMS web service.
+
+ **Signing Requests**
+
+ Requests must be signed by using an access key ID and a secret
+ access key. We strongly recommend that you do not use your AWS
+ account access key ID and secret key for everyday work with KMS.
+ Instead, use the access key ID and secret access key for an IAM
+ user, or you can use the AWS Security Token Service to generate
+ temporary security credentials that you can use to sign requests.
+
+ All KMS operations require `Signature Version 4`_.
+
+ **Recording API Requests**
+
+ KMS supports AWS CloudTrail, a service that records AWS API calls
+ and related events for your AWS account and delivers them to an
+ Amazon S3 bucket that you specify. By using the information
+ collected by CloudTrail, you can determine what requests were made
+ to KMS, who made the request, when it was made, and so on. To
+ learn more about CloudTrail, including how to turn it on and find
+ your log files, see the `AWS CloudTrail User Guide`_
+
+ **Additional Resources**
+
+ For more information about credentials and request signing, see
+ the following:
+
+
+ + `AWS Security Credentials`_. This topic provides general
+ information about the types of credentials used for accessing AWS.
+ + `AWS Security Token Service`_. This guide describes how to
+ create and use temporary security credentials.
+ + `Signing AWS API Requests`_. This set of topics walks you
+ through the process of signing a request using an access key ID
+ and a secret access key.
+ """
+ APIVersion = "2014-11-01"
+ DefaultRegionName = "us-east-1"
+ DefaultRegionEndpoint = "kms.us-east-1.amazonaws.com"
+ ServiceName = "KMS"
+ TargetPrefix = "TrentService"
+ ResponseError = JSONResponseError
+
+ _faults = {
+ "InvalidGrantTokenException": exceptions.InvalidGrantTokenException,
+ "DisabledException": exceptions.DisabledException,
+ "LimitExceededException": exceptions.LimitExceededException,
+ "DependencyTimeoutException": exceptions.DependencyTimeoutException,
+ "InvalidMarkerException": exceptions.InvalidMarkerException,
+ "AlreadyExistsException": exceptions.AlreadyExistsException,
+ "InvalidCiphertextException": exceptions.InvalidCiphertextException,
+ "KeyUnavailableException": exceptions.KeyUnavailableException,
+ "InvalidAliasNameException": exceptions.InvalidAliasNameException,
+ "UnsupportedOperationException": exceptions.UnsupportedOperationException,
+ "InvalidArnException": exceptions.InvalidArnException,
+ "KMSInternalException": exceptions.KMSInternalException,
+ "InvalidKeyUsageException": exceptions.InvalidKeyUsageException,
+ "MalformedPolicyDocumentException": exceptions.MalformedPolicyDocumentException,
+ "NotFoundException": exceptions.NotFoundException,
+ }
+
+
+ def __init__(self, **kwargs):
+ region = kwargs.pop('region', None)
+ if not region:
+ region = RegionInfo(self, self.DefaultRegionName,
+ self.DefaultRegionEndpoint)
+
+ if 'host' not in kwargs or kwargs['host'] is None:
+ kwargs['host'] = region.endpoint
+
+ super(KMSConnection, self).__init__(**kwargs)
+ self.region = region
+
+ def _required_auth_capability(self):
+ return ['hmac-v4']
+
+ def create_alias(self, alias_name, target_key_id):
+ """
+ Creates a display name for a customer master key. An alias can
+ be used to identify a key and should be unique. The console
+ enforces a one-to-one mapping between the alias and a key. An
+ alias name can contain only alphanumeric characters, forward
+ slashes (/), underscores (_), and dashes (-). An alias must
+ start with the word "alias" followed by a forward slash
+ (alias/). An alias that begins with "aws" after the forward
+ slash (alias/aws...) is reserved by Amazon Web Services (AWS).
+
+ :type alias_name: string
+ :param alias_name: String that contains the display name. Aliases that
+ begin with AWS are reserved.
+
+ :type target_key_id: string
+ :param target_key_id: An identifier of the key for which you are
+ creating the alias. This value cannot be another alias.
+
+ """
+ params = {
+ 'AliasName': alias_name,
+ 'TargetKeyId': target_key_id,
+ }
+ return self.make_request(action='CreateAlias',
+ body=json.dumps(params))
+
+ def create_grant(self, key_id, grantee_principal,
+ retiring_principal=None, operations=None,
+ constraints=None, grant_tokens=None):
+ """
+ Adds a grant to a key to specify who can access the key and
+ under what conditions. Grants are alternate permission
+ mechanisms to key policies. If absent, access to the key is
+ evaluated based on IAM policies attached to the user. By
+ default, grants do not expire. Grants can be listed, retired,
+ or revoked as indicated by the following APIs. Typically, when
+ you are finished using a grant, you retire it. When you want
+ to end a grant immediately, revoke it. For more information
+ about grants, see `Grants`_.
+
+ #. ListGrants
+ #. RetireGrant
+ #. RevokeGrant
+
+ :type key_id: string
+ :param key_id: A unique key identifier for a customer master key. This
+ value can be a globally unique identifier, an ARN, or an alias.
+
+ :type grantee_principal: string
+ :param grantee_principal: Principal given permission by the grant to
+ use the key identified by the `keyId` parameter.
+
+ :type retiring_principal: string
+ :param retiring_principal: Principal given permission to retire the
+ grant. For more information, see RetireGrant.
+
+ :type operations: list
+ :param operations: List of operations permitted by the grant. This can
+ be any combination of one or more of the following values:
+
+ #. Decrypt
+ #. Encrypt
+ #. GenerateDataKey
+ #. GenerateDataKeyWithoutPlaintext
+ #. ReEncryptFrom
+ #. ReEncryptTo
+ #. CreateGrant
+
+ :type constraints: dict
+ :param constraints: Specifies the conditions under which the actions
+ specified by the `Operations` parameter are allowed.
+
+ :type grant_tokens: list
+ :param grant_tokens: List of grant tokens.
+
+ """
+ params = {
+ 'KeyId': key_id,
+ 'GranteePrincipal': grantee_principal,
+ }
+ if retiring_principal is not None:
+ params['RetiringPrincipal'] = retiring_principal
+ if operations is not None:
+ params['Operations'] = operations
+ if constraints is not None:
+ params['Constraints'] = constraints
+ if grant_tokens is not None:
+ params['GrantTokens'] = grant_tokens
+ return self.make_request(action='CreateGrant',
+ body=json.dumps(params))
+
+ def create_key(self, policy=None, description=None, key_usage=None):
+ """
+ Creates a customer master key. Customer master keys can be
+ used to encrypt small amounts of data (less than 4K) directly,
+ but they are most commonly used to encrypt or envelope data
+ keys that are then used to encrypt customer data. For more
+ information about data keys, see GenerateDataKey and
+ GenerateDataKeyWithoutPlaintext.
+
+ :type policy: string
+ :param policy: Policy to be attached to the key. If omitted, a
+ default policy that delegates control back to the account is used.
+ The key is the root of trust.
+
+ :type description: string
+ :param description: Description of the key. We recommend that you
+ choose a description that helps your customer decide whether the
+ key is appropriate for a task.
+
+ :type key_usage: string
+ :param key_usage: Specifies the intended use of the key. Currently this
+ defaults to ENCRYPT/DECRYPT, and only symmetric encryption and
+ decryption are supported.
+
+ """
+ params = {}
+ if policy is not None:
+ params['Policy'] = policy
+ if description is not None:
+ params['Description'] = description
+ if key_usage is not None:
+ params['KeyUsage'] = key_usage
+ return self.make_request(action='CreateKey',
+ body=json.dumps(params))
+
+ def decrypt(self, ciphertext_blob, encryption_context=None,
+ grant_tokens=None):
+ """
+ Decrypts ciphertext. Ciphertext is plaintext that has been
+ previously encrypted by using the Encrypt function.
+
+ :type ciphertext_blob: blob
+ :param ciphertext_blob: Ciphertext including metadata.
+
+ :type encryption_context: map
+ :param encryption_context: The encryption context. If this was
+ specified in the Encrypt function, it must be specified here or the
+ decryption operation will fail. For more information, see
+ `Encryption Context`_.
+
+ :type grant_tokens: list
+ :param grant_tokens: A list of grant tokens that represent grants which
+ can be used to provide long term permissions to perform decryption.
+
+ """
+ if not isinstance(ciphertext_blob, six.binary_type):
+ raise TypeError(
+ "Value of argument ``ciphertext_blob`` "
+ "must be of type %s." % six.binary_type)
+ ciphertext_blob = base64.b64encode(ciphertext_blob)
+ params = {'CiphertextBlob': ciphertext_blob, }
+ if encryption_context is not None:
+ params['EncryptionContext'] = encryption_context
+ if grant_tokens is not None:
+ params['GrantTokens'] = grant_tokens
+ response = self.make_request(action='Decrypt',
+ body=json.dumps(params))
+ if response.get('Plaintext') is not None:
+ response['Plaintext'] = base64.b64decode(
+ response['Plaintext'].encode('utf-8'))
+ return response
+
+ def delete_alias(self, alias_name):
+ """
+ Deletes the specified alias.
+
+ :type alias_name: string
+ :param alias_name: The alias to be deleted.
+
+ """
+ params = {'AliasName': alias_name, }
+ return self.make_request(action='DeleteAlias',
+ body=json.dumps(params))
+
+ def describe_key(self, key_id):
+ """
+ Provides detailed information about the specified customer
+ master key.
+
+ :type key_id: string
+ :param key_id: Unique identifier of the customer master key to be
+ described. This can be an ARN, an alias, or a globally unique
+ identifier.
+
+ """
+ params = {'KeyId': key_id, }
+ return self.make_request(action='DescribeKey',
+ body=json.dumps(params))
+
+ def disable_key(self, key_id):
+ """
+ Marks a key as disabled, thereby preventing its use.
+
+ :type key_id: string
+ :param key_id: Unique identifier of the customer master key to be
+ disabled. This can be an ARN, an alias, or a globally unique
+ identifier.
+
+ """
+ params = {'KeyId': key_id, }
+ return self.make_request(action='DisableKey',
+ body=json.dumps(params))
+
+ def disable_key_rotation(self, key_id):
+ """
+ Disables rotation of the specified key.
+
+ :type key_id: string
+ :param key_id: Unique identifier of the customer master key for which
+ rotation is to be disabled. This can be an ARN, an alias, or a
+ globally unique identifier.
+
+ """
+ params = {'KeyId': key_id, }
+ return self.make_request(action='DisableKeyRotation',
+ body=json.dumps(params))
+
+ def enable_key(self, key_id):
+ """
+ Marks a key as enabled, thereby permitting its use. You can
+ have up to 25 enabled keys at one time.
+
+ :type key_id: string
+ :param key_id: Unique identifier of the customer master key to be
+ enabled. This can be an ARN, an alias, or a globally unique
+ identifier.
+
+ """
+ params = {'KeyId': key_id, }
+ return self.make_request(action='EnableKey',
+ body=json.dumps(params))
+
+ def enable_key_rotation(self, key_id):
+ """
+ Enables rotation of the specified customer master key.
+
+ :type key_id: string
+ :param key_id: Unique identifier of the customer master key for which
+ rotation is to be enabled. This can be an ARN, an alias, or a
+ globally unique identifier.
+
+ """
+ params = {'KeyId': key_id, }
+ return self.make_request(action='EnableKeyRotation',
+ body=json.dumps(params))
+
+ def encrypt(self, key_id, plaintext, encryption_context=None,
+ grant_tokens=None):
+ """
+ Encrypts plaintext into ciphertext by using a customer master
+ key.
+
+ :type key_id: string
+ :param key_id: Unique identifier of the customer master. This can be an
+ ARN, an alias, or the Key ID.
+
+ :type plaintext: blob
+ :param plaintext: Data to be encrypted.
+
+ :type encryption_context: map
+ :param encryption_context: Name:value pair that specifies the
+ encryption context to be used for authenticated encryption. For
+ more information, see `Authenticated Encryption`_.
+
+ :type grant_tokens: list
+ :param grant_tokens: A list of grant tokens that represent grants which
+ can be used to provide long term permissions to perform encryption.
+
+ """
+ if not isinstance(plaintext, six.binary_type):
+ raise TypeError(
+ "Value of argument ``plaintext`` "
+ "must be of type %s." % six.binary_type)
+ plaintext = base64.b64encode(plaintext)
+ params = {'KeyId': key_id, 'Plaintext': plaintext, }
+ if encryption_context is not None:
+ params['EncryptionContext'] = encryption_context
+ if grant_tokens is not None:
+ params['GrantTokens'] = grant_tokens
+ response = self.make_request(action='Encrypt',
+ body=json.dumps(params))
+ if response.get('CiphertextBlob') is not None:
+ response['CiphertextBlob'] = base64.b64decode(
+ response['CiphertextBlob'].encode('utf-8'))
+ return response
+
+ def generate_data_key(self, key_id, encryption_context=None,
+ number_of_bytes=None, key_spec=None,
+ grant_tokens=None):
+ """
+ Generates a secure data key. Data keys are used to encrypt and
+ decrypt data. They are wrapped by customer master keys.
+
+ :type key_id: string
+ :param key_id: Unique identifier of the key. This can be an ARN, an
+ alias, or a globally unique identifier.
+
+ :type encryption_context: map
+ :param encryption_context: Name/value pair that contains additional
+ data to be authenticated during the encryption and decryption
+ processes that use the key. This value is logged by AWS CloudTrail
+ to provide context around the data encrypted by the key.
+
+ :type number_of_bytes: integer
+ :param number_of_bytes: Integer that contains the number of bytes to
+ generate. Common values are 128, 256, 512, 1024 and so on. 1024 is
+ the current limit.
+
+ :type key_spec: string
+ :param key_spec: Value that identifies the encryption algorithm and key
+ size to generate a data key for. Currently this can be AES_128 or
+ AES_256.
+
+ :type grant_tokens: list
+ :param grant_tokens: A list of grant tokens that represent grants which
+ can be used to provide long term permissions to generate a key.
+
+ """
+ params = {'KeyId': key_id, }
+ if encryption_context is not None:
+ params['EncryptionContext'] = encryption_context
+ if number_of_bytes is not None:
+ params['NumberOfBytes'] = number_of_bytes
+ if key_spec is not None:
+ params['KeySpec'] = key_spec
+ if grant_tokens is not None:
+ params['GrantTokens'] = grant_tokens
+ response = self.make_request(action='GenerateDataKey',
+ body=json.dumps(params))
+ if response.get('CiphertextBlob') is not None:
+ response['CiphertextBlob'] = base64.b64decode(
+ response['CiphertextBlob'].encode('utf-8'))
+ if response.get('Plaintext') is not None:
+ response['Plaintext'] = base64.b64decode(
+ response['Plaintext'].encode('utf-8'))
+ return response
+
+ def generate_data_key_without_plaintext(self, key_id,
+ encryption_context=None,
+ key_spec=None,
+ number_of_bytes=None,
+ grant_tokens=None):
+ """
+ Returns a key wrapped by a customer master key without the
+ plaintext copy of that key. To retrieve the plaintext, see
+ GenerateDataKey.
+
+ :type key_id: string
+ :param key_id: Unique identifier of the key. This can be an ARN, an
+ alias, or a globally unique identifier.
+
+ :type encryption_context: map
+ :param encryption_context: Name:value pair that contains additional
+ data to be authenticated during the encryption and decryption
+ processes.
+
+ :type key_spec: string
+ :param key_spec: Value that identifies the encryption algorithm and key
+ size. Currently this can be AES_128 or AES_256.
+
+ :type number_of_bytes: integer
+ :param number_of_bytes: Integer that contains the number of bytes to
+ generate. Common values are 128, 256, 512, 1024 and so on.
+
+ :type grant_tokens: list
+ :param grant_tokens: A list of grant tokens that represent grants which
+ can be used to provide long term permissions to generate a key.
+
+ """
+ params = {'KeyId': key_id, }
+ if encryption_context is not None:
+ params['EncryptionContext'] = encryption_context
+ if key_spec is not None:
+ params['KeySpec'] = key_spec
+ if number_of_bytes is not None:
+ params['NumberOfBytes'] = number_of_bytes
+ if grant_tokens is not None:
+ params['GrantTokens'] = grant_tokens
+ response = self.make_request(action='GenerateDataKeyWithoutPlaintext',
+ body=json.dumps(params))
+ if response.get('CiphertextBlob') is not None:
+ response['CiphertextBlob'] = base64.b64decode(
+ response['CiphertextBlob'].encode('utf-8'))
+ return response
+
+ def generate_random(self, number_of_bytes=None):
+ """
+ Generates an unpredictable byte string.
+
+ :type number_of_bytes: integer
+ :param number_of_bytes: Integer that contains the number of bytes to
+ generate. Common values are 128, 256, 512, 1024 and so on. The
+ current limit is 1024 bytes.
+
+ """
+ params = {}
+ if number_of_bytes is not None:
+ params['NumberOfBytes'] = number_of_bytes
+ response = self.make_request(action='GenerateRandom',
+ body=json.dumps(params))
+ if response.get('Plaintext') is not None:
+ response['Plaintext'] = base64.b64decode(
+ response['Plaintext'].encode('utf-8'))
+ return response
+
+ def get_key_policy(self, key_id, policy_name):
+ """
+ Retrieves a policy attached to the specified key.
+
+ :type key_id: string
+ :param key_id: Unique identifier of the key. This can be an ARN, an
+ alias, or a globally unique identifier.
+
+ :type policy_name: string
+ :param policy_name: String that contains the name of the policy.
+ Currently, this must be "default". Policy names can be discovered
+ by calling ListKeyPolicies.
+
+ """
+ params = {'KeyId': key_id, 'PolicyName': policy_name, }
+ return self.make_request(action='GetKeyPolicy',
+ body=json.dumps(params))
+
+ def get_key_rotation_status(self, key_id):
+ """
+ Retrieves a Boolean value that indicates whether key rotation
+ is enabled for the specified key.
+
+ :type key_id: string
+ :param key_id: Unique identifier of the key. This can be an ARN, an
+ alias, or a globally unique identifier.
+
+ """
+ params = {'KeyId': key_id, }
+ return self.make_request(action='GetKeyRotationStatus',
+ body=json.dumps(params))
+
+ def list_aliases(self, limit=None, marker=None):
+ """
+ Lists all of the key aliases in the account.
+
+ :type limit: integer
+ :param limit: Specify this parameter when paginating results to
+ indicate the maximum number of aliases you want in each response.
+ If there are additional aliases beyond the maximum you specify, the
+ `Truncated` response element will be set to `true.`
+
+ :type marker: string
+ :param marker: Use this parameter when paginating results, and only in
+ a subsequent request after you've received a response where the
+ results are truncated. Set it to the value of the `NextMarker`
+ element in the response you just received.
+
+ """
+ params = {}
+ if limit is not None:
+ params['Limit'] = limit
+ if marker is not None:
+ params['Marker'] = marker
+ return self.make_request(action='ListAliases',
+ body=json.dumps(params))
+
+ def list_grants(self, key_id, limit=None, marker=None):
+ """
+ List the grants for a specified key.
+
+ :type key_id: string
+ :param key_id: Unique identifier of the key. This can be an ARN, an
+ alias, or a globally unique identifier.
+
+ :type limit: integer
+ :param limit: Specify this parameter only when paginating results to
+ indicate the maximum number of grants you want listed in the
+ response. If there are additional grants beyond the maximum you
+ specify, the `Truncated` response element will be set to `true.`
+
+ :type marker: string
+ :param marker: Use this parameter only when paginating results, and
+ only in a subsequent request after you've received a response where
+ the results are truncated. Set it to the value of the `NextMarker`
+ in the response you just received.
+
+ """
+ params = {'KeyId': key_id, }
+ if limit is not None:
+ params['Limit'] = limit
+ if marker is not None:
+ params['Marker'] = marker
+ return self.make_request(action='ListGrants',
+ body=json.dumps(params))
+
+ def list_key_policies(self, key_id, limit=None, marker=None):
+ """
+ Retrieves a list of policies attached to a key.
+
+ :type key_id: string
+ :param key_id: Unique identifier of the key. This can be an ARN, an
+ alias, or a globally unique identifier.
+
+ :type limit: integer
+ :param limit: Specify this parameter only when paginating results to
+ indicate the maximum number of policies you want listed in the
+ response. If there are additional policies beyond the maximum you
+ specify, the `Truncated` response element will be set to `true.`
+
+ :type marker: string
+ :param marker: Use this parameter only when paginating results, and
+ only in a subsequent request after you've received a response where
+ the results are truncated. Set it to the value of the `NextMarker`
+ in the response you just received.
+
+ """
+ params = {'KeyId': key_id, }
+ if limit is not None:
+ params['Limit'] = limit
+ if marker is not None:
+ params['Marker'] = marker
+ return self.make_request(action='ListKeyPolicies',
+ body=json.dumps(params))
+
+ def list_keys(self, limit=None, marker=None):
+ """
+ Lists the customer master keys.
+
+ :type limit: integer
+ :param limit: Specify this parameter only when paginating results to
+ indicate the maximum number of keys you want listed in the
+ response. If there are additional keys beyond the maximum you
+ specify, the `Truncated` response element will be set to `true.`
+
+ :type marker: string
+ :param marker: Use this parameter only when paginating results, and
+ only in a subsequent request after you've received a response where
+ the results are truncated. Set it to the value of the `NextMarker`
+ in the response you just received.
+
+ """
+ params = {}
+ if limit is not None:
+ params['Limit'] = limit
+ if marker is not None:
+ params['Marker'] = marker
+ return self.make_request(action='ListKeys',
+ body=json.dumps(params))
+
+ def put_key_policy(self, key_id, policy_name, policy):
+ """
+ Attaches a policy to the specified key.
+
+ :type key_id: string
+ :param key_id: Unique identifier of the key. This can be an ARN, an
+ alias, or a globally unique identifier.
+
+ :type policy_name: string
+ :param policy_name: Name of the policy to be attached. Currently, the
+ only supported name is "default".
+
+ :type policy: string
+ :param policy: The policy, in JSON format, to be attached to the key.
+
+ """
+ params = {
+ 'KeyId': key_id,
+ 'PolicyName': policy_name,
+ 'Policy': policy,
+ }
+ return self.make_request(action='PutKeyPolicy',
+ body=json.dumps(params))
+
+ def re_encrypt(self, ciphertext_blob, destination_key_id,
+ source_encryption_context=None,
+ destination_encryption_context=None, grant_tokens=None):
+ """
+ Encrypts data on the server side with a new customer master
+ key without exposing the plaintext of the data on the client
+ side. The data is first decrypted and then encrypted. This
+ operation can also be used to change the encryption context of
+ a ciphertext.
+
+ :type ciphertext_blob: blob
+ :param ciphertext_blob: Ciphertext of the data to re-encrypt.
+
+ :type source_encryption_context: map
+ :param source_encryption_context: Encryption context used to encrypt
+ and decrypt the data specified in the `CiphertextBlob` parameter.
+
+ :type destination_key_id: string
+ :param destination_key_id: Key identifier of the key used to re-encrypt
+ the data.
+
+ :type destination_encryption_context: map
+ :param destination_encryption_context: Encryption context to be used
+ when the data is re-encrypted.
+
+ :type grant_tokens: list
+ :param grant_tokens: Grant tokens that identify the grants that have
+ permissions for the encryption and decryption process.
+
+ """
+ if not isinstance(ciphertext_blob, six.binary_type):
+ raise TypeError(
+ "Value of argument ``ciphertext_blob`` "
+ "must be of type %s." % six.binary_type)
+ ciphertext_blob = base64.b64encode(ciphertext_blob)
+ params = {
+ 'CiphertextBlob': ciphertext_blob,
+ 'DestinationKeyId': destination_key_id,
+ }
+ if source_encryption_context is not None:
+ params['SourceEncryptionContext'] = source_encryption_context
+ if destination_encryption_context is not None:
+ params['DestinationEncryptionContext'] = destination_encryption_context
+ if grant_tokens is not None:
+ params['GrantTokens'] = grant_tokens
+ response = self.make_request(action='ReEncrypt',
+ body=json.dumps(params))
+ if response.get('CiphertextBlob') is not None:
+ response['CiphertextBlob'] = base64.b64decode(
+ response['CiphertextBlob'].encode('utf-8'))
+ return response
+
+ def retire_grant(self, grant_token):
+ """
+ Retires a grant. You can retire a grant when you're done using
+ it to clean up. You should revoke a grant when you intend to
+ actively deny operations that depend on it.
+
+ :type grant_token: string
+ :param grant_token: Token that identifies the grant to be retired.
+
+ """
+ params = {'GrantToken': grant_token, }
+ return self.make_request(action='RetireGrant',
+ body=json.dumps(params))
+
+ def revoke_grant(self, key_id, grant_id):
+ """
+ Revokes a grant. You can revoke a grant to actively deny
+ operations that depend on it.
+
+ :type key_id: string
+ :param key_id: Unique identifier of the key associated with the grant.
+
+ :type grant_id: string
+ :param grant_id: Identifier of the grant to be revoked.
+
+ """
+ params = {'KeyId': key_id, 'GrantId': grant_id, }
+ return self.make_request(action='RevokeGrant',
+ body=json.dumps(params))
+
+ def update_key_description(self, key_id, description):
+ """
+ Updates the description of the specified key.
+
+ :type key_id: string
+ :param key_id: Unique identifier of the key to be updated. This can
+ be an ARN, an alias, or a globally unique identifier.
+ :type description: string
+ :param description: New description to be associated with the key.
+
+ """
+ params = {'KeyId': key_id, 'Description': description, }
+ return self.make_request(action='UpdateKeyDescription',
+ body=json.dumps(params))
+
+ def make_request(self, action, body):
+ headers = {
+ 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
+ 'Host': self.region.endpoint,
+ 'Content-Type': 'application/x-amz-json-1.1',
+ 'Content-Length': str(len(body)),
+ }
+ http_request = self.build_base_http_request(
+ method='POST', path='/', auth_path='/', params={},
+ headers=headers, data=body)
+ response = self._mexe(http_request, sender=None,
+ override_num_retries=10)
+ response_body = response.read().decode('utf-8')
+ boto.log.debug(response_body)
+ if response.status == 200:
+ if response_body:
+ return json.loads(response_body)
+ else:
+ json_body = json.loads(response_body)
+ fault_name = json_body.get('__type', None)
+ exception_class = self._faults.get(fault_name, self.ResponseError)
+ raise exception_class(response.status, response.reason,
+ body=json_body)
+
diff --git a/boto/opsworks/layer1.py b/boto/opsworks/layer1.py
index 6610f32a..8894d1c3 100644
--- a/boto/opsworks/layer1.py
+++ b/boto/opsworks/layer1.py
@@ -65,12 +65,18 @@ class OpsWorksConnection(AWSQueryConnection):
endpoint. You can then use the API to direct AWS OpsWorks to
create stacks in any AWS Region.
- **Chef Version**
+ **Chef Versions**
When you call CreateStack, CloneStack, or UpdateStack we recommend
you use the `ConfigurationManager` parameter to specify the Chef
- version, 0.9, 11.4, or 11.10. The default value is currently 11.4.
- For more information, see `Chef Versions`_.
+ version, 0.9, 11.4, or 11.10. The default value is currently
+ 11.10. For more information, see `Chef Versions`_.
+
+ You can still specify Chef 0.9 for your stack, but new features
+ are not available for Chef 0.9 stacks, and support is scheduled to
+ end on July 24, 2014. We do not recommend using Chef 0.9 for new
+ stacks, and we recommend migrating your existing Chef 0.9 stacks
+ to Chef 11.10 as soon as possible.
"""
APIVersion = "2013-02-18"
DefaultRegionName = "us-east-1"
@@ -100,6 +106,33 @@ class OpsWorksConnection(AWSQueryConnection):
def _required_auth_capability(self):
return ['hmac-v4']
+ def assign_instance(self, instance_id, layer_ids):
+ """
+ Assign a registered instance to a custom layer. You cannot use
+ this action with instances that were created with AWS
+ OpsWorks.
+
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
+
+ :type instance_id: string
+ :param instance_id: The instance ID.
+
+ :type layer_ids: list
+ :param layer_ids: The layer ID, which must correspond to a custom
+ layer. You cannot assign a registered instance to a built-in layer.
+
+ """
+ params = {
+ 'InstanceId': instance_id,
+ 'LayerIds': layer_ids,
+ }
+ return self.make_request(action='AssignInstance',
+ body=json.dumps(params))
+
def assign_volume(self, volume_id, instance_id=None):
"""
Assigns one of the stack's registered Amazon EBS volumes to a
@@ -159,6 +192,13 @@ class OpsWorksConnection(AWSQueryConnection):
specified layer. For more information, see `Elastic Load
Balancing`_.
+
+ You must create the Elastic Load Balancing instance
+ separately, by using the Elastic Load Balancing console, API,
+ or CLI. For more information, see ` Elastic Load Balancing
+ Developer Guide`_.
+
+
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
@@ -214,7 +254,7 @@ class OpsWorksConnection(AWSQueryConnection):
:type vpc_id: string
:param vpc_id: The ID of the VPC that the cloned stack is to be
launched into. It must be in the specified region. All instances
- will be launched into this VPC, and you cannot change the ID later.
+ are launched into this VPC, and you cannot change the ID later.
+ If your account supports EC2 Classic, the default value is no VPC.
+ If your account does not support EC2 Classic, the default value is
@@ -246,14 +286,20 @@ class OpsWorksConnection(AWSQueryConnection):
pairs to be added to the cloned stack.
:type service_role_arn: string
- :param service_role_arn: The stack AWS Identity and Access Management
- (IAM) role, which allows AWS OpsWorks to work with AWS resources on
- your behalf. You must set this parameter to the Amazon Resource
- Name (ARN) for an existing IAM role. If you create a stack by using
- the AWS OpsWorks console, it creates the role for you. You can
- obtain an existing stack's IAM ARN programmatically by calling
- DescribePermissions. For more information about IAM ARNs, see
- `Using Identifiers`_.
+ :param service_role_arn:
+ The stack AWS Identity and Access Management (IAM) role, which allows
+ AWS OpsWorks to work with AWS resources on your behalf. You must
+ set this parameter to the Amazon Resource Name (ARN) for an
+ existing IAM role. If you create a stack by using the AWS OpsWorks
+ console, it creates the role for you. You can obtain an existing
+ stack's IAM ARN programmatically by calling DescribePermissions.
+ For more information about IAM ARNs, see `Using Identifiers`_.
+
+
+ You must set this parameter to a valid service role ARN or the action
+ will fail; there is no default value. You can specify the source
+ stack's service role ARN, if you prefer, but you must do so
+ explicitly.
:type default_instance_profile_arn: string
:param default_instance_profile_arn: The ARN of an IAM profile that is
@@ -261,9 +307,16 @@ class OpsWorksConnection(AWSQueryConnection):
information about IAM ARNs, see `Using Identifiers`_.
:type default_os: string
- :param default_os: The cloned stack's default operating system, which
- must be set to `Amazon Linux` or `Ubuntu 12.04 LTS`. The default
- option is `Amazon Linux`.
+ :param default_os: The stack's operating system, which must be set to
+ one of the following.
+
+ + Standard operating systems: an Amazon Linux version such as `Amazon
+ Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+ + Custom AMIs: `Custom`. You specify the custom AMI you want to use
+ when you create instances.
+
+
+ The default option is the current Amazon Linux version.
:type hostname_theme: string
:param hostname_theme: The stack's host name theme, with spaces are
@@ -296,12 +349,13 @@ class OpsWorksConnection(AWSQueryConnection):
For more information, see the `VpcId` parameter description.
:type default_subnet_id: string
- :param default_subnet_id: The stack's default subnet ID. All instances
- will be launched into this subnet unless you specify otherwise when
- you create the instance. If you also specify a value for
- `DefaultAvailabilityZone`, the subnet must be in the same zone. For
- information on default values and when this parameter is required,
- see the `VpcId` parameter description.
+ :param default_subnet_id: The stack's default VPC subnet ID. This
+ parameter is required if you specify a value for the `VpcId`
+ parameter. All instances are launched into this subnet unless you
+ specify otherwise when you create the instance. If you also specify
+ a value for `DefaultAvailabilityZone`, the subnet must be in that
+ zone. For information on default values and when this parameter is
+ required, see the `VpcId` parameter description.
:type custom_json: string
:param custom_json: A string that contains user-defined, custom JSON.
@@ -424,7 +478,7 @@ class OpsWorksConnection(AWSQueryConnection):
def create_app(self, stack_id, name, type, shortname=None,
description=None, data_sources=None, app_source=None,
domains=None, enable_ssl=None, ssl_configuration=None,
- attributes=None):
+ attributes=None, environment=None):
"""
Creates an app for a specified stack. For more information,
see `Creating Apps`_.
@@ -474,6 +528,17 @@ class OpsWorksConnection(AWSQueryConnection):
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes.
+ :type environment: list
+ :param environment:
+ An array of `EnvironmentVariable` objects that specify environment
+ variables to be associated with the app. You can specify up to ten
+ environment variables. After you deploy the app, these variables
+ are defined on the associated app server instance.
+
+ This parameter is supported only by Chef 11.10 stacks. If you have
+ specified one or more environment variables, you cannot modify the
+ stack's Chef version.
+
"""
params = {'StackId': stack_id, 'Name': name, 'Type': type, }
if shortname is not None:
@@ -492,24 +557,16 @@ class OpsWorksConnection(AWSQueryConnection):
params['SslConfiguration'] = ssl_configuration
if attributes is not None:
params['Attributes'] = attributes
+ if environment is not None:
+ params['Environment'] = environment
return self.make_request(action='CreateApp',
body=json.dumps(params))
def create_deployment(self, stack_id, command, app_id=None,
instance_ids=None, comment=None, custom_json=None):
"""
- Deploys a stack or app.
-
-
- + App deployment generates a `deploy` event, which runs the
- associated recipes and passes them a JSON stack configuration
- object that includes information about the app.
- + Stack deployment runs the `deploy` recipes but does not
- raise an event.
-
-
- For more information, see `Deploying Apps`_ and `Run Stack
- Commands`_.
+ Runs deployment or stack commands. For more information, see
+ `Deploying Apps`_ and `Run Stack Commands`_.
**Required Permissions**: To use this action, an IAM user must
have a Deploy or Manage permissions level for the stack, or an
@@ -588,43 +645,36 @@ class OpsWorksConnection(AWSQueryConnection):
in the API Name column of the Available Instance Types table.
:type auto_scaling_type: string
- :param auto_scaling_type:
- The instance auto scaling type, which has three possible values:
-
-
- + **AlwaysRunning**: A 24/7 instance, which is not affected by auto
- scaling.
- + **TimeBasedAutoScaling**: A time-based auto scaling instance, which
- is started and stopped based on a specified schedule. To specify
- the schedule, call SetTimeBasedAutoScaling.
- + **LoadBasedAutoScaling**: A load-based auto scaling instance, which
- is started and stopped based on load metrics. To use load-based
- auto scaling, you must enable it for the instance layer and
- configure the thresholds by calling SetLoadBasedAutoScaling.
+ :param auto_scaling_type: For load-based or time-based instances, the
+ type.
:type hostname: string
:param hostname: The instance host name.
:type os: string
- :param os: The instance operating system, which must be set to one of
+ :param os: The instance's operating system, which must be set to one of
the following.
- + Standard operating systems: `Amazon Linux` or `Ubuntu 12.04 LTS`
+ + Standard operating systems: an Amazon Linux version such as `Amazon
+ Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+ Custom AMIs: `Custom`
- The default option is `Amazon Linux`. If you set this parameter to
- `Custom`, you must use the CreateInstance action's AmiId parameter
- to specify the custom AMI that you want to use. For more
- information on the standard operating systems, see `Operating
+ The default option is the current Amazon Linux version. If you set this
+ parameter to `Custom`, you must use the CreateInstance action's
+ AmiId parameter to specify the custom AMI that you want to use. For
+ more information on the standard operating systems, see `Operating
Systems`_For more information on how to use custom AMIs with
OpsWorks, see `Using Custom AMIs`_.
:type ami_id: string
- :param ami_id: A custom AMI ID to be used to create the instance. The
- AMI should be based on one of the standard AWS OpsWorks APIs:
- Amazon Linux or Ubuntu 12.04 LTS. For more information, see
- `Instances`_
+ :param ami_id:
+ A custom AMI ID to be used to create the instance. The AMI should be
+ based on one of the standard AWS OpsWorks AMIs: Amazon Linux,
+ Ubuntu 12.04 LTS, or Ubuntu 14.04 LTS. For more information, see
+ `Instances`_.
+
+ If you specify a custom AMI, you must set `Os` to `Custom`.
:type ssh_key_name: string
:param ssh_key_name: The instance SSH key name.
@@ -655,13 +705,17 @@ class OpsWorksConnection(AWSQueryConnection):
information, see `Storage for the Root Device`_.
:type install_updates_on_boot: boolean
- :param install_updates_on_boot: Whether to install operating system and
- package updates when the instance boots. The default value is
- `True`. To control when updates are installed, set this value to
- `False`. You must then update your instances manually by using
- CreateDeployment to run the `update_dependencies` stack command or
- manually running `yum` (Amazon Linux) or `apt-get` (Ubuntu) on the
- instances.
+ :param install_updates_on_boot:
+ Whether to install operating system and package updates when the
+ instance boots. The default value is `True`. To control when
+ updates are installed, set this value to `False`. You must then
+ update your instances manually by using CreateDeployment to run the
+ `update_dependencies` stack command or manually running `yum`
+ (Amazon Linux) or `apt-get` (Ubuntu) on the instances.
+
+
+ We strongly recommend using the default value of `True` to ensure that
+ your instances have the latest security updates.
:type ebs_optimized: boolean
:param ebs_optimized: Whether to create an Amazon EBS-optimized
@@ -707,11 +761,22 @@ class OpsWorksConnection(AWSQueryConnection):
auto_assign_elastic_ips=None,
auto_assign_public_ips=None, custom_recipes=None,
install_updates_on_boot=None,
- use_ebs_optimized_instances=None):
+ use_ebs_optimized_instances=None,
+ lifecycle_event_configuration=None):
"""
Creates a layer. For more information, see `How to Create a
Layer`_.
+
+ You should use **CreateLayer** for noncustom layer types such
+ as PHP App Server only if the stack does not have an existing
+ layer of that type. A stack can have at most one instance of
+ each noncustom layer; if you attempt to create a second
+ instance, **CreateLayer** fails. A stack can have an arbitrary
+ number of custom layers, so you can call **CreateLayer** as
+ many times as you like for that layer type.
+
+
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
@@ -722,22 +787,8 @@ class OpsWorksConnection(AWSQueryConnection):
:param stack_id: The layer stack ID.
:type type: string
- :param type:
- The layer type. A stack cannot have more than one built-in layer of the
- same type. It can have any number of custom layers. This parameter
- must be set to one of the following:
-
-
- + custom: A custom layer
- + db-master: A MySQL layer
- + java-app: A Java App Server layer
- + rails-app: A Rails App Server layer
- + lb: An HAProxy layer
- + memcached: A Memcached layer
- + monitoring-master: A Ganglia layer
- + nodejs-app: A Node.js App Server layer
- + php-app: A PHP App Server layer
- + web: A Static Web Server layer
+ :param type: The layer type. A stack cannot have more than one built-in
+ layer of the same type. It can have any number of custom layers.
:type name: string
:param name: The layer name, which is used by the console.
@@ -789,18 +840,28 @@ class OpsWorksConnection(AWSQueryConnection):
layer custom recipes.
:type install_updates_on_boot: boolean
- :param install_updates_on_boot: Whether to install operating system and
- package updates when the instance boots. The default value is
- `True`. To control when updates are installed, set this value to
- `False`. You must then update your instances manually by using
- CreateDeployment to run the `update_dependencies` stack command or
- manually running `yum` (Amazon Linux) or `apt-get` (Ubuntu) on the
- instances.
+ :param install_updates_on_boot:
+ Whether to install operating system and package updates when the
+ instance boots. The default value is `True`. To control when
+ updates are installed, set this value to `False`. You must then
+ update your instances manually by using CreateDeployment to run the
+ `update_dependencies` stack command or manually running `yum`
+ (Amazon Linux) or `apt-get` (Ubuntu) on the instances.
+
+
+ We strongly recommend using the default value of `True`, to ensure that
+ your instances have the latest security updates.
:type use_ebs_optimized_instances: boolean
:param use_ebs_optimized_instances: Whether to use Amazon EBS-optimized
instances.
+ :type lifecycle_event_configuration: dict
+ :param lifecycle_event_configuration: A LifeCycleEventConfiguration
+ object that you can use to configure the Shutdown event to specify
+ an execution timeout and enable or disable Elastic Load Balancer
+ connection draining.
+
"""
params = {
'StackId': stack_id,
@@ -830,6 +891,8 @@ class OpsWorksConnection(AWSQueryConnection):
params['InstallUpdatesOnBoot'] = install_updates_on_boot
if use_ebs_optimized_instances is not None:
params['UseEbsOptimizedInstances'] = use_ebs_optimized_instances
+ if lifecycle_event_configuration is not None:
+ params['LifecycleEventConfiguration'] = lifecycle_event_configuration
return self.make_request(action='CreateLayer',
body=json.dumps(params))
@@ -860,8 +923,8 @@ class OpsWorksConnection(AWSQueryConnection):
:type vpc_id: string
:param vpc_id: The ID of the VPC that the stack is to be launched into.
- It must be in the specified region. All instances will be launched
- into this VPC, and you cannot change the ID later.
+ It must be in the specified region. All instances are launched into
+ this VPC, and you cannot change the ID later.
+ If your account supports EC2 Classic, the default value is no VPC.
+ If your account does not support EC2 Classic, the default value is
@@ -905,9 +968,16 @@ class OpsWorksConnection(AWSQueryConnection):
information about IAM ARNs, see `Using Identifiers`_.
:type default_os: string
- :param default_os: The stack's default operating system, which must be
- set to `Amazon Linux` or `Ubuntu 12.04 LTS`. The default option is
- `Amazon Linux`.
+ :param default_os: The stack's operating system, which must be set to
+ one of the following.
+
+ + Standard operating systems: an Amazon Linux version such as `Amazon
+ Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+ + Custom AMIs: `Custom`. You specify the custom AMI you want to use
+ when you create instances.
+
+
+ The default option is the current Amazon Linux version.
:type hostname_theme: string
:param hostname_theme: The stack's host name theme, with spaces are
@@ -940,12 +1010,13 @@ class OpsWorksConnection(AWSQueryConnection):
information, see the `VpcId` parameter description.
:type default_subnet_id: string
- :param default_subnet_id: The stack's default subnet ID. All instances
- will be launched into this subnet unless you specify otherwise when
- you create the instance. If you also specify a value for
- `DefaultAvailabilityZone`, the subnet must be in that zone. For
- information on default values and when this parameter is required,
- see the `VpcId` parameter description.
+ :param default_subnet_id: The stack's default VPC subnet ID. This
+ parameter is required if you specify a value for the `VpcId`
+ parameter. All instances are launched into this subnet unless you
+ specify otherwise when you create the instance. If you also specify
+ a value for `DefaultAvailabilityZone`, the subnet must be in that
+ zone. For information on default values and when this parameter is
+ required, see the `VpcId` parameter description.
:type custom_json: string
:param custom_json: A string that contains user-defined, custom JSON.
@@ -1111,9 +1182,11 @@ class OpsWorksConnection(AWSQueryConnection):
def delete_instance(self, instance_id, delete_elastic_ip=None,
delete_volumes=None):
"""
- Deletes a specified instance. You must stop an instance before
- you can delete it. For more information, see `Deleting
- Instances`_.
+ Deletes a specified instance, which terminates the associated
+ Amazon EC2 instance. You must stop an instance before you can
+ delete it.
+
+ For more information, see `Deleting Instances`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
@@ -1144,8 +1217,8 @@ class OpsWorksConnection(AWSQueryConnection):
def delete_layer(self, layer_id):
"""
Deletes a specified layer. You must first stop and then delete
- all associated instances. For more information, see `How to
- Delete a Layer`_.
+ all associated instances or unassign registered instances. For
+ more information, see `How to Delete a Layer`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
@@ -1164,8 +1237,8 @@ class OpsWorksConnection(AWSQueryConnection):
def delete_stack(self, stack_id):
"""
Deletes a specified stack. You must first delete all
- instances, layers, and apps. For more information, see `Shut
- Down a Stack`_.
+ instances, layers, and apps or deregister registered
+ instances. For more information, see `Shut Down a Stack`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
@@ -1218,10 +1291,37 @@ class OpsWorksConnection(AWSQueryConnection):
return self.make_request(action='DeregisterElasticIp',
body=json.dumps(params))
+ def deregister_instance(self, instance_id):
+ """
+ Deregister a registered Amazon EC2 or on-premises instance.
+ This action removes the instance from the stack and returns it
+ to your control. This action cannot be used with instances
+ that were created with AWS OpsWorks.
+
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
+
+ :type instance_id: string
+ :param instance_id: The instance ID.
+
+ """
+ params = {'InstanceId': instance_id, }
+ return self.make_request(action='DeregisterInstance',
+ body=json.dumps(params))
+
def deregister_rds_db_instance(self, rds_db_instance_arn):
"""
Deregisters an Amazon RDS instance.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
+
:type rds_db_instance_arn: string
:param rds_db_instance_arn: The Amazon RDS instance's ARN.
@@ -1254,8 +1354,10 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Requests a description of a specified set of apps.
+
You must specify at least one of the parameters.
+
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
@@ -1286,8 +1388,10 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Describes the results of specified commands.
+
You must specify at least one of the parameters.
+
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
@@ -1326,8 +1430,10 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Requests a description of a specified set of deployments.
+
You must specify at least one of the parameters.
+
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
@@ -1365,8 +1471,10 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Describes `Elastic IP addresses`_.
+
You must specify at least one of the parameters.
+
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
@@ -1404,8 +1512,10 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Describes a stack's Elastic Load Balancing instances.
+
You must specify at least one of the parameters.
+
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
@@ -1434,8 +1544,10 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Requests a description of a set of instances.
+
You must specify at least one of the parameters.
+
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
@@ -1474,8 +1586,10 @@ class OpsWorksConnection(AWSQueryConnection):
Requests a description of one or more layers in a specified
stack.
+
You must specify at least one of the parameters.
+
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
@@ -1504,8 +1618,10 @@ class OpsWorksConnection(AWSQueryConnection):
Describes load-based auto scaling configurations for specified
layers.
+
You must specify at least one of the parameters.
+
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
@@ -1539,8 +1655,6 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Describes the permissions for a specified stack.
- You must specify at least one of the parameters.
-
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
@@ -1563,12 +1677,15 @@ class OpsWorksConnection(AWSQueryConnection):
return self.make_request(action='DescribePermissions',
body=json.dumps(params))
- def describe_raid_arrays(self, instance_id=None, raid_array_ids=None):
+ def describe_raid_arrays(self, instance_id=None, stack_id=None,
+ raid_array_ids=None):
"""
Describe an instance's RAID arrays.
+
You must specify at least one of the parameters.
+
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
@@ -1580,6 +1697,9 @@ class OpsWorksConnection(AWSQueryConnection):
`DescribeRaidArrays` returns descriptions of the RAID arrays
associated with the specified instance.
+ :type stack_id: string
+ :param stack_id: The stack ID.
+
:type raid_array_ids: list
:param raid_array_ids: An array of RAID array IDs. If you use this
parameter, `DescribeRaidArrays` returns descriptions of the
@@ -1590,6 +1710,8 @@ class OpsWorksConnection(AWSQueryConnection):
params = {}
if instance_id is not None:
params['InstanceId'] = instance_id
+ if stack_id is not None:
+ params['StackId'] = stack_id
if raid_array_ids is not None:
params['RaidArrayIds'] = raid_array_ids
return self.make_request(action='DescribeRaidArrays',
@@ -1599,6 +1721,12 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Describes Amazon RDS instances.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Show, Deploy, or Manage permissions level for the
+ stack, or an attached policy that explicitly grants
+ permissions. For more information on user permissions, see
+ `Managing User Permissions`_.
+
:type stack_id: string
:param stack_id: The stack ID that the instances are registered with.
The operation returns descriptions of all registered Amazon RDS
@@ -1653,6 +1781,24 @@ class OpsWorksConnection(AWSQueryConnection):
return self.make_request(action='DescribeServiceErrors',
body=json.dumps(params))
+ def describe_stack_provisioning_parameters(self, stack_id):
+ """
+ Requests a description of a stack's provisioning parameters.
+
+ **Required Permissions**: To use this action, an IAM user must
+ have a Show, Deploy, or Manage permissions level for the stack
+ or an attached policy that explicitly grants permissions. For
+ more information on user permissions, see `Managing User
+ Permissions`_.
+
+ :type stack_id: string
+ :param stack_id: The stack ID
+
+ """
+ params = {'StackId': stack_id, }
+ return self.make_request(action='DescribeStackProvisioningParameters',
+ body=json.dumps(params))
+
def describe_stack_summary(self, stack_id):
"""
Describes the number of layers and apps in a specified stack,
@@ -1700,6 +1846,10 @@ class OpsWorksConnection(AWSQueryConnection):
Describes time-based auto scaling configurations for specified
instances.
+
+ You must specify at least one of the parameters.
+
+
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
@@ -1739,8 +1889,10 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Describes an instance's Amazon EBS volumes.
+
You must specify at least one of the parameters.
+
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
@@ -1890,11 +2042,81 @@ class OpsWorksConnection(AWSQueryConnection):
return self.make_request(action='RegisterElasticIp',
body=json.dumps(params))
+ def register_instance(self, stack_id, hostname=None, public_ip=None,
+ private_ip=None, rsa_public_key=None,
+ rsa_public_key_fingerprint=None,
+ instance_identity=None):
+ """
+ Registers instances with a specified stack that were created
+ outside of AWS OpsWorks.
+
+ We do not recommend using this action to register instances.
+ The complete registration operation has two primary steps,
+ installing the AWS OpsWorks agent on the instance and
+ registering the instance with the stack. `RegisterInstance`
+ handles only the second step. You should instead use the AWS
+ CLI `register` command, which performs the entire registration
+ operation.
+
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
+
+ :type stack_id: string
+ :param stack_id: The ID of the stack that the instance is to be
+ registered with.
+
+ :type hostname: string
+ :param hostname: The instance's hostname.
+
+ :type public_ip: string
+ :param public_ip: The instance's public IP address.
+
+ :type private_ip: string
+ :param private_ip: The instance's private IP address.
+
+ :type rsa_public_key: string
+ :param rsa_public_key: The instance's public RSA key. This key is used
+ to encrypt communication between the instance and the service.
+
+ :type rsa_public_key_fingerprint: string
+ :param rsa_public_key_fingerprint: The instance's public RSA key
+ fingerprint.
+
+ :type instance_identity: dict
+ :param instance_identity: An InstanceIdentity object that contains the
+ instance's identity.
+
+ """
+ params = {'StackId': stack_id, }
+ if hostname is not None:
+ params['Hostname'] = hostname
+ if public_ip is not None:
+ params['PublicIp'] = public_ip
+ if private_ip is not None:
+ params['PrivateIp'] = private_ip
+ if rsa_public_key is not None:
+ params['RsaPublicKey'] = rsa_public_key
+ if rsa_public_key_fingerprint is not None:
+ params['RsaPublicKeyFingerprint'] = rsa_public_key_fingerprint
+ if instance_identity is not None:
+ params['InstanceIdentity'] = instance_identity
+ return self.make_request(action='RegisterInstance',
+ body=json.dumps(params))
+
def register_rds_db_instance(self, stack_id, rds_db_instance_arn,
db_user, db_password):
"""
Registers an Amazon RDS instance with a stack.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
+
:type stack_id: string
:param stack_id: The stack ID.
@@ -1951,6 +2173,14 @@ class OpsWorksConnection(AWSQueryConnection):
specified layer. For more information, see `Managing Load with
Time-based and Load-based Instances`_.
+
+ To use load-based auto scaling, you must create a set of load-
+ based auto scaling instances. Load-based auto scaling operates
+ only on the instances from that set, so you must ensure that
+ you have created enough instances to handle the maximum
+ anticipated load.
+
+
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
@@ -2141,6 +2371,28 @@ class OpsWorksConnection(AWSQueryConnection):
return self.make_request(action='StopStack',
body=json.dumps(params))
+ def unassign_instance(self, instance_id):
+ """
+ Unassigns a registered instance from all of its layers. The
+ instance remains in the stack as an unassigned instance and
+ can be assigned to another layer, as needed. You cannot use
+ this action with instances that were created with AWS
+ OpsWorks.
+
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
+
+ :type instance_id: string
+ :param instance_id: The instance ID.
+
+ """
+ params = {'InstanceId': instance_id, }
+ return self.make_request(action='UnassignInstance',
+ body=json.dumps(params))
+
def unassign_volume(self, volume_id):
"""
Unassigns an assigned Amazon EBS volume. The volume remains
@@ -2164,7 +2416,7 @@ class OpsWorksConnection(AWSQueryConnection):
def update_app(self, app_id, name=None, description=None,
data_sources=None, type=None, app_source=None,
domains=None, enable_ssl=None, ssl_configuration=None,
- attributes=None):
+ attributes=None, environment=None):
"""
Updates a specified app.
@@ -2207,6 +2459,17 @@ class OpsWorksConnection(AWSQueryConnection):
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes.
+ :type environment: list
+ :param environment:
+ An array of `EnvironmentVariable` objects that specify environment
+ variables to be associated with the app. You can specify up to ten
+ environment variables. After you deploy the app, these variables
+ are defined on the associated app server instances.
+
+ This parameter is supported only by Chef 11.10 stacks. If you have
+ specified one or more environment variables, you cannot modify the
+ stack's Chef version.
+
"""
params = {'AppId': app_id, }
if name is not None:
@@ -2227,6 +2490,8 @@ class OpsWorksConnection(AWSQueryConnection):
params['SslConfiguration'] = ssl_configuration
if attributes is not None:
params['Attributes'] = attributes
+ if environment is not None:
+ params['Environment'] = environment
return self.make_request(action='UpdateApp',
body=json.dumps(params))
@@ -2282,41 +2547,37 @@ class OpsWorksConnection(AWSQueryConnection):
in the API Name column of the Available Instance Types table.
:type auto_scaling_type: string
- :param auto_scaling_type:
- The instance's auto scaling type, which has three possible values:
-
-
- + **AlwaysRunning**: A 24/7 instance, which is not affected by auto
- scaling.
- + **TimeBasedAutoScaling**: A time-based auto scaling instance, which
- is started and stopped based on a specified schedule.
- + **LoadBasedAutoScaling**: A load-based auto scaling instance, which
- is started and stopped based on load metrics.
+ :param auto_scaling_type: For load-based or time-based instances, the
+ type.
:type hostname: string
:param hostname: The instance host name.
:type os: string
- :param os: The instance operating system, which must be set to one of
+ :param os: The instance's operating system, which must be set to one of
the following.
- + Standard operating systems: `Amazon Linux` or `Ubuntu 12.04 LTS`
+ + Standard operating systems: An Amazon Linux version such as `Amazon
+ Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+ Custom AMIs: `Custom`
- The default option is `Amazon Linux`. If you set this parameter to
- `Custom`, you must use the CreateInstance action's AmiId parameter
- to specify the custom AMI that you want to use. For more
- information on the standard operating systems, see `Operating
- Systems`_For more information on how to use custom AMIs with
- OpsWorks, see `Using Custom AMIs`_.
+ The default option is the current Amazon Linux version, such as `Amazon
+ Linux 2014.09`. If you set this parameter to `Custom`, you must use
+ the CreateInstance action's AmiId parameter to specify the custom
+ AMI that you want to use. For more information on the standard
+ operating systems, see `Operating Systems`_For more information on
+ how to use custom AMIs with OpsWorks, see `Using Custom AMIs`_.
:type ami_id: string
- :param ami_id: A custom AMI ID to be used to create the instance. The
- AMI should be based on one of the standard AWS OpsWorks APIs:
- Amazon Linux or Ubuntu 12.04 LTS. For more information, see
+ :param ami_id:
+ A custom AMI ID to be used to create the instance. The AMI should be
+ based on one of the standard AWS OpsWorks AMIs: Amazon Linux,
+ Ubuntu 12.04 LTS, or Ubuntu 14.04 LTS. For more information, see
`Instances`_
+ If you specify a custom AMI, you must set `Os` to `Custom`.
+
:type ssh_key_name: string
:param ssh_key_name: The instance SSH key name.
@@ -2327,13 +2588,17 @@ class OpsWorksConnection(AWSQueryConnection):
see `Instance Families and Types`_.
:type install_updates_on_boot: boolean
- :param install_updates_on_boot: Whether to install operating system and
- package updates when the instance boots. The default value is
- `True`. To control when updates are installed, set this value to
- `False`. You must then update your instances manually by using
- CreateDeployment to run the `update_dependencies` stack command or
- manually running `yum` (Amazon Linux) or `apt-get` (Ubuntu) on the
- instances.
+ :param install_updates_on_boot:
+ Whether to install operating system and package updates when the
+ instance boots. The default value is `True`. To control when
+ updates are installed, set this value to `False`. You must then
+ update your instances manually by using CreateDeployment to run the
+ `update_dependencies` stack command or manually running `yum`
+ (Amazon Linux) or `apt-get` (Ubuntu) on the instances.
+
+
+ We strongly recommend using the default value of `True`, to ensure that
+ your instances have the latest security updates.
:type ebs_optimized: boolean
:param ebs_optimized: Whether this is an Amazon EBS-optimized instance.
@@ -2370,7 +2635,8 @@ class OpsWorksConnection(AWSQueryConnection):
auto_assign_elastic_ips=None,
auto_assign_public_ips=None, custom_recipes=None,
install_updates_on_boot=None,
- use_ebs_optimized_instances=None):
+ use_ebs_optimized_instances=None,
+ lifecycle_event_configuration=None):
"""
Updates a specified layer.
@@ -2433,18 +2699,25 @@ class OpsWorksConnection(AWSQueryConnection):
layer's custom recipes.
:type install_updates_on_boot: boolean
- :param install_updates_on_boot: Whether to install operating system and
- package updates when the instance boots. The default value is
- `True`. To control when updates are installed, set this value to
- `False`. You must then update your instances manually by using
- CreateDeployment to run the `update_dependencies` stack command or
- manually running `yum` (Amazon Linux) or `apt-get` (Ubuntu) on the
- instances.
+ :param install_updates_on_boot:
+ Whether to install operating system and package updates when the
+ instance boots. The default value is `True`. To control when
+ updates are installed, set this value to `False`. You must then
+ update your instances manually by using CreateDeployment to run the
+ `update_dependencies` stack command or manually running `yum`
+ (Amazon Linux) or `apt-get` (Ubuntu) on the instances.
+
+
+ We strongly recommend using the default value of `True`, to ensure that
+ your instances have the latest security updates.
:type use_ebs_optimized_instances: boolean
:param use_ebs_optimized_instances: Whether to use Amazon EBS-optimized
instances.
+ :type lifecycle_event_configuration: dict
+ :param lifecycle_event_configuration: A `LifecycleEventConfiguration` object that you can use to configure the Shutdown event to specify an execution timeout and enable or disable Elastic Load Balancer connection draining.
+
"""
params = {'LayerId': layer_id, }
if name is not None:
@@ -2473,6 +2746,8 @@ class OpsWorksConnection(AWSQueryConnection):
params['InstallUpdatesOnBoot'] = install_updates_on_boot
if use_ebs_optimized_instances is not None:
params['UseEbsOptimizedInstances'] = use_ebs_optimized_instances
+ if lifecycle_event_configuration is not None:
+ params['LifecycleEventConfiguration'] = lifecycle_event_configuration
return self.make_request(action='UpdateLayer',
body=json.dumps(params))
@@ -2500,6 +2775,12 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Updates an Amazon RDS instance.
+ **Required Permissions**: To use this action, an IAM user must
+ have a Manage permissions level for the stack, or an attached
+ policy that explicitly grants permissions. For more
+ information on user permissions, see `Managing User
+ Permissions`_.
+
:type rds_db_instance_arn: string
:param rds_db_instance_arn: The Amazon RDS instance's ARN.
@@ -2548,11 +2829,18 @@ class OpsWorksConnection(AWSQueryConnection):
to the stack attributes.
:type service_role_arn: string
- :param service_role_arn: The stack AWS Identity and Access Management
- (IAM) role, which allows AWS OpsWorks to work with AWS resources on
- your behalf. You must set this parameter to the Amazon Resource
- Name (ARN) for an existing IAM role. For more information about IAM
- ARNs, see `Using Identifiers`_.
+ :param service_role_arn:
+ The stack AWS Identity and Access Management (IAM) role, which allows
+ AWS OpsWorks to work with AWS resources on your behalf. You must
+ set this parameter to the Amazon Resource Name (ARN) for an
+ existing IAM role. For more information about IAM ARNs, see `Using
+ Identifiers`_.
+
+
+ You must set this parameter to a valid service role ARN or the action
+ will fail; there is no default value. You can specify the stack's
+ current service role ARN, if you prefer, but you must do so
+ explicitly.
:type default_instance_profile_arn: string
:param default_instance_profile_arn: The ARN of an IAM profile that is
@@ -2560,9 +2848,16 @@ class OpsWorksConnection(AWSQueryConnection):
information about IAM ARNs, see `Using Identifiers`_.
:type default_os: string
- :param default_os: The stack's default operating system, which must be
- set to `Amazon Linux` or `Ubuntu 12.04 LTS`. The default option is
- `Amazon Linux`.
+ :param default_os: The stack's operating system, which must be set to
+ one of the following.
+
+ + Standard operating systems: an Amazon Linux version such as `Amazon
+ Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+ + Custom AMIs: `Custom`. You specify the custom AMI you want to use
+ when you create instances.
+
+
+ The default option is the current Amazon Linux version.
:type hostname_theme: string
:param hostname_theme: The stack's new host name theme, with spaces are
@@ -2595,11 +2890,13 @@ class OpsWorksConnection(AWSQueryConnection):
information, see CreateStack.
:type default_subnet_id: string
- :param default_subnet_id: The stack's default subnet ID. All instances
- will be launched into this subnet unless you specify otherwise when
- you create the instance. If you also specify a value for
- `DefaultAvailabilityZone`, the subnet must be in that zone. For
- more information, see CreateStack.
+ :param default_subnet_id: The stack's default VPC subnet ID. This
+ parameter is required if you specify a value for the `VpcId`
+ parameter. All instances are launched into this subnet unless you
+ specify otherwise when you create the instance. If you also specify
+ a value for `DefaultAvailabilityZone`, the subnet must be in that
+ zone. For information on default values and when this parameter is
+ required, see the `VpcId` parameter description.
:type custom_json: string
:param custom_json: A string that contains user-defined, custom JSON.
@@ -2794,3 +3091,4 @@ class OpsWorksConnection(AWSQueryConnection):
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
+
diff --git a/boto/sqs/message.py b/boto/sqs/message.py
index 04d7cec7..656734fa 100644
--- a/boto/sqs/message.py
+++ b/boto/sqs/message.py
@@ -68,6 +68,7 @@ import base64
import boto
from boto.compat import StringIO
+from boto.compat import six
from boto.sqs.attributes import Attributes
from boto.sqs.messageattributes import MessageAttributes
from boto.exception import SQSDecodeError
@@ -163,7 +164,9 @@ class Message(RawMessage):
"""
def encode(self, value):
- return base64.b64encode(value.encode('utf-8')).decode('utf-8')
+ if not isinstance(value, six.binary_type):
+ value = value.encode('utf-8')
+ return base64.b64encode(value).decode('utf-8')
def decode(self, value):
try:
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 76b7e93f..6b9a7f52 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -36,6 +36,8 @@ Currently Supported Services
* :doc:`Elastic MapReduce (EMR) <emr_tut>` -- (:doc:`API Reference <ref/emr>`) (Python 3)
* :doc:`Auto Scaling <autoscale_tut>` -- (:doc:`API Reference <ref/autoscale>`) (Python 3)
* Kinesis -- (:doc:`API Reference <ref/kinesis>`) (Python 3)
+ * Lambda -- (:doc:`API Reference <ref/awslambda>`) (Python 3)
+ * EC2 Container Service (ECS) -- (:doc:`API Reference <ref/ec2containerservice>`) (Python 3)
* **Content Delivery**
@@ -58,11 +60,15 @@ Currently Supported Services
* Data Pipeline -- (:doc:`API Reference <ref/datapipeline>`) (Python 3)
* Opsworks -- (:doc:`API Reference <ref/opsworks>`) (Python 3)
* CloudTrail -- (:doc:`API Reference <ref/cloudtrail>`) (Python 3)
+ * CodeDeploy -- (:doc:`API Reference <ref/codedeploy>`) (Python 3)
-* **Identity & Access**
+* **Administration & Security**
* Identity and Access Management (IAM) -- (:doc:`API Reference <ref/iam>`) (Python 3)
* Security Token Service (STS) -- (:doc:`API Reference <ref/sts>`) (Python 3)
+ * Key Management Service (KMS) -- (:doc:`API Reference <ref/kms>`) (Python 3)
+ * Config -- (:doc:`API Reference <ref/configservice>`) (Python 3)
+ * CloudHSM -- (:doc:`API Reference <ref/cloudhsm>`) (Python 3)
* **Application Services**
@@ -136,6 +142,7 @@ Release Notes
.. toctree::
:titlesonly:
+ releasenotes/v2.36.0
releasenotes/v2.35.2
releasenotes/v2.35.1
releasenotes/v2.35.0
diff --git a/docs/source/ref/awslamba.rst b/docs/source/ref/awslamba.rst
new file mode 100644
index 00000000..68db5467
--- /dev/null
+++ b/docs/source/ref/awslamba.rst
@@ -0,0 +1,26 @@
+.. ref-awslambda
+
+==========
+AWS Lambda
+==========
+
+boto.awslambda
+--------------
+
+.. automodule:: boto.awslambda
+ :members:
+ :undoc-members:
+
+boto.awslambda.layer1
+---------------------
+
+.. automodule:: boto.awslambda.layer1
+ :members:
+ :undoc-members:
+
+boto.awslambda.exceptions
+-------------------------
+
+.. automodule:: boto.awslambda.exceptions
+ :members:
+ :undoc-members:
diff --git a/docs/source/ref/cloudhsm.rst b/docs/source/ref/cloudhsm.rst
new file mode 100644
index 00000000..591c5594
--- /dev/null
+++ b/docs/source/ref/cloudhsm.rst
@@ -0,0 +1,26 @@
+.. ref-cloudhsm
+
+========
+CloudHSM
+========
+
+boto.cloudhsm
+-------------
+
+.. automodule:: boto.cloudhsm
+ :members:
+ :undoc-members:
+
+boto.cloudhsm.layer1
+--------------------
+
+.. automodule:: boto.cloudhsm.layer1
+ :members:
+ :undoc-members:
+
+boto.cloudhsm.exceptions
+------------------------
+
+.. automodule:: boto.cloudhsm.exceptions
+ :members:
+ :undoc-members:
diff --git a/docs/source/ref/codedeploy.rst b/docs/source/ref/codedeploy.rst
new file mode 100644
index 00000000..3e3998a4
--- /dev/null
+++ b/docs/source/ref/codedeploy.rst
@@ -0,0 +1,26 @@
+.. ref-codedeploy
+
+==========
+CodeDeploy
+==========
+
+boto.codedeploy
+---------------
+
+.. automodule:: boto.codedeploy
+ :members:
+ :undoc-members:
+
+boto.codedeploy.layer1
+-------------------
+
+.. automodule:: boto.codedeploy.layer1
+ :members:
+ :undoc-members:
+
+boto.codedeploy.exceptions
+--------------------------
+
+.. automodule:: boto.codedeploy.exceptions
+ :members:
+ :undoc-members:
diff --git a/docs/source/ref/configservice.rst b/docs/source/ref/configservice.rst
new file mode 100644
index 00000000..d5e43af9
--- /dev/null
+++ b/docs/source/ref/configservice.rst
@@ -0,0 +1,26 @@
+.. ref-configservice
+
+======
+Config
+======
+
+boto.configservice
+------------------
+
+.. automodule:: boto.configservice
+ :members:
+ :undoc-members:
+
+boto.configservice.layer1
+-------------------------
+
+.. automodule:: boto.configservice.layer1
+ :members:
+ :undoc-members:
+
+boto.configservice.exceptions
+-----------------------------
+
+.. automodule:: boto.configservice.exceptions
+ :members:
+ :undoc-members:
diff --git a/docs/source/ref/ec2containerservice.rst b/docs/source/ref/ec2containerservice.rst
new file mode 100644
index 00000000..96ca539a
--- /dev/null
+++ b/docs/source/ref/ec2containerservice.rst
@@ -0,0 +1,26 @@
+.. ref-ec2containerservice
+
+=====================
+EC2 Container Service
+=====================
+
+boto.ec2containerservice
+------------------------
+
+.. automodule:: boto.ec2containerservice
+ :members:
+ :undoc-members:
+
+boto.ec2containerservice.layer1
+-------------------------------
+
+.. automodule:: boto.ec2containerservice.layer1
+ :members:
+ :undoc-members:
+
+boto.ec2containerservice.exceptions
+-----------------------------------
+
+.. automodule:: boto.ec2containerservice.exceptions
+ :members:
+ :undoc-members:
diff --git a/docs/source/ref/kms.rst b/docs/source/ref/kms.rst
new file mode 100644
index 00000000..85f9e210
--- /dev/null
+++ b/docs/source/ref/kms.rst
@@ -0,0 +1,26 @@
+.. ref-kms
+
+===
+KMS
+===
+
+boto.kms
+--------
+
+.. automodule:: boto.kms
+ :members:
+ :undoc-members:
+
+boto.kms.layer1
+---------------
+
+.. automodule:: boto.kms.layer1
+ :members:
+ :undoc-members:
+
+boto.kms.exceptions
+-------------------
+
+.. automodule:: boto.kms.exceptions
+ :members:
+ :undoc-members:
diff --git a/docs/source/releasenotes/v2.36.0.rst b/docs/source/releasenotes/v2.36.0.rst
new file mode 100644
index 00000000..7f4b6082
--- /dev/null
+++ b/docs/source/releasenotes/v2.36.0.rst
@@ -0,0 +1,27 @@
+boto v2.36.0
+============
+
+:date: 2015/01/27
+
+This release adds support for AWS Key Management Service (KMS), AWS Lambda,
+AWS CodeDeploy, AWS Config, AWS CloudHSM, Amazon EC2 Container Service (ECS),
+Amazon DynamoDB online indexing, and fixes a few issues.
+
+
+Changes
+-------
+* Add Amazon DynamoDB online indexing support.
+* Allow for binary to be passed to sqs message (:issue:`2913`, :sha:`8af9b42`)
+* Kinesis update (:issue:`2891`, :sha:`4874e19`)
+* Fixed spelling of boto.awslambda package. (:issue:`2914`, :sha:`de769ac`)
+* Add support for Amazon EC2 Container Service (:issue:`2908`, :sha:`4480fb4`)
+* Add support for CloudHSM (:issue:`2905`, :sha:`6055a35`)
+* Add support for AWS Config (:issue:`2904`, :sha:`51e9221`)
+* Add support for AWS CodeDeploy (:issue:`2899`, :sha:`d935356`)
+* Add support for AWS Lambda (:issue:`2896`, :sha:`6748016`)
+* Update both Cognito's to the latest APIs (:issue:`2909`, :sha:`18c1251`)
+* Add sts for eu-central-1. (:issue:`2906`, :sha:`54714ff`)
+* Update opsworks to latest API (:issue:`2892`, :sha:`aed3302`)
+* Add AWS Key Management Support (:issue:`2894`, :sha:`ef7d2cd`)
+
+
diff --git a/setup.py b/setup.py
index 89c1bc65..de97d274 100644
--- a/setup.py
+++ b/setup.py
@@ -79,7 +79,9 @@ setup(name = "boto",
"boto.cloudsearch2", "boto.logs", "boto.vendored",
"boto.route53.domains", "boto.cognito",
"boto.cognito.identity", "boto.cognito.sync",
- "boto.cloudsearchdomain"],
+ "boto.cloudsearchdomain", "boto.kms",
+ "boto.awslambda", "boto.codedeploy", "boto.configservice",
+ "boto.cloudhsm", "boto.ec2containerservice"],
package_data = {
"boto.cacerts": ["cacerts.txt"],
"boto": ["endpoints.json"],
diff --git a/tests/integration/awslambda/__init__.py b/tests/integration/awslambda/__init__.py
new file mode 100644
index 00000000..a3575e2e
--- /dev/null
+++ b/tests/integration/awslambda/__init__.py
@@ -0,0 +1,21 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
diff --git a/tests/integration/awslambda/test_awslambda.py b/tests/integration/awslambda/test_awslambda.py
new file mode 100644
index 00000000..8945922a
--- /dev/null
+++ b/tests/integration/awslambda/test_awslambda.py
@@ -0,0 +1,38 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import boto
+from boto.awslambda.exceptions import ResourceNotFoundException
+from tests.compat import unittest
+
+
+class TestAWSLambda(unittest.TestCase):
+ def setUp(self):
+ self.awslambda = boto.connect_awslambda()
+
+ def test_list_functions(self):
+ response = self.awslambda.list_functions()
+ self.assertIn('Functions', response)
+
+ def test_resource_not_found_exceptions(self):
+ with self.assertRaises(ResourceNotFoundException):
+ self.awslambda.get_function(function_name='non-existant-function')
diff --git a/tests/integration/cloudhsm/__init__.py b/tests/integration/cloudhsm/__init__.py
new file mode 100644
index 00000000..a3575e2e
--- /dev/null
+++ b/tests/integration/cloudhsm/__init__.py
@@ -0,0 +1,21 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
diff --git a/tests/integration/cloudhsm/test_cloudhsm.py b/tests/integration/cloudhsm/test_cloudhsm.py
new file mode 100644
index 00000000..0965d0d3
--- /dev/null
+++ b/tests/integration/cloudhsm/test_cloudhsm.py
@@ -0,0 +1,44 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import boto
+from tests.compat import unittest
+from boto.cloudhsm.exceptions import InvalidRequestException
+
+
+class TestCloudHSM(unittest.TestCase):
+ def setUp(self):
+ self.cloudhsm = boto.connect_cloudhsm()
+
+ def test_hapgs(self):
+ label = 'my-hapg'
+ response = self.cloudhsm.create_hapg(label=label)
+ hapg_arn = response['HapgArn']
+ self.addCleanup(self.cloudhsm.delete_hapg, hapg_arn)
+
+ response = self.cloudhsm.list_hapgs()
+ self.assertIn(hapg_arn, response['HapgList'])
+
+ def test_validation_exception(self):
+ invalid_arn = 'arn:aws:cloudhsm:us-east-1:123456789012:hapg-55214b8d'
+ with self.assertRaises(InvalidRequestException):
+ self.cloudhsm.describe_hapg(invalid_arn)
diff --git a/tests/integration/codedeploy/__init__.py b/tests/integration/codedeploy/__init__.py
new file mode 100644
index 00000000..a3575e2e
--- /dev/null
+++ b/tests/integration/codedeploy/__init__.py
@@ -0,0 +1,21 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
diff --git a/tests/integration/codedeploy/test_codedeploy.py b/tests/integration/codedeploy/test_codedeploy.py
new file mode 100644
index 00000000..469b4faf
--- /dev/null
+++ b/tests/integration/codedeploy/test_codedeploy.py
@@ -0,0 +1,41 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import boto
+from boto.codedeploy.exceptions import ApplicationDoesNotExistException
+from tests.compat import unittest
+
+
+class TestCodeDeploy(unittest.TestCase):
+ def setUp(self):
+ self.codedeploy = boto.connect_codedeploy()
+
+ def test_applications(self):
+ application_name = 'my-boto-application'
+ self.codedeploy.create_application(application_name=application_name)
+ self.addCleanup(self.codedeploy.delete_application, application_name)
+ response = self.codedeploy.list_applications()
+ self.assertIn(application_name, response['applications'])
+
+ def test_exception(self):
+ with self.assertRaises(ApplicationDoesNotExistException):
+ self.codedeploy.get_application('some-non-existant-app')
diff --git a/tests/integration/configservice/__init__.py b/tests/integration/configservice/__init__.py
new file mode 100644
index 00000000..a3575e2e
--- /dev/null
+++ b/tests/integration/configservice/__init__.py
@@ -0,0 +1,21 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
diff --git a/tests/integration/configservice/test_configservice.py b/tests/integration/configservice/test_configservice.py
new file mode 100644
index 00000000..07f6472e
--- /dev/null
+++ b/tests/integration/configservice/test_configservice.py
@@ -0,0 +1,39 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import boto
+from boto.configservice.exceptions import NoSuchConfigurationRecorderException
+from tests.compat import unittest
+
+
+class TestConfigService(unittest.TestCase):
+ def setUp(self):
+ self.configservice = boto.connect_configservice()
+
+ def test_describe_configuration_recorders(self):
+ response = self.configservice.describe_configuration_recorders()
+ self.assertIn('ConfigurationRecorders', response)
+
+ def test_handle_no_such_configuration_recorder(self):
+ with self.assertRaises(NoSuchConfigurationRecorderException):
+ self.configservice.describe_configuration_recorders(
+ configuration_recorder_names=['non-existant-recorder'])
diff --git a/tests/integration/ec2containerservice/__init__.py b/tests/integration/ec2containerservice/__init__.py
new file mode 100644
index 00000000..8bfcf6af
--- /dev/null
+++ b/tests/integration/ec2containerservice/__init__.py
@@ -0,0 +1,21 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
diff --git a/tests/integration/ec2containerservice/test_ec2containerservice.py b/tests/integration/ec2containerservice/test_ec2containerservice.py
new file mode 100644
index 00000000..32a677b8
--- /dev/null
+++ b/tests/integration/ec2containerservice/test_ec2containerservice.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import boto
+from boto.ec2containerservice.exceptions import ClientException
+from tests.compat import unittest
+
+
+class TestEC2ContainerService(unittest.TestCase):
+ def setUp(self):
+ self.ecs = boto.connect_ec2containerservice()
+
+ def test_list_clusters(self):
+ response = self.ecs.list_clusters()
+ self.assertIn('clusterArns',
+ response['ListClustersResponse']['ListClustersResult'])
+
+ def test_handle_not_found_exception(self):
+ with self.assertRaises(ClientException):
+ # Try to stop a task with an invalid arn.
+ self.ecs.stop_task(task='foo')
diff --git a/tests/integration/kinesis/test_kinesis.py b/tests/integration/kinesis/test_kinesis.py
index ed16d78a..3c61feeb 100644
--- a/tests/integration/kinesis/test_kinesis.py
+++ b/tests/integration/kinesis/test_kinesis.py
@@ -55,31 +55,57 @@ class TestKinesis(unittest.TestCase):
else:
raise TimeoutError('Stream is still not active, aborting...')
+ # Make a tag.
+ kinesis.add_tags_to_stream(stream_name='test', tags={'foo': 'bar'})
+
+ # Check that the correct tag is there.
+ response = kinesis.list_tags_for_stream(stream_name='test')
+ self.assertEqual(len(response['Tags']), 1)
+ self.assertEqual(response['Tags'][0],
+ {'Key': 'foo', 'Value': 'bar'})
+
+ # Remove the tag and ensure it is removed.
+ kinesis.remove_tags_from_stream(stream_name='test', tag_keys=['foo'])
+ response = kinesis.list_tags_for_stream(stream_name='test')
+ self.assertEqual(len(response['Tags']), 0)
+
# Get ready to process some data from the stream
response = kinesis.get_shard_iterator('test', shard_id, 'TRIM_HORIZON')
shard_iterator = response['ShardIterator']
# Write some data to the stream
data = 'Some data ...'
+ record = {
+ 'Data': data,
+ 'PartitionKey': data,
+ }
response = kinesis.put_record('test', data, data)
+ response = kinesis.put_records([record, record.copy()], 'test')
# Wait for the data to show up
tries = 0
+ num_collected = 0
+ num_expected_records = 3
+ collected_records = []
while tries < 100:
tries += 1
time.sleep(1)
response = kinesis.get_records(shard_iterator)
shard_iterator = response['NextShardIterator']
-
- if len(response['Records']):
+ for record in response['Records']:
+ if 'Data' in record:
+ collected_records.append(record['Data'])
+ num_collected += 1
+ if num_collected >= num_expected_records:
+ self.assertEqual(num_expected_records, num_collected)
break
else:
raise TimeoutError('No records found, aborting...')
# Read the data, which should be the same as what we wrote
- self.assertEqual(1, len(response['Records']))
- self.assertEqual(data, response['Records'][0]['Data'])
+ for record in collected_records:
+ self.assertEqual(data, record)
def test_describe_non_existent_stream(self):
with self.assertRaises(ResourceNotFoundException) as cm:
diff --git a/tests/integration/kms/test_kms.py b/tests/integration/kms/test_kms.py
new file mode 100644
index 00000000..10238a03
--- /dev/null
+++ b/tests/integration/kms/test_kms.py
@@ -0,0 +1,41 @@
+# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import boto
+from boto.kms.exceptions import NotFoundException
+from tests.compat import unittest
+
+
+class TestKMS(unittest.TestCase):
+ def setUp(self):
+ self.kms = boto.connect_kms()
+
+ def test_list_keys(self):
+ response = self.kms.list_keys()
+ self.assertIn('Keys', response)
+
+ def test_handle_not_found_exception(self):
+ with self.assertRaises(NotFoundException):
+ # Describe some key that does not exist
+ self.kms.describe_key(
+ key_id='nonexistant_key',
+ )
diff --git a/tests/unit/awslambda/__init__.py b/tests/unit/awslambda/__init__.py
new file mode 100644
index 00000000..a3575e2e
--- /dev/null
+++ b/tests/unit/awslambda/__init__.py
@@ -0,0 +1,21 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
diff --git a/tests/unit/awslambda/test_awslambda.py b/tests/unit/awslambda/test_awslambda.py
new file mode 100644
index 00000000..3e36aee5
--- /dev/null
+++ b/tests/unit/awslambda/test_awslambda.py
@@ -0,0 +1,117 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import tempfile
+import shutil
+import os
+import socket
+
+from boto.compat import json
+from boto.awslambda.layer1 import AWSLambdaConnection
+from tests.unit import AWSMockServiceTestCase
+from tests.compat import mock
+
+
+class TestAWSLambda(AWSMockServiceTestCase):
+ connection_class = AWSLambdaConnection
+
+ def default_body(self):
+ return b'{}'
+
+ def test_upload_function_binary(self):
+ self.set_http_response(status_code=201)
+ function_data = b'This is my file'
+ self.service_connection.upload_function(
+ function_name='my-function',
+ function_zip=function_data,
+ role='myrole',
+ handler='myhandler',
+ mode='event',
+ runtime='nodejs'
+ )
+ self.assertEqual(self.actual_request.body, function_data)
+ self.assertEqual(
+ self.actual_request.headers['Content-Length'],
+ str(len(function_data))
+ )
+ self.assertEqual(
+ self.actual_request.path,
+ '/2014-11-13/functions/my-function?Handler=myhandler&Mode'
+ '=event&Role=myrole&Runtime=nodejs'
+ )
+
+ def test_upload_function_file(self):
+ self.set_http_response(status_code=201)
+ rootdir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, rootdir)
+
+ filename = 'test_file'
+ function_data = b'This is my file'
+ full_path = os.path.join(rootdir, filename)
+
+ with open(full_path, 'wb') as f:
+ f.write(function_data)
+
+ with open(full_path, 'rb') as f:
+ self.service_connection.upload_function(
+ function_name='my-function',
+ function_zip=f,
+ role='myrole',
+ handler='myhandler',
+ mode='event',
+ runtime='nodejs'
+ )
+ self.assertEqual(self.actual_request.body.read(),
+ function_data)
+ self.assertEqual(
+ self.actual_request.headers['Content-Length'],
+ str(len(function_data))
+ )
+ self.assertEqual(
+ self.actual_request.path,
+ '/2014-11-13/functions/my-function?Handler=myhandler&Mode'
+ '=event&Role=myrole&Runtime=nodejs'
+ )
+
+ def test_upload_function_unseekable_file_no_tell(self):
+ sock = socket.socket()
+ with self.assertRaises(TypeError):
+ self.service_connection.upload_function(
+ function_name='my-function',
+ function_zip=sock,
+ role='myrole',
+ handler='myhandler',
+ mode='event',
+ runtime='nodejs'
+ )
+
+ def test_upload_function_unseekable_file_cannot_tell(self):
+ mock_file = mock.Mock()
+ mock_file.tell.side_effect = IOError
+ with self.assertRaises(TypeError):
+ self.service_connection.upload_function(
+ function_name='my-function',
+ function_zip=mock_file,
+ role='myrole',
+ handler='myhandler',
+ mode='event',
+ runtime='nodejs'
+ )
diff --git a/tests/unit/kinesis/__init__.py b/tests/unit/kinesis/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/unit/kinesis/__init__.py
diff --git a/tests/unit/kinesis/test_kinesis.py b/tests/unit/kinesis/test_kinesis.py
new file mode 100644
index 00000000..6ad8adf9
--- /dev/null
+++ b/tests/unit/kinesis/test_kinesis.py
@@ -0,0 +1,74 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from boto.compat import json
+from boto.kinesis.layer1 import KinesisConnection
+from tests.unit import AWSMockServiceTestCase
+
+
+class TestKinesis(AWSMockServiceTestCase):
+ connection_class = KinesisConnection
+
+ def default_body(self):
+ return b'{}'
+
+ def test_put_record_binary(self):
+ self.set_http_response(status_code=200)
+ self.service_connection.put_record('stream-name',
+ b'\x00\x01\x02\x03\x04\x05', 'partition-key')
+
+ body = json.loads(self.actual_request.body)
+ self.assertEqual(body['Data'], 'AAECAwQF')
+
+ target = self.actual_request.headers['X-Amz-Target']
+ self.assertTrue('PutRecord' in target)
+
+ def test_put_record_string(self):
+ self.set_http_response(status_code=200)
+ self.service_connection.put_record('stream-name',
+ 'data', 'partition-key')
+
+ body = json.loads(self.actual_request.body)
+ self.assertEqual(body['Data'], 'ZGF0YQ==')
+
+ target = self.actual_request.headers['X-Amz-Target']
+ self.assertTrue('PutRecord' in target)
+
+ def test_put_records(self):
+ self.set_http_response(status_code=200)
+ record_binary = {
+ 'Data': b'\x00\x01\x02\x03\x04\x05',
+ 'PartitionKey': 'partition-key'
+ }
+ record_str = {
+ 'Data': 'data',
+ 'PartitionKey': 'partition-key'
+ }
+ self.service_connection.put_records(stream_name='stream-name',
+ records=[record_binary, record_str])
+
+ body = json.loads(self.actual_request.body)
+ self.assertEqual(body['Records'][0]['Data'], 'AAECAwQF')
+ self.assertEqual(body['Records'][1]['Data'], 'ZGF0YQ==')
+
+ target = self.actual_request.headers['X-Amz-Target']
+ self.assertTrue('PutRecords' in target)
diff --git a/tests/unit/kms/__init__.py b/tests/unit/kms/__init__.py
new file mode 100644
index 00000000..70cc23fe
--- /dev/null
+++ b/tests/unit/kms/__init__.py
@@ -0,0 +1,21 @@
+# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
diff --git a/tests/unit/kms/test_kms.py b/tests/unit/kms/test_kms.py
new file mode 100644
index 00000000..c46e831a
--- /dev/null
+++ b/tests/unit/kms/test_kms.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from boto.compat import json
+from boto.kms.layer1 import KMSConnection
+from tests.unit import AWSMockServiceTestCase
+
+
+class TestKMS(AWSMockServiceTestCase):
+ connection_class = KMSConnection
+
+ def default_body(self):
+ return b'{}'
+
+ def test_binary_input(self):
+ """
+ This test ensures that binary is base64 encoded when it is sent to
+ the service.
+ """
+ self.set_http_response(status_code=200)
+ data = b'\x00\x01\x02\x03\x04\x05'
+ self.service_connection.encrypt(key_id='foo', plaintext=data)
+ body = json.loads(self.actual_request.body)
+ self.assertEqual(body['Plaintext'], 'AAECAwQF')
+
+ def test_non_binary_input_for_blobs_fails(self):
+ """
+ This test ensures that only binary is used for blob type parameters.
+ """
+ self.set_http_response(status_code=200)
+ data = u'\u00e9'
+ with self.assertRaises(TypeError):
+ self.service_connection.encrypt(key_id='foo', plaintext=data)
+
+ def test_binary_output(self):
+ """
+ This test ensures that the output is base64 decoded before
+ it is returned to the user.
+ """
+ content = {'Plaintext': 'AAECAwQF'}
+ self.set_http_response(status_code=200,
+ body=json.dumps(content).encode('utf-8'))
+ response = self.service_connection.decrypt(b'some arbitrary value')
+ self.assertEqual(response['Plaintext'], b'\x00\x01\x02\x03\x04\x05')
diff --git a/tests/unit/sqs/test_message.py b/tests/unit/sqs/test_message.py
index cc63cfa1..b026a2af 100644
--- a/tests/unit/sqs/test_message.py
+++ b/tests/unit/sqs/test_message.py
@@ -23,6 +23,7 @@ from tests.unit import unittest
from boto.sqs.message import MHMessage
from boto.sqs.message import RawMessage
+from boto.sqs.message import Message
from boto.sqs.bigmessage import BigMessage
from boto.exception import SQSDecodeError
@@ -69,6 +70,20 @@ class TestEncodeMessage(unittest.TestCase):
self.assertEquals(message.id, sample_value)
self.assertEquals(message.receipt_handle, sample_value)
+ @attr(sqs=True)
+ def test_encode_bytes_message(self):
+ message = Message()
+ body = b'\x00\x01\x02\x03\x04\x05'
+ message.set_body(body)
+ self.assertEqual(message.get_body_encoded(), 'AAECAwQF')
+
+ @attr(sqs=True)
+ def test_encode_string_message(self):
+ message = Message()
+ body = 'hello world'
+ message.set_body(body)
+ self.assertEqual(message.get_body_encoded(), 'aGVsbG8gd29ybGQ=')
+
class TestBigMessage(unittest.TestCase):