summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--README.rst5
-rw-r--r--boto/__init__.py25
-rw-r--r--boto/cloudtrail/__init__.py48
-rw-r--r--boto/cloudtrail/exceptions.py86
-rw-r--r--boto/cloudtrail/layer1.py309
-rw-r--r--boto/gs/resumable_upload_handler.py2
-rw-r--r--boto/redshift/exceptions.py269
-rw-r--r--boto/redshift/layer1.py941
-rw-r--r--docs/source/index.rst2
-rw-r--r--docs/source/ref/cloudtrail.rst26
-rw-r--r--docs/source/releasenotes/v2.17.0.rst21
-rw-r--r--setup.py2
-rw-r--r--tests/integration/cloudtrail/__init__.py0
-rw-r--r--tests/integration/cloudtrail/test_cloudtrail.py91
-rw-r--r--tests/unit/cloudtrail/__init__.py0
-rw-r--r--tests/unit/cloudtrail/test_layer1.py79
16 files changed, 1807 insertions, 99 deletions
diff --git a/README.rst b/README.rst
index b5bdab08..0cbaee4e 100644
--- a/README.rst
+++ b/README.rst
@@ -1,9 +1,9 @@
####
boto
####
-boto 2.16.0
+boto 2.17.0
-Released: 8-November-2013
+Released: 14-November-2013
.. image:: https://travis-ci.org/boto/boto.png?branch=develop
:target: https://travis-ci.org/boto/boto
@@ -42,6 +42,7 @@ At the moment, boto supports:
* AWS CloudFormation
* AWS Data Pipeline
* AWS Opsworks
+ * AWS CloudTrail
* Identity & Access
diff --git a/boto/__init__.py b/boto/__init__.py
index 9c4c4ba2..786f0f85 100644
--- a/boto/__init__.py
+++ b/boto/__init__.py
@@ -36,7 +36,7 @@ import logging.config
import urlparse
from boto.exception import InvalidUriError
-__version__ = '2.16.0'
+__version__ = '2.17.0'
Version = __version__ # for backware compatibility
UserAgent = 'Boto/%s Python/%s %s/%s' % (
@@ -721,6 +721,29 @@ def connect_support(aws_access_key_id=None,
)
+def connect_cloudtrail(aws_access_key_id=None,
+ aws_secret_access_key=None,
+ **kwargs):
+ """
+ Connect to AWS CloudTrail
+
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.cloudtrail.layer1.CloudTrailConnection`
+    :return: A connection to the AWS CloudTrail service
+ """
+ from boto.cloudtrail.layer1 import CloudTrailConnection
+ return CloudTrailConnection(
+ aws_access_key_id=aws_access_key_id,
+ aws_secret_access_key=aws_secret_access_key,
+ **kwargs
+ )
+
+
def storage_uri(uri_str, default_scheme='file', debug=0, validate=True,
bucket_storage_uri_class=BucketStorageUri,
suppress_consec_slashes=True, is_latest=False):
diff --git a/boto/cloudtrail/__init__.py b/boto/cloudtrail/__init__.py
new file mode 100644
index 00000000..836f57fc
--- /dev/null
+++ b/boto/cloudtrail/__init__.py
@@ -0,0 +1,48 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.regioninfo import RegionInfo
+
+
+def regions():
+ """
+ Get all available regions for the AWS Cloudtrail service.
+
+ :rtype: list
+ :return: A list of :class:`boto.regioninfo.RegionInfo`
+ """
+ from boto.cloudtrail.layer1 import CloudTrailConnection
+
+ return [RegionInfo(name='us-east-1',
+ endpoint='cloudtrail.us-east-1.amazonaws.com',
+ connection_cls=CloudTrailConnection),
+ RegionInfo(name='us-west-2',
+ endpoint='cloudtrail.us-west-2.amazonaws.com',
+ connection_cls=CloudTrailConnection),
+ ]
+
+
+def connect_to_region(region_name, **kw_params):
+ for region in regions():
+ if region.name == region_name:
+ return region.connect(**kw_params)
+ return None
diff --git a/boto/cloudtrail/exceptions.py b/boto/cloudtrail/exceptions.py
new file mode 100644
index 00000000..35c5c3d3
--- /dev/null
+++ b/boto/cloudtrail/exceptions.py
@@ -0,0 +1,86 @@
+"""
+Exceptions that are specific to the cloudtrail module.
+"""
+from boto.exception import BotoServerError
+
+
+class InvalidSnsTopicNameException(BotoServerError):
+ """
+ Raised when an invalid SNS topic name is passed to Cloudtrail.
+ """
+ pass
+
+
+class InvalidS3BucketNameException(BotoServerError):
+ """
+ Raised when an invalid S3 bucket name is passed to Cloudtrail.
+ """
+ pass
+
+
+class TrailAlreadyExistsException(BotoServerError):
+ """
+ Raised when the given trail name already exists.
+ """
+ pass
+
+class InsufficientSnsTopicPolicyException(BotoServerError):
+ """
+ Raised when the SNS topic does not allow Cloudtrail to post
+ messages.
+ """
+ pass
+
+class InvalidTrailNameException(BotoServerError):
+ """
+ Raised when the trail name is invalid.
+ """
+ pass
+
+class InternalErrorException(BotoServerError):
+ """
+ Raised when there was an internal Cloudtrail error.
+ """
+ pass
+
+class TrailNotFoundException(BotoServerError):
+ """
+ Raised when the given trail name is not found.
+ """
+ pass
+
+
+class S3BucketDoesNotExistException(BotoServerError):
+ """
+ Raised when the given S3 bucket does not exist.
+ """
+ pass
+
+
+class TrailNotProvidedException(BotoServerError):
+ """
+ Raised when no trail name was provided.
+ """
+ pass
+
+
+class InvalidS3PrefixException(BotoServerError):
+ """
+ Raised when an invalid key prefix is given.
+ """
+ pass
+
+
+class MaximumNumberOfTrailsExceededException(BotoServerError):
+ """
+ Raised when no more trails can be created.
+ """
+ pass
+
+
+class InsufficientS3BucketPolicyException(BotoServerError):
+ """
+ Raised when the S3 bucket does not allow Cloudtrail to
+ write files into the prefix.
+ """
+ pass
diff --git a/boto/cloudtrail/layer1.py b/boto/cloudtrail/layer1.py
new file mode 100644
index 00000000..e1e21453
--- /dev/null
+++ b/boto/cloudtrail/layer1.py
@@ -0,0 +1,309 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+import boto
+from boto.connection import AWSQueryConnection
+from boto.regioninfo import RegionInfo
+from boto.exception import JSONResponseError
+from boto.cloudtrail import exceptions
+
+
+class CloudTrailConnection(AWSQueryConnection):
+ """
+ AWS Cloud Trail
+ This is the CloudTrail API Reference. It provides descriptions of
+ actions, data types, common parameters, and common errors for
+ CloudTrail.
+
+ CloudTrail is a web service that records AWS API calls for your
+ AWS account and delivers log files to an Amazon S3 bucket. The
+ recorded information includes the identity of the user, the start
+ time of the event, the source IP address, the request parameters,
+ and the response elements returned by the service.
+
+ As an alternative to using the API, you can use one of the AWS
+ SDKs, which consist of libraries and sample code for various
+ programming languages and platforms (Java, Ruby, .NET, iOS,
+ Android, etc.). The SDKs provide a convenient way to create
+ programmatic access to AWSCloudTrail. For example, the SDKs take
+ care of cryptographically signing requests, managing errors, and
+ retrying requests automatically. For information about the AWS
+ SDKs, including how to download and install them, see the Tools
+ for Amazon Web Services page.
+
+ See the CloudTrail User Guide for information about the data that
+ is included with each event listed in the log files.
+ """
+ APIVersion = "2013-11-01"
+ DefaultRegionName = "us-east-1"
+ DefaultRegionEndpoint = "cloudtrail.us-east-1.amazonaws.com"
+ ServiceName = "CloudTrail"
+ TargetPrefix = "com.amazonaws.cloudtrail.v20131101.CloudTrail_20131101"
+ ResponseError = JSONResponseError
+
+ _faults = {
+ "InvalidSnsTopicNameException": exceptions.InvalidSnsTopicNameException,
+ "InvalidS3BucketNameException": exceptions.InvalidS3BucketNameException,
+ "TrailAlreadyExistsException": exceptions.TrailAlreadyExistsException,
+ "InsufficientSnsTopicPolicyException": exceptions.InsufficientSnsTopicPolicyException,
+ "InvalidTrailNameException": exceptions.InvalidTrailNameException,
+ "InternalErrorException": exceptions.InternalErrorException,
+ "TrailNotFoundException": exceptions.TrailNotFoundException,
+ "S3BucketDoesNotExistException": exceptions.S3BucketDoesNotExistException,
+ "TrailNotProvidedException": exceptions.TrailNotProvidedException,
+ "InvalidS3PrefixException": exceptions.InvalidS3PrefixException,
+ "MaximumNumberOfTrailsExceededException": exceptions.MaximumNumberOfTrailsExceededException,
+ "InsufficientS3BucketPolicyException": exceptions.InsufficientS3BucketPolicyException,
+ }
+
+
+ def __init__(self, **kwargs):
+ region = kwargs.pop('region', None)
+ if not region:
+ region = RegionInfo(self, self.DefaultRegionName,
+ self.DefaultRegionEndpoint)
+
+ if 'host' not in kwargs:
+ kwargs['host'] = region.endpoint
+
+ AWSQueryConnection.__init__(self, **kwargs)
+ self.region = region
+
+ def _required_auth_capability(self):
+ return ['hmac-v4']
+
+ def create_trail(self, trail=None):
+ """
+ From the command line, use create-subscription.
+
+ Creates a trail that specifies the settings for delivery of
+ log data to an Amazon S3 bucket. The request includes a Trail
+ structure that specifies the following:
+
+
+ + Trail name.
+ + The name of the Amazon S3 bucket to which CloudTrail
+ delivers your log files.
+ + The name of the Amazon S3 key prefix that precedes each log
+ file.
+ + The name of the Amazon SNS topic that notifies you that a
+ new file is available in your bucket.
+ + Whether the log file should include events from global
+ services. Currently, the only events included in CloudTrail
+ log files are from IAM and AWS STS.
+
+
+ Returns the appropriate HTTP status code if successful. If
+ not, it returns either one of the CommonErrors or a
+ FrontEndException with one of the following error codes:
+
+ **MaximumNumberOfTrailsExceeded**
+
+ An attempt was made to create more trails than allowed. You
+ can only create one trail for each account in each region.
+
+ **TrailAlreadyExists**
+
+    An attempt was made to create a trail with a name that already
+    exists.
+
+ **S3BucketDoesNotExist**
+
+ Specified Amazon S3 bucket does not exist.
+
+ **InsufficientS3BucketPolicy**
+
+ Policy on Amazon S3 bucket does not permit CloudTrail to write
+    to your bucket. See the AWS CloudTrail User Guide for the
+    required bucket policy.
+
+ **InsufficientSnsTopicPolicy**
+
+ The policy on Amazon SNS topic does not permit CloudTrail to
+ write to it. Can also occur when an Amazon SNS topic does not
+ exist.
+
+ :type trail: dict
+ :param trail: Contains the Trail structure that specifies the settings
+ for each trail.
+
+ """
+ params = {}
+ if trail is not None:
+ params['trail'] = trail
+ return self.make_request(action='CreateTrail',
+ body=json.dumps(params))
+
+ def delete_trail(self, name=None):
+ """
+ Deletes a trail.
+
+ :type name: string
+ :param name: The name of a trail to be deleted.
+
+ """
+ params = {}
+ if name is not None:
+ params['Name'] = name
+ return self.make_request(action='DeleteTrail',
+ body=json.dumps(params))
+
+ def describe_trails(self, trail_name_list=None):
+ """
+ Retrieves the settings for some or all trails associated with
+ an account. Returns a list of Trail structures in JSON format.
+
+ :type trail_name_list: list
+ :param trail_name_list: The list of Trail object names.
+
+ """
+ params = {}
+ if trail_name_list is not None:
+ params['trailNameList'] = trail_name_list
+ return self.make_request(action='DescribeTrails',
+ body=json.dumps(params))
+
+ def get_trail_status(self, name=None):
+ """
+ Returns GetTrailStatusResult, which contains a JSON-formatted
+ list of information about the trail specified in the request.
+ JSON fields include information such as delivery errors,
+ Amazon SNS and Amazon S3 errors, and times that logging
+ started and stopped for each trail.
+
+ :type name: string
+ :param name: The name of the trail for which you are requesting the
+ current status.
+
+ """
+ params = {}
+ if name is not None:
+ params['Name'] = name
+ return self.make_request(action='GetTrailStatus',
+ body=json.dumps(params))
+
+ def start_logging(self, name=None):
+ """
+ Starts the processing of recording user activity events and
+ log file delivery for a trail.
+
+ :type name: string
+ :param name: The name of the Trail for which CloudTrail logs events.
+
+ """
+ params = {}
+ if name is not None:
+ params['Name'] = name
+ return self.make_request(action='StartLogging',
+ body=json.dumps(params))
+
+ def stop_logging(self, name=None):
+ """
+ Suspends the recording of user activity events and log file
+ delivery for the specified trail. Under most circumstances,
+ there is no need to use this action. You can update a trail
+ without stopping it first. This action is the only way to stop
+ logging activity.
+
+ :type name: string
+ :param name: Communicates to CloudTrail the name of the Trail for which
+ to stop logging events.
+
+ """
+ params = {}
+ if name is not None:
+ params['Name'] = name
+ return self.make_request(action='StopLogging',
+ body=json.dumps(params))
+
+ def update_trail(self, trail=None):
+ """
+ From the command line, use update-subscription.
+
+ Updates the settings that specify delivery of log files.
+ Changes to a trail do not require stopping the CloudTrail
+ service. You can use this action to designate an existing
+ bucket for log delivery, or to create a new bucket and prefix.
+ If the existing bucket has previously been a target for
+ CloudTrail log files, an IAM policy exists for the bucket. If
+ you create a new bucket using UpdateTrail, you need to apply
+ the policy to the bucket using one of the means provided by
+ the Amazon S3 service.
+
+ The request includes a Trail structure that specifies the
+ following:
+
+
+ + Trail name.
+ + The name of the Amazon S3 bucket to which CloudTrail
+ delivers your log files.
+ + The name of the Amazon S3 key prefix that precedes each log
+ file.
+ + The name of the Amazon SNS topic that notifies you that a
+ new file is available in your bucket.
+ + Whether the log file should include events from global
+ services, such as IAM or AWS STS.
+
+ **CreateTrail** returns the appropriate HTTP status code if
+ successful. If not, it returns either one of the common errors
+ or one of the exceptions listed at the end of this page.
+
+ :type trail: dict
+ :param trail: Represents the Trail structure that contains the
+ CloudTrail setting for an account.
+
+ """
+ params = {}
+ if trail is not None:
+ params['trail'] = trail
+ return self.make_request(action='UpdateTrail',
+ body=json.dumps(params))
+
+ def make_request(self, action, body):
+ headers = {
+ 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
+ 'Host': self.region.endpoint,
+ 'Content-Type': 'application/x-amz-json-1.1',
+ 'Content-Length': str(len(body)),
+ }
+ http_request = self.build_base_http_request(
+ method='POST', path='/', auth_path='/', params={},
+ headers=headers, data=body)
+ response = self._mexe(http_request, sender=None,
+ override_num_retries=10)
+ response_body = response.read()
+ boto.log.debug(response_body)
+ if response.status == 200:
+ if response_body:
+ return json.loads(response_body)
+ else:
+ json_body = json.loads(response_body)
+ fault_name = json_body.get('__type', None)
+ exception_class = self._faults.get(fault_name, self.ResponseError)
+ raise exception_class(response.status, response.reason,
+ body=json_body)
+
diff --git a/boto/gs/resumable_upload_handler.py b/boto/gs/resumable_upload_handler.py
index e1b74347..d3d86297 100644
--- a/boto/gs/resumable_upload_handler.py
+++ b/boto/gs/resumable_upload_handler.py
@@ -482,7 +482,7 @@ class ResumableUploadHandler(object):
# pool connections) because httplib requires a new HTTP connection per
# transaction. (Without this, calling http_conn.getresponse() would get
# "ResponseNotReady".)
- http_conn = conn.new_http_connection(self.tracker_uri_host,
+ http_conn = conn.new_http_connection(self.tracker_uri_host, conn.port,
conn.is_secure)
http_conn.set_debuglevel(conn.debug)
diff --git a/boto/redshift/exceptions.py b/boto/redshift/exceptions.py
index b4f60dd8..0457dcd1 100644
--- a/boto/redshift/exceptions.py
+++ b/boto/redshift/exceptions.py
@@ -188,3 +188,272 @@ class AccessToSnapshotDeniedFault(JSONResponseError):
class UnauthorizedOperationFault(JSONResponseError):
pass
+
+
+class SnapshotCopyAlreadyDisabled(JSONResponseError):
+ pass
+
+
+class ClusterNotFound(JSONResponseError):
+ pass
+
+
+class UnknownSnapshotCopyRegion(JSONResponseError):
+ pass
+
+
+class InvalidClusterSubnetState(JSONResponseError):
+ pass
+
+
+class ReservedNodeQuotaExceeded(JSONResponseError):
+ pass
+
+
+class InvalidClusterState(JSONResponseError):
+ pass
+
+
+class HsmClientCertificateQuotaExceeded(JSONResponseError):
+ pass
+
+
+class SubscriptionCategoryNotFound(JSONResponseError):
+ pass
+
+
+class HsmClientCertificateNotFound(JSONResponseError):
+ pass
+
+
+class SubscriptionEventIdNotFound(JSONResponseError):
+ pass
+
+
+class ClusterSecurityGroupAlreadyExists(JSONResponseError):
+ pass
+
+
+class HsmConfigurationAlreadyExists(JSONResponseError):
+ pass
+
+
+class NumberOfNodesQuotaExceeded(JSONResponseError):
+ pass
+
+
+class ReservedNodeOfferingNotFound(JSONResponseError):
+ pass
+
+
+class BucketNotFound(JSONResponseError):
+ pass
+
+
+class InsufficientClusterCapacity(JSONResponseError):
+ pass
+
+
+class InvalidRestore(JSONResponseError):
+ pass
+
+
+class UnauthorizedOperation(JSONResponseError):
+ pass
+
+
+class ClusterQuotaExceeded(JSONResponseError):
+ pass
+
+
+class InvalidVPCNetworkState(JSONResponseError):
+ pass
+
+
+class ClusterSnapshotNotFound(JSONResponseError):
+ pass
+
+
+class AuthorizationQuotaExceeded(JSONResponseError):
+ pass
+
+
+class InvalidHsmClientCertificateState(JSONResponseError):
+ pass
+
+
+class SNSTopicArnNotFound(JSONResponseError):
+ pass
+
+
+class ResizeNotFound(JSONResponseError):
+ pass
+
+
+class ClusterSubnetGroupNotFound(JSONResponseError):
+ pass
+
+
+class SNSNoAuthorization(JSONResponseError):
+ pass
+
+
+class ClusterSnapshotQuotaExceeded(JSONResponseError):
+ pass
+
+
+class AccessToSnapshotDenied(JSONResponseError):
+ pass
+
+
+class InvalidClusterSecurityGroupState(JSONResponseError):
+ pass
+
+
+class NumberOfNodesPerClusterLimitExceeded(JSONResponseError):
+ pass
+
+
+class ClusterSubnetQuotaExceeded(JSONResponseError):
+ pass
+
+
+class SNSInvalidTopic(JSONResponseError):
+ pass
+
+
+class ClusterSecurityGroupNotFound(JSONResponseError):
+ pass
+
+
+class InvalidElasticIp(JSONResponseError):
+ pass
+
+
+class InvalidClusterParameterGroupState(JSONResponseError):
+ pass
+
+
+class InvalidHsmConfigurationState(JSONResponseError):
+ pass
+
+
+
+class ClusterAlreadyExists(JSONResponseError):
+ pass
+
+
+class HsmConfigurationQuotaExceeded(JSONResponseError):
+ pass
+
+
+class ClusterSnapshotAlreadyExists(JSONResponseError):
+ pass
+
+
+class SubscriptionSeverityNotFound(JSONResponseError):
+ pass
+
+
+class SourceNotFound(JSONResponseError):
+ pass
+
+
+class ReservedNodeAlreadyExists(JSONResponseError):
+ pass
+
+
+class ClusterSubnetGroupQuotaExceeded(JSONResponseError):
+ pass
+
+
+class ClusterParameterGroupNotFound(JSONResponseError):
+ pass
+
+
+class InvalidS3BucketName(JSONResponseError):
+ pass
+
+
+class InvalidS3KeyPrefix(JSONResponseError):
+ pass
+
+
+class SubscriptionAlreadyExist(JSONResponseError):
+ pass
+
+
+class HsmConfigurationNotFound(JSONResponseError):
+ pass
+
+
+class AuthorizationNotFound(JSONResponseError):
+ pass
+
+
+class ClusterSecurityGroupQuotaExceeded(JSONResponseError):
+ pass
+
+
+class EventSubscriptionQuotaExceeded(JSONResponseError):
+ pass
+
+
+class AuthorizationAlreadyExists(JSONResponseError):
+ pass
+
+
+class InvalidClusterSnapshotState(JSONResponseError):
+ pass
+
+
+class ClusterParameterGroupQuotaExceeded(JSONResponseError):
+ pass
+
+
+class SnapshotCopyDisabled(JSONResponseError):
+ pass
+
+
+class ClusterSubnetGroupAlreadyExists(JSONResponseError):
+ pass
+
+
+class ReservedNodeNotFound(JSONResponseError):
+ pass
+
+
+class HsmClientCertificateAlreadyExists(JSONResponseError):
+ pass
+
+
+class InvalidClusterSubnetGroupState(JSONResponseError):
+ pass
+
+
+class SubscriptionNotFound(JSONResponseError):
+ pass
+
+
+class InsufficientS3BucketPolicy(JSONResponseError):
+ pass
+
+
+class ClusterParameterGroupAlreadyExists(JSONResponseError):
+ pass
+
+
+class UnsupportedOption(JSONResponseError):
+ pass
+
+
+class CopyToRegionDisabled(JSONResponseError):
+ pass
+
+
+class SnapshotCopyAlreadyEnabled(JSONResponseError):
+ pass
+
+
+class IncompatibleOrderableOptions(JSONResponseError):
+ pass
diff --git a/boto/redshift/layer1.py b/boto/redshift/layer1.py
index 6ba3fd3d..2f5a332a 100644
--- a/boto/redshift/layer1.py
+++ b/boto/redshift/layer1.py
@@ -31,56 +31,31 @@ from boto.redshift import exceptions
class RedshiftConnection(AWSQueryConnection):
"""
Amazon Redshift **Overview**
- This is the Amazon Redshift API Reference. This guide provides
- descriptions and samples of the Amazon Redshift API.
+ This is an interface reference for Amazon Redshift. It contains
+ documentation for one of the programming or command line
+ interfaces you can use to manage Amazon Redshift clusters. Note
+ that Amazon Redshift is asynchronous, which means that some
+ interfaces may require techniques, such as polling or asynchronous
+ callback handlers, to determine when a command has been applied.
+ In this reference, the parameter descriptions indicate whether a
+ change is applied immediately, on the next instance reboot, or
+ during the next maintenance window. For a summary of the Amazon
+ Redshift cluster management interfaces, go to `Using the Amazon
+ Redshift Management Interfaces `_.
Amazon Redshift manages all the work of setting up, operating, and
scaling a data warehouse: provisioning capacity, monitoring and
backing up the cluster, and applying patches and upgrades to the
Amazon Redshift engine. You can focus on using your data to
acquire new insights for your business and customers.
- **Are You a First-Time Amazon Redshift User?**
- If you are a first-time user of Amazon Redshift, we recommend that
- you begin by reading the following sections:
-
-
-
- + Service Highlights and Pricing - The `product detail page`_
- provides the Amazon Redshift value proposition, service highlights
- and pricing.
- + Getting Started - The `Getting Started Guide`_ includes an
- example that walks you through the process of creating a cluster,
- creating database tables, uploading data, and testing queries.
-
-
-
- After you complete the Getting Started Guide, we recommend that
- you explore one of the following guides:
-
-
- + Cluster Management - If you are responsible for managing Amazon
- Redshift clusters, the `Cluster Management Guide`_ shows you how
- to create and manage Amazon Redshift clusters. If you are an
- application developer, you can use the Amazon Redshift Query API
- to manage clusters programmatically. Additionally, the AWS SDK
- libraries that wrap the underlying Amazon Redshift API simplify
- your programming tasks. If you prefer a more interactive way of
- managing clusters, you can use the Amazon Redshift console and the
- AWS command line interface (AWS CLI). For information about the
- API and CLI, go to the following manuals :
-
- + API Reference ( this document )
- + `CLI Reference`_
-
- + Amazon Redshift Database Database Developer - If you are a
- database developer, the Amazon Redshift `Database Developer
- Guide`_ explains how to design, build, query, and maintain the
- databases that make up your data warehouse.
+    If you are a first-time user of Amazon Redshift, we recommend that
+    you begin by reading the `Amazon Redshift Getting Started
+    Guide`_
- For a list of supported AWS regions where you can provision a
- cluster, go to the `Regions and Endpoints`_ section in the Amazon
- Web Services Glossary .
+ If you are a database developer, the `Amazon Redshift Database
+ Developer Guide`_ explains how to design, build, query, and
+ maintain the databases that make up your data warehouse.
"""
APIVersion = "2012-12-01"
DefaultRegionName = "us-east-1"
@@ -88,48 +63,75 @@ class RedshiftConnection(AWSQueryConnection):
ResponseError = JSONResponseError
_faults = {
- "ClusterNotFound": exceptions.ClusterNotFoundFault,
- "InvalidClusterSubnetState": exceptions.InvalidClusterSubnetStateFault,
- "InvalidClusterParameterGroupState": exceptions.InvalidClusterParameterGroupStateFault,
- "ReservedNodeQuotaExceeded": exceptions.ReservedNodeQuotaExceededFault,
- "InvalidClusterState": exceptions.InvalidClusterStateFault,
- "InvalidRestore": exceptions.InvalidRestoreFault,
- "ClusterSecurityGroupAlreadyExists": exceptions.ClusterSecurityGroupAlreadyExistsFault,
- "NumberOfNodesQuotaExceeded": exceptions.NumberOfNodesQuotaExceededFault,
- "ReservedNodeOfferingNotFound": exceptions.ReservedNodeOfferingNotFoundFault,
- "InsufficientClusterCapacity": exceptions.InsufficientClusterCapacityFault,
- "UnauthorizedOperation": exceptions.UnauthorizedOperationFault,
- "ClusterQuotaExceeded": exceptions.ClusterQuotaExceededFault,
- "InvalidVPCNetworkState": exceptions.InvalidVPCNetworkStateFault,
- "ClusterSnapshotNotFound": exceptions.ClusterSnapshotNotFoundFault,
- "AuthorizationQuotaExceeded": exceptions.AuthorizationQuotaExceededFault,
- "InvalidSubne": exceptions.InvalidSubnet,
- "ResizeNotFound": exceptions.ResizeNotFoundFault,
- "ClusterSubnetGroupNotFound": exceptions.ClusterSubnetGroupNotFoundFault,
- "ClusterSnapshotQuotaExceeded": exceptions.ClusterSnapshotQuotaExceededFault,
- "AccessToSnapshotDenied": exceptions.AccessToSnapshotDeniedFault,
- "InvalidClusterSecurityGroupState": exceptions.InvalidClusterSecurityGroupStateFault,
- "NumberOfNodesPerClusterLimitExceeded": exceptions.NumberOfNodesPerClusterLimitExceededFault,
- "ClusterSubnetQuotaExceeded": exceptions.ClusterSubnetQuotaExceededFault,
- "UnsupportedOption": exceptions.UnsupportedOptionFault,
- "ClusterSecurityGroupNotFound": exceptions.ClusterSecurityGroupNotFoundFault,
- "ClusterAlreadyExists": exceptions.ClusterAlreadyExistsFault,
- "ClusterSnapshotAlreadyExists": exceptions.ClusterSnapshotAlreadyExistsFault,
- "ReservedNodeAlreadyExists": exceptions.ReservedNodeAlreadyExistsFault,
- "ClusterSubnetGroupQuotaExceeded": exceptions.ClusterSubnetGroupQuotaExceededFault,
- "ClusterParameterGroupNotFound": exceptions.ClusterParameterGroupNotFoundFault,
- "AuthorizationNotFound": exceptions.AuthorizationNotFoundFault,
- "ClusterSecurityGroupQuotaExceeded": exceptions.ClusterSecurityGroupQuotaExceededFault,
- "AuthorizationAlreadyExists": exceptions.AuthorizationAlreadyExistsFault,
- "InvalidClusterSnapshotState": exceptions.InvalidClusterSnapshotStateFault,
- "ClusterParameterGroupQuotaExceeded": exceptions.ClusterParameterGroupQuotaExceededFault,
- "ClusterSubnetGroupAlreadyExists": exceptions.ClusterSubnetGroupAlreadyExistsFault,
- "ReservedNodeNotFound": exceptions.ReservedNodeNotFoundFault,
- "InvalidClusterSubnetGroupState": exceptions.InvalidClusterSubnetGroupStateFault,
- "ClusterParameterGroupAlreadyExists": exceptions.ClusterParameterGroupAlreadyExistsFault,
+ "SnapshotCopyAlreadyDisabled": exceptions.SnapshotCopyAlreadyDisabled,
+ "ClusterNotFound": exceptions.ClusterNotFound,
+ "UnknownSnapshotCopyRegion": exceptions.UnknownSnapshotCopyRegion,
+ "InvalidClusterSubnetState": exceptions.InvalidClusterSubnetState,
+ "InvalidSubnet": exceptions.InvalidSubnet,
+ "ReservedNodeQuotaExceeded": exceptions.ReservedNodeQuotaExceeded,
+ "InvalidClusterState": exceptions.InvalidClusterState,
+ "HsmClientCertificateQuotaExceeded": exceptions.HsmClientCertificateQuotaExceeded,
+ "SubscriptionCategoryNotFound": exceptions.SubscriptionCategoryNotFound,
+ "HsmClientCertificateNotFound": exceptions.HsmClientCertificateNotFound,
+ "SubscriptionEventIdNotFound": exceptions.SubscriptionEventIdNotFound,
+ "ClusterSecurityGroupAlreadyExists": exceptions.ClusterSecurityGroupAlreadyExists,
+ "HsmConfigurationAlreadyExists": exceptions.HsmConfigurationAlreadyExists,
+ "NumberOfNodesQuotaExceeded": exceptions.NumberOfNodesQuotaExceeded,
+ "ReservedNodeOfferingNotFound": exceptions.ReservedNodeOfferingNotFound,
+ "BucketNotFound": exceptions.BucketNotFound,
+ "InsufficientClusterCapacity": exceptions.InsufficientClusterCapacity,
+ "InvalidRestore": exceptions.InvalidRestore,
+ "UnauthorizedOperation": exceptions.UnauthorizedOperation,
+ "ClusterQuotaExceeded": exceptions.ClusterQuotaExceeded,
+ "InvalidVPCNetworkState": exceptions.InvalidVPCNetworkState,
+ "ClusterSnapshotNotFound": exceptions.ClusterSnapshotNotFound,
+ "AuthorizationQuotaExceeded": exceptions.AuthorizationQuotaExceeded,
+ "InvalidHsmClientCertificateState": exceptions.InvalidHsmClientCertificateState,
+ "SNSTopicArnNotFound": exceptions.SNSTopicArnNotFound,
+ "ResizeNotFound": exceptions.ResizeNotFound,
+ "ClusterSubnetGroupNotFound": exceptions.ClusterSubnetGroupNotFound,
+ "SNSNoAuthorization": exceptions.SNSNoAuthorization,
+ "ClusterSnapshotQuotaExceeded": exceptions.ClusterSnapshotQuotaExceeded,
+ "AccessToSnapshotDenied": exceptions.AccessToSnapshotDenied,
+ "InvalidClusterSecurityGroupState": exceptions.InvalidClusterSecurityGroupState,
+ "NumberOfNodesPerClusterLimitExceeded": exceptions.NumberOfNodesPerClusterLimitExceeded,
+ "ClusterSubnetQuotaExceeded": exceptions.ClusterSubnetQuotaExceeded,
+ "SNSInvalidTopic": exceptions.SNSInvalidTopic,
+ "ClusterSecurityGroupNotFound": exceptions.ClusterSecurityGroupNotFound,
+ "InvalidElasticIp": exceptions.InvalidElasticIp,
+ "InvalidClusterParameterGroupState": exceptions.InvalidClusterParameterGroupState,
+ "InvalidHsmConfigurationState": exceptions.InvalidHsmConfigurationState,
+ "ClusterAlreadyExists": exceptions.ClusterAlreadyExists,
+ "HsmConfigurationQuotaExceeded": exceptions.HsmConfigurationQuotaExceeded,
+ "ClusterSnapshotAlreadyExists": exceptions.ClusterSnapshotAlreadyExists,
+ "SubscriptionSeverityNotFound": exceptions.SubscriptionSeverityNotFound,
+ "SourceNotFound": exceptions.SourceNotFound,
+ "ReservedNodeAlreadyExists": exceptions.ReservedNodeAlreadyExists,
+ "ClusterSubnetGroupQuotaExceeded": exceptions.ClusterSubnetGroupQuotaExceeded,
+ "ClusterParameterGroupNotFound": exceptions.ClusterParameterGroupNotFound,
+ "InvalidS3BucketName": exceptions.InvalidS3BucketName,
+ "InvalidS3KeyPrefix": exceptions.InvalidS3KeyPrefix,
+ "SubscriptionAlreadyExist": exceptions.SubscriptionAlreadyExist,
+ "HsmConfigurationNotFound": exceptions.HsmConfigurationNotFound,
+ "AuthorizationNotFound": exceptions.AuthorizationNotFound,
+ "ClusterSecurityGroupQuotaExceeded": exceptions.ClusterSecurityGroupQuotaExceeded,
"SubnetAlreadyInUse": exceptions.SubnetAlreadyInUse,
- "AccessToSnapshotDenied": exceptions.AccessToSnapshotDeniedFault,
- "UnauthorizedOperation": exceptions.UnauthorizedOperationFault,
+ "EventSubscriptionQuotaExceeded": exceptions.EventSubscriptionQuotaExceeded,
+ "AuthorizationAlreadyExists": exceptions.AuthorizationAlreadyExists,
+ "InvalidClusterSnapshotState": exceptions.InvalidClusterSnapshotState,
+ "ClusterParameterGroupQuotaExceeded": exceptions.ClusterParameterGroupQuotaExceeded,
+ "SnapshotCopyDisabled": exceptions.SnapshotCopyDisabled,
+ "ClusterSubnetGroupAlreadyExists": exceptions.ClusterSubnetGroupAlreadyExists,
+ "ReservedNodeNotFound": exceptions.ReservedNodeNotFound,
+ "HsmClientCertificateAlreadyExists": exceptions.HsmClientCertificateAlreadyExists,
+ "InvalidClusterSubnetGroupState": exceptions.InvalidClusterSubnetGroupState,
+ "SubscriptionNotFound": exceptions.SubscriptionNotFound,
+ "InsufficientS3BucketPolicy": exceptions.InsufficientS3BucketPolicy,
+ "ClusterParameterGroupAlreadyExists": exceptions.ClusterParameterGroupAlreadyExists,
+ "UnsupportedOption": exceptions.UnsupportedOption,
+ "CopyToRegionDisabled": exceptions.CopyToRegionDisabled,
+ "SnapshotCopyAlreadyEnabled": exceptions.SnapshotCopyAlreadyEnabled,
+ "IncompatibleOrderableOptions": exceptions.IncompatibleOrderableOptions,
}
@@ -138,7 +140,8 @@ class RedshiftConnection(AWSQueryConnection):
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
- kwargs['host'] = region.endpoint
+ if 'host' not in kwargs:
+ kwargs['host'] = region.endpoint
AWSQueryConnection.__init__(self, **kwargs)
self.region = region
@@ -218,7 +221,10 @@ class RedshiftConnection(AWSQueryConnection):
is authorized to restore.
:type snapshot_cluster_identifier: string
- :param snapshot_cluster_identifier:
+ :param snapshot_cluster_identifier: The identifier of the cluster the
+ snapshot was created from. This parameter is required if your IAM
+ user has a policy containing a snapshot resource element that
+ specifies anything other than * for the cluster name.
:type account_with_restore_access: string
:param account_with_restore_access: The identifier of the AWS customer
@@ -267,6 +273,15 @@ class RedshiftConnection(AWSQueryConnection):
:type source_snapshot_cluster_identifier: string
:param source_snapshot_cluster_identifier:
+ The identifier of the cluster the source snapshot was created from.
+ This parameter is required if your IAM user has a policy containing
+ a snapshot resource element that specifies anything other than *
+ for the cluster name.
+
+ Constraints:
+
+
+ + Must be the identifier for a valid cluster.
:type target_snapshot_identifier: string
:param target_snapshot_identifier:
@@ -304,7 +319,9 @@ class RedshiftConnection(AWSQueryConnection):
automated_snapshot_retention_period=None, port=None,
cluster_version=None, allow_version_upgrade=None,
number_of_nodes=None, publicly_accessible=None,
- encrypted=None):
+ encrypted=None,
+ hsm_client_certificate_identifier=None,
+ hsm_configuration_identifier=None, elastic_ip=None):
"""
Creates a new cluster. To create the cluster in virtual
private cloud (VPC), you must provide cluster subnet group
@@ -323,7 +340,7 @@ class RedshiftConnection(AWSQueryConnection):
To create additional databases after the cluster is created, connect to
the cluster with a SQL client and use SQL commands to create a
database. For more information, go to `Create a Database`_ in the
- Amazon Redshift Developer Guide.
+ Amazon Redshift Database Developer Guide.
Default: `dev`
@@ -334,7 +351,7 @@ class RedshiftConnection(AWSQueryConnection):
+ Must contain only lowercase letters.
+ Cannot be a word that is reserved by the service. A list of reserved
words can be found in `Reserved Words`_ in the Amazon Redshift
- Developer Guide.
+ Database Developer Guide.
:type cluster_identifier: string
:param cluster_identifier: A unique identifier for the cluster. You use
@@ -382,7 +399,7 @@ class RedshiftConnection(AWSQueryConnection):
+ Must be 1 - 128 alphanumeric characters.
+ First character must be a letter.
+ Cannot be a reserved word. A list of reserved words can be found in
- `Reserved Words`_ in the Amazon Redshift Developer Guide.
+ `Reserved Words`_ in the Amazon Redshift Database Developer Guide.
:type master_user_password: string
:param master_user_password:
@@ -527,6 +544,23 @@ class RedshiftConnection(AWSQueryConnection):
:param encrypted: If `True`, the data in cluster is encrypted at rest.
Default: false
+ :type hsm_client_certificate_identifier: string
+ :param hsm_client_certificate_identifier: Specifies the name of the HSM
+ client certificate the Amazon Redshift cluster uses to retrieve the
+ data encryption keys stored in an HSM.
+
+ :type hsm_configuration_identifier: string
+ :param hsm_configuration_identifier: Specifies the name of the HSM
+ configuration that contains the information the Amazon Redshift
+ cluster can use to retrieve and store keys in an HSM.
+
+ :type elastic_ip: string
+ :param elastic_ip: The Elastic IP (EIP) address for the cluster.
+ Constraints: The cluster must be provisioned in EC2-VPC and publicly-
+ accessible through an Internet gateway. For more information about
+ provisioning clusters in EC2-VPC, go to `Supported Platforms to
+ Launch Your Cluster`_ in the Amazon Redshift Management Guide.
+
"""
params = {
'ClusterIdentifier': cluster_identifier,
@@ -571,6 +605,12 @@ class RedshiftConnection(AWSQueryConnection):
if encrypted is not None:
params['Encrypted'] = str(
encrypted).lower()
+ if hsm_client_certificate_identifier is not None:
+ params['HsmClientCertificateIdentifier'] = hsm_client_certificate_identifier
+ if hsm_configuration_identifier is not None:
+ params['HsmConfigurationIdentifier'] = hsm_configuration_identifier
+ if elastic_ip is not None:
+ params['ElasticIp'] = elastic_ip
return self._make_request(
action='CreateCluster',
verb='POST',
@@ -756,6 +796,203 @@ class RedshiftConnection(AWSQueryConnection):
verb='POST',
path='/', params=params)
+ def create_event_subscription(self, subscription_name, sns_topic_arn,
+ source_type=None, source_ids=None,
+ event_categories=None, severity=None,
+ enabled=None):
+ """
+ Creates an Amazon Redshift event notification subscription.
+ This action requires an ARN (Amazon Resource Name) of an
+ Amazon SNS topic created by either the Amazon Redshift
+ console, the Amazon SNS console, or the Amazon SNS API. To
+ obtain an ARN with Amazon SNS, you must create a topic in
+ Amazon SNS and subscribe to the topic. The ARN is displayed in
+ the SNS console.
+
+ You can specify the source type, and lists of Amazon Redshift
+ source IDs, event categories, and event severities.
+ Notifications will be sent for all events you want that match
+ those criteria. For example, you can specify source type =
+ cluster, source ID = my-cluster-1 and mycluster2, event
+ categories = Availability, Backup, and severity = ERROR. The
+ subscription will only send notifications for those ERROR
+ events in the Availability and Backup categories for the
+ specified clusters.
+
+ If you specify both the source type and source IDs, such as
+ source type = cluster and source identifier = my-cluster-1,
+ notifications will be sent for all the cluster events for my-
+ cluster-1. If you specify a source type but do not specify a
+ source identifier, you will receive notice of the events for
+ the objects of that type in your AWS account. If you do not
+ specify either the SourceType or the SourceIdentifier, you
+ will be notified of events generated from all Amazon Redshift
+ sources belonging to your AWS account. You must specify a
+ source type if you specify a source ID.
+
+ :type subscription_name: string
+ :param subscription_name:
+ The name of the event subscription to be created.
+
+ Constraints:
+
+
+ + Cannot be null, empty, or blank.
+ + Must contain from 1 to 255 alphanumeric characters or hyphens.
+ + First character must be a letter.
+ + Cannot end with a hyphen or contain two consecutive hyphens.
+
+ :type sns_topic_arn: string
+ :param sns_topic_arn: The Amazon Resource Name (ARN) of the Amazon SNS
+ topic used to transmit the event notifications. The ARN is created
+ by Amazon SNS when you create a topic and subscribe to it.
+
+ :type source_type: string
+ :param source_type: The type of source that will be generating the
+ events. For example, if you want to be notified of events generated
+ by a cluster, you would set this parameter to cluster. If this
+ value is not specified, events are returned for all Amazon Redshift
+ objects in your AWS account. You must specify a source type in
+ order to specify source IDs.
+ Valid values: cluster, cluster-parameter-group, cluster-security-group,
+ and cluster-snapshot.
+
+ :type source_ids: list
+ :param source_ids: A list of one or more identifiers of Amazon Redshift
+ source objects. All of the objects must be of the same type as was
+ specified in the source type parameter. The event subscription will
+ return only events generated by the specified objects. If not
+ specified, then events are returned for all objects within the
+ source type specified.
+ Example: my-cluster-1, my-cluster-2
+
+ Example: my-snapshot-20131010
+
+ :type event_categories: list
+ :param event_categories: Specifies the Amazon Redshift event categories
+ to be published by the event notification subscription.
+ Values: Configuration, Management, Monitoring, Security
+
+ :type severity: string
+ :param severity: Specifies the Amazon Redshift event severity to be
+ published by the event notification subscription.
+ Values: ERROR, INFO
+
+ :type enabled: boolean
+ :param enabled: A Boolean value; set to `True` to activate the
+ subscription, set to `False` to create the subscription but not
+ activate it.
+
+ """
+ params = {
+ 'SubscriptionName': subscription_name,
+ 'SnsTopicArn': sns_topic_arn,
+ }
+ if source_type is not None:
+ params['SourceType'] = source_type
+ if source_ids is not None:
+ self.build_list_params(params,
+ source_ids,
+ 'SourceIds.member')
+ if event_categories is not None:
+ self.build_list_params(params,
+ event_categories,
+ 'EventCategories.member')
+ if severity is not None:
+ params['Severity'] = severity
+ if enabled is not None:
+ params['Enabled'] = str(
+ enabled).lower()
+ return self._make_request(
+ action='CreateEventSubscription',
+ verb='POST',
+ path='/', params=params)
+
+ def create_hsm_client_certificate(self,
+ hsm_client_certificate_identifier):
+ """
+ Creates an HSM client certificate that an Amazon Redshift
+ cluster will use to connect to the client's HSM in order to
+ store and retrieve the keys used to encrypt the cluster
+ databases.
+
+ The command returns a public key, which you must store in the
+ HSM. After creating the HSM certificate, you must create an
+ Amazon Redshift HSM configuration that provides a cluster the
+ information needed to store and retrieve database encryption
+ keys in the HSM. For more information, go to aLinkToHSMTopic
+ in the Amazon Redshift Management Guide.
+
+ :type hsm_client_certificate_identifier: string
+ :param hsm_client_certificate_identifier: The identifier to be assigned
+ to the new HSM client certificate that the cluster will use to
+ connect to the HSM to retrieve the database encryption keys.
+
+ """
+ params = {
+ 'HsmClientCertificateIdentifier': hsm_client_certificate_identifier,
+ }
+ return self._make_request(
+ action='CreateHsmClientCertificate',
+ verb='POST',
+ path='/', params=params)
+
+ def create_hsm_configuration(self, hsm_configuration_identifier,
+ description, hsm_ip_address,
+ hsm_partition_name, hsm_partition_password,
+ hsm_server_public_certificate):
+ """
+ Creates an HSM configuration that contains the information
+ required by an Amazon Redshift cluster to store and retrieve
+ database encryption keys in a Hardware Storage Module (HSM).
+ After creating the HSM configuration, you can specify it as a
+ parameter when creating a cluster. The cluster will then store
+ its encryption keys in the HSM.
+
+ Before creating an HSM configuration, you must have first
+ created an HSM client certificate. For more information, go to
+ aLinkToHSMTopic in the Amazon Redshift Management Guide.
+
+ :type hsm_configuration_identifier: string
+ :param hsm_configuration_identifier: The identifier to be assigned to
+ the new Amazon Redshift HSM configuration.
+
+ :type description: string
+ :param description: A text description of the HSM configuration to be
+ created.
+
+ :type hsm_ip_address: string
+ :param hsm_ip_address: The IP address that the Amazon Redshift cluster
+ must use to access the HSM.
+
+ :type hsm_partition_name: string
+ :param hsm_partition_name: The name of the partition in the HSM where
+ the Amazon Redshift clusters will store their database encryption
+ keys.
+
+ :type hsm_partition_password: string
+ :param hsm_partition_password: The password required to access the HSM
+ partition.
+
+ :type hsm_server_public_certificate: string
+ :param hsm_server_public_certificate: The public key used to access the
+ HSM client certificate, which was created by calling the Amazon
+ Redshift create HSM certificate command.
+
+ """
+ params = {
+ 'HsmConfigurationIdentifier': hsm_configuration_identifier,
+ 'Description': description,
+ 'HsmIpAddress': hsm_ip_address,
+ 'HsmPartitionName': hsm_partition_name,
+ 'HsmPartitionPassword': hsm_partition_password,
+ 'HsmServerPublicCertificate': hsm_server_public_certificate,
+ }
+ return self._make_request(
+ action='CreateHsmConfiguration',
+ verb='POST',
+ path='/', params=params)
+
def delete_cluster(self, cluster_identifier,
skip_final_cluster_snapshot=None,
final_cluster_snapshot_identifier=None):
@@ -885,7 +1122,11 @@ class RedshiftConnection(AWSQueryConnection):
`available` state.
:type snapshot_cluster_identifier: string
- :param snapshot_cluster_identifier:
+ :param snapshot_cluster_identifier: The unique identifier of the
+ cluster the snapshot was created from. This parameter is required
+ if your IAM user has a policy containing a snapshot resource
+ element that specifies anything other than * for the cluster name.
+ Constraints: Must be the name of a valid cluster.
"""
params = {'SnapshotIdentifier': snapshot_identifier, }
@@ -913,6 +1154,56 @@ class RedshiftConnection(AWSQueryConnection):
verb='POST',
path='/', params=params)
+ def delete_event_subscription(self, subscription_name):
+ """
+ Deletes an Amazon Redshift event notification subscription.
+
+ :type subscription_name: string
+ :param subscription_name: The name of the Amazon Redshift event
+ notification subscription to be deleted.
+
+ """
+ params = {'SubscriptionName': subscription_name, }
+ return self._make_request(
+ action='DeleteEventSubscription',
+ verb='POST',
+ path='/', params=params)
+
+ def delete_hsm_client_certificate(self,
+ hsm_client_certificate_identifier):
+ """
+ Deletes the specified HSM client certificate.
+
+ :type hsm_client_certificate_identifier: string
+ :param hsm_client_certificate_identifier: The identifier of the HSM
+ client certificate to be deleted.
+
+ """
+ params = {
+ 'HsmClientCertificateIdentifier': hsm_client_certificate_identifier,
+ }
+ return self._make_request(
+ action='DeleteHsmClientCertificate',
+ verb='POST',
+ path='/', params=params)
+
+ def delete_hsm_configuration(self, hsm_configuration_identifier):
+ """
+ Deletes the specified Amazon Redshift HSM configuration.
+
+ :type hsm_configuration_identifier: string
+ :param hsm_configuration_identifier: The identifier of the Amazon
+ Redshift HSM configuration to be deleted.
+
+ """
+ params = {
+ 'HsmConfigurationIdentifier': hsm_configuration_identifier,
+ }
+ return self._make_request(
+ action='DeleteHsmConfiguration',
+ verb='POST',
+ path='/', params=params)
+
def describe_cluster_parameter_groups(self, parameter_group_name=None,
max_records=None, marker=None):
"""
@@ -1334,6 +1625,67 @@ class RedshiftConnection(AWSQueryConnection):
verb='POST',
path='/', params=params)
+ def describe_event_categories(self, source_type=None):
+ """
+ Displays a list of event categories for all event source
+ types, or for a specified source type. For a list of the event
+ categories and source types, go to `Amazon Redshift Event
+ Notifications`_.
+
+ :type source_type: string
+ :param source_type: The source type, such as cluster or parameter
+ group, to which the described event categories apply.
+ Valid values: cluster, snapshot, parameter group, and security group.
+
+ """
+ params = {}
+ if source_type is not None:
+ params['SourceType'] = source_type
+ return self._make_request(
+ action='DescribeEventCategories',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_event_subscriptions(self, subscription_name=None,
+ max_records=None, marker=None):
+ """
+ Lists descriptions of all the Amazon Redshift event
+ notification subscriptions for a customer account. If you
+ specify a subscription name, lists the description for that
+ subscription.
+
+ :type subscription_name: string
+ :param subscription_name: The name of the Amazon Redshift event
+ notification subscription to be described.
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified MaxRecords
+ value, a pagination token called a marker is included in the
+ response so that the remaining results can be retrieved.
+ Default: 100
+
+ Constraints: minimum 20, maximum 100
+
+ :type marker: string
+ :param marker: An optional pagination token provided by a previous
+ DescribeOrderableClusterOptions request. If this parameter is
+ specified, the response includes only records beyond the marker, up
+ to the value specified by MaxRecords.
+
+ """
+ params = {}
+ if subscription_name is not None:
+ params['SubscriptionName'] = subscription_name
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeEventSubscriptions',
+ verb='POST',
+ path='/', params=params)
+
def describe_events(self, source_identifier=None, source_type=None,
start_time=None, end_time=None, duration=None,
max_records=None, marker=None):
@@ -1436,6 +1788,110 @@ class RedshiftConnection(AWSQueryConnection):
verb='POST',
path='/', params=params)
+ def describe_hsm_client_certificates(self,
+ hsm_client_certificate_identifier=None,
+ max_records=None, marker=None):
+ """
+ Returns information about the specified HSM client
+ certificate. If no certificate ID is specified, returns
+ information about all the HSM certificates owned by your AWS
+ customer account.
+
+ :type hsm_client_certificate_identifier: string
+ :param hsm_client_certificate_identifier: The identifier of a specific
+ HSM client certificate for which you want information. If no
+ identifier is specified, information is returned for all HSM client
+ certificates associated with Amazon Redshift clusters owned by your
+ AWS customer account.
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified `MaxRecords`
+ value, a marker is included in the response so that the remaining
+ results may be retrieved.
+ Default: `100`
+
+ Constraints: minimum 20, maximum 100.
+
+ :type marker: string
+ :param marker: An optional marker returned from a previous
+ **DescribeOrderableClusterOptions** request. If this parameter is
+ specified, the response includes only records beyond the marker, up
+ to the value specified by `MaxRecords`.
+
+ """
+ params = {}
+ if hsm_client_certificate_identifier is not None:
+ params['HsmClientCertificateIdentifier'] = hsm_client_certificate_identifier
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeHsmClientCertificates',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_hsm_configurations(self, hsm_configuration_identifier=None,
+ max_records=None, marker=None):
+ """
+ Returns information about the specified Amazon Redshift HSM
+ configuration. If no configuration ID is specified, returns
+ information about all the HSM configurations owned by your AWS
+ customer account.
+
+ :type hsm_configuration_identifier: string
+ :param hsm_configuration_identifier: The identifier of a specific
+ Amazon Redshift HSM configuration to be described. If no identifier
+ is specified, information is returned for all HSM configurations
+ owned by your AWS customer account.
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified `MaxRecords`
+ value, a marker is included in the response so that the remaining
+ results may be retrieved.
+ Default: `100`
+
+ Constraints: minimum 20, maximum 100.
+
+ :type marker: string
+ :param marker: An optional marker returned from a previous
+ **DescribeOrderableClusterOptions** request. If this parameter is
+ specified, the response includes only records beyond the marker, up
+ to the value specified by `MaxRecords`.
+
+ """
+ params = {}
+ if hsm_configuration_identifier is not None:
+ params['HsmConfigurationIdentifier'] = hsm_configuration_identifier
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeHsmConfigurations',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_logging_status(self, cluster_identifier):
+ """
+ Describes whether information, such as queries and connection
+ attempts, is being logged for the specified Amazon Redshift
+ cluster.
+
+ :type cluster_identifier: string
+ :param cluster_identifier: The identifier of the cluster to get the
+ logging status from.
+ Example: `examplecluster`
+
+ """
+ params = {'ClusterIdentifier': cluster_identifier, }
+ return self._make_request(
+ action='DescribeLoggingStatus',
+ verb='POST',
+ path='/', params=params)
+
def describe_orderable_cluster_options(self, cluster_version=None,
node_type=None, max_records=None,
marker=None):
@@ -1607,6 +2063,132 @@ class RedshiftConnection(AWSQueryConnection):
verb='POST',
path='/', params=params)
+ def disable_logging(self, cluster_identifier):
+ """
+ Stops logging information, such as queries and connection
+ attempts, for the specified Amazon Redshift cluster.
+
+ :type cluster_identifier: string
+ :param cluster_identifier: The identifier of the cluster on which
+ logging is to be stopped.
+ Example: `examplecluster`
+
+ """
+ params = {'ClusterIdentifier': cluster_identifier, }
+ return self._make_request(
+ action='DisableLogging',
+ verb='POST',
+ path='/', params=params)
+
+ def disable_snapshot_copy(self, cluster_identifier):
+ """
+ Disables the automatic copying of snapshots from one region to
+ another region for a specified cluster.
+
+ :type cluster_identifier: string
+ :param cluster_identifier: The unique identifier of the source cluster
+ that you want to disable copying of snapshots to a destination
+ region.
+ Constraints: Must be the valid name of an existing cluster that has
+ cross-region snapshot copy enabled.
+
+ """
+ params = {'ClusterIdentifier': cluster_identifier, }
+ return self._make_request(
+ action='DisableSnapshotCopy',
+ verb='POST',
+ path='/', params=params)
+
+ def enable_logging(self, cluster_identifier, bucket_name,
+ s3_key_prefix=None):
+ """
+ Starts logging information, such as queries and connection
+ attempts, for the specified Amazon Redshift cluster.
+
+ :type cluster_identifier: string
+ :param cluster_identifier: The identifier of the cluster on which
+ logging is to be started.
+ Example: `examplecluster`
+
+ :type bucket_name: string
+ :param bucket_name:
+ The name of an existing S3 bucket where the log files are to be stored.
+
+ Constraints:
+
+
+ + Must be in the same region as the cluster
+ + The cluster must have read bucket and put object permissions
+
+ :type s3_key_prefix: string
+ :param s3_key_prefix:
+ The prefix applied to the log file names.
+
+ Constraints:
+
+
+ + Cannot exceed 512 characters
+ + Cannot contain spaces( ), double quotes ("), single quotes ('), a
+ backslash (\), or control characters. The hexadecimal codes for
+ invalid characters are:
+
+ + x00 to x20
+ + x22
+ + x27
+ + x5c
+ + x7f or larger
+
+ """
+ params = {
+ 'ClusterIdentifier': cluster_identifier,
+ 'BucketName': bucket_name,
+ }
+ if s3_key_prefix is not None:
+ params['S3KeyPrefix'] = s3_key_prefix
+ return self._make_request(
+ action='EnableLogging',
+ verb='POST',
+ path='/', params=params)
+
+ def enable_snapshot_copy(self, cluster_identifier, destination_region,
+ retention_period=None):
+ """
+ Enables the automatic copy of snapshots from one region to
+ another region for a specified cluster.
+
+ :type cluster_identifier: string
+ :param cluster_identifier: The unique identifier of the source cluster
+ to copy snapshots from.
+ Constraints: Must be the valid name of an existing cluster that does
+ not already have cross-region snapshot copy enabled.
+
+ :type destination_region: string
+ :param destination_region: The destination region that you want to copy
+ snapshots to.
+ Constraints: Must be the name of a valid region. For more information,
+ see `Regions and Endpoints`_ in the Amazon Web Services General
+ Reference.
+
+ :type retention_period: integer
+ :param retention_period: The number of days to retain automated
+ snapshots in the destination region after they are copied from the
+ source region.
+ Default: 7.
+
+ Constraints: Must be at least 1 and no more than 35.
+
+ """
+ params = {
+ 'ClusterIdentifier': cluster_identifier,
+ 'DestinationRegion': destination_region,
+ }
+ if retention_period is not None:
+ params['RetentionPeriod'] = retention_period
+ return self._make_request(
+ action='EnableSnapshotCopy',
+ verb='POST',
+ path='/', params=params)
+
def modify_cluster(self, cluster_identifier, cluster_type=None,
node_type=None, number_of_nodes=None,
cluster_security_groups=None,
@@ -1615,7 +2197,9 @@ class RedshiftConnection(AWSQueryConnection):
cluster_parameter_group_name=None,
automated_snapshot_retention_period=None,
preferred_maintenance_window=None,
- cluster_version=None, allow_version_upgrade=None):
+ cluster_version=None, allow_version_upgrade=None,
+ hsm_client_certificate_identifier=None,
+ hsm_configuration_identifier=None):
"""
Modifies the settings for a cluster. For example, you can add
another security or parameter group, update the preferred
@@ -1782,6 +2366,16 @@ class RedshiftConnection(AWSQueryConnection):
automatically to the cluster during the maintenance window.
Default: `False`
+ :type hsm_client_certificate_identifier: string
+ :param hsm_client_certificate_identifier: Specifies the name of the HSM
+ client certificate the Amazon Redshift cluster uses to retrieve the
+ data encryption keys stored in an HSM.
+
+ :type hsm_configuration_identifier: string
+ :param hsm_configuration_identifier: Specifies the name of the HSM
+ configuration that contains the information the Amazon Redshift
+ cluster can use to retrieve and store keys in an HSM.
+
"""
params = {'ClusterIdentifier': cluster_identifier, }
if cluster_type is not None:
@@ -1811,6 +2405,10 @@ class RedshiftConnection(AWSQueryConnection):
if allow_version_upgrade is not None:
params['AllowVersionUpgrade'] = str(
allow_version_upgrade).lower()
+ if hsm_client_certificate_identifier is not None:
+ params['HsmClientCertificateIdentifier'] = hsm_client_certificate_identifier
+ if hsm_configuration_identifier is not None:
+ params['HsmConfigurationIdentifier'] = hsm_configuration_identifier
return self._make_request(
action='ModifyCluster',
verb='POST',
@@ -1880,6 +2478,116 @@ class RedshiftConnection(AWSQueryConnection):
verb='POST',
path='/', params=params)
+ def modify_event_subscription(self, subscription_name,
+ sns_topic_arn=None, source_type=None,
+ source_ids=None, event_categories=None,
+ severity=None, enabled=None):
+ """
+ Modifies an existing Amazon Redshift event notification
+ subscription.
+
+ :type subscription_name: string
+ :param subscription_name: The name of the modified Amazon Redshift
+ event notification subscription.
+
+ :type sns_topic_arn: string
+ :param sns_topic_arn: The Amazon Resource Name (ARN) of the SNS topic
+ to be used by the event notification subscription.
+
+ :type source_type: string
+ :param source_type: The type of source that will be generating the
+ events. For example, if you want to be notified of events generated
+ by a cluster, you would set this parameter to cluster. If this
+ value is not specified, events are returned for all Amazon Redshift
+ objects in your AWS account. You must specify a source type in
+ order to specify source IDs.
+ Valid values: cluster, cluster-parameter-group, cluster-security-group,
+ and cluster-snapshot.
+
+ :type source_ids: list
+ :param source_ids: A list of one or more identifiers of Amazon Redshift
+ source objects. All of the objects must be of the same type as was
+ specified in the source type parameter. The event subscription will
+ return only events generated by the specified objects. If not
+ specified, then events are returned for all objects within the
+ source type specified.
+ Example: my-cluster-1, my-cluster-2
+
+ Example: my-snapshot-20131010
+
+ :type event_categories: list
+ :param event_categories: Specifies the Amazon Redshift event categories
+ to be published by the event notification subscription.
+ Values: Configuration, Management, Monitoring, Security
+
+ :type severity: string
+ :param severity: Specifies the Amazon Redshift event severity to be
+ published by the event notification subscription.
+ Values: ERROR, INFO
+
+ :type enabled: boolean
+ :param enabled: A Boolean value indicating if the subscription is
+ enabled. `True` indicates the subscription is enabled
+
+ """
+ params = {'SubscriptionName': subscription_name, }
+ if sns_topic_arn is not None:
+ params['SnsTopicArn'] = sns_topic_arn
+ if source_type is not None:
+ params['SourceType'] = source_type
+ if source_ids is not None:
+ self.build_list_params(params,
+ source_ids,
+ 'SourceIds.member')
+ if event_categories is not None:
+ self.build_list_params(params,
+ event_categories,
+ 'EventCategories.member')
+ if severity is not None:
+ params['Severity'] = severity
+ if enabled is not None:
+ params['Enabled'] = str(
+ enabled).lower()
+ return self._make_request(
+ action='ModifyEventSubscription',
+ verb='POST',
+ path='/', params=params)
+
+ def modify_snapshot_copy_retention_period(self, cluster_identifier,
+ retention_period):
+ """
+ Modifies the number of days to retain automated snapshots in
+ the destination region after they are copied from the source
+ region.
+
+ :type cluster_identifier: string
+ :param cluster_identifier: The unique identifier of the cluster for
+ which you want to change the retention period for automated
+ snapshots that are copied to a destination region.
+ Constraints: Must be the valid name of an existing cluster that has
+ cross-region snapshot copy enabled.
+
+ :type retention_period: integer
+ :param retention_period: The number of days to retain automated
+ snapshots in the destination region after they are copied from the
+ source region.
+ If you decrease the retention period for automated snapshots that are
+ copied to a destination region, Amazon Redshift will delete any
+ existing automated snapshots that were copied to the destination
+ region and that fall outside of the new retention period.
+
+ Constraints: Must be at least 1 and no more than 35.
+
+ """
+ params = {
+ 'ClusterIdentifier': cluster_identifier,
+ 'RetentionPeriod': retention_period,
+ }
+ return self._make_request(
+ action='ModifySnapshotCopyRetentionPeriod',
+ verb='POST',
+ path='/', params=params)
+
def purchase_reserved_node_offering(self, reserved_node_offering_id,
node_count=None):
"""
@@ -1983,7 +2691,10 @@ class RedshiftConnection(AWSQueryConnection):
allow_version_upgrade=None,
cluster_subnet_group_name=None,
publicly_accessible=None,
- owner_account=None):
+ owner_account=None,
+ hsm_client_certificate_identifier=None,
+ hsm_configuration_identifier=None,
+ elastic_ip=None):
"""
Creates a new cluster from a snapshot. Amazon Redshift creates
the resulting cluster with the same configuration as the
@@ -2023,7 +2734,10 @@ class RedshiftConnection(AWSQueryConnection):
Example: `my-snapshot-id`
:type snapshot_cluster_identifier: string
- :param snapshot_cluster_identifier:
+ :param snapshot_cluster_identifier: The name of the cluster the source
+ snapshot was created from. This parameter is required if your IAM
+ user has a policy containing a snapshot resource element that
+ specifies anything other than * for the cluster name.
:type port: integer
:param port: The port number on which the cluster accepts connections.
@@ -2060,6 +2774,19 @@ class RedshiftConnection(AWSQueryConnection):
the snapshot. Required if you are restoring a snapshot you do not
own, optional if you own the snapshot.
+ :type hsm_client_certificate_identifier: string
+ :param hsm_client_certificate_identifier: Specifies the name of the HSM
+ client certificate the Amazon Redshift cluster uses to retrieve the
+ data encryption keys stored in an HSM.
+
+ :type hsm_configuration_identifier: string
+ :param hsm_configuration_identifier: Specifies the name of the HSM
+ configuration that contains the information the Amazon Redshift
+ cluster can use to retrieve and store keys in an HSM.
+
+ :type elastic_ip: string
+ :param elastic_ip: The elastic IP (EIP) address for the cluster.
+
"""
params = {
'ClusterIdentifier': cluster_identifier,
@@ -2081,6 +2808,12 @@ class RedshiftConnection(AWSQueryConnection):
publicly_accessible).lower()
if owner_account is not None:
params['OwnerAccount'] = owner_account
+ if hsm_client_certificate_identifier is not None:
+ params['HsmClientCertificateIdentifier'] = hsm_client_certificate_identifier
+ if hsm_configuration_identifier is not None:
+ params['HsmConfigurationIdentifier'] = hsm_configuration_identifier
+ if elastic_ip is not None:
+ params['ElasticIp'] = elastic_ip
return self._make_request(
action='RestoreFromClusterSnapshot',
verb='POST',
@@ -2155,7 +2888,10 @@ class RedshiftConnection(AWSQueryConnection):
account can no longer access.
:type snapshot_cluster_identifier: string
- :param snapshot_cluster_identifier:
+ :param snapshot_cluster_identifier: The identifier of the cluster the
+ snapshot was created from. This parameter is required if your IAM
+ user has a policy containing a snapshot resource element that
+ specifies anything other than * for the cluster name.
:type account_with_restore_access: string
:param account_with_restore_access: The identifier of the AWS customer
@@ -2173,6 +2909,23 @@ class RedshiftConnection(AWSQueryConnection):
verb='POST',
path='/', params=params)
+ def rotate_encryption_key(self, cluster_identifier):
+ """
+ Rotates the encryption keys for a cluster.
+
+ :type cluster_identifier: string
+ :param cluster_identifier: The unique identifier of the cluster that
+ you want to rotate the encryption keys for.
+ Constraints: Must be the name of a valid cluster that has encryption
+ enabled.
+
+ """
+ params = {'ClusterIdentifier': cluster_identifier, }
+ return self._make_request(
+ action='RotateEncryptionKey',
+ verb='POST',
+ path='/', params=params)
+
def _make_request(self, action, verb, path, params):
params['ContentType'] = 'JSON'
response = self.make_request(action=action, verb='POST',
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 1fc054d1..ab838685 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -44,6 +44,7 @@ Currently Supported Services
* Elastic Beanstalk -- (:doc:`API Reference <ref/beanstalk>`)
* Data Pipeline -- (:doc:`API Reference <ref/datapipeline>`)
* Opsworks -- (:doc:`API Reference <ref/opsworks>`)
+ * CloudTrail -- (:doc:`API Reference <ref/cloudtrail>`)
* **Identity & Access**
@@ -114,6 +115,7 @@ Release Notes
.. toctree::
:titlesonly:
+ releasenotes/v2.17.0
releasenotes/v2.16.0
releasenotes/v2.15.0
releasenotes/v2.14.0
diff --git a/docs/source/ref/cloudtrail.rst b/docs/source/ref/cloudtrail.rst
new file mode 100644
index 00000000..a2ae6122
--- /dev/null
+++ b/docs/source/ref/cloudtrail.rst
@@ -0,0 +1,26 @@
+.. _ref-cloudtrail:
+
+==========
+CloudTrail
+==========
+
+boto.cloudtrail
+---------------
+
+.. automodule:: boto.cloudtrail
+ :members:
+ :undoc-members:
+
+boto.cloudtrail.layer1
+----------------------
+
+.. automodule:: boto.cloudtrail.layer1
+ :members:
+ :undoc-members:
+
+boto.cloudtrail.exceptions
+--------------------------
+
+.. automodule:: boto.cloudtrail.exceptions
+ :members:
+ :undoc-members:
diff --git a/docs/source/releasenotes/v2.17.0.rst b/docs/source/releasenotes/v2.17.0.rst
new file mode 100644
index 00000000..b64ba5f5
--- /dev/null
+++ b/docs/source/releasenotes/v2.17.0.rst
@@ -0,0 +1,21 @@
+boto v2.17.0
+============
+
+:date: 2013/11/14
+
+This release adds support for the new AWS CloudTrail service, support for
+Amazon Redshift's new features related to encryption, audit logging, data
+load from external hosts, WLM configuration, database distribution styles
+and functions, as well as cross-region snapshot copying.
+
+
+Features
+--------
+
+* Add support for AWS CloudTrail (:sha:`53ba0c9`)
+* Add support for new Amazon Redshift features (:sha:`d94b48c`)
+
+Bugfixes
+--------
+
+* Add missing argument for Google Storage resumable uploads. (:sha:`b777b62`)
diff --git a/setup.py b/setup.py
index 50e12f62..16cbd814 100644
--- a/setup.py
+++ b/setup.py
@@ -74,7 +74,7 @@ setup(name = "boto",
"boto.swf", "boto.mws", "boto.cloudsearch", "boto.glacier",
"boto.beanstalk", "boto.datapipeline", "boto.elasticache",
"boto.elastictranscoder", "boto.opsworks", "boto.redshift",
- "boto.dynamodb2", "boto.support"],
+ "boto.dynamodb2", "boto.support", "boto.cloudtrail"],
package_data = {"boto.cacerts": ["cacerts.txt"]},
license = "MIT",
platforms = "Posix; MacOS X; Windows",
diff --git a/tests/integration/cloudtrail/__init__.py b/tests/integration/cloudtrail/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/integration/cloudtrail/__init__.py
diff --git a/tests/integration/cloudtrail/test_cloudtrail.py b/tests/integration/cloudtrail/test_cloudtrail.py
new file mode 100644
index 00000000..3cbb3e85
--- /dev/null
+++ b/tests/integration/cloudtrail/test_cloudtrail.py
@@ -0,0 +1,91 @@
+import boto
+
+from time import time
+from unittest import TestCase
+
+DEFAULT_S3_POLICY = """{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "AWSCloudTrailAclCheck20131101",
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": [
+ "arn:aws:iam::086441151436:root",
+ "arn:aws:iam::113285607260:root"
+ ]
+ },
+ "Action": "s3:GetBucketAcl",
+ "Resource": "arn:aws:s3:::<BucketName>"
+ },
+ {
+ "Sid": "AWSCloudTrailWrite20131101",
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": [
+ "arn:aws:iam::086441151436:root",
+ "arn:aws:iam::113285607260:root"
+ ]
+ },
+ "Action": "s3:PutObject",
+ "Resource": "arn:aws:s3:::<BucketName>/<Prefix>/AWSLogs/<CustomerAccountID>/*",
+ "Condition": {
+ "StringEquals": {
+ "s3:x-amz-acl": "bucket-owner-full-control"
+ }
+ }
+ }
+ ]
+}"""
+
+class TestCloudTrail(TestCase):
+ def test_cloudtrail(self):
+ cloudtrail = boto.connect_cloudtrail()
+
+ # Don't delete existing customer data!
+ res = cloudtrail.describe_trails()
+ if len(res['trailList']):
+ self.fail('A trail already exists on this account!')
+
+ # Who am I?
+ iam = boto.connect_iam()
+ response = iam.get_user()
+ account_id = response['get_user_response']['get_user_result'] \
+ ['user']['user_id']
+
+ # Setup a new bucket
+ s3 = boto.connect_s3()
+ bucket_name = 'cloudtrail-integ-{0}'.format(time())
+ policy = DEFAULT_S3_POLICY.replace('<BucketName>', bucket_name)\
+ .replace('<CustomerAccountID>', account_id)\
+ .replace('<Prefix>/', '')
+ b = s3.create_bucket(bucket_name)
+ b.set_policy(policy)
+
+ # Setup CloudTrail
+ cloudtrail.create_trail(trail={'Name': 'test', 'S3BucketName': bucket_name})
+
+ cloudtrail.update_trail(trail={'Name': 'test', 'IncludeGlobalServiceEvents': False})
+
+ trails = cloudtrail.describe_trails()
+
+ self.assertEqual('test', trails['trailList'][0]['Name'])
+ self.assertFalse(trails['trailList'][0]['IncludeGlobalServiceEvents'])
+
+ cloudtrail.start_logging(name='test')
+
+ status = cloudtrail.get_trail_status(name='test')
+ self.assertTrue(status['IsLogging'])
+
+ cloudtrail.stop_logging(name='test')
+
+ status = cloudtrail.get_trail_status(name='test')
+ self.assertFalse(status['IsLogging'])
+
+ # Clean up
+ cloudtrail.delete_trail(name='test')
+
+ for key in b.list():
+ key.delete()
+
+ s3.delete_bucket(bucket_name)
diff --git a/tests/unit/cloudtrail/__init__.py b/tests/unit/cloudtrail/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/unit/cloudtrail/__init__.py
diff --git a/tests/unit/cloudtrail/test_layer1.py b/tests/unit/cloudtrail/test_layer1.py
new file mode 100644
index 00000000..118fc169
--- /dev/null
+++ b/tests/unit/cloudtrail/test_layer1.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python
+
+import json
+
+from boto.cloudtrail.layer1 import CloudTrailConnection
+from tests.unit import AWSMockServiceTestCase
+
+
+class TestDescribeTrails(AWSMockServiceTestCase):
+ connection_class = CloudTrailConnection
+
+ def default_body(self):
+ return '''
+ {"trailList":
+ [
+ {
+ "IncludeGlobalServiceEvents": false,
+ "Name": "test",
+ "SnsTopicName": "cloudtrail-1",
+ "S3BucketName": "cloudtrail-1"
+ }
+ ]
+ }'''
+
+ def test_describe(self):
+ self.set_http_response(status_code=200)
+ api_response = self.service_connection.describe_trails()
+
+ self.assertEqual(1, len(api_response['trailList']))
+ self.assertEqual('test', api_response['trailList'][0]['Name'])
+
+ self.assert_request_parameters({})
+
+ target = self.actual_request.headers['X-Amz-Target']
+ self.assertTrue('DescribeTrails' in target)
+
+ def test_describe_name_list(self):
+ self.set_http_response(status_code=200)
+ api_response = self.service_connection.describe_trails(
+ trail_name_list=['test'])
+
+ self.assertEqual(1, len(api_response['trailList']))
+ self.assertEqual('test', api_response['trailList'][0]['Name'])
+
+ self.assertEqual(json.dumps({
+ 'trailNameList': ['test']
+ }), self.actual_request.body)
+
+ target = self.actual_request.headers['X-Amz-Target']
+ self.assertTrue('DescribeTrails' in target)
+
+
+class TestCreateTrail(AWSMockServiceTestCase):
+ connection_class = CloudTrailConnection
+
+ def default_body(self):
+ return '''
+ {"trail":
+ {
+ "IncludeGlobalServiceEvents": false,
+ "Name": "test",
+ "SnsTopicName": "cloudtrail-1",
+ "S3BucketName": "cloudtrail-1"
+ }
+ }'''
+
+ def test_create(self):
+ self.set_http_response(status_code=200)
+
+ trail = {'Name': 'test', 'S3BucketName': 'cloudtrail-1',
+ 'SnsTopicName': 'cloudtrail-1',
+ 'IncludeGlobalServiceEvents': False}
+
+ api_response = self.service_connection.create_trail(trail=trail)
+
+ self.assertEqual(trail, api_response['trail'])
+
+ target = self.actual_request.headers['X-Amz-Target']
+ self.assertTrue('CreateTrail' in target)