summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDaniel Lindsley <daniel@toastdriven.com>2014-02-27 11:15:26 -0800
committerDaniel Lindsley <daniel@toastdriven.com>2014-02-27 11:15:26 -0800
commitb7d9f446421acd7af9227c3e62bde967ef433afe (patch)
tree07b31276a703e9a19881ebe37eb43cd938f694e3
parent522d67f9ff32af03df4be73ac7b9541c3ab6e3c0 (diff)
parent3d3763ebd2a76ca5b8085b2cd2cd37a149af9467 (diff)
downloadboto-b7d9f446421acd7af9227c3e62bde967ef433afe.tar.gz
Merge branch 'release-2.26.0'2.26.0
-rw-r--r--README.rst4
-rwxr-xr-xbin/cq3
-rwxr-xr-xbin/elbadmin6
-rwxr-xr-xbin/glacier12
-rwxr-xr-xbin/s3put6
-rwxr-xr-xbin/sdbadmin7
-rw-r--r--boto/__init__.py17
-rw-r--r--boto/auth.py4
-rw-r--r--boto/cloudformation/connection.py785
-rwxr-xr-xboto/cloudformation/stack.py29
-rw-r--r--boto/cloudformation/template.py8
-rw-r--r--boto/cloudfront/__init__.py5
-rw-r--r--boto/connection.py10
-rw-r--r--boto/datapipeline/__init__.py41
-rw-r--r--boto/datapipeline/layer1.py2
-rw-r--r--boto/dynamodb2/exceptions.py4
-rw-r--r--boto/dynamodb2/fields.py5
-rw-r--r--boto/dynamodb2/table.py17
-rw-r--r--boto/endpoints.json9
-rw-r--r--boto/exception.py26
-rwxr-xr-xboto/gs/connection.py2
-rw-r--r--boto/gs/key.py3
-rw-r--r--boto/provider.py7
-rw-r--r--boto/rds/__init__.py84
-rw-r--r--boto/rds/logfile.py22
-rw-r--r--boto/rds2/__init__.py53
-rw-r--r--boto/rds2/exceptions.py234
-rw-r--r--boto/rds2/layer1.py3774
-rw-r--r--boto/requestlog.py39
-rw-r--r--boto/roboto/param.py4
-rw-r--r--boto/route53/connection.py95
-rw-r--r--boto/route53/healthcheck.py128
-rw-r--r--boto/route53/record.py46
-rw-r--r--boto/sts/connection.py26
-rw-r--r--boto/utils.py9
-rw-r--r--docs/source/index.rst3
-rw-r--r--docs/source/migrations/rds_v1_to_v2.rst91
-rw-r--r--docs/source/rds_tut.rst9
-rw-r--r--docs/source/ref/rds2.rst26
-rw-r--r--docs/source/ref/swf.rst7
-rw-r--r--docs/source/releasenotes/v2.26.0.rst59
-rw-r--r--docs/source/s3_tut.rst2
-rw-r--r--docs/source/sqs_tut.rst2
-rw-r--r--setup.py2
-rw-r--r--tests/integration/cloudformation/test_connection.py12
-rw-r--r--tests/integration/cloudtrail/test_cert_verification.py38
-rw-r--r--tests/integration/datapipeline/__init__.py0
-rw-r--r--tests/integration/datapipeline/test_cert_verification.py38
-rw-r--r--tests/integration/dynamodb2/test_highlevel.py79
-rw-r--r--tests/integration/gs/test_basic.py6
-rw-r--r--tests/integration/kinesis/test_cert_verification.py38
-rw-r--r--tests/integration/rds2/__init__.py21
-rw-r--r--tests/integration/rds2/test_cert_verification.py39
-rw-r--r--tests/integration/rds2/test_connection.py87
-rw-r--r--tests/integration/route53/__init__.py18
-rw-r--r--tests/integration/route53/test_alias_resourcerecordsets.py83
-rw-r--r--tests/integration/route53/test_cert_verification.py2
-rw-r--r--tests/integration/route53/test_health_check.py143
-rw-r--r--tests/integration/route53/test_resourcerecordsets.py23
-rw-r--r--tests/integration/route53/test_zone.py122
-rw-r--r--tests/integration/sts/test_session_token.py2
-rw-r--r--tests/unit/auth/test_sigv4.py12
-rw-r--r--[-rwxr-xr-x]tests/unit/cloudformation/test_connection.py102
-rw-r--r--[-rwxr-xr-x]tests/unit/cloudformation/test_stack.py2
-rw-r--r--tests/unit/cloudfront/test_connection.py204
-rw-r--r--tests/unit/dynamodb2/test_table.py41
-rw-r--r--tests/unit/emr/test_connection.py7
-rw-r--r--[-rwxr-xr-x]tests/unit/mws/test_connection.py0
-rw-r--r--[-rwxr-xr-x]tests/unit/mws/test_response.py21
-rw-r--r--tests/unit/provider/test_provider.py9
-rw-r--r--tests/unit/rds/test_connection.py118
-rw-r--r--tests/unit/rds2/__init__.py0
-rw-r--r--tests/unit/rds2/test_connection.py209
-rw-r--r--tests/unit/route53/test_connection.py22
-rw-r--r--tests/unit/sns/test_connection.py11
-rw-r--r--tests/unit/sts/test_connection.py23
76 files changed, 6979 insertions, 280 deletions
diff --git a/README.rst b/README.rst
index f23634ba..c11a3199 100644
--- a/README.rst
+++ b/README.rst
@@ -1,9 +1,9 @@
####
boto
####
-boto 2.25.0
+boto 2.26.0
-Released: 07-February-2014
+Released: 27-February-2014
.. image:: https://travis-ci.org/boto/boto.png?branch=develop
:target: https://travis-ci.org/boto/boto
diff --git a/bin/cq b/bin/cq
index 242d0d25..05bc95b9 100755
--- a/bin/cq
+++ b/bin/cq
@@ -57,6 +57,9 @@ def main():
region = a
if region:
c = boto.sqs.connect_to_region(region)
+ if c is None:
+ print 'Invalid region (%s)' % region
+ sys.exit(1)
else:
c = SQSConnection()
if queue_name:
diff --git a/bin/elbadmin b/bin/elbadmin
index 816c7327..d83643f0 100755
--- a/bin/elbadmin
+++ b/bin/elbadmin
@@ -72,6 +72,10 @@ def list(elb):
for b in elb.get_all_load_balancers():
print "%-20s %s" % (b.name, b.dns_name)
+def check_valid_region(conn, region):
+ if conn is None:
+ print 'Invalid region (%s)' % region
+ sys.exit(1)
def get(elb, name):
"""Get details about ELB <name>"""
@@ -113,6 +117,7 @@ def get(elb, name):
ec2 = boto.connect_ec2()
else:
ec2 = boto.ec2.connect_to_region(options.region)
+ check_valid_region(ec2, options.region)
instance_health = b.get_instance_health()
instances = [state.instance_id for state in instance_health]
@@ -255,6 +260,7 @@ if __name__ == "__main__":
else:
import boto.ec2.elb
elb = boto.ec2.elb.connect_to_region(options.region)
+ check_valid_region(elb, options.region)
print "%s" % (elb.region.endpoint)
diff --git a/bin/glacier b/bin/glacier
index bd28adf6..a3763e06 100755
--- a/bin/glacier
+++ b/bin/glacier
@@ -84,10 +84,14 @@ glacier <command> [args]
def connect(region, debug_level=0, access_key=None, secret_key=None):
""" Connect to a specific region """
- return connect_to_region(region,
- aws_access_key_id=access_key,
- aws_secret_access_key=secret_key,
- debug=debug_level)
+ layer2 = connect_to_region(region,
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key,
+ debug=debug_level)
+ if layer2 is None:
+ print 'Invalid region (%s)' % region
+ sys.exit(1)
+ return layer2
def list_vaults(region, access_key=None, secret_key=None):
diff --git a/bin/s3put b/bin/s3put
index 9d487f9f..0a2088d3 100755
--- a/bin/s3put
+++ b/bin/s3put
@@ -173,6 +173,10 @@ def _upload_part(bucketname, aws_key, aws_secret, multipart_id, part_num,
_upload()
+def check_valid_region(conn, region):
+ if conn is None:
+ print 'Invalid region (%s)' % region
+ sys.exit(1)
def multipart_upload(bucketname, aws_key, aws_secret, source_path, keyname,
reduced, debug, cb, num_cb, acl='private', headers={},
@@ -183,6 +187,7 @@ def multipart_upload(bucketname, aws_key, aws_secret, source_path, keyname,
"""
conn = boto.s3.connect_to_region(region, aws_access_key_id=aws_key,
aws_secret_access_key=aws_secret)
+ check_valid_region(conn, region)
conn.debug = debug
bucket = conn.get_bucket(bucketname)
@@ -334,6 +339,7 @@ def main():
connect_args['host'] = host
c = boto.s3.connect_to_region(region or DEFAULT_REGION, **connect_args)
+ check_valid_region(c, region or DEFAULT_REGION)
c.debug = debug
b = c.get_bucket(bucket_name, validate=False)
diff --git a/bin/sdbadmin b/bin/sdbadmin
index 8b072cc4..9e2448c5 100755
--- a/bin/sdbadmin
+++ b/bin/sdbadmin
@@ -90,6 +90,11 @@ def load_db(domain, file, use_json=False):
else:
domain.from_xml(file)
+def check_valid_region(conn, region):
+ if conn is None:
+ print 'Invalid region (%s)' % region
+ sys.exit(1)
+
def create_db(domain_name, region_name):
"""Create a new DB
@@ -97,6 +102,7 @@ def create_db(domain_name, region_name):
:type domain: str
"""
sdb = boto.sdb.connect_to_region(region_name)
+ check_valid_region(sdb, region_name)
return sdb.create_domain(domain_name)
if __name__ == "__main__":
@@ -125,6 +131,7 @@ if __name__ == "__main__":
exit()
sdb = boto.sdb.connect_to_region(options.region_name)
+ check_valid_region(sdb, options.region_name)
if options.list:
for db in sdb.get_all_domains():
print db
diff --git a/boto/__init__.py b/boto/__init__.py
index 87177be4..2bf77a6f 100644
--- a/boto/__init__.py
+++ b/boto/__init__.py
@@ -37,7 +37,7 @@ import logging.config
import urlparse
from boto.exception import InvalidUriError
-__version__ = '2.25.0'
+__version__ = '2.26.0'
Version = __version__ # for backware compatibility
# http://bugs.python.org/issue7980
@@ -312,6 +312,21 @@ def connect_rds(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
return RDSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+def connect_rds2(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.rds2.layer1.RDSConnection`
+ :return: A connection to RDS
+ """
+ from boto.rds2.layer1 import RDSConnection
+ return RDSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+
def connect_emr(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
diff --git a/boto/auth.py b/boto/auth.py
index 99699af7..cf933a75 100644
--- a/boto/auth.py
+++ b/boto/auth.py
@@ -330,7 +330,7 @@ class HmacAuthV4Handler(AuthHandler, HmacKeys):
parameter_names = sorted(http_request.params.keys())
pairs = []
for pname in parameter_names:
- pval = str(http_request.params[pname]).encode('utf-8')
+ pval = boto.utils.get_utf8_value(http_request.params[pname])
pairs.append(urllib.quote(pname, safe='') + '=' +
urllib.quote(pval, safe='-_~'))
return '&'.join(pairs)
@@ -342,7 +342,7 @@ class HmacAuthV4Handler(AuthHandler, HmacKeys):
return ""
l = []
for param in sorted(http_request.params):
- value = str(http_request.params[param])
+ value = boto.utils.get_utf8_value(http_request.params[param])
l.append('%s=%s' % (urllib.quote(param, safe='-_.~'),
urllib.quote(value, safe='-_.~')))
return '&'.join(l)
diff --git a/boto/cloudformation/connection.py b/boto/cloudformation/connection.py
index 9ebc5f18..f21dffe6 100644
--- a/boto/cloudformation/connection.py
+++ b/boto/cloudformation/connection.py
@@ -1,4 +1,5 @@
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
@@ -76,50 +77,117 @@ class CloudFormationConnection(AWSQueryConnection):
return {True: "true", False: "false"}[v]
def _build_create_or_update_params(self, stack_name, template_body,
- template_url, parameters,
- notification_arns, disable_rollback,
- timeout_in_minutes, capabilities, tags):
+ template_url, parameters, disable_rollback, timeout_in_minutes,
+ notification_arns, capabilities, on_failure, stack_policy_body,
+ stack_policy_url, tags, stack_policy_during_update_body=None,
+ stack_policy_during_update_url=None):
"""
Helper that creates JSON parameters needed by a Stack Create or
Stack Update call.
:type stack_name: string
- :param stack_name: The name of the Stack, must be unique amoung running
- Stacks
+ :param stack_name:
+ The name associated with the stack. The name must be unique within your
+ AWS account.
+
+ Must contain only alphanumeric characters (case sensitive) and start
+ with an alpha character. Maximum length of the name is 255
+ characters.
:type template_body: string
- :param template_body: The template body (JSON string)
+ :param template_body: Structure containing the template body. (For more
+ information, go to `Template Anatomy`_ in the AWS CloudFormation
+ User Guide.)
+ Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are
+ passed, only `TemplateBody` is used.
:type template_url: string
- :param template_url: An S3 URL of a stored template JSON document. If
- both the template_body and template_url are
- specified, the template_body takes precedence
-
- :type parameters: list of tuples
- :param parameters: A list of (key, value) pairs for template input
- parameters.
-
- :type notification_arns: list of strings
- :param notification_arns: A list of SNS topics to send Stack event
- notifications to.
-
- :type disable_rollback: bool
- :param disable_rollback: Indicates whether or not to rollback on
- failure.
-
- :type timeout_in_minutes: int
- :param timeout_in_minutes: Maximum amount of time to let the Stack
- spend creating itself. If this timeout is exceeded,
- the Stack will enter the CREATE_FAILED state.
+ :param template_url: Location of file containing the template body. The
+ URL must point to a template (max size: 307,200 bytes) located in
+ an S3 bucket in the same region as the stack. For more information,
+ go to the `Template Anatomy`_ in the AWS CloudFormation User Guide.
+ Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
+ passed, only `TemplateBody` is used.
+
+ :type parameters: list
+ :param parameters: A list of key/value tuples that specify input
+ parameters for the stack.
+
+ :type disable_rollback: boolean
+ :param disable_rollback: Set to `True` to disable rollback of the stack
+ if stack creation failed. You can specify either `DisableRollback`
+ or `OnFailure`, but not both.
+ Default: `False`
+
+ :type timeout_in_minutes: integer
+ :param timeout_in_minutes: The amount of time that can pass before the
+ stack status becomes CREATE_FAILED; if `DisableRollback` is not set
+ or is set to `False`, the stack will be rolled back.
+
+ :type notification_arns: list
+ :param notification_arns: The Simple Notification Service (SNS) topic
+ ARNs to publish stack related events. You can find your SNS topic
+ ARNs using the `SNS console`_ or your Command Line Interface (CLI).
:type capabilities: list
- :param capabilities: The list of capabilities you want to allow in
- the stack. Currently, the only valid capability is
- 'CAPABILITY_IAM'.
-
- :type tags: dict
- :param tags: A dictionary of (key, value) pairs of tags to
- associate with this stack.
+ :param capabilities: The list of capabilities that you want to allow in
+ the stack. If your template contains certain resources, you must
+ specify the CAPABILITY_IAM value for this parameter; otherwise,
+ this action returns an InsufficientCapabilities error. The
+ following resources require you to specify the capabilities
+ parameter: `AWS::CloudFormation::Stack`_, `AWS::IAM::AccessKey`_,
+ `AWS::IAM::Group`_, `AWS::IAM::InstanceProfile`_,
+ `AWS::IAM::Policy`_, `AWS::IAM::Role`_, `AWS::IAM::User`_, and
+ `AWS::IAM::UserToGroupAddition`_.
+
+ :type on_failure: string
+ :param on_failure: Determines what action will be taken if stack
+ creation fails. This must be one of: DO_NOTHING, ROLLBACK, or
+ DELETE. You can specify either `OnFailure` or `DisableRollback`,
+ but not both.
+ Default: `ROLLBACK`
+
+ :type stack_policy_body: string
+ :param stack_policy_body: Structure containing the stack policy body.
+ (For more information, go to `Prevent Updates to Stack Resources`_
+ in the AWS CloudFormation User Guide.)
+ If you pass `StackPolicyBody` and `StackPolicyURL`, only
+ `StackPolicyBody` is used.
+
+ :type stack_policy_url: string
+ :param stack_policy_url: Location of a file containing the stack
+ policy. The URL must point to a policy (max size: 16KB) located in
+ an S3 bucket in the same region as the stack. If you pass
+ `StackPolicyBody` and `StackPolicyURL`, only `StackPolicyBody` is
+ used.
+
+ :type tags: list
+ :param tags: A set of user-defined `Tags` to associate with this stack,
+ represented by key/value pairs. Tags defined for the stack are
+ propagated to EC2 resources that are created as part of the stack.
+ A maximum number of 10 tags can be specified.
+
+ :type stack_policy_during_update_body: string
+ :param stack_policy_during_update_body: Structure containing the
+ temporary overriding stack policy body. If you pass
+ `StackPolicyDuringUpdateBody` and `StackPolicyDuringUpdateURL`,
+ only `StackPolicyDuringUpdateBody` is used.
+ If you want to update protected resources, specify a temporary
+ overriding stack policy during this update. If you do not specify a
+ stack policy, the current policy that is associated with the stack
+ will be used.
+
+ :type stack_policy_during_update_url: string
+ :param stack_policy_during_update_url: Location of a file containing
+ the temporary overriding stack policy. The URL must point to a
+ policy (max size: 16KB) located in an S3 bucket in the same region
+ as the stack. If you pass `StackPolicyDuringUpdateBody` and
+ `StackPolicyDuringUpdateURL`, only `StackPolicyDuringUpdateBody` is
+ used.
+ If you want to update protected resources, specify a temporary
+ overriding stack policy during this update. If you do not specify a
+ stack policy, the current policy that is associated with the stack
+ will be used.
:rtype: dict
:return: JSON parameters represented as a Python dict.
@@ -133,7 +201,7 @@ class CloudFormationConnection(AWSQueryConnection):
if template_body and template_url:
boto.log.warning("If both TemplateBody and TemplateURL are"
" specified, only TemplateBody will be honored by the API")
- if len(parameters) > 0:
+ if parameters and len(parameters) > 0:
for i, (key, value) in enumerate(parameters):
params['Parameters.member.%d.ParameterKey' % (i + 1)] = key
params['Parameters.member.%d.ParameterValue' % (i + 1)] = value
@@ -144,107 +212,224 @@ class CloudFormationConnection(AWSQueryConnection):
for i, (key, value) in enumerate(tags.items()):
params['Tags.member.%d.Key' % (i + 1)] = key
params['Tags.member.%d.Value' % (i + 1)] = value
- if len(notification_arns) > 0:
+ if notification_arns and len(notification_arns) > 0:
self.build_list_params(params, notification_arns,
"NotificationARNs.member")
if timeout_in_minutes:
params['TimeoutInMinutes'] = int(timeout_in_minutes)
+ if disable_rollback is not None:
+ params['DisableRollback'] = str(
+ disable_rollback).lower()
+ if on_failure is not None:
+ params['OnFailure'] = on_failure
+ if stack_policy_body is not None:
+ params['StackPolicyBody'] = stack_policy_body
+ if stack_policy_url is not None:
+ params['StackPolicyURL'] = stack_policy_url
+ if stack_policy_during_update_body is not None:
+ params['StackPolicyDuringUpdateBody'] = stack_policy_during_update_body
+ if stack_policy_during_update_url is not None:
+ params['StackPolicyDuringUpdateURL'] = stack_policy_during_update_url
return params
- def create_stack(self, stack_name, template_body=None, template_url=None,
- parameters=[], notification_arns=[], disable_rollback=False,
- timeout_in_minutes=None, capabilities=None, tags=None):
+ def _do_request(self, call, params, path, method):
"""
- Creates a CloudFormation Stack as specified by the template.
-
- :type stack_name: string
- :param stack_name: The name of the Stack, must be unique amoung running
- Stacks
-
- :type template_body: string
- :param template_body: The template body (JSON string)
-
- :type template_url: string
- :param template_url: An S3 URL of a stored template JSON document. If
- both the template_body and template_url are
- specified, the template_body takes precedence
+ Do a request via ``self.make_request`` and parse the JSON response.
- :type parameters: list of tuples
- :param parameters: A list of (key, value) pairs for template input
- parameters.
+ :type call: string
+ :param call: Call name, e.g. ``CreateStack``
- :type notification_arns: list of strings
- :param notification_arns: A list of SNS topics to send Stack event
- notifications to.
+ :type params: dict
+ :param params: Dictionary of call parameters
- :type disable_rollback: bool
- :param disable_rollback: Indicates whether or not to rollback on
- failure.
-
- :type timeout_in_minutes: int
- :param timeout_in_minutes: Maximum amount of time to let the Stack
- spend creating itself. If this timeout is exceeded,
- the Stack will enter the CREATE_FAILED state.
-
- :type capabilities: list
- :param capabilities: The list of capabilities you want to allow in
- the stack. Currently, the only valid capability is
- 'CAPABILITY_IAM'.
+ :type path: string
+ :param path: Server path
- :type tags: dict
- :param tags: A dictionary of (key, value) pairs of tags to
- associate with this stack.
+ :type method: string
+ :param method: HTTP method to use
- :rtype: string
- :return: The unique Stack ID.
+ :rtype: dict
+ :return: Parsed JSON response data
"""
- params = self._build_create_or_update_params(stack_name,
- template_body, template_url, parameters, notification_arns,
- disable_rollback, timeout_in_minutes, capabilities, tags)
- response = self.make_request('CreateStack', params, '/', 'POST')
+ response = self.make_request(call, params, path, method)
body = response.read()
if response.status == 200:
body = json.loads(body)
- return body['CreateStackResponse']['CreateStackResult']['StackId']
+ return body
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
- raise self.ResponseError(response.status, response.reason, body)
+ raise self.ResponseError(response.status, response.reason, body=body)
- def update_stack(self, stack_name, template_body=None, template_url=None,
- parameters=[], notification_arns=[], disable_rollback=False,
- timeout_in_minutes=None, capabilities=None, tags=None):
+ def create_stack(self, stack_name, template_body=None, template_url=None,
+ parameters=None, notification_arns=None, disable_rollback=None,
+ timeout_in_minutes=None, capabilities=None, tags=None,
+ on_failure=None, stack_policy_body=None, stack_policy_url=None):
"""
- Updates a CloudFormation Stack as specified by the template.
+ Creates a stack as specified in the template. After the call
+ completes successfully, the stack creation starts. You can
+ check the status of the stack via the DescribeStacks API.
+ Currently, the limit for stacks is 20 stacks per account per
+ region.
:type stack_name: string
- :param stack_name: The name of the Stack, must be unique amoung running
- Stacks.
+ :param stack_name:
+ The name associated with the stack. The name must be unique within your
+ AWS account.
+
+ Must contain only alphanumeric characters (case sensitive) and start
+ with an alpha character. Maximum length of the name is 255
+ characters.
:type template_body: string
- :param template_body: The template body (JSON string)
+ :param template_body: Structure containing the template body. (For more
+ information, go to `Template Anatomy`_ in the AWS CloudFormation
+ User Guide.)
+ Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are
+ passed, only `TemplateBody` is used.
:type template_url: string
- :param template_url: An S3 URL of a stored template JSON document. If
- both the template_body and template_url are
- specified, the template_body takes precedence.
+ :param template_url: Location of file containing the template body. The
+ URL must point to a template (max size: 307,200 bytes) located in
+ an S3 bucket in the same region as the stack. For more information,
+ go to the `Template Anatomy`_ in the AWS CloudFormation User Guide.
+ Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
+ passed, only `TemplateBody` is used.
+
+ :type parameters: list
+ :param parameters: A list of key/value tuples that specify input
+ parameters for the stack.
+
+ :type disable_rollback: boolean
+ :param disable_rollback: Set to `True` to disable rollback of the stack
+ if stack creation failed. You can specify either `DisableRollback`
+ or `OnFailure`, but not both.
+ Default: `False`
+
+ :type timeout_in_minutes: integer
+ :param timeout_in_minutes: The amount of time that can pass before the
+ stack status becomes CREATE_FAILED; if `DisableRollback` is not set
+ or is set to `False`, the stack will be rolled back.
+
+ :type notification_arns: list
+ :param notification_arns: The Simple Notification Service (SNS) topic
+ ARNs to publish stack related events. You can find your SNS topic
+ ARNs using the `SNS console`_ or your Command Line Interface (CLI).
- :type parameters: list of tuples
- :param parameters: A list of (key, value) pairs for template input
- parameters.
+ :type capabilities: list
+ :param capabilities: The list of capabilities that you want to allow in
+ the stack. If your template contains certain resources, you must
+ specify the CAPABILITY_IAM value for this parameter; otherwise,
+ this action returns an InsufficientCapabilities error. The
+ following resources require you to specify the capabilities
+ parameter: `AWS::CloudFormation::Stack`_, `AWS::IAM::AccessKey`_,
+ `AWS::IAM::Group`_, `AWS::IAM::InstanceProfile`_,
+ `AWS::IAM::Policy`_, `AWS::IAM::Role`_, `AWS::IAM::User`_, and
+ `AWS::IAM::UserToGroupAddition`_.
+
+ :type on_failure: string
+ :param on_failure: Determines what action will be taken if stack
+ creation fails. This must be one of: DO_NOTHING, ROLLBACK, or
+ DELETE. You can specify either `OnFailure` or `DisableRollback`,
+ but not both.
+ Default: `ROLLBACK`
+
+ :type stack_policy_body: string
+ :param stack_policy_body: Structure containing the stack policy body.
+ (For more information, go to `Prevent Updates to Stack Resources`_
+ in the AWS CloudFormation User Guide.)
+ If you pass `StackPolicyBody` and `StackPolicyURL`, only
+ `StackPolicyBody` is used.
+
+ :type stack_policy_url: string
+ :param stack_policy_url: Location of a file containing the stack
+ policy. The URL must point to a policy (max size: 16KB) located in
+ an S3 bucket in the same region as the stack. If you pass
+ `StackPolicyBody` and `StackPolicyURL`, only `StackPolicyBody` is
+ used.
- :type notification_arns: list of strings
- :param notification_arns: A list of SNS topics to send Stack event
- notifications to.
+ :type tags: dict
+ :param tags: A set of user-defined `Tags` to associate with this stack,
+ represented by key/value pairs. Tags defined for the stack are
+ propagated to EC2 resources that are created as part of the stack.
+ A maximum number of 10 tags can be specified.
+ """
+ params = self._build_create_or_update_params(stack_name, template_body,
+ template_url, parameters, disable_rollback, timeout_in_minutes,
+ notification_arns, capabilities, on_failure, stack_policy_body,
+ stack_policy_url, tags)
+ body = self._do_request('CreateStack', params, '/', 'POST')
+ return body['CreateStackResponse']['CreateStackResult']['StackId']
+
+ def update_stack(self, stack_name, template_body=None, template_url=None,
+ parameters=None, notification_arns=None, disable_rollback=False,
+ timeout_in_minutes=None, capabilities=None, tags=None,
+ stack_policy_during_update_body=None,
+ stack_policy_during_update_url=None,
+ stack_policy_body=None, stack_policy_url=None):
+ """
+ Updates a stack as specified in the template. After the call
+ completes successfully, the stack update starts. You can check
+ the status of the stack via the DescribeStacks action.
+
+
+
+ **Note:** You cannot update `AWS::S3::Bucket`_ resources, for
+ example, to add or modify tags.
+
+
+
+ To get a copy of the template for an existing stack, you can
+ use the GetTemplate action.
+
+ Tags that were associated with this stack during creation time
+ will still be associated with the stack after an `UpdateStack`
+ operation.
+
+ For more information about creating an update template,
+ updating a stack, and monitoring the progress of the update,
+ see `Updating a Stack`_.
+
+ :type stack_name: string
+ :param stack_name:
+ The name or stack ID of the stack to update.
+
+ Must contain only alphanumeric characters (case sensitive) and start
+ with an alpha character. Maximum length of the name is 255
+ characters.
+
+ :type template_body: string
+ :param template_body: Structure containing the template body. (For more
+ information, go to `Template Anatomy`_ in the AWS CloudFormation
+ User Guide.)
+ Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are
+ passed, only `TemplateBody` is used.
+
+ :type template_url: string
+ :param template_url: Location of file containing the template body. The
+ URL must point to a template located in an S3 bucket in the same
+ region as the stack. For more information, go to `Template
+ Anatomy`_ in the AWS CloudFormation User Guide.
+ Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
+ passed, only `TemplateBody` is used.
+
+ :type parameters: list
+ :param parameters: A list of key/value tuples that specify input
+ parameters for the stack.
+
+ :type notification_arns: list
+ :param notification_arns: The Simple Notification Service (SNS) topic
+ ARNs to publish stack related events. You can find your SNS topic
+ ARNs using the `SNS console`_ or your Command Line Interface (CLI).
:type disable_rollback: bool
:param disable_rollback: Indicates whether or not to rollback on
failure.
- :type timeout_in_minutes: int
- :param timeout_in_minutes: Maximum amount of time to let the Stack
- spend creating itself. If this timeout is exceeded,
- the Stack will enter the CREATE_FAILED state
+ :type timeout_in_minutes: integer
+ :param timeout_in_minutes: The amount of time that can pass before the
+ stack status becomes CREATE_FAILED; if `DisableRollback` is not set
+ or is set to `False`, the stack will be rolled back.
:type capabilities: list
:param capabilities: The list of capabilities you want to allow in
@@ -252,38 +437,86 @@ class CloudFormationConnection(AWSQueryConnection):
'CAPABILITY_IAM'.
:type tags: dict
- :param tags: A dictionary of (key, value) pairs of tags to
- associate with this stack.
+ :param tags: A set of user-defined `Tags` to associate with this stack,
+ represented by key/value pairs. Tags defined for the stack are
+ propagated to EC2 resources that are created as part of the stack.
+ A maximum number of 10 tags can be specified.
+
+ :type template_url: string
+ :param template_url: Location of file containing the template body. The
+ URL must point to a template located in an S3 bucket in the same
+ region as the stack. For more information, go to `Template
+ Anatomy`_ in the AWS CloudFormation User Guide.
+ Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
+ passed, only `TemplateBody` is used.
+
+ :type stack_policy_during_update_body: string
+ :param stack_policy_during_update_body: Structure containing the
+ temporary overriding stack policy body. If you pass
+ `StackPolicyDuringUpdateBody` and `StackPolicyDuringUpdateURL`,
+ only `StackPolicyDuringUpdateBody` is used.
+ If you want to update protected resources, specify a temporary
+ overriding stack policy during this update. If you do not specify a
+ stack policy, the current policy that is associated with the stack
+ will be used.
+
+ :type stack_policy_during_update_url: string
+ :param stack_policy_during_update_url: Location of a file containing
+ the temporary overriding stack policy. The URL must point to a
+ policy (max size: 16KB) located in an S3 bucket in the same region
+ as the stack. If you pass `StackPolicyDuringUpdateBody` and
+ `StackPolicyDuringUpdateURL`, only `StackPolicyDuringUpdateBody` is
+ used.
+ If you want to update protected resources, specify a temporary
+ overriding stack policy during this update. If you do not specify a
+ stack policy, the current policy that is associated with the stack
+ will be used.
:rtype: string
:return: The unique Stack ID.
"""
- params = self._build_create_or_update_params(stack_name,
- template_body, template_url, parameters, notification_arns,
- disable_rollback, timeout_in_minutes, capabilities, tags)
- response = self.make_request('UpdateStack', params, '/', 'POST')
- body = response.read()
- if response.status == 200:
- body = json.loads(body)
- return body['UpdateStackResponse']['UpdateStackResult']['StackId']
- else:
- boto.log.error('%s %s' % (response.status, response.reason))
- boto.log.error('%s' % body)
- raise self.ResponseError(response.status, response.reason, body)
+ params = self._build_create_or_update_params(stack_name, template_body,
+ template_url, parameters, disable_rollback, timeout_in_minutes,
+ notification_arns, capabilities, None, stack_policy_body,
+ stack_policy_url, tags, stack_policy_during_update_body,
+ stack_policy_during_update_url)
+ body = self._do_request('UpdateStack', params, '/', 'POST')
+ return body['UpdateStackResponse']['UpdateStackResult']['StackId']
def delete_stack(self, stack_name_or_id):
+ """
+ Deletes a specified stack. Once the call completes
+ successfully, stack deletion starts. Deleted stacks do not
+ show up in the DescribeStacks API if the deletion has been
+ completed successfully.
+
+ :type stack_name_or_id: string
+ :param stack_name_or_id: The name or the unique identifier associated
+ with the stack.
+
+ """
params = {'ContentType': "JSON", 'StackName': stack_name_or_id}
- # TODO: change this to get_status ?
- response = self.make_request('DeleteStack', params, '/', 'GET')
- body = response.read()
- if response.status == 200:
- return json.loads(body)
- else:
- boto.log.error('%s %s' % (response.status, response.reason))
- boto.log.error('%s' % body)
- raise self.ResponseError(response.status, response.reason, body)
+ return self._do_request('DeleteStack', params, '/', 'GET')
def describe_stack_events(self, stack_name_or_id=None, next_token=None):
+ """
+ Returns all stack related events for a specified stack. For
+ more information about a stack's event history, go to
+ `Stacks`_ in the AWS CloudFormation User Guide.
+ Events are returned, even if the stack never existed or has
+ been successfully deleted.
+
+ :type stack_name_or_id: string
+ :param stack_name_or_id: The name or the unique identifier associated
+ with the stack.
+ Default: There is no default value.
+
+ :type next_token: string
+ :param next_token: String that identifies the start of the next list of
+ events, if there is one.
+ Default: There is no default value.
+
+ """
params = {}
if stack_name_or_id:
params['StackName'] = stack_name_or_id
@@ -293,21 +526,82 @@ class CloudFormationConnection(AWSQueryConnection):
StackEvent)])
def describe_stack_resource(self, stack_name_or_id, logical_resource_id):
+ """
+ Returns a description of the specified resource in the
+ specified stack.
+
+ For deleted stacks, DescribeStackResource returns resource
+ information for up to 90 days after the stack has been
+ deleted.
+
+ :type stack_name_or_id: string
+ :param stack_name_or_id: The name or the unique identifier associated
+ with the stack.
+ Default: There is no default value.
+
+ :type logical_resource_id: string
+ :param logical_resource_id: The logical name of the resource as
+ specified in the template.
+ Default: There is no default value.
+
+ """
params = {'ContentType': "JSON", 'StackName': stack_name_or_id,
'LogicalResourceId': logical_resource_id}
- response = self.make_request('DescribeStackResource', params,
- '/', 'GET')
- body = response.read()
- if response.status == 200:
- return json.loads(body)
- else:
- boto.log.error('%s %s' % (response.status, response.reason))
- boto.log.error('%s' % body)
- raise self.ResponseError(response.status, response.reason, body)
+ return self._do_request('DescribeStackResource', params, '/', 'GET')
def describe_stack_resources(self, stack_name_or_id=None,
logical_resource_id=None,
physical_resource_id=None):
+ """
+ Returns AWS resource descriptions for running and deleted
+ stacks. If `StackName` is specified, all the associated
+ resources that are part of the stack are returned. If
+ `PhysicalResourceId` is specified, the associated resources of
+ the stack that the resource belongs to are returned.
+ Only the first 100 resources will be returned. If your stack
+ has more resources than this, you should use
+ `ListStackResources` instead.
+ For deleted stacks, `DescribeStackResources` returns resource
+ information for up to 90 days after the stack has been
+ deleted.
+
+ You must specify either `StackName` or `PhysicalResourceId`,
+ but not both. In addition, you can specify `LogicalResourceId`
+ to filter the returned result. For more information about
+ resources, the `LogicalResourceId` and `PhysicalResourceId`,
+ go to the `AWS CloudFormation User Guide`_.
+ A `ValidationError` is returned if you specify both
+ `StackName` and `PhysicalResourceId` in the same request.
+
+ :type stack_name_or_id: string
+ :param stack_name_or_id: The name or the unique identifier associated
+ with the stack.
+ Required: Conditional. If you do not specify `StackName`, you must
+ specify `PhysicalResourceId`.
+
+ Default: There is no default value.
+
+ :type logical_resource_id: string
+ :param logical_resource_id: The logical name of the resource as
+ specified in the template.
+ Default: There is no default value.
+
+ :type physical_resource_id: string
+ :param physical_resource_id: The name or unique identifier that
+ corresponds to a physical instance ID of a resource supported by
+ AWS CloudFormation.
+ For example, for an Amazon Elastic Compute Cloud (EC2) instance,
+ `PhysicalResourceId` corresponds to the `InstanceId`. You can pass
+ the EC2 `InstanceId` to `DescribeStackResources` to find which
+ stack the instance belongs to and what other resources are part of
+ the stack.
+
+ Required: Conditional. If you do not specify `PhysicalResourceId`, you
+ must specify `StackName`.
+
+ Default: There is no default value.
+
+ """
params = {}
if stack_name_or_id:
params['StackName'] = stack_name_or_id
@@ -318,35 +612,110 @@ class CloudFormationConnection(AWSQueryConnection):
return self.get_list('DescribeStackResources', params,
[('member', StackResource)])
- def describe_stacks(self, stack_name_or_id=None):
+ def describe_stacks(self, stack_name_or_id=None, next_token=None):
+ """
+ Returns the description for the specified stack; if no stack
+ name was specified, then it returns the description for all
+ the stacks created.
+
+ :type stack_name_or_id: string
+ :param stack_name_or_id: The name or the unique identifier associated
+ with the stack.
+ Default: There is no default value.
+
+ :type next_token: string
+ :param next_token: String that identifies the start of the next list of
+ stacks, if there is one.
+
+ """
params = {}
if stack_name_or_id:
params['StackName'] = stack_name_or_id
+ if next_token is not None:
+ params['NextToken'] = next_token
return self.get_list('DescribeStacks', params, [('member', Stack)])
def get_template(self, stack_name_or_id):
+ """
+ Returns the template body for a specified stack. You can get
+ the template for running or deleted stacks.
+
+ For deleted stacks, GetTemplate returns the template for up to
+ 90 days after the stack has been deleted.
+ If the template does not exist, a `ValidationError` is
+ returned.
+
+ :type stack_name_or_id: string
+ :param stack_name_or_id: The name or the unique identifier associated
+ with the stack, which are not always interchangeable:
+
+ + Running stacks: You can specify either the stack's name or its unique
+ stack ID.
+ + Deleted stacks: You must specify the unique stack ID.
+
+
+ Default: There is no default value.
+
+ """
params = {'ContentType': "JSON", 'StackName': stack_name_or_id}
- response = self.make_request('GetTemplate', params, '/', 'GET')
- body = response.read()
- if response.status == 200:
- return json.loads(body)
- else:
- boto.log.error('%s %s' % (response.status, response.reason))
- boto.log.error('%s' % body)
- raise self.ResponseError(response.status, response.reason, body)
+ return self._do_request('GetTemplate', params, '/', 'GET')
def list_stack_resources(self, stack_name_or_id, next_token=None):
+ """
+ Returns descriptions of all resources of the specified stack.
+
+ For deleted stacks, ListStackResources returns resource
+ information for up to 90 days after the stack has been
+ deleted.
+
+ :type stack_name_or_id: string
+ :param stack_name_or_id: The name or the unique identifier associated
+ with the stack, which are not always interchangeable:
+
+ + Running stacks: You can specify either the stack's name or its unique
+ stack ID.
+ + Deleted stacks: You must specify the unique stack ID.
+
+
+ Default: There is no default value.
+
+ :type next_token: string
+ :param next_token: String that identifies the start of the next list of
+ stack resource summaries, if there is one.
+ Default: There is no default value.
+
+ """
params = {'StackName': stack_name_or_id}
if next_token:
params['NextToken'] = next_token
return self.get_list('ListStackResources', params,
[('member', StackResourceSummary)])
- def list_stacks(self, stack_status_filters=[], next_token=None):
+ def list_stacks(self, stack_status_filters=None, next_token=None):
+ """
+ Returns the summary information for stacks whose status
+ matches the specified StackStatusFilter. Summary information
+ for stacks that have been deleted is kept for 90 days after
+ the stack is deleted. If no StackStatusFilter is specified,
+ summary information for all stacks is returned (including
+ existing stacks and stacks that have been deleted).
+
+ :type next_token: string
+ :param next_token: String that identifies the start of the next list of
+ stacks, if there is one.
+ Default: There is no default value.
+
+ :type stack_status_filters: list
+ :param stack_status_filters: Stack status to use as a filter. Specify
+ one or more stack status codes to list only stacks with the
+ specified status codes. For a complete list of stack status codes,
+ see the `StackStatus` parameter of the Stack data type.
+
+ """
params = {}
if next_token:
params['NextToken'] = next_token
- if len(stack_status_filters) > 0:
+ if stack_status_filters and len(stack_status_filters) > 0:
self.build_list_params(params, stack_status_filters,
"StackStatusFilter.member")
@@ -354,6 +723,25 @@ class CloudFormationConnection(AWSQueryConnection):
[('member', StackSummary)])
def validate_template(self, template_body=None, template_url=None):
+ """
+ Validates a specified template.
+
+ :type template_body: string
+ :param template_body: String containing the template body. (For more
+ information, go to `Template Anatomy`_ in the AWS CloudFormation
+ User Guide.)
+ Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
+ passed, only `TemplateBody` is used.
+
+ :type template_url: string
+ :param template_url: Location of file containing the template body. The
+ URL must point to a template (max size: 307,200 bytes) located in
+ an S3 bucket in the same region as the stack. For more information,
+ go to `Template Anatomy`_ in the AWS CloudFormation User Guide.
+ Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
+ passed, only `TemplateBody` is used.
+
+ """
params = {}
if template_body:
params['TemplateBody'] = template_body
@@ -366,7 +754,116 @@ class CloudFormationConnection(AWSQueryConnection):
verb="POST")
def cancel_update_stack(self, stack_name_or_id=None):
+ """
+ Cancels an update on the specified stack. If the call
+ completes successfully, the stack will roll back the update
+ and revert to the previous stack configuration.
+ Only stacks that are in the UPDATE_IN_PROGRESS state can be
+ canceled.
+
+ :type stack_name_or_id: string
+ :param stack_name_or_id: The name or the unique identifier associated with
+ the stack.
+
+ """
params = {}
if stack_name_or_id:
params['StackName'] = stack_name_or_id
return self.get_status('CancelUpdateStack', params)
+
+ def estimate_template_cost(self, template_body=None, template_url=None,
+ parameters=None):
+ """
+ Returns the estimated monthly cost of a template. The return
+ value is an AWS Simple Monthly Calculator URL with a query
+ string that describes the resources required to run the
+ template.
+
+ :type template_body: string
+ :param template_body: Structure containing the template body. (For more
+ information, go to `Template Anatomy`_ in the AWS CloudFormation
+ User Guide.)
+ Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are
+ passed, only `TemplateBody` is used.
+
+ :type template_url: string
+ :param template_url: Location of file containing the template body. The
+ URL must point to a template located in an S3 bucket in the same
+ region as the stack. For more information, go to `Template
+ Anatomy`_ in the AWS CloudFormation User Guide.
+ Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
+ passed, only `TemplateBody` is used.
+
+ :type parameters: list
+ :param parameters: A list of key/value tuples that specify input
+ parameters for the template.
+
+ :rtype: string
+ :returns: URL to pre-filled cost calculator
+ """
+ params = {'ContentType': "JSON"}
+ if template_body is not None:
+ params['TemplateBody'] = template_body
+ if template_url is not None:
+ params['TemplateURL'] = template_url
+ if parameters and len(parameters) > 0:
+ for i, (key, value) in enumerate(parameters):
+ params['Parameters.member.%d.ParameterKey' % (i + 1)] = key
+ params['Parameters.member.%d.ParameterValue' % (i + 1)] = value
+
+ response = self._do_request('EstimateTemplateCost', params, '/', 'POST')
+ return response['EstimateTemplateCostResponse']\
+ ['EstimateTemplateCostResult']\
+ ['Url']
+
+ def get_stack_policy(self, stack_name_or_id):
+ """
+ Returns the stack policy for a specified stack. If a stack
+ doesn't have a policy, a null value is returned.
+
+ :type stack_name_or_id: string
+ :param stack_name_or_id: The name or stack ID that is associated with
+ the stack whose policy you want to get.
+
+ :rtype: string
+ :return: The policy JSON document
+ """
+ params = {'ContentType': "JSON", 'StackName': stack_name_or_id, }
+ response = self._do_request('GetStackPolicy', params, '/', 'POST')
+ return response['GetStackPolicyResponse']\
+ ['GetStackPolicyResult']\
+ ['StackPolicyBody']
+
+ def set_stack_policy(self, stack_name_or_id, stack_policy_body=None,
+ stack_policy_url=None):
+ """
+ Sets a stack policy for a specified stack.
+
+ :type stack_name_or_id: string
+ :param stack_name_or_id: The name or stack ID that you want to
+ associate a policy with.
+
+ :type stack_policy_body: string
+ :param stack_policy_body: Structure containing the stack policy body.
+ (For more information, go to `Prevent Updates to Stack Resources`_
+ in the AWS CloudFormation User Guide.)
+ You must pass `StackPolicyBody` or `StackPolicyURL`. If both are
+ passed, only `StackPolicyBody` is used.
+
+ :type stack_policy_url: string
+ :param stack_policy_url: Location of a file containing the stack
+ policy. The URL must point to a policy (max size: 16KB) located in
+ an S3 bucket in the same region as the stack. You must pass
+ `StackPolicyBody` or `StackPolicyURL`. If both are passed, only
+ `StackPolicyBody` is used.
+
+ """
+ params = {'ContentType': "JSON", 'StackName': stack_name_or_id, }
+ if stack_policy_body is not None:
+ params['StackPolicyBody'] = stack_policy_body
+ if stack_policy_url is not None:
+ params['StackPolicyURL'] = stack_policy_url
+
+ response = self._do_request('SetStackPolicy', params, '/', 'POST')
+ return response['SetStackPolicyResponse']\
+ ['SetStackPolicyResult']
diff --git a/boto/cloudformation/stack.py b/boto/cloudformation/stack.py
index c173de66..5dac0dd7 100755
--- a/boto/cloudformation/stack.py
+++ b/boto/cloudformation/stack.py
@@ -107,6 +107,35 @@ class Stack(object):
def get_template(self):
return self.connection.get_template(stack_name_or_id=self.stack_id)
+ def get_policy(self):
+ """
+ Returns the stack policy for this stack. If it has no policy
+ then, a null value is returned.
+ """
+ return self.connection.get_stack_policy(self.stack_id)
+
+ def set_policy(self, stack_policy_body=None, stack_policy_url=None):
+ """
+ Sets a stack policy for this stack.
+
+ :type stack_policy_body: string
+ :param stack_policy_body: Structure containing the stack policy body.
+ (For more information, go to `Prevent Updates to Stack Resources`_
+ in the AWS CloudFormation User Guide.)
+ You must pass `StackPolicyBody` or `StackPolicyURL`. If both are
+ passed, only `StackPolicyBody` is used.
+
+ :type stack_policy_url: string
+ :param stack_policy_url: Location of a file containing the stack
+ policy. The URL must point to a policy (max size: 16KB) located in
+ an S3 bucket in the same region as the stack. You must pass
+ `StackPolicyBody` or `StackPolicyURL`. If both are passed, only
+ `StackPolicyBody` is used.
+ """
+ return self.connection.set_stack_policy(self.stack_id,
+ stack_policy_body=stack_policy_body,
+ stack_policy_url=stack_policy_url)
+
class StackSummary(object):
def __init__(self, connection=None):
diff --git a/boto/cloudformation/template.py b/boto/cloudformation/template.py
index 762efce5..bab21486 100644
--- a/boto/cloudformation/template.py
+++ b/boto/cloudformation/template.py
@@ -1,21 +1,29 @@
from boto.resultset import ResultSet
+from boto.cloudformation.stack import Capability
class Template(object):
def __init__(self, connection=None):
self.connection = connection
self.description = None
self.template_parameters = None
+ self.capabilities_reason = None
+ self.capabilities = None
def startElement(self, name, attrs, connection):
if name == "Parameters":
self.template_parameters = ResultSet([('member', TemplateParameter)])
return self.template_parameters
+ elif name == "Capabilities":
+ self.capabilities = ResultSet([('member', Capability)])
+ return self.capabilities
else:
return None
def endElement(self, name, value, connection):
if name == "Description":
self.description = value
+ elif name == "CapabilitiesReason":
+ self.capabilities_reason = value
else:
setattr(self, name, value)
diff --git a/boto/cloudfront/__init__.py b/boto/cloudfront/__init__.py
index 42f70601..1afefebb 100644
--- a/boto/cloudfront/__init__.py
+++ b/boto/cloudfront/__init__.py
@@ -43,12 +43,13 @@ class CloudFrontConnection(AWSAuthConnection):
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
port=None, proxy=None, proxy_port=None,
host=DefaultHost, debug=0, security_token=None,
- validate_certs=True, profile_name=None):
+ validate_certs=True, profile_name=None, https_connection_factory=None):
super(CloudFrontConnection, self).__init__(host,
aws_access_key_id, aws_secret_access_key,
True, port, proxy, proxy_port, debug=debug,
security_token=security_token,
- validate_certs=validate_certs,
+ validate_certs=validate_certs,
+ https_connection_factory=https_connection_factory,
profile_name=profile_name)
def get_etag(self, response):
diff --git a/boto/connection.py b/boto/connection.py
index 9951778c..c40acf1f 100644
--- a/boto/connection.py
+++ b/boto/connection.py
@@ -45,6 +45,7 @@ Handles basic connections to AWS
from __future__ import with_statement
import base64
+from datetime import datetime
import errno
import httplib
import os
@@ -568,6 +569,7 @@ class AWSAuthConnection(object):
host, config, self.provider, self._required_auth_capability())
if getattr(self, 'AuthServiceName', None) is not None:
self.auth_service_name = self.AuthServiceName
+ self.request_hook = None
def __repr__(self):
return '%s:%s' % (self.__class__.__name__, self.host)
@@ -860,6 +862,9 @@ class AWSAuthConnection(object):
except AttributeError:
request.headers['Host'] = self.host.split(':', 1)[0]
+ def set_request_hook(self, hook):
+ self.request_hook = hook
+
def _mexe(self, request, sender=None, override_num_retries=None,
retry_handler=None):
"""
@@ -902,6 +907,7 @@ class AWSAuthConnection(object):
if 's3' not in self._required_auth_capability():
if not getattr(self, 'anon', False):
self.set_host_header(request)
+ request.start_time = datetime.now()
if callable(sender):
response = sender(connection, request.method, request.path,
request.body, request.headers)
@@ -942,6 +948,8 @@ class AWSAuthConnection(object):
else:
self.put_http_connection(request.host, request.port,
self.is_secure, connection)
+ if self.request_hook is not None:
+ self.request_hook.handle_request_data(request, response)
return response
else:
scheme, request.host, request.path, \
@@ -982,6 +990,8 @@ class AWSAuthConnection(object):
# and stil haven't succeeded. So, if we have a response object,
# use it to raise an exception.
# Otherwise, raise the exception that must have already happened.
+ if self.request_hook is not None:
+ self.request_hook.handle_request_data(request, response, error=True)
if response:
raise BotoServerError(response.status, response.reason, body)
elif e:
diff --git a/boto/datapipeline/__init__.py b/boto/datapipeline/__init__.py
index e69de29b..1f61ea67 100644
--- a/boto/datapipeline/__init__.py
+++ b/boto/datapipeline/__init__.py
@@ -0,0 +1,41 @@
+# Copyright (c) 2014 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.regioninfo import get_regions
+
+
+def regions():
+ """
+ Get all available regions for the AWS Datapipeline service.
+
+ :rtype: list
+ :return: A list of :class:`boto.regioninfo.RegionInfo`
+ """
+ from boto.datapipeline.layer1 import DataPipelineConnection
+ return get_regions('datapipeline', connection_cls=DataPipelineConnection)
+
+
+def connect_to_region(region_name, **kw_params):
+ for region in regions():
+ if region.name == region_name:
+ return region.connect(**kw_params)
+ return None
diff --git a/boto/datapipeline/layer1.py b/boto/datapipeline/layer1.py
index 0d904352..6635f01c 100644
--- a/boto/datapipeline/layer1.py
+++ b/boto/datapipeline/layer1.py
@@ -85,7 +85,7 @@ class DataPipelineConnection(AWSQueryConnection):
def __init__(self, **kwargs):
- region = kwargs.get('region')
+ region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
diff --git a/boto/dynamodb2/exceptions.py b/boto/dynamodb2/exceptions.py
index a9fcf75b..3a677e09 100644
--- a/boto/dynamodb2/exceptions.py
+++ b/boto/dynamodb2/exceptions.py
@@ -72,3 +72,7 @@ class UnknownFilterTypeError(DynamoDBError):
class QueryError(DynamoDBError):
pass
+
+
+class ItemNotFound(DynamoDBError):
+ pass
diff --git a/boto/dynamodb2/fields.py b/boto/dynamodb2/fields.py
index 911a11b5..4443969e 100644
--- a/boto/dynamodb2/fields.py
+++ b/boto/dynamodb2/fields.py
@@ -323,7 +323,10 @@ class GlobalIncludeIndex(GlobalBaseIndexField, IncludeIndex):
projection_type = 'INCLUDE'
def __init__(self, *args, **kwargs):
+ throughput = kwargs.pop('throughput', None)
IncludeIndex.__init__(self, *args, **kwargs)
+ if throughput:
+ kwargs['throughput'] = throughput
GlobalBaseIndexField.__init__(self, *args, **kwargs)
def schema(self):
@@ -331,4 +334,4 @@ class GlobalIncludeIndex(GlobalBaseIndexField, IncludeIndex):
schema_data = IncludeIndex.schema(self)
# Also the throughput.
schema_data.update(GlobalBaseIndexField.schema(self))
- return schema_data \ No newline at end of file
+ return schema_data
diff --git a/boto/dynamodb2/table.py b/boto/dynamodb2/table.py
index 7d40ad5a..338ced19 100644
--- a/boto/dynamodb2/table.py
+++ b/boto/dynamodb2/table.py
@@ -488,6 +488,8 @@ class Table(object):
attributes_to_get=attributes,
consistent_read=consistent
)
+ if 'Item' not in item_data:
+ raise exceptions.ItemNotFound("Item %s couldn't be found." % kwargs)
item = Item(self)
item.load(item_data)
return item
@@ -526,7 +528,7 @@ class Table(object):
"""
try:
self.get_item(**kwargs)
- except JSONResponseError:
+ except (JSONResponseError, exceptions.ItemNotFound):
return False
return True
@@ -880,10 +882,15 @@ class Table(object):
"""
if self.schema:
- if len(self.schema) == 1 and len(filter_kwargs) <= 1:
- raise exceptions.QueryError(
- "You must specify more than one key to filter on."
- )
+ if len(self.schema) == 1:
+ if len(filter_kwargs) <= 1:
+ if not self.global_indexes or not len(self.global_indexes):
+ # If the schema only has one field, there's <= 1 filter
+ # param & no Global Secondary Indexes, this is user
+ # error. Bail early.
+ raise exceptions.QueryError(
+ "You must specify more than one key to filter on."
+ )
if attributes is not None:
select = 'SPECIFIC_ATTRIBUTES'
diff --git a/boto/endpoints.json b/boto/endpoints.json
index c15235c9..bf52525f 100644
--- a/boto/endpoints.json
+++ b/boto/endpoints.json
@@ -56,7 +56,11 @@
"us-west-2": "monitoring.us-west-2.amazonaws.com"
},
"datapipeline": {
- "us-east-1": "datapipeline.us-east-1.amazonaws.com"
+ "us-east-1": "datapipeline.us-east-1.amazonaws.com",
+ "us-west-2": "datapipeline.us-west-2.amazonaws.com",
+ "eu-west-1": "datapipeline.eu-west-1.amazonaws.com",
+ "ap-southeast-2": "datapipeline.ap-southeast-2.amazonaws.com",
+ "ap-northeast-1": "datapipeline.ap-northeast-1.amazonaws.com"
},
"directconnect": {
"ap-northeast-1": "directconnect.ap-northeast-1.amazonaws.com",
@@ -176,6 +180,9 @@
"us-west-1": "importexport.amazonaws.com",
"us-west-2": "importexport.amazonaws.com"
},
+ "kinesis": {
+ "us-east-1": "kinesis.us-east-1.amazonaws.com"
+ },
"opsworks": {
"us-east-1": "opsworks.us-east-1.amazonaws.com"
},
diff --git a/boto/exception.py b/boto/exception.py
index 53626bfe..99205c9f 100644
--- a/boto/exception.py
+++ b/boto/exception.py
@@ -27,6 +27,7 @@ Exception classes - Subclassing allows you to check for specific errors
import base64
import xml.sax
from boto import handler
+from boto.compat import json
from boto.resultset import ResultSet
@@ -88,12 +89,25 @@ class BotoServerError(StandardError):
h = handler.XmlHandlerWrapper(self, self)
h.parseString(self.body)
except (TypeError, xml.sax.SAXParseException), pe:
- # Remove unparsable message body so we don't include garbage
- # in exception. But first, save self.body in self.error_message
- # because occasionally we get error messages from Eucalyptus
- # that are just text strings that we want to preserve.
- self.message = self.body
- self.body = None
+ # What if it's JSON? Let's try that.
+ try:
+ parsed = json.loads(self.body)
+
+ if 'RequestId' in parsed:
+ self.request_id = parsed['RequestId']
+ if 'Error' in parsed:
+ if 'Code' in parsed['Error']:
+ self.error_code = parsed['Error']['Code']
+ if 'Message' in parsed['Error']:
+ self.message = parsed['Error']['Message']
+
+ except ValueError:
+ # Remove unparsable message body so we don't include garbage
+ # in exception. But first, save self.body in self.error_message
+ # because occasionally we get error messages from Eucalyptus
+ # that are just text strings that we want to preserve.
+ self.message = self.body
+ self.body = None
def __getattr__(self, name):
if name == 'error_message':
diff --git a/boto/gs/connection.py b/boto/gs/connection.py
index c9a43bd6..9a2e4a2b 100755
--- a/boto/gs/connection.py
+++ b/boto/gs/connection.py
@@ -32,7 +32,7 @@ class Location(object):
class GSConnection(S3Connection):
DefaultHost = 'storage.googleapis.com'
- QueryString = 'Signature=%s&Expires=%d&AWSAccessKeyId=%s'
+ QueryString = 'Signature=%s&Expires=%d&GoogleAccessId=%s'
def __init__(self, gs_access_key_id=None, gs_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
diff --git a/boto/gs/key.py b/boto/gs/key.py
index b67e0604..277e7c71 100644
--- a/boto/gs/key.py
+++ b/boto/gs/key.py
@@ -109,6 +109,9 @@ class Key(S3Key):
self.metageneration = resp.getheader('x-goog-metageneration', None)
self.generation = resp.getheader('x-goog-generation', None)
+ def handle_restore_headers(self, response):
+ return
+
def handle_addl_headers(self, headers):
for key, value in headers:
if key == 'x-goog-hash':
diff --git a/boto/provider.py b/boto/provider.py
index a7ea2028..2febdc99 100644
--- a/boto/provider.py
+++ b/boto/provider.py
@@ -289,7 +289,12 @@ class Provider(object):
if security_token is not None:
self.security_token = security_token
boto.log.debug("Using security token provided by client.")
- elif security_token_name is not None:
+ elif ((security_token_name is not None) and
+ (access_key is None) and (secret_key is None)):
+ # Only provide a token from the environment/config if the
+ # caller did not specify a key and secret. Otherwise an
+ # environment/config token could be paired with a
+ # different set of credentials provided by the caller
if security_token_name.upper() in os.environ:
self.security_token = os.environ[security_token_name.upper()]
boto.log.debug("Using security token found in environment"
diff --git a/boto/rds/__init__.py b/boto/rds/__init__.py
index edf6237a..8e8afa81 100644
--- a/boto/rds/__init__.py
+++ b/boto/rds/__init__.py
@@ -32,7 +32,7 @@ from boto.rds.regioninfo import RDSRegionInfo
from boto.rds.dbsubnetgroup import DBSubnetGroup
from boto.rds.vpcsecuritygroupmembership import VPCSecurityGroupMembership
from boto.regioninfo import get_regions
-from boto.rds.logfile import LogFile
+from boto.rds.logfile import LogFile, LogFileObject
def regions():
@@ -1076,27 +1076,91 @@ class RDSConnection(AWSQueryConnection):
return self.get_list('DescribeDBSnapshots', params,
[('DBSnapshot', DBSnapshot)])
- def get_all_logs(self, dbinstance_id=None):
+ def get_all_logs(self, dbinstance_id, max_records=None, marker=None, file_size=None, filename_contains=None, file_last_written=None):
"""
Get all log files
:type instance_id: str
- :param instance_id: The identifier of a DBInstance. If provided,
- only the :class:`boto.rds.logfile.LogFile` related
- to that instance will be returned. If not
- provided, all logfiles will be returned.
+ :param dbinstance_id: The identifier of a DBInstance.
+
+ :type max_records: int
+ :param max_records: Number of log file names to return.
+
+ :type marker: str
+ :param marker: The marker provided by a previous request.
+
+ :type file_size: int
+ :param file_size: Filter results to files larger than this size in bytes.
+
+ :type filename_contains: str
+ :param filename_contains: Filter results to files with filename containing this string.
+
+ :type file_last_written: int
+ :param file_last_written: Filter results to files written after this time (POSIX timestamp).
:rtype: list
:return: A list of :class:`boto.rds.logfile.LogFile`
"""
- params = {}
- if dbinstance_id:
- params['DBInstanceIdentifier'] = dbinstance_id
- params['MaxRecords'] = 26
+ params = {'DBInstanceIdentifier': dbinstance_id}
+
+ if file_size:
+ params['FileSize'] = file_size
+
+ if filename_contains:
+ params['FilenameContains'] = filename_contains
+
+ if file_last_written:
+ params['FileLastWritten'] = file_last_written
+
+ if marker:
+ params['Marker'] = marker
+
+ if max_records:
+ params['MaxRecords'] = max_records
return self.get_list('DescribeDBLogFiles', params,
[('DescribeDBLogFilesDetails',LogFile)])
+ def get_log_file(self, dbinstance_id, log_file_name, marker=None, number_of_lines=None, max_records=None):
+ """
+ Download a log file from RDS
+
+ :type dbinstance_id: str
+ :param dbinstance_id: The identifier of a DBInstance.
+
+ :type log_file_name: str
+ :param log_file_name: The name of the log file to retrieve
+
+ :type marker: str
+ :param marker: A marker returned from a previous call to this method, or 0 to indicate the start of file. If
+ no marker is specified, this will fetch log lines from the end of file instead.
+
+ :type number_of_lines: int
+ :param number_of_lines: The maximum number of lines to be returned.
+ """
+
+ params = {
+ 'DBInstanceIdentifier': dbinstance_id,
+ 'LogFileName': log_file_name,
+ }
+
+ if marker:
+ params['Marker'] = marker
+
+ if number_of_lines:
+ params['NumberOfLines'] = number_of_lines
+
+ if max_records:
+ params['MaxRecords'] = max_records
+
+ logfile = self.get_object('DownloadDBLogFilePortion', params, LogFileObject)
+
+ if logfile:
+ logfile.log_filename = log_file_name
+ logfile.dbinstance_id = dbinstance_id
+
+ return logfile
+
def create_dbsnapshot(self, snapshot_id, dbinstance_id):
"""
Create a new DB snapshot.
diff --git a/boto/rds/logfile.py b/boto/rds/logfile.py
index 176d37f2..dd80a6ff 100644
--- a/boto/rds/logfile.py
+++ b/boto/rds/logfile.py
@@ -44,3 +44,25 @@ class LogFile(object):
self.size = value
else:
setattr(self, name, value)
+
+
+class LogFileObject(object):
+ def __init__(self, connection=None):
+ self.connection = connection
+ self.log_filename = None
+
+ def __repr__(self):
+ return "LogFileObject: %s/%s" % (self.dbinstance_id, self.log_filename)
+
+ def startElement(self, name, attrs, connection):
+ pass
+
+ def endElement(self, name, value, connection):
+ if name == 'LogFileData':
+ self.data = value
+ elif name == 'AdditionalDataPending':
+ self.additional_data_pending = value
+ elif name == 'Marker':
+ self.marker = value
+ else:
+ setattr(self, name, value)
diff --git a/boto/rds2/__init__.py b/boto/rds2/__init__.py
new file mode 100644
index 00000000..023a0baa
--- /dev/null
+++ b/boto/rds2/__init__.py
@@ -0,0 +1,53 @@
+# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.regioninfo import get_regions
+
+
+def regions():
+ """
+ Get all available regions for the RDS service.
+
+ :rtype: list
+ :return: A list of :class:`boto.regioninfo.RegionInfo`
+ """
+ from boto.rds2.layer1 import RDSConnection
+ return get_regions('rds', connection_cls=RDSConnection)
+
+
+def connect_to_region(region_name, **kw_params):
+ """
+ Given a valid region name, return a
+ :class:`boto.rds2.layer1.RDSConnection`.
+ Any additional parameters after the region_name are passed on to
+ the connect method of the region object.
+
+ :type: str
+ :param region_name: The name of the region to connect to.
+
+ :rtype: :class:`boto.rds2.layer1.RDSConnection` or ``None``
+ :return: A connection to the given region, or None if an invalid region
+ name is given
+ """
+ for region in regions():
+ if region.name == region_name:
+ return region.connect(**kw_params)
+ return None
diff --git a/boto/rds2/exceptions.py b/boto/rds2/exceptions.py
new file mode 100644
index 00000000..be610b01
--- /dev/null
+++ b/boto/rds2/exceptions.py
@@ -0,0 +1,234 @@
+# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.exception import JSONResponseError
+
+
+class InvalidSubnet(JSONResponseError):
+ pass
+
+
+class DBParameterGroupQuotaExceeded(JSONResponseError):
+ pass
+
+
+class DBSubnetGroupAlreadyExists(JSONResponseError):
+ pass
+
+
+class DBSubnetGroupQuotaExceeded(JSONResponseError):
+ pass
+
+
+class InstanceQuotaExceeded(JSONResponseError):
+ pass
+
+
+class InvalidRestore(JSONResponseError):
+ pass
+
+
+class InvalidDBParameterGroupState(JSONResponseError):
+ pass
+
+
+class AuthorizationQuotaExceeded(JSONResponseError):
+ pass
+
+
+class DBSecurityGroupAlreadyExists(JSONResponseError):
+ pass
+
+
+class InsufficientDBInstanceCapacity(JSONResponseError):
+ pass
+
+
+class ReservedDBInstanceQuotaExceeded(JSONResponseError):
+ pass
+
+
+class DBSecurityGroupNotFound(JSONResponseError):
+ pass
+
+
+class DBInstanceAlreadyExists(JSONResponseError):
+ pass
+
+
+class ReservedDBInstanceNotFound(JSONResponseError):
+ pass
+
+
+class DBSubnetGroupDoesNotCoverEnoughAZs(JSONResponseError):
+ pass
+
+
+class InvalidDBSecurityGroupState(JSONResponseError):
+ pass
+
+
+class InvalidVPCNetworkState(JSONResponseError):
+ pass
+
+
+class ReservedDBInstancesOfferingNotFound(JSONResponseError):
+ pass
+
+
+class SNSTopicArnNotFound(JSONResponseError):
+ pass
+
+
+class SNSNoAuthorization(JSONResponseError):
+ pass
+
+
+class SnapshotQuotaExceeded(JSONResponseError):
+ pass
+
+
+class OptionGroupQuotaExceeded(JSONResponseError):
+ pass
+
+
+class DBParameterGroupNotFound(JSONResponseError):
+ pass
+
+
+class SNSInvalidTopic(JSONResponseError):
+ pass
+
+
+class InvalidDBSubnetGroupState(JSONResponseError):
+ pass
+
+
+class DBSubnetGroupNotFound(JSONResponseError):
+ pass
+
+
+class InvalidOptionGroupState(JSONResponseError):
+ pass
+
+
+class SourceNotFound(JSONResponseError):
+ pass
+
+
+class SubscriptionCategoryNotFound(JSONResponseError):
+ pass
+
+
+class EventSubscriptionQuotaExceeded(JSONResponseError):
+ pass
+
+
+class DBSecurityGroupNotSupported(JSONResponseError):
+ pass
+
+
+class InvalidEventSubscriptionState(JSONResponseError):
+ pass
+
+
+class InvalidDBSubnetState(JSONResponseError):
+ pass
+
+
+class InvalidDBSnapshotState(JSONResponseError):
+ pass
+
+
+class SubscriptionAlreadyExist(JSONResponseError):
+ pass
+
+
+class DBSecurityGroupQuotaExceeded(JSONResponseError):
+ pass
+
+
+class ProvisionedIopsNotAvailableInAZ(JSONResponseError):
+ pass
+
+
+class AuthorizationNotFound(JSONResponseError):
+ pass
+
+
+class OptionGroupAlreadyExists(JSONResponseError):
+ pass
+
+
+class SubscriptionNotFound(JSONResponseError):
+ pass
+
+
+class DBUpgradeDependencyFailure(JSONResponseError):
+ pass
+
+
+class PointInTimeRestoreNotEnabled(JSONResponseError):
+ pass
+
+
+class AuthorizationAlreadyExists(JSONResponseError):
+ pass
+
+
+class DBSubnetQuotaExceeded(JSONResponseError):
+ pass
+
+
+class OptionGroupNotFound(JSONResponseError):
+ pass
+
+
+class DBParameterGroupAlreadyExists(JSONResponseError):
+ pass
+
+
+class DBInstanceNotFound(JSONResponseError):
+ pass
+
+
+class ReservedDBInstanceAlreadyExists(JSONResponseError):
+ pass
+
+
+class InvalidDBInstanceState(JSONResponseError):
+ pass
+
+
+class DBSnapshotNotFound(JSONResponseError):
+ pass
+
+
+class DBSnapshotAlreadyExists(JSONResponseError):
+ pass
+
+
+class StorageQuotaExceeded(JSONResponseError):
+ pass
+
+
+class SubnetAlreadyInUse(JSONResponseError):
+ pass
diff --git a/boto/rds2/layer1.py b/boto/rds2/layer1.py
new file mode 100644
index 00000000..1e2ba537
--- /dev/null
+++ b/boto/rds2/layer1.py
@@ -0,0 +1,3774 @@
+# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+import boto
+from boto.connection import AWSQueryConnection
+from boto.regioninfo import RegionInfo
+from boto.exception import JSONResponseError
+from boto.rds2 import exceptions
+
+
+class RDSConnection(AWSQueryConnection):
+ """
+ Amazon Relational Database Service
+ Amazon Relational Database Service (Amazon RDS) is a web service
+ that makes it easier to set up, operate, and scale a relational
+ database in the cloud. It provides cost-efficient, resizable
+ capacity for an industry-standard relational database and manages
+ common database administration tasks, freeing up developers to
+ focus on what makes their applications and businesses unique.
+
+ Amazon RDS gives you access to the capabilities of a familiar
+ MySQL or Oracle database server. This means the code,
+ applications, and tools you already use today with your existing
+ MySQL or Oracle databases work with Amazon RDS without
+ modification. Amazon RDS automatically backs up your database and
+ maintains the database software that powers your DB instance.
+ Amazon RDS is flexible: you can scale your database instance's
+ compute resources and storage capacity to meet your application's
+ demand. As with all Amazon Web Services, there are no up-front
+ investments, and you pay only for the resources you use.
+
+ This is the Amazon RDS API Reference . It contains a comprehensive
+ description of all Amazon RDS Query APIs and data types. Note that
+ this API is asynchronous and some actions may require polling to
+ determine when an action has been applied. See the parameter
+ description to determine if a change is applied immediately or on
+ the next instance reboot or during the maintenance window. For
+ more information on Amazon RDS concepts and usage scenarios, go to
+ the `Amazon RDS User Guide`_.
+ """
+ APIVersion = "2013-09-09"
+ DefaultRegionName = "us-east-1"
+ DefaultRegionEndpoint = "rds.us-east-1.amazonaws.com"
+ ResponseError = JSONResponseError
+
+ _faults = {
+ "InvalidSubnet": exceptions.InvalidSubnet,
+ "DBParameterGroupQuotaExceeded": exceptions.DBParameterGroupQuotaExceeded,
+ "DBSubnetGroupAlreadyExists": exceptions.DBSubnetGroupAlreadyExists,
+ "DBSubnetGroupQuotaExceeded": exceptions.DBSubnetGroupQuotaExceeded,
+ "InstanceQuotaExceeded": exceptions.InstanceQuotaExceeded,
+ "InvalidRestore": exceptions.InvalidRestore,
+ "InvalidDBParameterGroupState": exceptions.InvalidDBParameterGroupState,
+ "AuthorizationQuotaExceeded": exceptions.AuthorizationQuotaExceeded,
+ "DBSecurityGroupAlreadyExists": exceptions.DBSecurityGroupAlreadyExists,
+ "InsufficientDBInstanceCapacity": exceptions.InsufficientDBInstanceCapacity,
+ "ReservedDBInstanceQuotaExceeded": exceptions.ReservedDBInstanceQuotaExceeded,
+ "DBSecurityGroupNotFound": exceptions.DBSecurityGroupNotFound,
+ "DBInstanceAlreadyExists": exceptions.DBInstanceAlreadyExists,
+ "ReservedDBInstanceNotFound": exceptions.ReservedDBInstanceNotFound,
+ "DBSubnetGroupDoesNotCoverEnoughAZs": exceptions.DBSubnetGroupDoesNotCoverEnoughAZs,
+ "InvalidDBSecurityGroupState": exceptions.InvalidDBSecurityGroupState,
+ "InvalidVPCNetworkState": exceptions.InvalidVPCNetworkState,
+ "ReservedDBInstancesOfferingNotFound": exceptions.ReservedDBInstancesOfferingNotFound,
+ "SNSTopicArnNotFound": exceptions.SNSTopicArnNotFound,
+ "SNSNoAuthorization": exceptions.SNSNoAuthorization,
+ "SnapshotQuotaExceeded": exceptions.SnapshotQuotaExceeded,
+ "OptionGroupQuotaExceeded": exceptions.OptionGroupQuotaExceeded,
+ "DBParameterGroupNotFound": exceptions.DBParameterGroupNotFound,
+ "SNSInvalidTopic": exceptions.SNSInvalidTopic,
+ "InvalidDBSubnetGroupState": exceptions.InvalidDBSubnetGroupState,
+ "DBSubnetGroupNotFound": exceptions.DBSubnetGroupNotFound,
+ "InvalidOptionGroupState": exceptions.InvalidOptionGroupState,
+ "SourceNotFound": exceptions.SourceNotFound,
+ "SubscriptionCategoryNotFound": exceptions.SubscriptionCategoryNotFound,
+ "EventSubscriptionQuotaExceeded": exceptions.EventSubscriptionQuotaExceeded,
+ "DBSecurityGroupNotSupported": exceptions.DBSecurityGroupNotSupported,
+ "InvalidEventSubscriptionState": exceptions.InvalidEventSubscriptionState,
+ "InvalidDBSubnetState": exceptions.InvalidDBSubnetState,
+ "InvalidDBSnapshotState": exceptions.InvalidDBSnapshotState,
+ "SubscriptionAlreadyExist": exceptions.SubscriptionAlreadyExist,
+ "DBSecurityGroupQuotaExceeded": exceptions.DBSecurityGroupQuotaExceeded,
+ "ProvisionedIopsNotAvailableInAZ": exceptions.ProvisionedIopsNotAvailableInAZ,
+ "AuthorizationNotFound": exceptions.AuthorizationNotFound,
+ "OptionGroupAlreadyExists": exceptions.OptionGroupAlreadyExists,
+ "SubscriptionNotFound": exceptions.SubscriptionNotFound,
+ "DBUpgradeDependencyFailure": exceptions.DBUpgradeDependencyFailure,
+ "PointInTimeRestoreNotEnabled": exceptions.PointInTimeRestoreNotEnabled,
+ "AuthorizationAlreadyExists": exceptions.AuthorizationAlreadyExists,
+ "DBSubnetQuotaExceeded": exceptions.DBSubnetQuotaExceeded,
+ "OptionGroupNotFound": exceptions.OptionGroupNotFound,
+ "DBParameterGroupAlreadyExists": exceptions.DBParameterGroupAlreadyExists,
+ "DBInstanceNotFound": exceptions.DBInstanceNotFound,
+ "ReservedDBInstanceAlreadyExists": exceptions.ReservedDBInstanceAlreadyExists,
+ "InvalidDBInstanceState": exceptions.InvalidDBInstanceState,
+ "DBSnapshotNotFound": exceptions.DBSnapshotNotFound,
+ "DBSnapshotAlreadyExists": exceptions.DBSnapshotAlreadyExists,
+ "StorageQuotaExceeded": exceptions.StorageQuotaExceeded,
+ "SubnetAlreadyInUse": exceptions.SubnetAlreadyInUse,
+ }
+
+
+ def __init__(self, **kwargs):
+ region = kwargs.pop('region', None)
+ if not region:
+ region = RegionInfo(self, self.DefaultRegionName,
+ self.DefaultRegionEndpoint)
+
+ if 'host' not in kwargs:
+ kwargs['host'] = region.endpoint
+
+ super(RDSConnection, self).__init__(**kwargs)
+ self.region = region
+
+ def _required_auth_capability(self):
+ return ['hmac-v4']
+
+ def add_source_identifier_to_subscription(self, subscription_name,
+ source_identifier):
+ """
+ Adds a source identifier to an existing RDS event notification
+ subscription.
+
+ :type subscription_name: string
+ :param subscription_name: The name of the RDS event notification
+ subscription you want to add a source identifier to.
+
+ :type source_identifier: string
+ :param source_identifier:
+ The identifier of the event source to be added. An identifier must
+ begin with a letter and must contain only ASCII letters, digits,
+ and hyphens; it cannot end with a hyphen or contain two consecutive
+ hyphens.
+
+ Constraints:
+
+
+ + If the source type is a DB instance, then a `DBInstanceIdentifier`
+ must be supplied.
+ + If the source type is a DB security group, a `DBSecurityGroupName`
+ must be supplied.
+ + If the source type is a DB parameter group, a `DBParameterGroupName`
+ must be supplied.
+ + If the source type is a DB snapshot, a `DBSnapshotIdentifier` must be
+ supplied.
+
+ """
+ params = {
+ 'SubscriptionName': subscription_name,
+ 'SourceIdentifier': source_identifier,
+ }
+ return self._make_request(
+ action='AddSourceIdentifierToSubscription',
+ verb='POST',
+ path='/', params=params)
+
+ def add_tags_to_resource(self, resource_name, tags):
+ """
+ Adds metadata tags to an Amazon RDS resource. These tags can
+ also be used with cost allocation reporting to track cost
+ associated with Amazon RDS resources, or used in Condition
+ statement in IAM policy for Amazon RDS.
+
+ For an overview on tagging Amazon RDS resources, see `Tagging
+ Amazon RDS Resources`_.
+
+ :type resource_name: string
+ :param resource_name: The Amazon RDS resource the tags will be added
+ to. This value is an Amazon Resource Name (ARN). For information
+ about creating an ARN, see ` Constructing an RDS Amazon Resource
+ Name (ARN)`_.
+
+ :type tags: list
+ :param tags: The tags to be assigned to the Amazon RDS resource.
+
+ """
+ params = {'ResourceName': resource_name, }
+ self.build_complex_list_params(
+ params, tags,
+ 'Tags.member',
+ ('Key', 'Value'))
+ return self._make_request(
+ action='AddTagsToResource',
+ verb='POST',
+ path='/', params=params)
+
+ def authorize_db_security_group_ingress(self, db_security_group_name,
+ cidrip=None,
+ ec2_security_group_name=None,
+ ec2_security_group_id=None,
+ ec2_security_group_owner_id=None):
+ """
+ Enables ingress to a DBSecurityGroup using one of two forms of
+ authorization. First, EC2 or VPC security groups can be added
+ to the DBSecurityGroup if the application using the database
+ is running on EC2 or VPC instances. Second, IP ranges are
+ available if the application accessing your database is
+ running on the Internet. Required parameters for this API are
+ one of CIDR range, EC2SecurityGroupId for VPC, or
+ (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or
+ EC2SecurityGroupId for non-VPC).
+ You cannot authorize ingress from an EC2 security group in one
+ Region to an Amazon RDS DB instance in another. You cannot
+ authorize ingress from a VPC security group in one VPC to an
+ Amazon RDS DB instance in another.
+ For an overview of CIDR ranges, go to the `Wikipedia
+ Tutorial`_.
+
+ :type db_security_group_name: string
+ :param db_security_group_name: The name of the DB security group to add
+ authorization to.
+
+ :type cidrip: string
+ :param cidrip: The IP range to authorize.
+
+ :type ec2_security_group_name: string
+ :param ec2_security_group_name: Name of the EC2 security group to
+ authorize. For VPC DB security groups, `EC2SecurityGroupId` must be
+ provided. Otherwise, EC2SecurityGroupOwnerId and either
+ `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.
+
+ :type ec2_security_group_id: string
+ :param ec2_security_group_id: Id of the EC2 security group to
+ authorize. For VPC DB security groups, `EC2SecurityGroupId` must be
+ provided. Otherwise, EC2SecurityGroupOwnerId and either
+ `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.
+
+ :type ec2_security_group_owner_id: string
+ :param ec2_security_group_owner_id: AWS Account Number of the owner of
+ the EC2 security group specified in the EC2SecurityGroupName
+ parameter. The AWS Access Key ID is not an acceptable value. For
+ VPC DB security groups, `EC2SecurityGroupId` must be provided.
+ Otherwise, EC2SecurityGroupOwnerId and either
+ `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.
+
+ """
+ params = {'DBSecurityGroupName': db_security_group_name, }
+ if cidrip is not None:
+ params['CIDRIP'] = cidrip
+ if ec2_security_group_name is not None:
+ params['EC2SecurityGroupName'] = ec2_security_group_name
+ if ec2_security_group_id is not None:
+ params['EC2SecurityGroupId'] = ec2_security_group_id
+ if ec2_security_group_owner_id is not None:
+ params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id
+ return self._make_request(
+ action='AuthorizeDBSecurityGroupIngress',
+ verb='POST',
+ path='/', params=params)
+
+ def copy_db_snapshot(self, source_db_snapshot_identifier,
+ target_db_snapshot_identifier, tags=None):
+ """
+ Copies the specified DBSnapshot. The source DBSnapshot must be
+ in the "available" state.
+
+ :type source_db_snapshot_identifier: string
+ :param source_db_snapshot_identifier: The identifier for the source DB
+ snapshot.
+ Constraints:
+
+
+ + Must be the identifier for a valid system snapshot in the "available"
+ state.
+
+
+ Example: `rds:mydb-2012-04-02-00-01`
+
+ :type target_db_snapshot_identifier: string
+ :param target_db_snapshot_identifier: The identifier for the copied
+ snapshot.
+ Constraints:
+
+
+ + Cannot be null, empty, or blank
+ + Must contain from 1 to 255 alphanumeric characters or hyphens
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+
+ Example: `my-db-snapshot`
+
+ :type tags: list
+ :param tags: A list of tags.
+
+ """
+ params = {
+ 'SourceDBSnapshotIdentifier': source_db_snapshot_identifier,
+ 'TargetDBSnapshotIdentifier': target_db_snapshot_identifier,
+ }
+ if tags is not None:
+ self.build_complex_list_params(
+ params, tags,
+ 'Tags.member',
+ ('Key', 'Value'))
+ return self._make_request(
+ action='CopyDBSnapshot',
+ verb='POST',
+ path='/', params=params)
+
+ def create_db_instance(self, db_instance_identifier, allocated_storage,
+ db_instance_class, engine, master_username,
+ master_user_password, db_name=None,
+ db_security_groups=None,
+ vpc_security_group_ids=None,
+ availability_zone=None, db_subnet_group_name=None,
+ preferred_maintenance_window=None,
+ db_parameter_group_name=None,
+ backup_retention_period=None,
+ preferred_backup_window=None, port=None,
+ multi_az=None, engine_version=None,
+ auto_minor_version_upgrade=None,
+ license_model=None, iops=None,
+ option_group_name=None, character_set_name=None,
+ publicly_accessible=None, tags=None):
+ """
+ Creates a new DB instance.
+
+ :type db_name: string
+ :param db_name: The meaning of this parameter differs according to the
+ database engine you use.
+ **MySQL**
+
+ The name of the database to create when the DB instance is created. If
+ this parameter is not specified, no database is created in the DB
+ instance.
+
+ Constraints:
+
+
+ + Must contain 1 to 64 alphanumeric characters
+ + Cannot be a word reserved by the specified database engine
+
+
+ Type: String
+
+ **Oracle**
+
+ The Oracle System ID (SID) of the created DB instance.
+
+ Default: `ORCL`
+
+ Constraints:
+
+
+ + Cannot be longer than 8 characters
+
+
+ **SQL Server**
+
+ Not applicable. Must be null.
+
+ :type db_instance_identifier: string
+ :param db_instance_identifier: The DB instance identifier. This
+ parameter is stored as a lowercase string.
+ Constraints:
+
+
+ + Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15
+ for SQL Server).
+ + First character must be a letter.
+ + Cannot end with a hyphen or contain two consecutive hyphens.
+
+
+ Example: `mydbinstance`
+
+ :type allocated_storage: integer
+ :param allocated_storage: The amount of storage (in gigabytes) to be
+ initially allocated for the database instance.
+ **MySQL**
+
+ Constraints: Must be an integer from 5 to 1024.
+
+ Type: Integer
+
+ **Oracle**
+
+ Constraints: Must be an integer from 10 to 1024.
+
+ **SQL Server**
+
+ Constraints: Must be an integer from 200 to 1024 (Standard Edition and
+ Enterprise Edition) or from 30 to 1024 (Express Edition and Web
+ Edition)
+
+ :type db_instance_class: string
+ :param db_instance_class: The compute and memory capacity of the DB
+ instance.
+ Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large |
+ db.m1.xlarge | db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge`
+
+ :type engine: string
+ :param engine: The name of the database engine to be used for this
+ instance.
+ Valid Values: `MySQL` | `oracle-se1` | `oracle-se` | `oracle-ee` |
+ `sqlserver-ee` | `sqlserver-se` | `sqlserver-ex` | `sqlserver-web`
+
+ :type master_username: string
+ :param master_username:
+ The name of master user for the client DB instance.
+
+ **MySQL**
+
+ Constraints:
+
+
+ + Must be 1 to 16 alphanumeric characters.
+ + First character must be a letter.
+ + Cannot be a reserved word for the chosen database engine.
+
+
+ Type: String
+
+ **Oracle**
+
+ Constraints:
+
+
+ + Must be 1 to 30 alphanumeric characters.
+ + First character must be a letter.
+ + Cannot be a reserved word for the chosen database engine.
+
+
+ **SQL Server**
+
+ Constraints:
+
+
+ + Must be 1 to 128 alphanumeric characters.
+ + First character must be a letter.
+ + Cannot be a reserved word for the chosen database engine.
+
+ :type master_user_password: string
+ :param master_user_password: The password for the master database user.
+ Can be any printable ASCII character except "/", '"', or "@".
+ Type: String
+
+ **MySQL**
+
+ Constraints: Must contain from 8 to 41 characters.
+
+ **Oracle**
+
+ Constraints: Must contain from 8 to 30 characters.
+
+ **SQL Server**
+
+ Constraints: Must contain from 8 to 128 characters.
+
+ :type db_security_groups: list
+ :param db_security_groups: A list of DB security groups to associate
+ with this DB instance.
+ Default: The default DB security group for the database engine.
+
+ :type vpc_security_group_ids: list
+ :param vpc_security_group_ids: A list of EC2 VPC security groups to
+ associate with this DB instance.
+ Default: The default EC2 VPC security group for the DB subnet group's
+ VPC.
+
+ :type availability_zone: string
+ :param availability_zone: The EC2 Availability Zone that the database
+ instance will be created in.
+ Default: A random, system-chosen Availability Zone in the endpoint's
+ region.
+
+ Example: `us-east-1d`
+
+ Constraint: The AvailabilityZone parameter cannot be specified if the
+ MultiAZ parameter is set to `True`. The specified Availability Zone
+ must be in the same region as the current endpoint.
+
+ :type db_subnet_group_name: string
+ :param db_subnet_group_name: A DB subnet group to associate with this
+ DB instance.
+ If there is no DB subnet group, then it is a non-VPC DB instance.
+
+ :type preferred_maintenance_window: string
+ :param preferred_maintenance_window: The weekly time range (in UTC)
+ during which system maintenance can occur.
+ Format: `ddd:hh24:mi-ddd:hh24:mi`
+
+ Default: A 30-minute window selected at random from an 8-hour block of
+ time per region, occurring on a random day of the week. To see the
+ time blocks available, see ` Adjusting the Preferred Maintenance
+ Window`_ in the Amazon RDS User Guide.
+
+ Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun
+
+ Constraints: Minimum 30-minute window.
+
+ :type db_parameter_group_name: string
+ :param db_parameter_group_name:
+ The name of the DB parameter group to associate with this DB instance.
+ If this argument is omitted, the default DBParameterGroup for the
+ specified engine will be used.
+
+ Constraints:
+
+
+ + Must be 1 to 255 alphanumeric characters
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ :type backup_retention_period: integer
+ :param backup_retention_period:
+ The number of days for which automated backups are retained. Setting
+ this parameter to a positive number enables backups. Setting this
+ parameter to 0 disables automated backups.
+
+ Default: 1
+
+ Constraints:
+
+
+ + Must be a value from 0 to 8
+ + Cannot be set to 0 if the DB instance is a master instance with read
+ replicas
+
+ :type preferred_backup_window: string
+ :param preferred_backup_window: The daily time range during which
+ automated backups are created if automated backups are enabled,
+ using the `BackupRetentionPeriod` parameter.
+ Default: A 30-minute window selected at random from an 8-hour block of
+ time per region. See the Amazon RDS User Guide for the time blocks
+ for each region from which the default backup windows are assigned.
+
+ Constraints: Must be in the format `hh24:mi-hh24:mi`. Times should be
+ Universal Time Coordinated (UTC). Must not conflict with the
+ preferred maintenance window. Must be at least 30 minutes.
+
+ :type port: integer
+ :param port: The port number on which the database accepts connections.
+ **MySQL**
+
+ Default: `3306`
+
+ Valid Values: `1150-65535`
+
+ Type: Integer
+
+ **Oracle**
+
+ Default: `1521`
+
+ Valid Values: `1150-65535`
+
+ **SQL Server**
+
+ Default: `1433`
+
+ Valid Values: `1150-65535` except for `1434` and `3389`.
+
+ :type multi_az: boolean
+ :param multi_az: Specifies if the DB instance is a Multi-AZ deployment.
+ You cannot set the AvailabilityZone parameter if the MultiAZ
+ parameter is set to true.
+
+ :type engine_version: string
+ :param engine_version: The version number of the database engine to
+ use.
+ **MySQL**
+
+ Example: `5.1.42`
+
+ Type: String
+
+ **Oracle**
+
+ Example: `11.2.0.2.v2`
+
+ Type: String
+
+ **SQL Server**
+
+ Example: `10.50.2789.0.v1`
+
+ :type auto_minor_version_upgrade: boolean
+ :param auto_minor_version_upgrade: Indicates that minor engine upgrades
+ will be applied automatically to the DB instance during the
+ maintenance window.
+ Default: `True`
+
+ :type license_model: string
+ :param license_model: License model information for this DB instance.
+ Valid values: `license-included` | `bring-your-own-license` | `general-
+ public-license`
+
+ :type iops: integer
+ :param iops: The amount of Provisioned IOPS (input/output operations
+ per second) to be initially allocated for the DB instance.
+ Constraints: Must be an integer greater than 1000.
+
+ :type option_group_name: string
+ :param option_group_name: Indicates that the DB instance should be
+ associated with the specified option group.
+ Permanent options, such as the TDE option for Oracle Advanced Security
+ TDE, cannot be removed from an option group, and that option group
+ cannot be removed from a DB instance once it is associated with a
+ DB instance
+
+ :type character_set_name: string
+ :param character_set_name: For supported engines, indicates that the DB
+ instance should be associated with the specified CharacterSet.
+
+ :type publicly_accessible: boolean
+ :param publicly_accessible: Specifies the accessibility options for the
+ DB instance. A value of true specifies an Internet-facing instance
+ with a publicly resolvable DNS name, which resolves to a public IP
+ address. A value of false specifies an internal instance with a DNS
+ name that resolves to a private IP address.
+ Default: The default behavior varies depending on whether a VPC has
+ been requested or not. The following list shows the default
+ behavior in each case.
+
+
+ + **Default VPC:**true
+ + **VPC:**false
+
+
+ If no DB subnet group has been specified as part of the request and the
+ PubliclyAccessible value has not been set, the DB instance will be
+ publicly accessible. If a specific DB subnet group has been
+ specified as part of the request and the PubliclyAccessible value
+ has not been set, the DB instance will be private.
+
+ :type tags: list
+ :param tags: A list of tags.
+
+ """
+ params = {
+ 'DBInstanceIdentifier': db_instance_identifier,
+ 'AllocatedStorage': allocated_storage,
+ 'DBInstanceClass': db_instance_class,
+ 'Engine': engine,
+ 'MasterUsername': master_username,
+ 'MasterUserPassword': master_user_password,
+ }
+ if db_name is not None:
+ params['DBName'] = db_name
+ if db_security_groups is not None:
+ self.build_list_params(params,
+ db_security_groups,
+ 'DBSecurityGroups.member')
+ if vpc_security_group_ids is not None:
+ self.build_list_params(params,
+ vpc_security_group_ids,
+ 'VpcSecurityGroupIds.member')
+ if availability_zone is not None:
+ params['AvailabilityZone'] = availability_zone
+ if db_subnet_group_name is not None:
+ params['DBSubnetGroupName'] = db_subnet_group_name
+ if preferred_maintenance_window is not None:
+ params['PreferredMaintenanceWindow'] = preferred_maintenance_window
+ if db_parameter_group_name is not None:
+ params['DBParameterGroupName'] = db_parameter_group_name
+ if backup_retention_period is not None:
+ params['BackupRetentionPeriod'] = backup_retention_period
+ if preferred_backup_window is not None:
+ params['PreferredBackupWindow'] = preferred_backup_window
+ if port is not None:
+ params['Port'] = port
+ if multi_az is not None:
+ params['MultiAZ'] = str(
+ multi_az).lower()
+ if engine_version is not None:
+ params['EngineVersion'] = engine_version
+ if auto_minor_version_upgrade is not None:
+ params['AutoMinorVersionUpgrade'] = str(
+ auto_minor_version_upgrade).lower()
+ if license_model is not None:
+ params['LicenseModel'] = license_model
+ if iops is not None:
+ params['Iops'] = iops
+ if option_group_name is not None:
+ params['OptionGroupName'] = option_group_name
+ if character_set_name is not None:
+ params['CharacterSetName'] = character_set_name
+ if publicly_accessible is not None:
+ params['PubliclyAccessible'] = str(
+ publicly_accessible).lower()
+ if tags is not None:
+ self.build_complex_list_params(
+ params, tags,
+ 'Tags.member',
+ ('Key', 'Value'))
+ return self._make_request(
+ action='CreateDBInstance',
+ verb='POST',
+ path='/', params=params)
+
+ def create_db_instance_read_replica(self, db_instance_identifier,
+ source_db_instance_identifier,
+ db_instance_class=None,
+ availability_zone=None, port=None,
+ auto_minor_version_upgrade=None,
+ iops=None, option_group_name=None,
+ publicly_accessible=None, tags=None):
+ """
+ Creates a DB instance that acts as a read replica of a source
+ DB instance.
+
+ All read replica DB instances are created as Single-AZ
+ deployments with backups disabled. All other DB instance
+ attributes (including DB security groups and DB parameter
+ groups) are inherited from the source DB instance, except as
+ specified below.
+
+ The source DB instance must have backup retention enabled.
+
+ :type db_instance_identifier: string
+ :param db_instance_identifier: The DB instance identifier of the read
+ replica. This is the unique key that identifies a DB instance. This
+ parameter is stored as a lowercase string.
+
+ :type source_db_instance_identifier: string
+ :param source_db_instance_identifier: The identifier of the DB instance
+ that will act as the source for the read replica. Each DB instance
+ can have up to five read replicas.
+ Constraints: Must be the identifier of an existing DB instance that is
+ not already a read replica DB instance.
+
+ :type db_instance_class: string
+ :param db_instance_class: The compute and memory capacity of the read
+ replica.
+ Valid Values: `db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge
+ | db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge`
+
+ Default: Inherits from the source DB instance.
+
+ :type availability_zone: string
+ :param availability_zone: The Amazon EC2 Availability Zone that the
+ read replica will be created in.
+ Default: A random, system-chosen Availability Zone in the endpoint's
+ region.
+
+ Example: `us-east-1d`
+
+ :type port: integer
+ :param port: The port number that the DB instance uses for connections.
+ Default: Inherits from the source DB instance
+
+ Valid Values: `1150-65535`
+
+ :type auto_minor_version_upgrade: boolean
+ :param auto_minor_version_upgrade: Indicates that minor engine upgrades
+ will be applied automatically to the read replica during the
+ maintenance window.
+ Default: Inherits from the source DB instance
+
+ :type iops: integer
+ :param iops: The amount of Provisioned IOPS (input/output operations
+ per second) to be initially allocated for the DB instance.
+
+ :type option_group_name: string
+ :param option_group_name: The option group the DB instance will be
+ associated with. If omitted, the default option group for the
+ engine specified will be used.
+
+ :type publicly_accessible: boolean
+ :param publicly_accessible: Specifies the accessibility options for the
+ DB instance. A value of true specifies an Internet-facing instance
+ with a publicly resolvable DNS name, which resolves to a public IP
+ address. A value of false specifies an internal instance with a DNS
+ name that resolves to a private IP address.
+ Default: The default behavior varies depending on whether a VPC has
+ been requested or not. The following list shows the default
+ behavior in each case.
+
+
+ + **Default VPC:**true
+ + **VPC:**false
+
+
+ If no DB subnet group has been specified as part of the request and the
+ PubliclyAccessible value has not been set, the DB instance will be
+ publicly accessible. If a specific DB subnet group has been
+ specified as part of the request and the PubliclyAccessible value
+ has not been set, the DB instance will be private.
+
+ :type tags: list
+ :param tags: A list of tags.
+
+ """
+ params = {
+ 'DBInstanceIdentifier': db_instance_identifier,
+ 'SourceDBInstanceIdentifier': source_db_instance_identifier,
+ }
+ if db_instance_class is not None:
+ params['DBInstanceClass'] = db_instance_class
+ if availability_zone is not None:
+ params['AvailabilityZone'] = availability_zone
+ if port is not None:
+ params['Port'] = port
+ if auto_minor_version_upgrade is not None:
+ params['AutoMinorVersionUpgrade'] = str(
+ auto_minor_version_upgrade).lower()
+ if iops is not None:
+ params['Iops'] = iops
+ if option_group_name is not None:
+ params['OptionGroupName'] = option_group_name
+ if publicly_accessible is not None:
+ params['PubliclyAccessible'] = str(
+ publicly_accessible).lower()
+ if tags is not None:
+ self.build_complex_list_params(
+ params, tags,
+ 'Tags.member',
+ ('Key', 'Value'))
+ return self._make_request(
+ action='CreateDBInstanceReadReplica',
+ verb='POST',
+ path='/', params=params)
+
+ def create_db_parameter_group(self, db_parameter_group_name,
+ db_parameter_group_family, description,
+ tags=None):
+ """
+ Creates a new DB parameter group.
+
+ A DB parameter group is initially created with the default
+ parameters for the database engine used by the DB instance. To
+ provide custom values for any of the parameters, you must
+ modify the group after creating it using
+ ModifyDBParameterGroup . Once you've created a DB parameter
+ group, you need to associate it with your DB instance using
+ ModifyDBInstance . When you associate a new DB parameter group
+ with a running DB instance, you need to reboot the DB Instance
+ for the new DB parameter group and associated settings to take
+ effect.
+
+ :type db_parameter_group_name: string
+ :param db_parameter_group_name:
+ The name of the DB parameter group.
+
+ Constraints:
+
+
+ + Must be 1 to 255 alphanumeric characters
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+
+ This value is stored as a lower-case string.
+
+ :type db_parameter_group_family: string
+ :param db_parameter_group_family: The DB parameter group family name. A
+ DB parameter group can be associated with one and only one DB
+ parameter group family, and can be applied only to a DB instance
+ running a database engine and engine version compatible with that
+ DB parameter group family.
+
+ :type description: string
+ :param description: The description for the DB parameter group.
+
+ :type tags: list
+ :param tags: A list of tags.
+
+ """
+ params = {
+ 'DBParameterGroupName': db_parameter_group_name,
+ 'DBParameterGroupFamily': db_parameter_group_family,
+ 'Description': description,
+ }
+ if tags is not None:
+ self.build_complex_list_params(
+ params, tags,
+ 'Tags.member',
+ ('Key', 'Value'))
+ return self._make_request(
+ action='CreateDBParameterGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def create_db_security_group(self, db_security_group_name,
+ db_security_group_description, tags=None):
+ """
+ Creates a new DB security group. DB security groups control
+ access to a DB instance.
+
+ :type db_security_group_name: string
+ :param db_security_group_name: The name for the DB security group. This
+ value is stored as a lowercase string.
+ Constraints:
+
+
+ + Must be 1 to 255 alphanumeric characters
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+ + Must not be "Default"
+ + May not contain spaces
+
+
+ Example: `mysecuritygroup`
+
+ :type db_security_group_description: string
+ :param db_security_group_description: The description for the DB
+ security group.
+
+ :type tags: list
+ :param tags: A list of tags.
+
+ """
+ params = {
+ 'DBSecurityGroupName': db_security_group_name,
+ 'DBSecurityGroupDescription': db_security_group_description,
+ }
+ if tags is not None:
+ self.build_complex_list_params(
+ params, tags,
+ 'Tags.member',
+ ('Key', 'Value'))
+ return self._make_request(
+ action='CreateDBSecurityGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def create_db_snapshot(self, db_snapshot_identifier,
+ db_instance_identifier, tags=None):
+ """
+ Creates a DBSnapshot. The source DBInstance must be in
+ "available" state.
+
+ :type db_snapshot_identifier: string
+ :param db_snapshot_identifier: The identifier for the DB snapshot.
+ Constraints:
+
+
+ + Cannot be null, empty, or blank
+ + Must contain from 1 to 255 alphanumeric characters or hyphens
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+
+ Example: `my-snapshot-id`
+
+ :type db_instance_identifier: string
+ :param db_instance_identifier:
+ The DB instance identifier. This is the unique key that identifies a DB
+ instance. This parameter isn't case sensitive.
+
+ Constraints:
+
+
+ + Must contain from 1 to 63 alphanumeric characters or hyphens
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ :type tags: list
+ :param tags: A list of tags.
+
+ """
+ params = {
+ 'DBSnapshotIdentifier': db_snapshot_identifier,
+ 'DBInstanceIdentifier': db_instance_identifier,
+ }
+ if tags is not None:
+ self.build_complex_list_params(
+ params, tags,
+ 'Tags.member',
+ ('Key', 'Value'))
+ return self._make_request(
+ action='CreateDBSnapshot',
+ verb='POST',
+ path='/', params=params)
+
+ def create_db_subnet_group(self, db_subnet_group_name,
+ db_subnet_group_description, subnet_ids,
+ tags=None):
+ """
+ Creates a new DB subnet group. DB subnet groups must contain
+ at least one subnet in at least two AZs in the region.
+
+ :type db_subnet_group_name: string
+ :param db_subnet_group_name: The name for the DB subnet group. This
+ value is stored as a lowercase string.
+ Constraints: Must contain no more than 255 alphanumeric characters or
+ hyphens. Must not be "Default".
+
+ Example: `mySubnetgroup`
+
+ :type db_subnet_group_description: string
+ :param db_subnet_group_description: The description for the DB subnet
+ group.
+
+ :type subnet_ids: list
+ :param subnet_ids: The EC2 Subnet IDs for the DB subnet group.
+
+ :type tags: list
+ :param tags: A list of tags.
+
+ """
+ params = {
+ 'DBSubnetGroupName': db_subnet_group_name,
+ 'DBSubnetGroupDescription': db_subnet_group_description,
+ }
+ self.build_list_params(params,
+ subnet_ids,
+ 'SubnetIds.member')
+ if tags is not None:
+ self.build_complex_list_params(
+ params, tags,
+ 'Tags.member',
+ ('Key', 'Value'))
+ return self._make_request(
+ action='CreateDBSubnetGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def create_event_subscription(self, subscription_name, sns_topic_arn,
+ source_type=None, event_categories=None,
+ source_ids=None, enabled=None, tags=None):
+ """
+ Creates an RDS event notification subscription. This action
+ requires a topic ARN (Amazon Resource Name) created by either
+ the RDS console, the SNS console, or the SNS API. To obtain an
+ ARN with SNS, you must create a topic in Amazon SNS and
+ subscribe to the topic. The ARN is displayed in the SNS
+ console.
+
+ You can specify the type of source (SourceType) you want to be
+ notified of, provide a list of RDS sources (SourceIds) that
+ triggers the events, and provide a list of event categories
+ (EventCategories) for events you want to be notified of. For
+ example, you can specify SourceType = db-instance, SourceIds =
+ mydbinstance1, mydbinstance2 and EventCategories =
+ Availability, Backup.
+
+ If you specify both the SourceType and SourceIds, such as
+ SourceType = db-instance and SourceIdentifier = myDBInstance1,
+ you will be notified of all the db-instance events for the
+ specified source. If you specify a SourceType but do not
+ specify a SourceIdentifier, you will receive notice of the
+ events for that source type for all your RDS sources. If you
+ do not specify either the SourceType nor the SourceIdentifier,
+ you will be notified of events generated from all RDS sources
+ belonging to your customer account.
+
+ :type subscription_name: string
+ :param subscription_name: The name of the subscription.
+ Constraints: The name must be less than 255 characters.
+
+ :type sns_topic_arn: string
+ :param sns_topic_arn: The Amazon Resource Name (ARN) of the SNS topic
+ created for event notification. The ARN is created by Amazon SNS
+ when you create a topic and subscribe to it.
+
+ :type source_type: string
+ :param source_type: The type of source that will be generating the
+ events. For example, if you want to be notified of events generated
+ by a DB instance, you would set this parameter to db-instance. if
+ this value is not specified, all events are returned.
+ Valid values: db-instance | db-parameter-group | db-security-group |
+ db-snapshot
+
+ :type event_categories: list
+ :param event_categories: A list of event categories for a SourceType
+ that you want to subscribe to. You can see a list of the categories
+ for a given SourceType in the `Events`_ topic in the Amazon RDS
+ User Guide or by using the **DescribeEventCategories** action.
+
+ :type source_ids: list
+ :param source_ids:
+ The list of identifiers of the event sources for which events will be
+ returned. If not specified, then all sources are included in the
+ response. An identifier must begin with a letter and must contain
+ only ASCII letters, digits, and hyphens; it cannot end with a
+ hyphen or contain two consecutive hyphens.
+
+ Constraints:
+
+
+ + If SourceIds are supplied, SourceType must also be provided.
+ + If the source type is a DB instance, then a `DBInstanceIdentifier`
+ must be supplied.
+ + If the source type is a DB security group, a `DBSecurityGroupName`
+ must be supplied.
+ + If the source type is a DB parameter group, a `DBParameterGroupName`
+ must be supplied.
+ + If the source type is a DB snapshot, a `DBSnapshotIdentifier` must be
+ supplied.
+
+ :type enabled: boolean
+ :param enabled: A Boolean value; set to **true** to activate the
+ subscription, set to **false** to create the subscription but not
+ active it.
+
+ :type tags: list
+ :param tags: A list of tags.
+
+ """
+ params = {
+ 'SubscriptionName': subscription_name,
+ 'SnsTopicArn': sns_topic_arn,
+ }
+ if source_type is not None:
+ params['SourceType'] = source_type
+ if event_categories is not None:
+ self.build_list_params(params,
+ event_categories,
+ 'EventCategories.member')
+ if source_ids is not None:
+ self.build_list_params(params,
+ source_ids,
+ 'SourceIds.member')
+ if enabled is not None:
+ params['Enabled'] = str(
+ enabled).lower()
+ if tags is not None:
+ self.build_complex_list_params(
+ params, tags,
+ 'Tags.member',
+ ('Key', 'Value'))
+ return self._make_request(
+ action='CreateEventSubscription',
+ verb='POST',
+ path='/', params=params)
+
+ def create_option_group(self, option_group_name, engine_name,
+ major_engine_version, option_group_description,
+ tags=None):
+ """
+ Creates a new option group. You can create up to 20 option
+ groups.
+
+ :type option_group_name: string
+ :param option_group_name: Specifies the name of the option group to be
+ created.
+ Constraints:
+
+
+ + Must be 1 to 255 alphanumeric characters or hyphens
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+
+ Example: `myoptiongroup`
+
+ :type engine_name: string
+ :param engine_name: Specifies the name of the engine that this option
+ group should be associated with.
+
+ :type major_engine_version: string
+ :param major_engine_version: Specifies the major version of the engine
+ that this option group should be associated with.
+
+ :type option_group_description: string
+ :param option_group_description: The description of the option group.
+
+ :type tags: list
+ :param tags: A list of tags.
+
+ """
+ params = {
+ 'OptionGroupName': option_group_name,
+ 'EngineName': engine_name,
+ 'MajorEngineVersion': major_engine_version,
+ 'OptionGroupDescription': option_group_description,
+ }
+ if tags is not None:
+ self.build_complex_list_params(
+ params, tags,
+ 'Tags.member',
+ ('Key', 'Value'))
+ return self._make_request(
+ action='CreateOptionGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def delete_db_instance(self, db_instance_identifier,
+ skip_final_snapshot=None,
+ final_db_snapshot_identifier=None):
+ """
+ The DeleteDBInstance action deletes a previously provisioned
+ DB instance. A successful response from the web service
+ indicates the request was received correctly. When you delete
+ a DB instance, all automated backups for that instance are
+ deleted and cannot be recovered. Manual DB snapshots of the DB
+ instance to be deleted are not deleted.
+
+ If a final DB snapshot is requested the status of the RDS
+ instance will be "deleting" until the DB snapshot is created.
+ The API action `DescribeDBInstance` is used to monitor the
+ status of this operation. The action cannot be canceled or
+ reverted once submitted.
+
+ :type db_instance_identifier: string
+ :param db_instance_identifier:
+ The DB instance identifier for the DB instance to be deleted. This
+ parameter isn't case sensitive.
+
+ Constraints:
+
+
+ + Must contain from 1 to 63 alphanumeric characters or hyphens
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ :type skip_final_snapshot: boolean
+ :param skip_final_snapshot: Determines whether a final DB snapshot is
+ created before the DB instance is deleted. If `True` is specified,
+ no DBSnapshot is created. If false is specified, a DB snapshot is
+ created before the DB instance is deleted.
+ The FinalDBSnapshotIdentifier parameter must be specified if
+ SkipFinalSnapshot is `False`.
+
+ Default: `False`
+
+ :type final_db_snapshot_identifier: string
+ :param final_db_snapshot_identifier:
+ The DBSnapshotIdentifier of the new DBSnapshot created when
+ SkipFinalSnapshot is set to `False`.
+
+ Specifying this parameter and also setting the SkipFinalShapshot
+ parameter to true results in an error.
+
+ Constraints:
+
+
+ + Must be 1 to 255 alphanumeric characters
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ """
+ params = {'DBInstanceIdentifier': db_instance_identifier, }
+ if skip_final_snapshot is not None:
+ params['SkipFinalSnapshot'] = str(
+ skip_final_snapshot).lower()
+ if final_db_snapshot_identifier is not None:
+ params['FinalDBSnapshotIdentifier'] = final_db_snapshot_identifier
+ return self._make_request(
+ action='DeleteDBInstance',
+ verb='POST',
+ path='/', params=params)
+
+ def delete_db_parameter_group(self, db_parameter_group_name):
+ """
+ Deletes a specified DBParameterGroup. The DBParameterGroup
+ cannot be associated with any RDS instances to be deleted.
+ The specified DB parameter group cannot be associated with any
+ DB instances.
+
+ :type db_parameter_group_name: string
+ :param db_parameter_group_name:
+ The name of the DB parameter group.
+
+ Constraints:
+
+
+ + Must be the name of an existing DB parameter group
+ + You cannot delete a default DB parameter group
+ + Cannot be associated with any DB instances
+
+ """
+ params = {'DBParameterGroupName': db_parameter_group_name, }
+ return self._make_request(
+ action='DeleteDBParameterGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def delete_db_security_group(self, db_security_group_name):
+ """
+ Deletes a DB security group.
+ The specified DB security group must not be associated with
+ any DB instances.
+
+ :type db_security_group_name: string
+ :param db_security_group_name:
+ The name of the DB security group to delete.
+
+ You cannot delete the default DB security group.
+
+ Constraints:
+
+
+ + Must be 1 to 255 alphanumeric characters
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+ + Must not be "Default"
+ + May not contain spaces
+
+ """
+ params = {'DBSecurityGroupName': db_security_group_name, }
+ return self._make_request(
+ action='DeleteDBSecurityGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def delete_db_snapshot(self, db_snapshot_identifier):
+ """
+ Deletes a DBSnapshot.
+ The DBSnapshot must be in the `available` state to be deleted.
+
+ :type db_snapshot_identifier: string
+ :param db_snapshot_identifier: The DBSnapshot identifier.
+ Constraints: Must be the name of an existing DB snapshot in the
+ `available` state.
+
+ """
+ params = {'DBSnapshotIdentifier': db_snapshot_identifier, }
+ return self._make_request(
+ action='DeleteDBSnapshot',
+ verb='POST',
+ path='/', params=params)
+
+ def delete_db_subnet_group(self, db_subnet_group_name):
+ """
+ Deletes a DB subnet group.
+ The specified database subnet group must not be associated
+ with any DB instances.
+
+ :type db_subnet_group_name: string
+ :param db_subnet_group_name:
+ The name of the database subnet group to delete.
+
+ You cannot delete the default subnet group.
+
+ Constraints:
+
+
+ + Must be 1 to 255 alphanumeric characters
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ """
+ params = {'DBSubnetGroupName': db_subnet_group_name, }
+ return self._make_request(
+ action='DeleteDBSubnetGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def delete_event_subscription(self, subscription_name):
+ """
+ Deletes an RDS event notification subscription.
+
+ :type subscription_name: string
+ :param subscription_name: The name of the RDS event notification
+ subscription you want to delete.
+
+ """
+ params = {'SubscriptionName': subscription_name, }
+ return self._make_request(
+ action='DeleteEventSubscription',
+ verb='POST',
+ path='/', params=params)
+
+ def delete_option_group(self, option_group_name):
+ """
+ Deletes an existing option group.
+
+ :type option_group_name: string
+ :param option_group_name:
+ The name of the option group to be deleted.
+
+ You cannot delete default option groups.
+
+ """
+ params = {'OptionGroupName': option_group_name, }
+ return self._make_request(
+ action='DeleteOptionGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_db_engine_versions(self, engine=None, engine_version=None,
+ db_parameter_group_family=None,
+ max_records=None, marker=None,
+ default_only=None,
+ list_supported_character_sets=None):
+ """
+ Returns a list of the available DB engines.
+
+ :type engine: string
+ :param engine: The database engine to return.
+
+ :type engine_version: string
+ :param engine_version: The database engine version to return.
+ Example: `5.1.49`
+
+ :type db_parameter_group_family: string
+ :param db_parameter_group_family:
+ The name of a specific DB parameter group family to return details for.
+
+ Constraints:
+
+
+ + Must be 1 to 255 alphanumeric characters
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more than the `MaxRecords` value is available, a
+ pagination token called a marker is included in the response so
+ that the following results can be retrieved.
+ Default: 100
+
+ Constraints: minimum 20, maximum 100
+
+ :type marker: string
+ :param marker: An optional pagination token provided by a previous
+ request. If this parameter is specified, the response includes only
+ records beyond the marker, up to the value specified by
+ `MaxRecords`.
+
+ :type default_only: boolean
+ :param default_only: Indicates that only the default version of the
+ specified engine or engine and major version combination is
+ returned.
+
+ :type list_supported_character_sets: boolean
+ :param list_supported_character_sets: If this parameter is specified,
+ and if the requested engine supports the CharacterSetName parameter
+ for CreateDBInstance, the response includes a list of supported
+ character sets for each engine version.
+
+ """
+ params = {}
+ if engine is not None:
+ params['Engine'] = engine
+ if engine_version is not None:
+ params['EngineVersion'] = engine_version
+ if db_parameter_group_family is not None:
+ params['DBParameterGroupFamily'] = db_parameter_group_family
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ if default_only is not None:
+ params['DefaultOnly'] = str(
+ default_only).lower()
+ if list_supported_character_sets is not None:
+ params['ListSupportedCharacterSets'] = str(
+ list_supported_character_sets).lower()
+ return self._make_request(
+ action='DescribeDBEngineVersions',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_db_instances(self, db_instance_identifier=None,
+ filters=None, max_records=None, marker=None):
+ """
+ Returns information about provisioned RDS instances. This API
+ supports pagination.
+
+ :type db_instance_identifier: string
+ :param db_instance_identifier:
+ The user-supplied instance identifier. If this parameter is specified,
+ information from only the specific DB instance is returned. This
+ parameter isn't case sensitive.
+
+ Constraints:
+
+
+ + Must contain from 1 to 63 alphanumeric characters or hyphens
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ :type filters: list
+ :param filters:
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified `MaxRecords`
+ value, a pagination token called a marker is included in the
+ response so that the remaining results may be retrieved.
+ Default: 100
+
+ Constraints: minimum 20, maximum 100
+
+ :type marker: string
+ :param marker: An optional pagination token provided by a previous
+ DescribeDBInstances request. If this parameter is specified, the
+ response includes only records beyond the marker, up to the value
+ specified by `MaxRecords` .
+
+ """
+ params = {}
+ if db_instance_identifier is not None:
+ params['DBInstanceIdentifier'] = db_instance_identifier
+ if filters is not None:
+ self.build_complex_list_params(
+ params, filters,
+ 'Filters.member',
+ ('FilterName', 'FilterValue'))
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeDBInstances',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_db_log_files(self, db_instance_identifier,
+ filename_contains=None, file_last_written=None,
+ file_size=None, max_records=None, marker=None):
+ """
+ Returns a list of DB log files for the DB instance.
+
+ :type db_instance_identifier: string
+ :param db_instance_identifier:
+ The customer-assigned name of the DB instance that contains the log
+ files you want to list.
+
+ Constraints:
+
+
+ + Must contain from 1 to 63 alphanumeric characters or hyphens
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ :type filename_contains: string
+ :param filename_contains: Filters the available log files for log file
+ names that contain the specified string.
+
+ :type file_last_written: long
+ :param file_last_written: Filters the available log files for files
+ written since the specified date, in POSIX timestamp format.
+
+ :type file_size: long
+ :param file_size: Filters the available log files for files larger than
+ the specified size.
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified MaxRecords
+ value, a pagination token called a marker is included in the
+ response so that the remaining results can be retrieved.
+
+ :type marker: string
+ :param marker: The pagination token provided in the previous request.
+ If this parameter is specified the response includes only records
+ beyond the marker, up to MaxRecords.
+
+ """
+ params = {'DBInstanceIdentifier': db_instance_identifier, }
+ if filename_contains is not None:
+ params['FilenameContains'] = filename_contains
+ if file_last_written is not None:
+ params['FileLastWritten'] = file_last_written
+ if file_size is not None:
+ params['FileSize'] = file_size
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeDBLogFiles',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_db_parameter_groups(self, db_parameter_group_name=None,
+ filters=None, max_records=None,
+ marker=None):
+ """
+ Returns a list of `DBParameterGroup` descriptions. If a
+ `DBParameterGroupName` is specified, the list will contain
+ only the description of the specified DB parameter group.
+
+ :type db_parameter_group_name: string
+ :param db_parameter_group_name:
+ The name of a specific DB parameter group to return details for.
+
+ Constraints:
+
+
+ + Must be 1 to 255 alphanumeric characters
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ :type filters: list
+ :param filters:
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified `MaxRecords`
+ value, a pagination token called a marker is included in the
+ response so that the remaining results may be retrieved.
+ Default: 100
+
+ Constraints: minimum 20, maximum 100
+
+ :type marker: string
+ :param marker: An optional pagination token provided by a previous
+ `DescribeDBParameterGroups` request. If this parameter is
+ specified, the response includes only records beyond the marker, up
+ to the value specified by `MaxRecords`.
+
+ """
+ params = {}
+ if db_parameter_group_name is not None:
+ params['DBParameterGroupName'] = db_parameter_group_name
+ if filters is not None:
+ self.build_complex_list_params(
+ params, filters,
+ 'Filters.member',
+ ('FilterName', 'FilterValue'))
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeDBParameterGroups',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_db_parameters(self, db_parameter_group_name, source=None,
+ max_records=None, marker=None):
+ """
+ Returns the detailed parameter list for a particular DB
+ parameter group.
+
+ :type db_parameter_group_name: string
+ :param db_parameter_group_name:
+ The name of a specific DB parameter group to return details for.
+
+ Constraints:
+
+
+ + Must be 1 to 255 alphanumeric characters
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ :type source: string
+ :param source: The parameter types to return.
+ Default: All parameter types returned
+
+ Valid Values: `user | system | engine-default`
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified `MaxRecords`
+ value, a pagination token called a marker is included in the
+ response so that the remaining results may be retrieved.
+ Default: 100
+
+ Constraints: minimum 20, maximum 100
+
+ :type marker: string
+ :param marker: An optional pagination token provided by a previous
+ `DescribeDBParameters` request. If this parameter is specified, the
+ response includes only records beyond the marker, up to the value
+ specified by `MaxRecords`.
+
+ """
+ params = {'DBParameterGroupName': db_parameter_group_name, }
+ if source is not None:
+ params['Source'] = source
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeDBParameters',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_db_security_groups(self, db_security_group_name=None,
+ filters=None, max_records=None,
+ marker=None):
+ """
+ Returns a list of `DBSecurityGroup` descriptions. If a
+ `DBSecurityGroupName` is specified, the list will contain only
+ the descriptions of the specified DB security group.
+
+ :type db_security_group_name: string
+ :param db_security_group_name: The name of the DB security group to
+ return details for.
+
+ :type filters: list
+ :param filters:
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified `MaxRecords`
+ value, a pagination token called a marker is included in the
+ response so that the remaining results may be retrieved.
+ Default: 100
+
+ Constraints: minimum 20, maximum 100
+
+ :type marker: string
+ :param marker: An optional pagination token provided by a previous
+ DescribeDBSecurityGroups request. If this parameter is specified,
+ the response includes only records beyond the marker, up to the
+ value specified by `MaxRecords`.
+
+ """
+ params = {}
+ if db_security_group_name is not None:
+ params['DBSecurityGroupName'] = db_security_group_name
+ if filters is not None:
+ self.build_complex_list_params(
+ params, filters,
+ 'Filters.member',
+ ('FilterName', 'FilterValue'))
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeDBSecurityGroups',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_db_snapshots(self, db_instance_identifier=None,
+ db_snapshot_identifier=None,
+ snapshot_type=None, filters=None,
+ max_records=None, marker=None):
+ """
+ Returns information about DB snapshots. This API supports
+ pagination.
+
+ :type db_instance_identifier: string
+ :param db_instance_identifier:
+ A DB instance identifier to retrieve the list of DB snapshots for.
+ Cannot be used in conjunction with `DBSnapshotIdentifier`. This
+ parameter is not case sensitive.
+
+ Constraints:
+
+
+ + Must contain from 1 to 63 alphanumeric characters or hyphens
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ :type db_snapshot_identifier: string
+ :param db_snapshot_identifier:
+ A specific DB snapshot identifier to describe. Cannot be used in
+ conjunction with `DBInstanceIdentifier`. This value is stored as a
+ lowercase string.
+
+ Constraints:
+
+
+ + Must be 1 to 255 alphanumeric characters
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+ + If this is the identifier of an automated snapshot, the
+ `SnapshotType` parameter must also be specified.
+
+ :type snapshot_type: string
+ :param snapshot_type: The type of snapshots that will be returned.
+ Values can be "automated" or "manual." If not specified, the
+ returned results will include all snapshots types.
+
+ :type filters: list
+ :param filters:
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified `MaxRecords`
+ value, a pagination token called a marker is included in the
+ response so that the remaining results may be retrieved.
+ Default: 100
+
+ Constraints: minimum 20, maximum 100
+
+ :type marker: string
+ :param marker: An optional pagination token provided by a previous
+ `DescribeDBSnapshots` request. If this parameter is specified, the
+ response includes only records beyond the marker, up to the value
+ specified by `MaxRecords`.
+
+ """
+ params = {}
+ if db_instance_identifier is not None:
+ params['DBInstanceIdentifier'] = db_instance_identifier
+ if db_snapshot_identifier is not None:
+ params['DBSnapshotIdentifier'] = db_snapshot_identifier
+ if snapshot_type is not None:
+ params['SnapshotType'] = snapshot_type
+ if filters is not None:
+ self.build_complex_list_params(
+ params, filters,
+ 'Filters.member',
+ ('FilterName', 'FilterValue'))
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeDBSnapshots',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_db_subnet_groups(self, db_subnet_group_name=None,
+ filters=None, max_records=None,
+ marker=None):
+ """
+ Returns a list of DBSubnetGroup descriptions. If a
+ DBSubnetGroupName is specified, the list will contain only the
+ descriptions of the specified DBSubnetGroup.
+
+ For an overview of CIDR ranges, go to the `Wikipedia
+ Tutorial`_.
+
+ :type db_subnet_group_name: string
+ :param db_subnet_group_name: The name of the DB subnet group to return
+ details for.
+
+ :type filters: list
+ :param filters:
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified `MaxRecords`
+ value, a pagination token called a marker is included in the
+ response so that the remaining results may be retrieved.
+ Default: 100
+
+ Constraints: minimum 20, maximum 100
+
+ :type marker: string
+ :param marker: An optional pagination token provided by a previous
+ DescribeDBSubnetGroups request. If this parameter is specified, the
+ response includes only records beyond the marker, up to the value
+ specified by `MaxRecords`.
+
+ """
+ params = {}
+ if db_subnet_group_name is not None:
+ params['DBSubnetGroupName'] = db_subnet_group_name
+ if filters is not None:
+ self.build_complex_list_params(
+ params, filters,
+ 'Filters.member',
+ ('FilterName', 'FilterValue'))
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeDBSubnetGroups',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_engine_default_parameters(self, db_parameter_group_family,
+ max_records=None, marker=None):
+ """
+ Returns the default engine and system parameter information
+ for the specified database engine.
+
+ :type db_parameter_group_family: string
+ :param db_parameter_group_family: The name of the DB parameter group
+ family.
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified `MaxRecords`
+ value, a pagination token called a marker is included in the
+ response so that the remaining results may be retrieved.
+ Default: 100
+
+ Constraints: minimum 20, maximum 100
+
+ :type marker: string
+ :param marker: An optional pagination token provided by a previous
+ `DescribeEngineDefaultParameters` request. If this parameter is
+ specified, the response includes only records beyond the marker, up
+ to the value specified by `MaxRecords`.
+
+ """
+ params = {
+ 'DBParameterGroupFamily': db_parameter_group_family,
+ }
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeEngineDefaultParameters',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_event_categories(self, source_type=None):
+ """
+ Displays a list of categories for all event source types, or,
+ if specified, for a specified source type. You can see a list
+ of the event categories and source types in the ` Events`_
+ topic in the Amazon RDS User Guide.
+
+ :type source_type: string
+ :param source_type: The type of source that will be generating the
+ events.
+ Valid values: db-instance | db-parameter-group | db-security-group |
+ db-snapshot
+
+ """
+ params = {}
+ if source_type is not None:
+ params['SourceType'] = source_type
+ return self._make_request(
+ action='DescribeEventCategories',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_event_subscriptions(self, subscription_name=None,
+ filters=None, max_records=None,
+ marker=None):
+ """
+ Lists all the subscription descriptions for a customer
+ account. The description for a subscription includes
+ SubscriptionName, SNSTopicARN, CustomerID, SourceType,
+ SourceID, CreationTime, and Status.
+
+ If you specify a SubscriptionName, lists the description for
+ that subscription.
+
+ :type subscription_name: string
+ :param subscription_name: The name of the RDS event notification
+ subscription you want to describe.
+
+ :type filters: list
+ :param filters:
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified `MaxRecords`
+ value, a pagination token called a marker is included in the
+ response so that the remaining results can be retrieved.
+ Default: 100
+
+ Constraints: minimum 20, maximum 100
+
+ :type marker: string
+ :param marker: An optional pagination token provided by a previous
+ DescribeOrderableDBInstanceOptions request. If this parameter is
+ specified, the response includes only records beyond the marker, up
+ to the value specified by `MaxRecords` .
+
+ """
+ params = {}
+ if subscription_name is not None:
+ params['SubscriptionName'] = subscription_name
+ if filters is not None:
+ self.build_complex_list_params(
+ params, filters,
+ 'Filters.member',
+ ('FilterName', 'FilterValue'))
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeEventSubscriptions',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_events(self, source_identifier=None, source_type=None,
+ start_time=None, end_time=None, duration=None,
+ event_categories=None, max_records=None, marker=None):
+ """
+ Returns events related to DB instances, DB security groups, DB
+ snapshots, and DB parameter groups for the past 14 days.
+ Events specific to a particular DB instance, DB security
+ group, database snapshot, or DB parameter group can be
+ obtained by providing the name as a parameter. By default, the
+ past hour of events are returned.
+
+ :type source_identifier: string
+ :param source_identifier:
+ The identifier of the event source for which events will be returned.
+ If not specified, then all sources are included in the response.
+
+ Constraints:
+
+
+ + If SourceIdentifier is supplied, SourceType must also be provided.
+ + If the source type is `DBInstance`, then a `DBInstanceIdentifier`
+ must be supplied.
+ + If the source type is `DBSecurityGroup`, a `DBSecurityGroupName` must
+ be supplied.
+ + If the source type is `DBParameterGroup`, a `DBParameterGroupName`
+ must be supplied.
+ + If the source type is `DBSnapshot`, a `DBSnapshotIdentifier` must be
+ supplied.
+ + Cannot end with a hyphen or contain two consecutive hyphens.
+
+ :type source_type: string
+ :param source_type: The event source to retrieve events for. If no
+ value is specified, all events are returned.
+
+ :type start_time: timestamp
+ :param start_time: The beginning of the time interval to retrieve
+ events for, specified in ISO 8601 format. For more information
+ about ISO 8601, go to the `ISO8601 Wikipedia page.`_
+ Example: 2009-07-08T18:00Z
+
+ :type end_time: timestamp
+ :param end_time: The end of the time interval for which to retrieve
+ events, specified in ISO 8601 format. For more information about
+ ISO 8601, go to the `ISO8601 Wikipedia page.`_
+ Example: 2009-07-08T18:00Z
+
+ :type duration: integer
+ :param duration: The number of minutes to retrieve events for.
+ Default: 60
+
+ :type event_categories: list
+ :param event_categories: A list of event categories that trigger
+ notifications for a event notification subscription.
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified `MaxRecords`
+ value, a pagination token called a marker is included in the
+ response so that the remaining results may be retrieved.
+ Default: 100
+
+ Constraints: minimum 20, maximum 100
+
+ :type marker: string
+ :param marker: An optional pagination token provided by a previous
+ DescribeEvents request. If this parameter is specified, the
+ response includes only records beyond the marker, up to the value
+ specified by `MaxRecords`.
+
+ """
+ params = {}
+ if source_identifier is not None:
+ params['SourceIdentifier'] = source_identifier
+ if source_type is not None:
+ params['SourceType'] = source_type
+ if start_time is not None:
+ params['StartTime'] = start_time
+ if end_time is not None:
+ params['EndTime'] = end_time
+ if duration is not None:
+ params['Duration'] = duration
+ if event_categories is not None:
+ self.build_list_params(params,
+ event_categories,
+ 'EventCategories.member')
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeEvents',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_option_group_options(self, engine_name,
+ major_engine_version=None,
+ max_records=None, marker=None):
+ """
+ Describes all available options.
+
+ :type engine_name: string
+ :param engine_name: A required parameter. Options available for the
+ given Engine name will be described.
+
+ :type major_engine_version: string
+ :param major_engine_version: If specified, filters the results to
+ include only options for the specified major engine version.
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified `MaxRecords`
+ value, a pagination token called a marker is included in the
+ response so that the remaining results can be retrieved.
+ Default: 100
+
+ Constraints: minimum 20, maximum 100
+
+ :type marker: string
+ :param marker: An optional pagination token provided by a previous
+ request. If this parameter is specified, the response includes only
+ records beyond the marker, up to the value specified by
+ `MaxRecords`.
+
+ """
+ params = {'EngineName': engine_name, }
+ if major_engine_version is not None:
+ params['MajorEngineVersion'] = major_engine_version
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeOptionGroupOptions',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_option_groups(self, option_group_name=None, filters=None,
+ marker=None, max_records=None,
+ engine_name=None, major_engine_version=None):
+ """
+ Describes the available option groups.
+
+ :type option_group_name: string
+ :param option_group_name: The name of the option group to describe.
+ Cannot be supplied together with EngineName or MajorEngineVersion.
+
+ :type filters: list
+ :param filters:
+
+ :type marker: string
+ :param marker: An optional pagination token provided by a previous
+ DescribeOptionGroups request. If this parameter is specified, the
+ response includes only records beyond the marker, up to the value
+ specified by `MaxRecords`.
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified `MaxRecords`
+ value, a pagination token called a marker is included in the
+ response so that the remaining results can be retrieved.
+ Default: 100
+
+ Constraints: minimum 20, maximum 100
+
+ :type engine_name: string
+ :param engine_name: Filters the list of option groups to only include
+ groups associated with a specific database engine.
+
+ :type major_engine_version: string
+ :param major_engine_version: Filters the list of option groups to only
+ include groups associated with a specific database engine version.
+ If specified, then EngineName must also be specified.
+
+ """
+ params = {}
+ if option_group_name is not None:
+ params['OptionGroupName'] = option_group_name
+ if filters is not None:
+ self.build_complex_list_params(
+ params, filters,
+ 'Filters.member',
+ ('FilterName', 'FilterValue'))
+ if marker is not None:
+ params['Marker'] = marker
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if engine_name is not None:
+ params['EngineName'] = engine_name
+ if major_engine_version is not None:
+ params['MajorEngineVersion'] = major_engine_version
+ return self._make_request(
+ action='DescribeOptionGroups',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_orderable_db_instance_options(self, engine,
+ engine_version=None,
+ db_instance_class=None,
+ license_model=None, vpc=None,
+ max_records=None, marker=None):
+ """
+ Returns a list of orderable DB instance options for the
+ specified engine.
+
+ :type engine: string
+ :param engine: The name of the engine to retrieve DB instance options
+ for.
+
+ :type engine_version: string
+ :param engine_version: The engine version filter value. Specify this
+ parameter to show only the available offerings matching the
+ specified engine version.
+
+ :type db_instance_class: string
+ :param db_instance_class: The DB instance class filter value. Specify
+ this parameter to show only the available offerings matching the
+ specified DB instance class.
+
+ :type license_model: string
+ :param license_model: The license model filter value. Specify this
+ parameter to show only the available offerings matching the
+ specified license model.
+
+ :type vpc: boolean
+ :param vpc: The VPC filter value. Specify this parameter to show only
+ the available VPC or non-VPC offerings.
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified `MaxRecords`
+ value, a pagination token called a marker is included in the
+ response so that the remaining results can be retrieved.
+ Default: 100
+
+ Constraints: minimum 20, maximum 100
+
+ :type marker: string
+ :param marker: An optional pagination token provided by a previous
+ DescribeOrderableDBInstanceOptions request. If this parameter is
+ specified, the response includes only records beyond the marker, up
+ to the value specified by `MaxRecords` .
+
+ """
+ params = {'Engine': engine, }
+ if engine_version is not None:
+ params['EngineVersion'] = engine_version
+ if db_instance_class is not None:
+ params['DBInstanceClass'] = db_instance_class
+ if license_model is not None:
+ params['LicenseModel'] = license_model
+ if vpc is not None:
+ params['Vpc'] = str(
+ vpc).lower()
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeOrderableDBInstanceOptions',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_reserved_db_instances(self, reserved_db_instance_id=None,
+ reserved_db_instances_offering_id=None,
+ db_instance_class=None, duration=None,
+ product_description=None,
+ offering_type=None, multi_az=None,
+ filters=None, max_records=None,
+ marker=None):
+ """
+ Returns information about reserved DB instances for this
+ account, or about a specified reserved DB instance.
+
+ :type reserved_db_instance_id: string
+ :param reserved_db_instance_id: The reserved DB instance identifier
+ filter value. Specify this parameter to show only the reservation
+ that matches the specified reservation ID.
+
+ :type reserved_db_instances_offering_id: string
+ :param reserved_db_instances_offering_id: The offering identifier
+ filter value. Specify this parameter to show only purchased
+ reservations matching the specified offering identifier.
+
+ :type db_instance_class: string
+ :param db_instance_class: The DB instance class filter value. Specify
+ this parameter to show only those reservations matching the
+ specified DB instances class.
+
+ :type duration: string
+ :param duration: The duration filter value, specified in years or
+ seconds. Specify this parameter to show only reservations for this
+ duration.
+ Valid Values: `1 | 3 | 31536000 | 94608000`
+
+ :type product_description: string
+ :param product_description: The product description filter value.
+ Specify this parameter to show only those reservations matching the
+ specified product description.
+
+ :type offering_type: string
+ :param offering_type: The offering type filter value. Specify this
+ parameter to show only the available offerings matching the
+ specified offering type.
+ Valid Values: `"Light Utilization" | "Medium Utilization" | "Heavy
+ Utilization" `
+
+ :type multi_az: boolean
+ :param multi_az: The Multi-AZ filter value. Specify this parameter to
+ show only those reservations matching the specified Multi-AZ
+ parameter.
+
+ :type filters: list
+ :param filters:
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more than the `MaxRecords` value is available, a
+ pagination token called a marker is included in the response so
+ that the following results can be retrieved.
+ Default: 100
+
+ Constraints: minimum 20, maximum 100
+
+ :type marker: string
+ :param marker: An optional pagination token provided by a previous
+ request. If this parameter is specified, the response includes only
+ records beyond the marker, up to the value specified by
+ `MaxRecords`.
+
+ """
+ params = {}
+ if reserved_db_instance_id is not None:
+ params['ReservedDBInstanceId'] = reserved_db_instance_id
+ if reserved_db_instances_offering_id is not None:
+ params['ReservedDBInstancesOfferingId'] = reserved_db_instances_offering_id
+ if db_instance_class is not None:
+ params['DBInstanceClass'] = db_instance_class
+ if duration is not None:
+ params['Duration'] = duration
+ if product_description is not None:
+ params['ProductDescription'] = product_description
+ if offering_type is not None:
+ params['OfferingType'] = offering_type
+ if multi_az is not None:
+ params['MultiAZ'] = str(
+ multi_az).lower()
+ if filters is not None:
+ self.build_complex_list_params(
+ params, filters,
+ 'Filters.member',
+ ('FilterName', 'FilterValue'))
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeReservedDBInstances',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_reserved_db_instances_offerings(self,
+ reserved_db_instances_offering_id=None,
+ db_instance_class=None,
+ duration=None,
+ product_description=None,
+ offering_type=None,
+ multi_az=None,
+ max_records=None,
+ marker=None):
+ """
+ Lists available reserved DB instance offerings.
+
+ :type reserved_db_instances_offering_id: string
+ :param reserved_db_instances_offering_id: The offering identifier
+ filter value. Specify this parameter to show only the available
+ offering that matches the specified reservation identifier.
+ Example: `438012d3-4052-4cc7-b2e3-8d3372e0e706`
+
+ :type db_instance_class: string
+ :param db_instance_class: The DB instance class filter value. Specify
+ this parameter to show only the available offerings matching the
+ specified DB instance class.
+
+ :type duration: string
+ :param duration: Duration filter value, specified in years or seconds.
+ Specify this parameter to show only reservations for this duration.
+ Valid Values: `1 | 3 | 31536000 | 94608000`
+
+ :type product_description: string
+ :param product_description: Product description filter value. Specify
+ this parameter to show only the available offerings matching the
+ specified product description.
+
+ :type offering_type: string
+ :param offering_type: The offering type filter value. Specify this
+ parameter to show only the available offerings matching the
+ specified offering type.
+ Valid Values: `"Light Utilization" | "Medium Utilization" | "Heavy
+ Utilization" `
+
+ :type multi_az: boolean
+ :param multi_az: The Multi-AZ filter value. Specify this parameter to
+ show only the available offerings matching the specified Multi-AZ
+ parameter.
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more than the `MaxRecords` value is available, a
+ pagination token called a marker is included in the response so
+ that the following results can be retrieved.
+ Default: 100
+
+ Constraints: minimum 20, maximum 100
+
+ :type marker: string
+ :param marker: An optional pagination token provided by a previous
+ request. If this parameter is specified, the response includes only
+ records beyond the marker, up to the value specified by
+ `MaxRecords`.
+
+ """
+ params = {}
+ if reserved_db_instances_offering_id is not None:
+ params['ReservedDBInstancesOfferingId'] = reserved_db_instances_offering_id
+ if db_instance_class is not None:
+ params['DBInstanceClass'] = db_instance_class
+ if duration is not None:
+ params['Duration'] = duration
+ if product_description is not None:
+ params['ProductDescription'] = product_description
+ if offering_type is not None:
+ params['OfferingType'] = offering_type
+ if multi_az is not None:
+ params['MultiAZ'] = str(
+ multi_az).lower()
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeReservedDBInstancesOfferings',
+ verb='POST',
+ path='/', params=params)
+
+ def download_db_log_file_portion(self, db_instance_identifier,
+ log_file_name, marker=None,
+ number_of_lines=None):
+ """
+ Downloads the last line of the specified log file.
+
+ :type db_instance_identifier: string
+ :param db_instance_identifier:
+ The customer-assigned name of the DB instance that contains the log
+ files you want to list.
+
+ Constraints:
+
+
+ + Must contain from 1 to 63 alphanumeric characters or hyphens
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ :type log_file_name: string
+ :param log_file_name: The name of the log file to be downloaded.
+
+ :type marker: string
+ :param marker: The pagination token provided in the previous request.
+ If this parameter is specified the response includes only records
+ beyond the marker, up to MaxRecords.
+
+ :type number_of_lines: integer
+ :param number_of_lines: The number of lines remaining to be downloaded.
+
+ """
+ params = {
+ 'DBInstanceIdentifier': db_instance_identifier,
+ 'LogFileName': log_file_name,
+ }
+ if marker is not None:
+ params['Marker'] = marker
+ if number_of_lines is not None:
+ params['NumberOfLines'] = number_of_lines
+ return self._make_request(
+ action='DownloadDBLogFilePortion',
+ verb='POST',
+ path='/', params=params)
+
+ def list_tags_for_resource(self, resource_name):
+ """
+ Lists all tags on an Amazon RDS resource.
+
+ For an overview on tagging an Amazon RDS resource, see
+ `Tagging Amazon RDS Resources`_.
+
+ :type resource_name: string
+ :param resource_name: The Amazon RDS resource with tags to be listed.
+ This value is an Amazon Resource Name (ARN). For information about
+ creating an ARN, see ` Constructing an RDS Amazon Resource Name
+ (ARN)`_.
+
+ """
+ params = {'ResourceName': resource_name, }
+ return self._make_request(
+ action='ListTagsForResource',
+ verb='POST',
+ path='/', params=params)
+
+ def modify_db_instance(self, db_instance_identifier,
+ allocated_storage=None, db_instance_class=None,
+ db_security_groups=None,
+ vpc_security_group_ids=None,
+ apply_immediately=None, master_user_password=None,
+ db_parameter_group_name=None,
+ backup_retention_period=None,
+ preferred_backup_window=None,
+ preferred_maintenance_window=None, multi_az=None,
+ engine_version=None,
+ allow_major_version_upgrade=None,
+ auto_minor_version_upgrade=None, iops=None,
+ option_group_name=None,
+ new_db_instance_identifier=None):
+ """
+ Modify settings for a DB instance. You can change one or more
+ database configuration parameters by specifying these
+ parameters and the new values in the request.
+
+ :type db_instance_identifier: string
+ :param db_instance_identifier:
+ The DB instance identifier. This value is stored as a lowercase string.
+
+ Constraints:
+
+
+ + Must be the identifier for an existing DB instance
+ + Must contain from 1 to 63 alphanumeric characters or hyphens
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ :type allocated_storage: integer
+ :param allocated_storage: The new storage capacity of the RDS instance.
+ Changing this parameter does not result in an outage and the change
+ is applied during the next maintenance window unless the
+ `ApplyImmediately` parameter is set to `True` for this request.
+ **MySQL**
+
+ Default: Uses existing setting
+
+ Valid Values: 5-1024
+
+ Constraints: Value supplied must be at least 10% greater than the
+ current value. Values that are not at least 10% greater than the
+ existing value are rounded up so that they are 10% greater than the
+ current value.
+
+ Type: Integer
+
+ **Oracle**
+
+ Default: Uses existing setting
+
+ Valid Values: 10-1024
+
+ Constraints: Value supplied must be at least 10% greater than the
+ current value. Values that are not at least 10% greater than the
+ existing value are rounded up so that they are 10% greater than the
+ current value.
+
+ **SQL Server**
+
+ Cannot be modified.
+
+ If you choose to migrate your DB instance from using standard storage
+ to using Provisioned IOPS, or from using Provisioned IOPS to using
+ standard storage, the process can take time. The duration of the
+ migration depends on several factors such as database load, storage
+ size, storage type (standard or Provisioned IOPS), amount of IOPS
+ provisioned (if any), and the number of prior scale storage
+ operations. Typical migration times are under 24 hours, but the
+ process can take up to several days in some cases. During the
+ migration, the DB instance will be available for use, but may
+ experience performance degradation. While the migration takes
+ place, nightly backups for the instance will be suspended. No other
+ Amazon RDS operations can take place for the instance, including
+ modifying the instance, rebooting the instance, deleting the
+ instance, creating a read replica for the instance, and creating a
+ DB snapshot of the instance.
+
+ :type db_instance_class: string
+ :param db_instance_class: The new compute and memory capacity of the DB
+ instance. To determine the instance classes that are available for
+ a particular DB engine, use the DescribeOrderableDBInstanceOptions
+ action.
+ Passing a value for this parameter causes an outage during the change
+ and is applied during the next maintenance window, unless the
+ `ApplyImmediately` parameter is specified as `True` for this
+ request.
+
+ Default: Uses existing setting
+
+ Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large |
+ db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge`
+
+ :type db_security_groups: list
+ :param db_security_groups:
+ A list of DB security groups to authorize on this DB instance. Changing
+ this parameter does not result in an outage and the change is
+ asynchronously applied as soon as possible.
+
+ Constraints:
+
+
+ + Must be 1 to 255 alphanumeric characters
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ :type vpc_security_group_ids: list
+ :param vpc_security_group_ids:
+ A list of EC2 VPC security groups to authorize on this DB instance.
+ This change is asynchronously applied as soon as possible.
+
+ Constraints:
+
+
+ + Must be 1 to 255 alphanumeric characters
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ :type apply_immediately: boolean
+ :param apply_immediately: Specifies whether or not the modifications in
+ this request and any pending modifications are asynchronously
+ applied as soon as possible, regardless of the
+ `PreferredMaintenanceWindow` setting for the DB instance.
+ If this parameter is passed as `False`, changes to the DB instance are
+ applied on the next call to RebootDBInstance, the next maintenance
+ reboot, or the next failure reboot, whichever occurs first. See
+ each parameter to determine when a change is applied.
+
+ Default: `False`
+
+ :type master_user_password: string
+ :param master_user_password:
+ The new password for the DB instance master user. Can be any printable
+ ASCII character except "/", '"', or "@".
+
+ Changing this parameter does not result in an outage and the change is
+ asynchronously applied as soon as possible. Between the time of the
+ request and the completion of the request, the `MasterUserPassword`
+ element exists in the `PendingModifiedValues` element of the
+ operation response.
+
+ Default: Uses existing setting
+
+ Constraints: Must be 8 to 41 alphanumeric characters (MySQL), 8 to 30
+ alphanumeric characters (Oracle), or 8 to 128 alphanumeric
+ characters (SQL Server).
+
+ Amazon RDS API actions never return the password, so this action
+ provides a way to regain access to a master instance user if the
+ password is lost.
+
+ :type db_parameter_group_name: string
+ :param db_parameter_group_name: The name of the DB parameter group to
+ apply to this DB instance. Changing this parameter does not result
+ in an outage and the change is applied during the next maintenance
+ window unless the `ApplyImmediately` parameter is set to `True` for
+ this request.
+ Default: Uses existing setting
+
+ Constraints: The DB parameter group must be in the same DB parameter
+ group family as this DB instance.
+
+ :type backup_retention_period: integer
+ :param backup_retention_period:
+ The number of days to retain automated backups. Setting this parameter
+ to a positive number enables backups. Setting this parameter to 0
+ disables automated backups.
+
+ Changing this parameter can result in an outage if you change from 0 to
+ a non-zero value or from a non-zero value to 0. These changes are
+ applied during the next maintenance window unless the
+ `ApplyImmediately` parameter is set to `True` for this request. If
+ you change the parameter from one non-zero value to another non-
+ zero value, the change is asynchronously applied as soon as
+ possible.
+
+ Default: Uses existing setting
+
+ Constraints:
+
+
+ + Must be a value from 0 to 8
+ + Cannot be set to 0 if the DB instance is a master instance with read
+ replicas or if the DB instance is a read replica
+
+ :type preferred_backup_window: string
+ :param preferred_backup_window:
+ The daily time range during which automated backups are created if
+ automated backups are enabled, as determined by the
+ `BackupRetentionPeriod`. Changing this parameter does not result in
+ an outage and the change is asynchronously applied as soon as
+ possible.
+
+ Constraints:
+
+
+ + Must be in the format hh24:mi-hh24:mi
+ + Times should be Universal Time Coordinated (UTC)
+ + Must not conflict with the preferred maintenance window
+ + Must be at least 30 minutes
+
+ :type preferred_maintenance_window: string
+ :param preferred_maintenance_window: The weekly time range (in UTC)
+ during which system maintenance can occur, which may result in an
+ outage. Changing this parameter does not result in an outage,
+ except in the following situation, and the change is asynchronously
+ applied as soon as possible. If there are pending actions that
+ cause a reboot, and the maintenance window is changed to include
+ the current time, then changing this parameter will cause a reboot
+ of the DB instance. If moving this window to the current time,
+ there must be at least 30 minutes between the current time and end
+ of the window to ensure pending changes are applied.
+ Default: Uses existing setting
+
+ Format: ddd:hh24:mi-ddd:hh24:mi
+
+ Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun
+
+ Constraints: Must be at least 30 minutes
+
+ :type multi_az: boolean
+ :param multi_az: Specifies if the DB instance is a Multi-AZ deployment.
+ Changing this parameter does not result in an outage and the change
+ is applied during the next maintenance window unless the
+ `ApplyImmediately` parameter is set to `True` for this request.
+ Constraints: Cannot be specified if the DB instance is a read replica.
+
+ :type engine_version: string
+ :param engine_version: The version number of the database engine to
+ upgrade to. Changing this parameter results in an outage and the
+ change is applied during the next maintenance window unless the
+ `ApplyImmediately` parameter is set to `True` for this request.
+ For major version upgrades, if a non-default DB parameter group is
+ currently in use, a new DB parameter group in the DB parameter
+ group family for the new engine version must be specified. The new
+ DB parameter group can be the default for that DB parameter group
+ family.
+
+ Example: `5.1.42`
+
+ :type allow_major_version_upgrade: boolean
+ :param allow_major_version_upgrade: Indicates that major version
+ upgrades are allowed. Changing this parameter does not result in an
+ outage and the change is asynchronously applied as soon as
+ possible.
+ Constraints: This parameter must be set to true when specifying a value
+ for the EngineVersion parameter that is a different major version
+ than the DB instance's current version.
+
+ :type auto_minor_version_upgrade: boolean
+ :param auto_minor_version_upgrade: Indicates that minor version
+ upgrades will be applied automatically to the DB instance during
+ the maintenance window. Changing this parameter does not result in
+ an outage except in the following case and the change is
+ asynchronously applied as soon as possible. An outage will result
+ if this parameter is set to `True` during the maintenance window,
+ and a newer minor version is available, and RDS has enabled auto
+ patching for that engine version.
+
+ :type iops: integer
+ :param iops: The new Provisioned IOPS (I/O operations per second) value
+ for the RDS instance. Changing this parameter does not result in an
+ outage and the change is applied during the next maintenance window
+ unless the `ApplyImmediately` parameter is set to `True` for this
+ request.
+ Default: Uses existing setting
+
+ Constraints: Value supplied must be at least 10% greater than the
+ current value. Values that are not at least 10% greater than the
+ existing value are rounded up so that they are 10% greater than the
+ current value.
+
+ Type: Integer
+
+ If you choose to migrate your DB instance from using standard storage
+ to using Provisioned IOPS, or from using Provisioned IOPS to using
+ standard storage, the process can take time. The duration of the
+ migration depends on several factors such as database load, storage
+ size, storage type (standard or Provisioned IOPS), amount of IOPS
+ provisioned (if any), and the number of prior scale storage
+ operations. Typical migration times are under 24 hours, but the
+ process can take up to several days in some cases. During the
+ migration, the DB instance will be available for use, but may
+ experience performance degradation. While the migration takes
+ place, nightly backups for the instance will be suspended. No other
+ Amazon RDS operations can take place for the instance, including
+ modifying the instance, rebooting the instance, deleting the
+ instance, creating a read replica for the instance, and creating a
+ DB snapshot of the instance.
+
+ :type option_group_name: string
+ :param option_group_name: Indicates that the DB instance should be
+ associated with the specified option group. Changing this parameter
+ does not result in an outage except in the following case and the
+ change is applied during the next maintenance window unless the
+ `ApplyImmediately` parameter is set to `True` for this request. If
+ the parameter change results in an option group that enables OEM,
+ this change can cause a brief (sub-second) period during which new
+ connections are rejected but existing connections are not
+ interrupted.
+ Permanent options, such as the TDE option for Oracle Advanced Security
+ TDE, cannot be removed from an option group, and that option group
+ cannot be removed from a DB instance once it is associated with a
+ DB instance
+
+ :type new_db_instance_identifier: string
+ :param new_db_instance_identifier:
+ The new DB instance identifier for the DB instance when renaming a DB
+ Instance. This value is stored as a lowercase string.
+
+ Constraints:
+
+
+ + Must contain from 1 to 63 alphanumeric characters or hyphens
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ """
+ params = {'DBInstanceIdentifier': db_instance_identifier, }
+ if allocated_storage is not None:
+ params['AllocatedStorage'] = allocated_storage
+ if db_instance_class is not None:
+ params['DBInstanceClass'] = db_instance_class
+ if db_security_groups is not None:
+ self.build_list_params(params,
+ db_security_groups,
+ 'DBSecurityGroups.member')
+ if vpc_security_group_ids is not None:
+ self.build_list_params(params,
+ vpc_security_group_ids,
+ 'VpcSecurityGroupIds.member')
+ if apply_immediately is not None:
+ params['ApplyImmediately'] = str(
+ apply_immediately).lower()
+ if master_user_password is not None:
+ params['MasterUserPassword'] = master_user_password
+ if db_parameter_group_name is not None:
+ params['DBParameterGroupName'] = db_parameter_group_name
+ if backup_retention_period is not None:
+ params['BackupRetentionPeriod'] = backup_retention_period
+ if preferred_backup_window is not None:
+ params['PreferredBackupWindow'] = preferred_backup_window
+ if preferred_maintenance_window is not None:
+ params['PreferredMaintenanceWindow'] = preferred_maintenance_window
+ if multi_az is not None:
+ params['MultiAZ'] = str(
+ multi_az).lower()
+ if engine_version is not None:
+ params['EngineVersion'] = engine_version
+ if allow_major_version_upgrade is not None:
+ params['AllowMajorVersionUpgrade'] = str(
+ allow_major_version_upgrade).lower()
+ if auto_minor_version_upgrade is not None:
+ params['AutoMinorVersionUpgrade'] = str(
+ auto_minor_version_upgrade).lower()
+ if iops is not None:
+ params['Iops'] = iops
+ if option_group_name is not None:
+ params['OptionGroupName'] = option_group_name
+ if new_db_instance_identifier is not None:
+ params['NewDBInstanceIdentifier'] = new_db_instance_identifier
+ return self._make_request(
+ action='ModifyDBInstance',
+ verb='POST',
+ path='/', params=params)
+
+ def modify_db_parameter_group(self, db_parameter_group_name, parameters):
+ """
+ Modifies the parameters of a DB parameter group. To modify
+ more than one parameter, submit a list of the following:
+ `ParameterName`, `ParameterValue`, and `ApplyMethod`. A
+ maximum of 20 parameters can be modified in a single request.
+
+ The `apply-immediate` method can be used only for dynamic
+ parameters; the `pending-reboot` method can be used with MySQL
+ and Oracle DB instances for either dynamic or static
+ parameters. For Microsoft SQL Server DB instances, the
+ `pending-reboot` method can be used only for static
+ parameters.
+
+ :type db_parameter_group_name: string
+ :param db_parameter_group_name:
+ The name of the DB parameter group.
+
+ Constraints:
+
+
+ + Must be the name of an existing DB parameter group
+ + Must be 1 to 255 alphanumeric characters
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ :type parameters: list
+ :param parameters:
+ An array of parameter names, values, and the apply method for the
+ parameter update. At least one parameter name, value, and apply
+ method must be supplied; subsequent arguments are optional. A
+ maximum of 20 parameters may be modified in a single request.
+
+ Valid Values (for the application method): `immediate | pending-reboot`
+
+ You can use the immediate value with dynamic parameters only. You can
+ use the pending-reboot value for both dynamic and static
+ parameters, and changes are applied when DB instance reboots.
+
+ """
+ params = {'DBParameterGroupName': db_parameter_group_name, }
+ self.build_complex_list_params(
+ params, parameters,
+ 'Parameters.member',
+ ('ParameterName', 'ParameterValue', 'Description', 'Source', 'ApplyType', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion', 'ApplyMethod'))
+ return self._make_request(
+ action='ModifyDBParameterGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def modify_db_subnet_group(self, db_subnet_group_name, subnet_ids,
+ db_subnet_group_description=None):
+ """
+ Modifies an existing DB subnet group. DB subnet groups must
+ contain at least one subnet in at least two AZs in the region.
+
+ :type db_subnet_group_name: string
+ :param db_subnet_group_name: The name for the DB subnet group. This
+ value is stored as a lowercase string.
+ Constraints: Must contain no more than 255 alphanumeric characters or
+ hyphens. Must not be "Default".
+
+ Example: `mySubnetgroup`
+
+ :type db_subnet_group_description: string
+ :param db_subnet_group_description: The description for the DB subnet
+ group.
+
+ :type subnet_ids: list
+ :param subnet_ids: The EC2 subnet IDs for the DB subnet group.
+
+ """
+ params = {'DBSubnetGroupName': db_subnet_group_name, }
+ self.build_list_params(params,
+ subnet_ids,
+ 'SubnetIds.member')
+ if db_subnet_group_description is not None:
+ params['DBSubnetGroupDescription'] = db_subnet_group_description
+ return self._make_request(
+ action='ModifyDBSubnetGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def modify_event_subscription(self, subscription_name,
+ sns_topic_arn=None, source_type=None,
+ event_categories=None, enabled=None):
+ """
+ Modifies an existing RDS event notification subscription. Note
+ that you cannot modify the source identifiers using this call;
+ to change source identifiers for a subscription, use the
+ AddSourceIdentifierToSubscription and
+ RemoveSourceIdentifierFromSubscription calls.
+
+ You can see a list of the event categories for a given
+ SourceType in the `Events`_ topic in the Amazon RDS User Guide
+ or by using the **DescribeEventCategories** action.
+
+ :type subscription_name: string
+ :param subscription_name: The name of the RDS event notification
+ subscription.
+
+ :type sns_topic_arn: string
+ :param sns_topic_arn: The Amazon Resource Name (ARN) of the SNS topic
+ created for event notification. The ARN is created by Amazon SNS
+ when you create a topic and subscribe to it.
+
+ :type source_type: string
+ :param source_type: The type of source that will be generating the
+ events. For example, if you want to be notified of events generated
+ by a DB instance, you would set this parameter to db-instance. if
+ this value is not specified, all events are returned.
+ Valid values: db-instance | db-parameter-group | db-security-group |
+ db-snapshot
+
+ :type event_categories: list
+ :param event_categories: A list of event categories for a SourceType
+ that you want to subscribe to. You can see a list of the categories
+ for a given SourceType in the `Events`_ topic in the Amazon RDS
+ User Guide or by using the **DescribeEventCategories** action.
+
+ :type enabled: boolean
+ :param enabled: A Boolean value; set to **true** to activate the
+ subscription.
+
+ """
+ params = {'SubscriptionName': subscription_name, }
+ if sns_topic_arn is not None:
+ params['SnsTopicArn'] = sns_topic_arn
+ if source_type is not None:
+ params['SourceType'] = source_type
+ if event_categories is not None:
+ self.build_list_params(params,
+ event_categories,
+ 'EventCategories.member')
+ if enabled is not None:
+ params['Enabled'] = str(
+ enabled).lower()
+ return self._make_request(
+ action='ModifyEventSubscription',
+ verb='POST',
+ path='/', params=params)
+
+ def modify_option_group(self, option_group_name, options_to_include=None,
+ options_to_remove=None, apply_immediately=None):
+ """
+ Modifies an existing option group.
+
+ :type option_group_name: string
+ :param option_group_name: The name of the option group to be modified.
+ Permanent options, such as the TDE option for Oracle Advanced Security
+ TDE, cannot be removed from an option group, and that option group
+ cannot be removed from a DB instance once it is associated with a
+ DB instance
+
+ :type options_to_include: list
+ :param options_to_include: Options in this list are added to the option
+ group or, if already present, the specified configuration is used
+ to update the existing configuration.
+
+ :type options_to_remove: list
+ :param options_to_remove: Options in this list are removed from the
+ option group.
+
+ :type apply_immediately: boolean
+ :param apply_immediately: Indicates whether the changes should be
+ applied immediately, or during the next maintenance window for each
+ instance associated with the option group.
+
+ """
+ params = {'OptionGroupName': option_group_name, }
+ if options_to_include is not None:
+ self.build_complex_list_params(
+ params, options_to_include,
+ 'OptionsToInclude.member',
+ ('OptionName', 'Port', 'DBSecurityGroupMemberships', 'VpcSecurityGroupMemberships', 'OptionSettings'))
+ if options_to_remove is not None:
+ self.build_list_params(params,
+ options_to_remove,
+ 'OptionsToRemove.member')
+ if apply_immediately is not None:
+ params['ApplyImmediately'] = str(
+ apply_immediately).lower()
+ return self._make_request(
+ action='ModifyOptionGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def promote_read_replica(self, db_instance_identifier,
+ backup_retention_period=None,
+ preferred_backup_window=None):
+ """
+ Promotes a read replica DB instance to a standalone DB
+ instance.
+
+ :type db_instance_identifier: string
+ :param db_instance_identifier: The DB instance identifier. This value
+ is stored as a lowercase string.
+ Constraints:
+
+
+ + Must be the identifier for an existing read replica DB instance
+ + Must contain from 1 to 63 alphanumeric characters or hyphens
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+
+ Example: mydbinstance
+
+ :type backup_retention_period: integer
+ :param backup_retention_period:
+ The number of days to retain automated backups. Setting this parameter
+ to a positive number enables backups. Setting this parameter to 0
+ disables automated backups.
+
+ Default: 1
+
+ Constraints:
+
+
+ + Must be a value from 0 to 8
+
+ :type preferred_backup_window: string
+ :param preferred_backup_window: The daily time range during which
+ automated backups are created if automated backups are enabled,
+ using the `BackupRetentionPeriod` parameter.
+ Default: A 30-minute window selected at random from an 8-hour block of
+ time per region. See the Amazon RDS User Guide for the time blocks
+ for each region from which the default backup windows are assigned.
+
+ Constraints: Must be in the format `hh24:mi-hh24:mi`. Times should be
+ Universal Time Coordinated (UTC). Must not conflict with the
+ preferred maintenance window. Must be at least 30 minutes.
+
+ """
+ params = {'DBInstanceIdentifier': db_instance_identifier, }
+ if backup_retention_period is not None:
+ params['BackupRetentionPeriod'] = backup_retention_period
+ if preferred_backup_window is not None:
+ params['PreferredBackupWindow'] = preferred_backup_window
+ return self._make_request(
+ action='PromoteReadReplica',
+ verb='POST',
+ path='/', params=params)
+
+ def purchase_reserved_db_instances_offering(self,
+ reserved_db_instances_offering_id,
+ reserved_db_instance_id=None,
+ db_instance_count=None,
+ tags=None):
+ """
+ Purchases a reserved DB instance offering.
+
+ :type reserved_db_instances_offering_id: string
+ :param reserved_db_instances_offering_id: The ID of the Reserved DB
+ instance offering to purchase.
+ Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706
+
+ :type reserved_db_instance_id: string
+ :param reserved_db_instance_id: Customer-specified identifier to track
+ this reservation.
+ Example: myreservationID
+
+ :type db_instance_count: integer
+ :param db_instance_count: The number of instances to reserve.
+ Default: `1`
+
+ :type tags: list
+ :param tags: A list of tags.
+
+ """
+ params = {
+ 'ReservedDBInstancesOfferingId': reserved_db_instances_offering_id,
+ }
+ if reserved_db_instance_id is not None:
+ params['ReservedDBInstanceId'] = reserved_db_instance_id
+ if db_instance_count is not None:
+ params['DBInstanceCount'] = db_instance_count
+ if tags is not None:
+ self.build_complex_list_params(
+ params, tags,
+ 'Tags.member',
+ ('Key', 'Value'))
+ return self._make_request(
+ action='PurchaseReservedDBInstancesOffering',
+ verb='POST',
+ path='/', params=params)
+
+ def reboot_db_instance(self, db_instance_identifier, force_failover=None):
+ """
+ Rebooting a DB instance restarts the database engine service.
+ A reboot also applies to the DB instance any modifications to
+ the associated DB parameter group that were pending. Rebooting
+ a DB instance results in a momentary outage of the instance,
+ during which the DB instance status is set to rebooting. If
+ the RDS instance is configured for MultiAZ, it is possible
+ that the reboot will be conducted through a failover. An
+ Amazon RDS event is created when the reboot is completed.
+
+ If your DB instance is deployed in multiple Availability
+ Zones, you can force a failover from one AZ to the other
+ during the reboot. You might force a failover to test the
+ availability of your DB instance deployment or to restore
+ operations to the original AZ after a failover occurs.
+
+ The time required to reboot is a function of the specific
+ database engine's crash recovery process. To improve the
+ reboot time, we recommend that you reduce database activities
+ as much as possible during the reboot process to reduce
+ rollback activity for in-transit transactions.
+
+ :type db_instance_identifier: string
+ :param db_instance_identifier:
+ The DB instance identifier. This parameter is stored as a lowercase
+ string.
+
+ Constraints:
+
+
+ + Must contain from 1 to 63 alphanumeric characters or hyphens
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ :type force_failover: boolean
+ :param force_failover: When `True`, the reboot will be conducted
+ through a MultiAZ failover.
+ Constraint: You cannot specify `True` if the instance is not configured
+ for MultiAZ.
+
+ """
+ params = {'DBInstanceIdentifier': db_instance_identifier, }
+ if force_failover is not None:
+ params['ForceFailover'] = str(
+ force_failover).lower()
+ return self._make_request(
+ action='RebootDBInstance',
+ verb='POST',
+ path='/', params=params)
+
+ def remove_source_identifier_from_subscription(self, subscription_name,
+ source_identifier):
+ """
+ Removes a source identifier from an existing RDS event
+ notification subscription.
+
+ :type subscription_name: string
+ :param subscription_name: The name of the RDS event notification
+ subscription you want to remove a source identifier from.
+
+ :type source_identifier: string
+ :param source_identifier: The source identifier to be removed from the
+ subscription, such as the **DB instance identifier** for a DB
+ instance or the name of a security group.
+
+ """
+ params = {
+ 'SubscriptionName': subscription_name,
+ 'SourceIdentifier': source_identifier,
+ }
+ return self._make_request(
+ action='RemoveSourceIdentifierFromSubscription',
+ verb='POST',
+ path='/', params=params)
+
+ def remove_tags_from_resource(self, resource_name, tag_keys):
+ """
+ Removes metadata tags from an Amazon RDS resource.
+
+ For an overview on tagging an Amazon RDS resource, see
+ `Tagging Amazon RDS Resources`_.
+
+ :type resource_name: string
+ :param resource_name: The Amazon RDS resource the tags will be removed
+ from. This value is an Amazon Resource Name (ARN). For information
+ about creating an ARN, see ` Constructing an RDS Amazon Resource
+ Name (ARN)`_.
+
+ :type tag_keys: list
+ :param tag_keys: The tag key (name) of the tag to be removed.
+
+ """
+ params = {'ResourceName': resource_name, }
+ self.build_list_params(params,
+ tag_keys,
+ 'TagKeys.member')
+ return self._make_request(
+ action='RemoveTagsFromResource',
+ verb='POST',
+ path='/', params=params)
+
+ def reset_db_parameter_group(self, db_parameter_group_name,
+ reset_all_parameters=None, parameters=None):
+ """
+ Modifies the parameters of a DB parameter group to the
+ engine/system default value. To reset specific parameters
+ submit a list of the following: `ParameterName` and
+ `ApplyMethod`. To reset the entire DB parameter group, specify
+ the `DBParameterGroup` name and `ResetAllParameters`
+ parameters. When resetting the entire group, dynamic
+ parameters are updated immediately and static parameters are
+ set to `pending-reboot` to take effect on the next DB instance
+ restart or `RebootDBInstance` request.
+
+ :type db_parameter_group_name: string
+ :param db_parameter_group_name:
+ The name of the DB parameter group.
+
+ Constraints:
+
+
+ + Must be 1 to 255 alphanumeric characters
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ :type reset_all_parameters: boolean
+ :param reset_all_parameters: Specifies whether ( `True`) or not (
+ `False`) to reset all parameters in the DB parameter group to
+ default values.
+ Default: `True`
+
+ :type parameters: list
+ :param parameters: An array of parameter names, values, and the apply
+ method for the parameter update. At least one parameter name,
+ value, and apply method must be supplied; subsequent arguments are
+ optional. A maximum of 20 parameters may be modified in a single
+ request.
+ **MySQL**
+
+ Valid Values (for Apply method): `immediate` | `pending-reboot`
+
+ You can use the immediate value with dynamic parameters only. You can
+ use the `pending-reboot` value for both dynamic and static
+ parameters, and changes are applied when DB instance reboots.
+
+ **Oracle**
+
+ Valid Values (for Apply method): `pending-reboot`
+
+ """
+ params = {'DBParameterGroupName': db_parameter_group_name, }
+ if reset_all_parameters is not None:
+ params['ResetAllParameters'] = str(
+ reset_all_parameters).lower()
+ if parameters is not None:
+ self.build_complex_list_params(
+ params, parameters,
+ 'Parameters.member',
+ ('ParameterName', 'ParameterValue', 'Description', 'Source', 'ApplyType', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion', 'ApplyMethod'))
+ return self._make_request(
+ action='ResetDBParameterGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def restore_db_instance_from_db_snapshot(self, db_instance_identifier,
+ db_snapshot_identifier,
+ db_instance_class=None,
+ port=None,
+ availability_zone=None,
+ db_subnet_group_name=None,
+ multi_az=None,
+ publicly_accessible=None,
+ auto_minor_version_upgrade=None,
+ license_model=None,
+ db_name=None, engine=None,
+ iops=None,
+ option_group_name=None,
+ tags=None):
+ """
+ Creates a new DB instance from a DB snapshot. The target
+ database is created from the source database restore point
+ with the same configuration as the original source database,
+ except that the new RDS instance is created with the default
+ security group.
+
+ :type db_instance_identifier: string
+ :param db_instance_identifier:
+ The identifier for the DB snapshot to restore from.
+
+ Constraints:
+
+
+ + Must contain from 1 to 63 alphanumeric characters or hyphens
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ :type db_snapshot_identifier: string
+ :param db_snapshot_identifier: Name of the DB instance to create from
+ the DB snapshot. This parameter isn't case sensitive.
+ Constraints:
+
+
+ + Must contain from 1 to 255 alphanumeric characters or hyphens
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+
+ Example: `my-snapshot-id`
+
+ :type db_instance_class: string
+ :param db_instance_class: The compute and memory capacity of the Amazon
+ RDS DB instance.
+ Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large |
+ db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge`
+
+ :type port: integer
+ :param port: The port number on which the database accepts connections.
+ Default: The same port as the original DB instance
+
+ Constraints: Value must be `1150-65535`
+
+ :type availability_zone: string
+ :param availability_zone: The EC2 Availability Zone that the database
+ instance will be created in.
+ Default: A random, system-chosen Availability Zone.
+
+ Constraint: You cannot specify the AvailabilityZone parameter if the
+ MultiAZ parameter is set to `True`.
+
+ Example: `us-east-1a`
+
+ :type db_subnet_group_name: string
+ :param db_subnet_group_name: The DB subnet group name to use for the
+ new instance.
+
+ :type multi_az: boolean
+ :param multi_az: Specifies if the DB instance is a Multi-AZ deployment.
+ Constraint: You cannot specify the AvailabilityZone parameter if the
+ MultiAZ parameter is set to `True`.
+
+ :type publicly_accessible: boolean
+ :param publicly_accessible: Specifies the accessibility options for the
+ DB instance. A value of true specifies an Internet-facing instance
+ with a publicly resolvable DNS name, which resolves to a public IP
+ address. A value of false specifies an internal instance with a DNS
+ name that resolves to a private IP address.
+ Default: The default behavior varies depending on whether a VPC has
+ been requested or not. The following list shows the default
+ behavior in each case.
+
+
+ + **Default VPC:**true
+ + **VPC:**false
+
+
+ If no DB subnet group has been specified as part of the request and the
+ PubliclyAccessible value has not been set, the DB instance will be
+ publicly accessible. If a specific DB subnet group has been
+ specified as part of the request and the PubliclyAccessible value
+ has not been set, the DB instance will be private.
+
+ :type auto_minor_version_upgrade: boolean
+ :param auto_minor_version_upgrade: Indicates that minor version
+ upgrades will be applied automatically to the DB instance during
+ the maintenance window.
+
+ :type license_model: string
+ :param license_model: License model information for the restored DB
+ instance.
+ Default: Same as source.
+
+ Valid values: `license-included` | `bring-your-own-license` | `general-
+ public-license`
+
+ :type db_name: string
+ :param db_name:
+ The database name for the restored DB instance.
+
+
+ This parameter doesn't apply to the MySQL engine.
+
+ :type engine: string
+ :param engine: The database engine to use for the new instance.
+ Default: The same as source
+
+ Constraint: Must be compatible with the engine of the source
+
+ Example: `oracle-ee`
+
+ :type iops: integer
+ :param iops: Specifies the amount of provisioned IOPS for the DB
+ instance, expressed in I/O operations per second. If this parameter
+ is not specified, the IOPS value will be taken from the backup. If
+ this parameter is set to 0, the new instance will be converted to a
+ non-PIOPS instance, which will take additional time, though your DB
+ instance will be available for connections before the conversion
+ starts.
+ Constraints: Must be an integer greater than 1000.
+
+ :type option_group_name: string
+ :param option_group_name: The name of the option group to be used for
+ the restored DB instance.
+ Permanent options, such as the TDE option for Oracle Advanced Security
+ TDE, cannot be removed from an option group, and that option group
+ cannot be removed from a DB instance once it is associated with a
+ DB instance
+
+ :type tags: list
+ :param tags: A list of tags.
+
+ """
+ params = {
+ 'DBInstanceIdentifier': db_instance_identifier,
+ 'DBSnapshotIdentifier': db_snapshot_identifier,
+ }
+ if db_instance_class is not None:
+ params['DBInstanceClass'] = db_instance_class
+ if port is not None:
+ params['Port'] = port
+ if availability_zone is not None:
+ params['AvailabilityZone'] = availability_zone
+ if db_subnet_group_name is not None:
+ params['DBSubnetGroupName'] = db_subnet_group_name
+ if multi_az is not None:
+ params['MultiAZ'] = str(
+ multi_az).lower()
+ if publicly_accessible is not None:
+ params['PubliclyAccessible'] = str(
+ publicly_accessible).lower()
+ if auto_minor_version_upgrade is not None:
+ params['AutoMinorVersionUpgrade'] = str(
+ auto_minor_version_upgrade).lower()
+ if license_model is not None:
+ params['LicenseModel'] = license_model
+ if db_name is not None:
+ params['DBName'] = db_name
+ if engine is not None:
+ params['Engine'] = engine
+ if iops is not None:
+ params['Iops'] = iops
+ if option_group_name is not None:
+ params['OptionGroupName'] = option_group_name
+ if tags is not None:
+ self.build_complex_list_params(
+ params, tags,
+ 'Tags.member',
+ ('Key', 'Value'))
+ return self._make_request(
+ action='RestoreDBInstanceFromDBSnapshot',
+ verb='POST',
+ path='/', params=params)
+
+ def restore_db_instance_to_point_in_time(self,
+ source_db_instance_identifier,
+ target_db_instance_identifier,
+ restore_time=None,
+ use_latest_restorable_time=None,
+ db_instance_class=None,
+ port=None,
+ availability_zone=None,
+ db_subnet_group_name=None,
+ multi_az=None,
+ publicly_accessible=None,
+ auto_minor_version_upgrade=None,
+ license_model=None,
+ db_name=None, engine=None,
+ iops=None,
+ option_group_name=None,
+ tags=None):
+ """
+ Restores a DB instance to an arbitrary point-in-time. Users
+ can restore to any point in time before the
+ latestRestorableTime for up to backupRetentionPeriod days. The
+ target database is created from the source database with the
+ same configuration as the original database except that the DB
+ instance is created with the default DB security group.
+
+ :type source_db_instance_identifier: string
+ :param source_db_instance_identifier:
+ The identifier of the source DB instance from which to restore.
+
+ Constraints:
+
+
+ + Must be the identifier of an existing database instance
+ + Must contain from 1 to 63 alphanumeric characters or hyphens
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ :type target_db_instance_identifier: string
+ :param target_db_instance_identifier:
+ The name of the new database instance to be created.
+
+ Constraints:
+
+
+ + Must contain from 1 to 63 alphanumeric characters or hyphens
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ :type restore_time: timestamp
+ :param restore_time: The date and time to restore from.
+ Valid Values: Value must be a UTC time
+
+ Constraints:
+
+
+ + Must be before the latest restorable time for the DB instance
+ + Cannot be specified if UseLatestRestorableTime parameter is true
+
+
+ Example: `2009-09-07T23:45:00Z`
+
+ :type use_latest_restorable_time: boolean
+ :param use_latest_restorable_time: Specifies whether ( `True`) or not (
+ `False`) the DB instance is restored from the latest backup time.
+ Default: `False`
+
+ Constraints: Cannot be specified if RestoreTime parameter is provided.
+
+ :type db_instance_class: string
+ :param db_instance_class: The compute and memory capacity of the Amazon
+ RDS DB instance.
+ Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large |
+ db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge`
+
+ Default: The same DBInstanceClass as the original DB instance.
+
+ :type port: integer
+ :param port: The port number on which the database accepts connections.
+ Constraints: Value must be `1150-65535`
+
+ Default: The same port as the original DB instance.
+
+ :type availability_zone: string
+ :param availability_zone: The EC2 Availability Zone that the database
+ instance will be created in.
+ Default: A random, system-chosen Availability Zone.
+
+ Constraint: You cannot specify the AvailabilityZone parameter if the
+ MultiAZ parameter is set to true.
+
+ Example: `us-east-1a`
+
+ :type db_subnet_group_name: string
+ :param db_subnet_group_name: The DB subnet group name to use for the
+ new instance.
+
+ :type multi_az: boolean
+ :param multi_az: Specifies if the DB instance is a Multi-AZ deployment.
+ Constraint: You cannot specify the AvailabilityZone parameter if the
+ MultiAZ parameter is set to `True`.
+
+ :type publicly_accessible: boolean
+ :param publicly_accessible: Specifies the accessibility options for the
+ DB instance. A value of true specifies an Internet-facing instance
+ with a publicly resolvable DNS name, which resolves to a public IP
+ address. A value of false specifies an internal instance with a DNS
+ name that resolves to a private IP address.
+ Default: The default behavior varies depending on whether a VPC has
+ been requested or not. The following list shows the default
+ behavior in each case.
+
+
+ + **Default VPC:**true
+ + **VPC:**false
+
+
+ If no DB subnet group has been specified as part of the request and the
+ PubliclyAccessible value has not been set, the DB instance will be
+ publicly accessible. If a specific DB subnet group has been
+ specified as part of the request and the PubliclyAccessible value
+ has not been set, the DB instance will be private.
+
+ :type auto_minor_version_upgrade: boolean
+ :param auto_minor_version_upgrade: Indicates that minor version
+ upgrades will be applied automatically to the DB instance during
+ the maintenance window.
+
+ :type license_model: string
+ :param license_model: License model information for the restored DB
+ instance.
+ Default: Same as source.
+
+ Valid values: `license-included` | `bring-your-own-license` | `general-
+ public-license`
+
+ :type db_name: string
+ :param db_name:
+ The database name for the restored DB instance.
+
+
+ This parameter is not used for the MySQL engine.
+
+ :type engine: string
+ :param engine: The database engine to use for the new instance.
+ Default: The same as source
+
+ Constraint: Must be compatible with the engine of the source
+
+ Example: `oracle-ee`
+
+ :type iops: integer
+ :param iops: The amount of Provisioned IOPS (input/output operations
+ per second) to be initially allocated for the DB instance.
+ Constraints: Must be an integer greater than 1000.
+
+ :type option_group_name: string
+ :param option_group_name: The name of the option group to be used for
+ the restored DB instance.
+ Permanent options, such as the TDE option for Oracle Advanced Security
+ TDE, cannot be removed from an option group, and that option group
+ cannot be removed from a DB instance once it is associated with a
+ DB instance
+
+ :type tags: list
+ :param tags: A list of tags.
+
+ """
+ params = {
+ 'SourceDBInstanceIdentifier': source_db_instance_identifier,
+ 'TargetDBInstanceIdentifier': target_db_instance_identifier,
+ }
+ if restore_time is not None:
+ params['RestoreTime'] = restore_time
+ if use_latest_restorable_time is not None:
+ params['UseLatestRestorableTime'] = str(
+ use_latest_restorable_time).lower()
+ if db_instance_class is not None:
+ params['DBInstanceClass'] = db_instance_class
+ if port is not None:
+ params['Port'] = port
+ if availability_zone is not None:
+ params['AvailabilityZone'] = availability_zone
+ if db_subnet_group_name is not None:
+ params['DBSubnetGroupName'] = db_subnet_group_name
+ if multi_az is not None:
+ params['MultiAZ'] = str(
+ multi_az).lower()
+ if publicly_accessible is not None:
+ params['PubliclyAccessible'] = str(
+ publicly_accessible).lower()
+ if auto_minor_version_upgrade is not None:
+ params['AutoMinorVersionUpgrade'] = str(
+ auto_minor_version_upgrade).lower()
+ if license_model is not None:
+ params['LicenseModel'] = license_model
+ if db_name is not None:
+ params['DBName'] = db_name
+ if engine is not None:
+ params['Engine'] = engine
+ if iops is not None:
+ params['Iops'] = iops
+ if option_group_name is not None:
+ params['OptionGroupName'] = option_group_name
+ if tags is not None:
+ self.build_complex_list_params(
+ params, tags,
+ 'Tags.member',
+ ('Key', 'Value'))
+ return self._make_request(
+ action='RestoreDBInstanceToPointInTime',
+ verb='POST',
+ path='/', params=params)
+
+ def revoke_db_security_group_ingress(self, db_security_group_name,
+ cidrip=None,
+ ec2_security_group_name=None,
+ ec2_security_group_id=None,
+ ec2_security_group_owner_id=None):
+ """
+ Revokes ingress from a DBSecurityGroup for previously
+ authorized IP ranges or EC2 or VPC Security Groups. Required
+ parameters for this API are one of CIDRIP, EC2SecurityGroupId
+ for VPC, or (EC2SecurityGroupOwnerId and either
+ EC2SecurityGroupName or EC2SecurityGroupId).
+
+ :type db_security_group_name: string
+ :param db_security_group_name: The name of the DB security group to
+ revoke ingress from.
+
+ :type cidrip: string
+ :param cidrip: The IP range to revoke access from. Must be a valid CIDR
+ range. If `CIDRIP` is specified, `EC2SecurityGroupName`,
+ `EC2SecurityGroupId` and `EC2SecurityGroupOwnerId` cannot be
+ provided.
+
+ :type ec2_security_group_name: string
+ :param ec2_security_group_name: The name of the EC2 security group to
+ revoke access from. For VPC DB security groups,
+ `EC2SecurityGroupId` must be provided. Otherwise,
+ EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or
+ `EC2SecurityGroupId` must be provided.
+
+ :type ec2_security_group_id: string
+ :param ec2_security_group_id: The id of the EC2 security group to
+ revoke access from. For VPC DB security groups,
+ `EC2SecurityGroupId` must be provided. Otherwise,
+ EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or
+ `EC2SecurityGroupId` must be provided.
+
+ :type ec2_security_group_owner_id: string
+ :param ec2_security_group_owner_id: The AWS Account Number of the owner
+ of the EC2 security group specified in the `EC2SecurityGroupName`
+ parameter. The AWS Access Key ID is not an acceptable value. For
+ VPC DB security groups, `EC2SecurityGroupId` must be provided.
+ Otherwise, EC2SecurityGroupOwnerId and either
+ `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.
+
+ """
+ params = {'DBSecurityGroupName': db_security_group_name, }
+ if cidrip is not None:
+ params['CIDRIP'] = cidrip
+ if ec2_security_group_name is not None:
+ params['EC2SecurityGroupName'] = ec2_security_group_name
+ if ec2_security_group_id is not None:
+ params['EC2SecurityGroupId'] = ec2_security_group_id
+ if ec2_security_group_owner_id is not None:
+ params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id
+ return self._make_request(
+ action='RevokeDBSecurityGroupIngress',
+ verb='POST',
+ path='/', params=params)
+
+ def _make_request(self, action, verb, path, params):
+ params['ContentType'] = 'JSON'
+ response = self.make_request(action=action, verb='POST',
+ path='/', params=params)
+ body = response.read()
+ boto.log.debug(body)
+ if response.status == 200:
+ return json.loads(body)
+ else:
+ json_body = json.loads(body)
+ fault_name = json_body.get('Error', {}).get('Code', None)
+ exception_class = self._faults.get(fault_name, self.ResponseError)
+ raise exception_class(response.status, response.reason,
+ body=json_body)
diff --git a/boto/requestlog.py b/boto/requestlog.py
new file mode 100644
index 00000000..5f1c2551
--- /dev/null
+++ b/boto/requestlog.py
@@ -0,0 +1,39 @@
+
+from datetime import datetime
+from threading import Thread
+import Queue
+
+from boto.utils import RequestHook
+
class RequestLogger(RequestHook):
    """
    A RequestHook that logs request/response metadata as CSV rows.

    Rows are pushed onto a bounded queue and written to the log file by
    a single background thread, so the calling thread does not block on
    disk I/O (unless the queue is full).
    """

    def __init__(self, filename='/tmp/request_log.csv'):
        """
        :type filename: str
        :param filename: path of the CSV log file to (over)write
        """
        self.request_log_file = open(filename, 'w')
        # Bounded so a stalled writer applies back-pressure instead of
        # letting queued rows grow without limit.
        self.request_log_queue = Queue.Queue(100)
        Thread(target=self._request_log_worker).start()

    def handle_request_data(self, request, response, error=False):
        """
        Queue one log row for a completed request.

        :param request: the request object; must expose ``start_time``
            (a datetime) and ``params['Action']``
        :param response: the HTTP response; its Content-Length header is
            only consulted when ``error`` is False
        :type error: bool
        :param error: True when the request failed (size is logged as 0)
        """
        # Renamed from ``len`` so the builtin is not shadowed.
        size = 0 if error else response.getheader('Content-Length')
        now = datetime.now()
        timestamp = now.strftime('%Y-%m-%d %H:%M:%S')
        td = (now - request.start_time)
        # Elapsed seconds as a float; timedelta.total_seconds() is not
        # available on Python 2.6, so compute it by hand.
        duration = (td.microseconds + long(td.seconds + td.days * 24 * 3600) * 1e6) / 1e6

        # Row format: timestamp, status code, response time,
        # response size, request action.
        self.request_log_queue.put("'%s', '%s', '%s', '%s', '%s'\n" % (
            timestamp, response.status, duration, size,
            request.params['Action']))

    def _request_log_worker(self):
        """Drain the queue forever, flushing each row to the log file."""
        while True:
            try:
                item = self.request_log_queue.get(True)
                self.request_log_file.write(item)
                self.request_log_file.flush()
                self.request_log_queue.task_done()
            except Exception:
                # The worker must never die. The original handler wrote
                # to sys.stdout without importing sys, which raised a
                # NameError inside the except block; default to stderr.
                import traceback
                traceback.print_exc()
+
diff --git a/boto/roboto/param.py b/boto/roboto/param.py
index d4ddbd9f..ed3e6be9 100644
--- a/boto/roboto/param.py
+++ b/boto/roboto/param.py
@@ -67,7 +67,7 @@ class Converter(object):
except:
raise ValidationException(param, '')
-class Param(object):
+class Param(Converter):
def __init__(self, name=None, ptype='string', optional=True,
short_name=None, long_name=None, doc='',
@@ -142,6 +142,6 @@ class Param(object):
:param value: The value to convert. This should always
be a string.
"""
- return super(Param, self).convert(value)
+ return super(Param, self).convert(self,value)
diff --git a/boto/route53/connection.py b/boto/route53/connection.py
index f0ac6573..7f45c778 100644
--- a/boto/route53/connection.py
+++ b/boto/route53/connection.py
@@ -226,6 +226,101 @@ class Route53Connection(AWSAuthConnection):
h.parse(body)
return e
+
    # Health checks

    # Request skeleton for the CreateHealthCheck call: interpolates the
    # API XML namespace, the caller's idempotency token, and the
    # <HealthCheckConfig> fragment produced by HealthCheck.to_xml().
    POSTHCXMLBody = """<CreateHealthCheckRequest xmlns="%(xmlns)s">
        <CallerReference>%(caller_ref)s</CallerReference>
        %(health_check)s
    </CreateHealthCheckRequest>"""
+
+ def create_health_check(self, health_check, caller_ref=None):
+ """
+ Create a new Health Check
+
+ :type health_check: HealthCheck
+ :param health_check: HealthCheck object
+
+ :type caller_ref: str
+ :param caller_ref: A unique string that identifies the request
+ and that allows failed CreateHealthCheckRequest requests to be retried
+ without the risk of executing the operation twice. If you don't
+ provide a value for this, boto will generate a Type 4 UUID and
+ use that.
+
+ """
+ if caller_ref is None:
+ caller_ref = str(uuid.uuid4())
+ uri = '/%s/healthcheck' % self.Version
+ params = {'xmlns': self.XMLNameSpace,
+ 'caller_ref': caller_ref,
+ 'health_check': health_check.to_xml()
+ }
+ xml_body = self.POSTHCXMLBody % params
+ response = self.make_request('POST', uri, {'Content-Type': 'text/xml'}, xml_body)
+ body = response.read()
+ boto.log.debug(body)
+ if response.status == 201:
+ e = boto.jsonresponse.Element()
+ h = boto.jsonresponse.XmlHandler(e, None)
+ h.parse(body)
+ return e
+ else:
+ raise exception.DNSServerError(response.status, response.reason, body)
+
+ def get_list_health_checks(self, maxitems=None, marker=None):
+ """
+ Return a list of health checks
+
+ :type maxitems: int
+ :param maxitems: Maximum number of items to return
+
+ :type marker: str
+ :param marker: marker to get next set of items to list
+
+ """
+
+ params = {}
+ if maxitems is not None:
+ params['maxitems'] = maxitems
+ if marker is not None:
+ params['marker'] = marker
+
+ uri = '/%s/healthcheck' % (self.Version, )
+ response = self.make_request('GET', uri, params=params)
+ body = response.read()
+ boto.log.debug(body)
+ if response.status >= 300:
+ raise exception.DNSServerError(response.status,
+ response.reason,
+ body)
+ e = boto.jsonresponse.Element(list_marker='HealthChecks', item_marker=('HealthCheck',))
+ h = boto.jsonresponse.XmlHandler(e, None)
+ h.parse(body)
+ return e
+
+ def delete_health_check(self, health_check_id):
+ """
+ Delete a health check
+
+ :type health_check_id: str
+ :param health_check_id: ID of the health check to delete
+
+ """
+ uri = '/%s/healthcheck/%s' % (self.Version, health_check_id)
+ response = self.make_request('DELETE', uri)
+ body = response.read()
+ boto.log.debug(body)
+ if response.status not in (200, 204):
+ raise exception.DNSServerError(response.status,
+ response.reason,
+ body)
+ e = boto.jsonresponse.Element()
+ h = boto.jsonresponse.XmlHandler(e, None)
+ h.parse(body)
+ return e
+
+
# Resource Record Sets
def get_all_rrsets(self, hosted_zone_id, type=None,
diff --git a/boto/route53/healthcheck.py b/boto/route53/healthcheck.py
new file mode 100644
index 00000000..059d208b
--- /dev/null
+++ b/boto/route53/healthcheck.py
@@ -0,0 +1,128 @@
+# Copyright (c) 2014 Tellybug, Matt Millar
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+
+"""
+From http://docs.aws.amazon.com/Route53/latest/APIReference/API_CreateHealthCheck.html
+
+POST /2013-04-01/healthcheck HTTP/1.1
+
+<?xml version="1.0" encoding="UTF-8"?>
+<CreateHealthCheckRequest xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
+ <CallerReference>unique description</CallerReference>
+ <HealthCheckConfig>
+ <IPAddress>IP address of the endpoint to check</IPAddress>
+ <Port>port on the endpoint to check</Port>
+ <Type>HTTP | HTTPS | HTTP_STR_MATCH | HTTPS_STR_MATCH | TCP</Type>
+ <ResourcePath>path of the file that
+ you want Amazon Route 53 to request</ResourcePath>
+ <FullyQualifiedDomainName>domain name of the
+ endpoint to check</FullyQualifiedDomainName>
+ <SearchString>if Type is HTTP_STR_MATCH or HTTPS_STR_MATCH,
+ the string to search for in the response body
+ from the specified resource</SearchString>
+ </HealthCheckConfig>
+</CreateHealthCheckRequest"""
+
+
class HealthCheck(object):
    """
    Definition of a single Route53 health check.

    Instances know how to render themselves as the <HealthCheckConfig>
    XML fragment used by the CreateHealthCheck API call.
    """

    # XML templates interpolated by to_xml().
    POSTXMLBody = """
        <HealthCheckConfig>
            <IPAddress>%(ip_addr)s</IPAddress>
            <Port>%(port)s</Port>
            <Type>%(type)s</Type>
            <ResourcePath>%(resource_path)s</ResourcePath>
            %(fqdn_part)s
            %(string_match_part)s
            %(request_interval)s
        </HealthCheckConfig>
    """

    XMLFQDNPart = """<FullyQualifiedDomainName>%(fqdn)s</FullyQualifiedDomainName>"""

    XMLStringMatchPart = """<SearchString>%(string_match)s</SearchString>"""

    XMLRequestIntervalPart = """<RequestInterval>%(request_interval)d</RequestInterval>"""

    # The service only accepts these polling intervals (seconds).
    valid_request_intervals = (10, 30)

    def __init__(self, ip_addr, port, hc_type, resource_path, fqdn=None, string_match=None, request_interval=30):
        """
        Build a HealthCheck definition.

        :type ip_addr: str
        :param ip_addr: IP address of the endpoint to check

        :type port: int
        :param port: port on the endpoint to check

        :type hc_type: str
        :param hc_type: one of HTTP | HTTPS | HTTP_STR_MATCH |
            HTTPS_STR_MATCH | TCP

        :type resource_path: str
        :param resource_path: path of the resource to request

        :type fqdn: str
        :param fqdn: domain name of the endpoint to check

        :type string_match: str
        :param string_match: for the HTTP_STR_MATCH / HTTPS_STR_MATCH
            types, the string to search for in the response body

        :type request_interval: int
        :param request_interval: seconds between consecutive
            health-check requests; must be one of
            ``valid_request_intervals``

        :raises AttributeError: when ``request_interval`` is not an
            allowed value
        """
        self.ip_addr = ip_addr
        self.port = port
        self.hc_type = hc_type
        self.resource_path = resource_path
        self.fqdn = fqdn
        self.string_match = string_match

        if request_interval not in self.valid_request_intervals:
            raise AttributeError(
                "Valid values for request_interval are: %s" %
                ",".join(str(i) for i in self.valid_request_intervals))
        self.request_interval = request_interval

    def to_xml(self):
        """Serialize this check as a <HealthCheckConfig> XML fragment."""
        # Optional parts render as empty strings when unset.
        fqdn_part = ""
        if self.fqdn is not None:
            fqdn_part = self.XMLFQDNPart % {'fqdn': self.fqdn}
        string_match_part = ""
        if self.string_match is not None:
            string_match_part = self.XMLStringMatchPart % {'string_match': self.string_match}
        interval_part = self.XMLRequestIntervalPart % {
            'request_interval': self.request_interval}
        return self.POSTXMLBody % {
            'ip_addr': self.ip_addr,
            'port': self.port,
            'type': self.hc_type,
            'resource_path': self.resource_path,
            'fqdn_part': fqdn_part,
            'string_match_part': string_match_part,
            'request_interval': interval_part,
        }
diff --git a/boto/route53/record.py b/boto/route53/record.py
index 81b707b3..2d6ae257 100644
--- a/boto/route53/record.py
+++ b/boto/route53/record.py
@@ -66,7 +66,8 @@ class ResourceRecordSets(ResultSet):
def add_change(self, action, name, type, ttl=600,
alias_hosted_zone_id=None, alias_dns_name=None, identifier=None,
- weight=None, region=None):
+ weight=None, region=None, alias_evaluate_target_health=None,
+ health_check=None):
"""
Add a change request to the set.
@@ -118,11 +119,22 @@ class ResourceRecordSets(ResultSet):
record sets that have the same combination of DNS name and type,
a value that determines which region this should be associated with
for the latency-based routing
+
+ :type alias_evaluate_target_health: Boolean
+    :param alias_evaluate_target_health: *Required for alias resource record sets* Indicates
+ whether this Resource Record Set should respect the health status of
+ any health checks associated with the ALIAS target record which it is
+ linked to.
+
+ :type health_check: str
+ :param health_check: Health check to associate with this record
"""
change = Record(name, type, ttl,
alias_hosted_zone_id=alias_hosted_zone_id,
alias_dns_name=alias_dns_name, identifier=identifier,
- weight=weight, region=region)
+ weight=weight, region=region,
+ alias_evaluate_target_health=alias_evaluate_target_health,
+ health_check=health_check)
self.changes.append([action, change])
return change
@@ -178,11 +190,14 @@ class ResourceRecordSets(ResultSet):
class Record(object):
"""An individual ResourceRecordSet"""
+ HealthCheckBody = """<HealthCheckId>%s</HealthCheckId>"""
+
XMLBody = """<ResourceRecordSet>
<Name>%(name)s</Name>
<Type>%(type)s</Type>
%(weight)s
%(body)s
+ %(health_check)s
</ResourceRecordSet>"""
WRRBody = """
@@ -206,15 +221,18 @@ class Record(object):
</ResourceRecord>"""
AliasBody = """<AliasTarget>
- <HostedZoneId>%s</HostedZoneId>
- <DNSName>%s</DNSName>
+ <HostedZoneId>%(hosted_zone_id)s</HostedZoneId>
+ <DNSName>%(dns_name)s</DNSName>
+ %(eval_target_health)s
</AliasTarget>"""
+ EvaluateTargetHealth = """<EvaluateTargetHealth>%s</EvaluateTargetHealth>"""
def __init__(self, name=None, type=None, ttl=600, resource_records=None,
alias_hosted_zone_id=None, alias_dns_name=None, identifier=None,
- weight=None, region=None):
+ weight=None, region=None, alias_evaluate_target_health=None,
+ health_check=None):
self.name = name
self.type = type
self.ttl = ttl
@@ -226,6 +244,8 @@ class Record(object):
self.identifier = identifier
self.weight = weight
self.region = region
+ self.alias_evaluate_target_health = alias_evaluate_target_health
+ self.health_check = health_check
def __repr__(self):
return '<Record:%s:%s:%s>' % (self.name, self.type, self.to_print())
@@ -243,7 +263,14 @@ class Record(object):
"""Spit this resource record set out as XML"""
if self.alias_hosted_zone_id is not None and self.alias_dns_name is not None:
# Use alias
- body = self.AliasBody % (self.alias_hosted_zone_id, self.alias_dns_name)
+ if self.alias_evaluate_target_health is not None:
+ eval_target_health = self.EvaluateTargetHealth % ('true' if self.alias_evaluate_target_health else 'false')
+ else:
+ eval_target_health = ""
+
+ body = self.AliasBody % { "hosted_zone_id": self.alias_hosted_zone_id,
+ "dns_name": self.alias_dns_name,
+ "eval_target_health": eval_target_health }
else:
# Use resource record(s)
records = ""
@@ -265,11 +292,16 @@ class Record(object):
weight = self.RRRBody % {"identifier": self.identifier, "region":
self.region}
+ health_check = ""
+ if self.health_check is not None:
+ health_check = self.HealthCheckBody % (self.health_check)
+
params = {
"name": self.name,
"type": self.type,
"weight": weight,
"body": body,
+ "health_check": health_check
}
return self.XMLBody % params
@@ -304,6 +336,8 @@ class Record(object):
self.alias_dns_name = value
elif name == 'SetIdentifier':
self.identifier = value
+ elif name == 'EvaluateTargetHealth':
+ self.alias_evaluate_target_health = value
elif name == 'Weight':
self.weight = value
elif name == 'Region':
diff --git a/boto/sts/connection.py b/boto/sts/connection.py
index ef4d52ed..4672c7c8 100644
--- a/boto/sts/connection.py
+++ b/boto/sts/connection.py
@@ -238,7 +238,9 @@ class STSConnection(AWSQueryConnection):
FederationToken, verb='POST')
def assume_role(self, role_arn, role_session_name, policy=None,
- duration_seconds=None, external_id=None):
+ duration_seconds=None, external_id=None,
+ mfa_serial_number=None,
+ mfa_token=None):
"""
Returns a set of temporary security credentials (consisting of
an access key ID, a secret access key, and a security token)
@@ -328,6 +330,24 @@ class STSConnection(AWSQueryConnection):
information about the external ID, see `About the External ID`_ in
Using Temporary Security Credentials .
+ :type mfa_serial_number: string
+ :param mfa_serial_number: The identification number of the MFA device that
+ is associated with the user who is making the AssumeRole call.
+ Specify this value if the trust policy of the role being assumed
+ includes a condition that requires MFA authentication. The value is
+ either the serial number for a hardware device (such as
+ GAHT12345678) or an Amazon Resource Name (ARN) for a virtual device
+ (such as arn:aws:iam::123456789012:mfa/user). Minimum length of 9.
+ Maximum length of 256.
+
+ :type mfa_token: string
+ :param mfa_token: The value provided by the MFA device, if the trust
+ policy of the role being assumed requires MFA (that is, if the
+ policy includes a condition that tests for MFA). If the role being
+ assumed requires MFA and if the TokenCode value is missing or
+        expired, the AssumeRole call returns an "access denied" error.
+ Minimum length of 6. Maximum length of 6.
+
"""
params = {
'RoleArn': role_arn,
@@ -339,6 +359,10 @@ class STSConnection(AWSQueryConnection):
params['DurationSeconds'] = duration_seconds
if external_id is not None:
params['ExternalId'] = external_id
+ if mfa_serial_number is not None:
+ params['SerialNumber'] = mfa_serial_number
+ if mfa_token is not None:
+ params['TokenCode'] = mfa_token
return self.get_object('AssumeRole', params, AssumedRole, verb='POST')
def assume_role_with_saml(self, role_arn, principal_arn, saml_assertion,
diff --git a/boto/utils.py b/boto/utils.py
index 853128fa..18d34f65 100644
--- a/boto/utils.py
+++ b/boto/utils.py
@@ -1025,3 +1025,12 @@ def merge_headers_by_name(name, headers):
matching_headers = find_matching_headers(name, headers)
return ','.join(str(headers[h]) for h in matching_headers
if headers[h] is not None)
+
class RequestHook(object):
    """
    Interface for observing completed requests.

    Extend this class and supply an instance to the connection object to
    gain access to the request and response objects after each request
    completes -- for example, to implement request logging.
    """

    def handle_request_data(self, request, response, error=False):
        """Called once per completed request; the default is a no-op.

        :param error: True when the request failed.
        """
        pass
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 85afdb32..8bc776ae 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -34,6 +34,7 @@ Currently Supported Services
* :doc:`DynamoDB2 <dynamodb2_tut>` -- (:doc:`API Reference <ref/dynamodb2>`) -- (:doc:`Migration Guide from v1 <migrations/dynamodb_v1_to_v2>`)
* :doc:`DynamoDB <dynamodb_tut>` -- (:doc:`API Reference <ref/dynamodb>`)
+ * Relational Data Services 2 (RDS) -- (:doc:`API Reference <ref/rds2>`) -- (:doc:`Migration Guide from v1 <migrations/rds_v1_to_v2>`)
* :doc:`Relational Data Services (RDS) <rds_tut>` -- (:doc:`API Reference <ref/rds>`)
* ElastiCache -- (:doc:`API Reference <ref/elasticache>`)
* Redshift -- (:doc:`API Reference <ref/redshift>`)
@@ -116,6 +117,7 @@ Release Notes
.. toctree::
:titlesonly:
+ releasenotes/v2.26.0
releasenotes/v2.25.0
releasenotes/v2.24.0
releasenotes/v2.23.0
@@ -194,6 +196,7 @@ Release Notes
support_tut
dynamodb2_tut
migrations/dynamodb_v1_to_v2
+ migrations/rds_v1_to_v2
apps_built_on_boto
ref/*
releasenotes/*
diff --git a/docs/source/migrations/rds_v1_to_v2.rst b/docs/source/migrations/rds_v1_to_v2.rst
new file mode 100644
index 00000000..944288cb
--- /dev/null
+++ b/docs/source/migrations/rds_v1_to_v2.rst
@@ -0,0 +1,91 @@
+.. _rds_v1_to_v2:
+
+===============================
+Migrating from RDS v1 to RDS v2
+===============================
+
+The original ``boto.rds`` module has historically lagged quite far behind the
+service (at time of writing, almost 50% of the API calls are
+missing/out-of-date). To address this, the Boto core team has switched to
+a generated client for RDS (``boto.rds2.layer1.RDSConnection``).
+
+However, this generated variant is not backward-compatible with the older
+``boto.rds.RDSConnection``. This document is to help you update your code
+(as desired) to take advantage of the latest API calls.
+
+For the duration of the document, **RDS2Connection** refers to
+``boto.rds2.layer1.RDSConnection``, where **RDSConnection** refers to
+``boto.rds.RDSConnection``.
+
+
+Prominent Differences
+=====================
+
+* The new **RDS2Connection** maps very closely to the `official API operations`_,
+ where the old **RDSConnection** had non-standard & inconsistent method names.
+* **RDS2Connection** almost always returns a Python dictionary that maps
+ closely to the API output. **RDSConnection** returned Python objects.
+* **RDS2Connection** is much more verbose in terms of output. Tools like
+ `jmespath`_ or `jsonq`_ can make handling these sometimes complex dictionaries more
+ manageable.
+
+.. _`official API operations`: http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/Welcome.html
+.. _`jmespath`: https://github.com/boto/jmespath
+.. _`jsonq`: https://github.com/edmund-huber/jsonq
+
+
+Method Renames
+==============
+
+Format is ``old_method_name`` -> ``new_method_name``:
+
+* ``authorize_dbsecurity_group`` -> ``authorize_db_security_group_ingress``
+* ``create_dbinstance`` -> ``create_db_instance``
+* ``create_dbinstance_read_replica`` -> ``create_db_instance_read_replica``
+* ``create_parameter_group`` -> ``create_db_parameter_group``
+* ``get_all_dbsnapshots`` -> ``describe_db_snapshots``
+* ``get_all_events`` -> ``describe_events``
+* ``modify_dbinstance`` -> ``modify_db_instance``
+* ``reboot_dbinstance`` -> ``reboot_db_instance``
+* ``restore_dbinstance_from_dbsnapshot`` -> ``restore_db_instance_from_db_snapshot``
+* ``restore_dbinstance_from_point_in_time`` -> ``restore_db_instance_to_point_in_time``
+* ``revoke_dbsecurity_group`` -> ``revoke_db_security_group_ingress``
+
+
+Parameter Changes
+=================
+
+Many parameter names have changed between **RDSConnection** &
+**RDS2Connection**. For instance, the old name for the instance identifier was
+``id``, where the new name is ``db_instance_identifier``. These changes are to
+ensure things map more closely to the API.
+
+In addition, in some cases, ordering & required-ness of parameters has changed
+as well. For instance, in ``create_db_instance``, the
+``engine`` parameter is now required (previously defaulted to ``MySQL5.1``) &
+its position in the call has changed to be before ``master_username``.
+
+As such, when updating your API calls, you should check the
+API Reference documentation to ensure you're passing the
+correct parameters.
+
+
+Return Values
+=============
+
+**RDSConnection** frequently returned higher-level Python objects. In contrast,
+**RDS2Connection** returns Python dictionaries of the data. This will require
+a bit more work to extract the necessary values. For example::
+
+ # Old
+ >>> instances = rds1_conn.get_all_dbinstances()
+ >>> inst = instances[0]
+ >>> inst.name
+ 'test-db'
+
+ # New
+ >>> instances = rds2_conn.describe_db_instances()
+ >>> inst = instances['DescribeDBInstancesResponse']\
+ ... ['DescribeDBInstancesResult']['DBInstances'][0]
+ >>> inst['DBName']
+ 'test-db'
diff --git a/docs/source/rds_tut.rst b/docs/source/rds_tut.rst
index 6955cbe3..e648528d 100644
--- a/docs/source/rds_tut.rst
+++ b/docs/source/rds_tut.rst
@@ -8,6 +8,15 @@ This tutorial focuses on the boto interface to the Relational Database Service
from Amazon Web Services. This tutorial assumes that you have boto already
downloaded and installed, and that you wish to setup a MySQL instance in RDS.
+.. warning::
+
+ This tutorial covers the **ORIGINAL** module for RDS.
+ It has since been supplanted by a second major version & an
+ updated API complete with all service operations. The documentation for the
+ new version of boto's support for RDS is at
+ :doc:`RDS v2 <ref/rds2>`.
+
+
Creating a Connection
---------------------
The first step in accessing RDS is to create a connection to the service.
diff --git a/docs/source/ref/rds2.rst b/docs/source/ref/rds2.rst
new file mode 100644
index 00000000..8c8121ec
--- /dev/null
+++ b/docs/source/ref/rds2.rst
@@ -0,0 +1,26 @@
+.. _ref-rds2:
+
+====
+RDS2
+====
+
+boto.rds2
+---------
+
+.. automodule:: boto.rds2
+ :members:
+ :undoc-members:
+
+boto.rds2.exceptions
+--------------------
+
+.. automodule:: boto.rds2.exceptions
+ :members:
+ :undoc-members:
+
+boto.rds2.layer1
+----------------
+
+.. automodule:: boto.rds2.layer1
+ :members:
+ :undoc-members:
diff --git a/docs/source/ref/swf.rst b/docs/source/ref/swf.rst
index 892bc07a..fbe1db0b 100644
--- a/docs/source/ref/swf.rst
+++ b/docs/source/ref/swf.rst
@@ -18,6 +18,13 @@ boto.swf.layer1
:members:
:undoc-members:
+boto.swf.layer1_decisions
+-------------------------
+
+.. automodule:: boto.swf.layer1_decisions
+ :members:
+ :undoc-members:
+
boto.swf.layer2
--------------------
diff --git a/docs/source/releasenotes/v2.26.0.rst b/docs/source/releasenotes/v2.26.0.rst
new file mode 100644
index 00000000..a586459c
--- /dev/null
+++ b/docs/source/releasenotes/v2.26.0.rst
@@ -0,0 +1,59 @@
+boto v2.26.0
+============
+
+:date: 2014/02/27
+
+This release adds support for MFA tokens in the AWS STS ``assume_role`` & the
+introduction of the ``boto.rds2`` module (which has full support for the entire
+RDS API). It also includes the addition of request hooks & many bugfixes.
+
+
+Changes
+-------
+
+* Added support for MFA in STS AssumeRole. (:sha:`899810c`)
+* Fixed how DynamoDB v2 works with Global Secondary Indexes. (:issue:`2122`,
+ :sha:`f602c95`)
+* Add request hooks and request logger. (:issue:`2125`, :sha:`e8b20fe`)
+* Don't pull the security token from the environment or config when a caller
+ supplies the access key and secret. (:issue:`2123`, :sha:`4df1694`)
+* Read EvaluateTargetHealth from Route53 resource record set. (:issue:`2120`,
+ :sha:`0a97158`)
+* Prevent implicit string decode in hmac-v4 handlers. (:issue:`2037`,
+ :issue:`2033`, :sha:`8e56a5f`)
+* Updated Datapipeline to include all current regions. (:issue:`2121`,
+ :sha:`dff5e3e`)
+* Bug fix for Google Storage generate_url authentication. (:issue:`2116`,
+ :issue:`2108`, :sha:`5a50932`)
+* Handle JSON error responses in BotoServerError. (:issue:`2113`, :issue:`2077`,
+ :sha:`221085e`)
+* Corrected a typo in SQS tutorial. (:issue:`2114`, :sha:`7ed41f7`)
+* Add CloudFormation template capabilities support. (:issue:`2111`,
+ :issue:`2075`, :sha:`65a4323`)
+* Add SWF layer1_decisions to docs. (:issue:`2110`, :issue:`2062`,
+ :sha:`6039cc9`)
+* Add support for request intervals in health checks. (:issue:`2109`,
+ :sha:`660b01a`)
+* Added checks for invalid regions to the ``bin`` scripts (:issue:`2107`,
+ :sha:`bbb9f1e`)
+* Better error output for unknown region - (:issue:`2041`, :issue:`1983`,
+ :sha:`cd63f92`)
+* Added certificate tests for CloudTrail. (:issue:`2106`, :sha:`a7e9b4c`)
+* Updated Kinesis endpoints. (:sha:`7bd4b6e`)
+* Finished implementation of RDS's DescribeDBLogFiles. (:issue:`2084`,
+ :sha:`f3c706c`)
+* Added support for RDS log file downloading. (:issue:`2086`, :issue:`1993`,
+ :sha:`4c51841`)
+* Added some unit tests for CloudFront. (:issue:`2076`, :sha:`6c46b1d`)
+* GS should ignore restore_headers as they are never set. (:issue:`2067`,
+ :sha:`f02aeb3`)
+* Update CloudFormation to support the latest API. (:issue:`2101`,
+ :sha:`ea1b1b6`)
+* Added Route53 health checks. (:issue:`2054`, :sha:`9028f7d`)
+* Merge branch 'rds2' into develop Fixes #2097. (:issue:`2097`, :sha:`6843c16`)
+* Fix Param class convert method (:issue:`2094`, :sha:`5cd4598`)
+* Added support for Route53 aliasing. (:issue:`2096`, :sha:`df5fa40`)
+* Removed the dependence on ``example.com`` within the Route53 tests.
+ (:issue:`2098`, :sha:`6ce9e0f`)
+* Fixed ``has_item`` support in DynamoDB v2. (:issue:`2090`, :sha:`aada5d3`)
+* Fix a little typo bug in the S3 tutorial. (:issue:`2088`, :sha:`c091d27`)
diff --git a/docs/source/s3_tut.rst b/docs/source/s3_tut.rst
index fc5fd27a..9db92211 100644
--- a/docs/source/s3_tut.rst
+++ b/docs/source/s3_tut.rst
@@ -168,7 +168,7 @@ Once a bucket exists, you can access it by getting the bucket. For example::
>>> mybucket = conn.get_bucket('mybucket') # Substitute in your bucket name
>>> mybucket.list()
- <listing of keys in the bucket)
+ ...listing of keys in the bucket...
By default, this method tries to validate the bucket's existence. You can
override this behavior by passing ``validate=False``.::
diff --git a/docs/source/sqs_tut.rst b/docs/source/sqs_tut.rst
index f4ea0254..cd10041a 100644
--- a/docs/source/sqs_tut.rst
+++ b/docs/source/sqs_tut.rst
@@ -16,7 +16,7 @@ The recommended method of doing this is as follows::
>>> import boto.sqs
>>> conn = boto.sqs.connect_to_region(
... "us-west-2",
- ... aws_access_key_id='<aws access key'>,
+ ... aws_access_key_id='<aws access key>',
... aws_secret_access_key='<aws secret key>')
At this point the variable conn will point to an SQSConnection object in the
diff --git a/setup.py b/setup.py
index 4ae4f595..7e39e93a 100644
--- a/setup.py
+++ b/setup.py
@@ -75,7 +75,7 @@ setup(name = "boto",
"boto.beanstalk", "boto.datapipeline", "boto.elasticache",
"boto.elastictranscoder", "boto.opsworks", "boto.redshift",
"boto.dynamodb2", "boto.support", "boto.cloudtrail",
- "boto.directconnect", "boto.kinesis"],
+ "boto.directconnect", "boto.kinesis", "boto.rds2"],
package_data = {
"boto.cacerts": ["cacerts.txt"],
"boto": ["endpoints.json"],
diff --git a/tests/integration/cloudformation/test_connection.py b/tests/integration/cloudformation/test_connection.py
index 9152aa12..6529cc37 100644
--- a/tests/integration/cloudformation/test_connection.py
+++ b/tests/integration/cloudformation/test_connection.py
@@ -105,6 +105,18 @@ class TestCloudformationConnection(unittest.TestCase):
template_body=json.dumps(BASIC_EC2_TEMPLATE))
self.addCleanup(self.connection.delete_stack, self.stack_name)
+ # A newly created stack should have events
+ events = self.connection.describe_stack_events(self.stack_name)
+ self.assertTrue(events)
+
+ # No policy should be set on the stack by default
+ policy = self.connection.get_stack_policy(self.stack_name)
+ self.assertEqual(None, policy)
+
+ # Our new stack should show up in the stack list
+ stacks = self.connection.describe_stacks()
+ self.assertEqual(self.stack_name, stacks[0].stack_name)
+
if __name__ == '__main__':
unittest.main()
diff --git a/tests/integration/cloudtrail/test_cert_verification.py b/tests/integration/cloudtrail/test_cert_verification.py
new file mode 100644
index 00000000..321efea6
--- /dev/null
+++ b/tests/integration/cloudtrail/test_cert_verification.py
@@ -0,0 +1,38 @@
+# Copyright (c) 2014 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Check that all of the certs on all service endpoints validate.
+"""
+import unittest
+
+from tests.integration import ServiceCertVerificationTest
+
+import boto.cloudtrail
+
+
+class CloudTrailCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
+ cloudtrail = True
+ regions = boto.cloudtrail.regions()
+
+ def sample_service_call(self, conn):
+ conn.describe_trails()
diff --git a/tests/integration/datapipeline/__init__.py b/tests/integration/datapipeline/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/integration/datapipeline/__init__.py
diff --git a/tests/integration/datapipeline/test_cert_verification.py b/tests/integration/datapipeline/test_cert_verification.py
new file mode 100644
index 00000000..e30a433f
--- /dev/null
+++ b/tests/integration/datapipeline/test_cert_verification.py
@@ -0,0 +1,38 @@
+# Copyright (c) 2014 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Check that all of the certs on all service endpoints validate.
+"""
+import unittest
+
+from tests.integration import ServiceCertVerificationTest
+
+import boto.datapipeline
+
+
+class DatapipelineCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
+ datapipeline = True
+ regions = boto.datapipeline.regions()
+
+ def sample_service_call(self, conn):
+ conn.list_pipelines()
diff --git a/tests/integration/dynamodb2/test_highlevel.py b/tests/integration/dynamodb2/test_highlevel.py
index 40ddd573..1e3335b7 100644
--- a/tests/integration/dynamodb2/test_highlevel.py
+++ b/tests/integration/dynamodb2/test_highlevel.py
@@ -31,7 +31,7 @@ import time
from tests.unit import unittest
from boto.dynamodb2 import exceptions
from boto.dynamodb2.fields import (HashKey, RangeKey, KeysOnlyIndex,
- GlobalKeysOnlyIndex)
+ GlobalKeysOnlyIndex, GlobalIncludeIndex)
from boto.dynamodb2.items import Item
from boto.dynamodb2.table import Table
from boto.dynamodb2.types import NUMBER
@@ -109,6 +109,14 @@ class DynamoDBv2Test(unittest.TestCase):
time.sleep(5)
+        # Does it exist? It should!
+ self.assertTrue(users.has_item(username='jane', friend_count=3))
+ # But this shouldn't be there...
+ self.assertFalse(users.has_item(
+ username='mrcarmichaeljones',
+ friend_count=72948
+ ))
+
# Test getting an item & updating it.
# This is the "safe" variant (only write if there have been no
# changes).
@@ -223,6 +231,16 @@ class DynamoDBv2Test(unittest.TestCase):
self.assertTrue(res['username'] in ['johndoe',])
self.assertEqual(res.keys(), ['username'])
+ # Ensure that queries with attributes don't return the hash key.
+ results = users.query(
+ username__eq='johndoe',
+ friend_count__eq=4,
+ attributes=('first_name',)
+ )
+
+ for res in results:
+ self.assertTrue(res['first_name'] in ['John',])
+ self.assertEqual(res.keys(), ['first_name'])
# Test the strongly consistent query.
c_results = users.query(
@@ -395,7 +413,64 @@ class DynamoDBv2Test(unittest.TestCase):
)
# Wait again for the changes to finish propagating.
- time.sleep(120)
+ time.sleep(150)
+
+ def test_gsi_with_just_hash_key(self):
+ # GSI allows for querying off of different keys. This is behavior we
+ # previously disallowed (due to standard & LSI queries).
+ # See https://forums.aws.amazon.com/thread.jspa?threadID=146212&tstart=0
+ users = Table.create('gsi_query_users', schema=[
+ HashKey('user_id')
+ ], throughput={
+ 'read': 5,
+ 'write': 3,
+ },
+ global_indexes=[
+ GlobalIncludeIndex('UsernameIndex', parts=[
+ HashKey('username'),
+ ], includes=['user_id', 'username'], throughput={
+ 'read': 3,
+ 'write': 1,
+ })
+ ])
+ self.addCleanup(users.delete)
+
+ # Wait for it.
+ time.sleep(60)
+
+ users.put_item(data={
+ 'user_id': '7',
+ 'username': 'johndoe',
+ 'first_name': 'John',
+ 'last_name': 'Doe',
+ })
+ users.put_item(data={
+ 'user_id': '24',
+ 'username': 'alice',
+ 'first_name': 'Alice',
+ 'last_name': 'Expert',
+ })
+ users.put_item(data={
+ 'user_id': '35',
+ 'username': 'jane',
+ 'first_name': 'Jane',
+ 'last_name': 'Doe',
+ })
+
+ # Try the main key. Should be fine.
+ rs = users.query(
+ user_id__eq='24'
+ )
+ results = sorted([user['username'] for user in rs])
+ self.assertEqual(results, ['alice'])
+
+ # Now try the GSI. Also should work.
+ rs = users.query(
+ username__eq='johndoe',
+ index='UsernameIndex'
+ )
+ results = sorted([user['username'] for user in rs])
+ self.assertEqual(results, ['johndoe'])
def test_query_with_limits(self):
# Per the DDB team, it's recommended to do many smaller gets with a
diff --git a/tests/integration/gs/test_basic.py b/tests/integration/gs/test_basic.py
index ffc890ff..f2fc9ec0 100644
--- a/tests/integration/gs/test_basic.py
+++ b/tests/integration/gs/test_basic.py
@@ -31,6 +31,7 @@ Some integration tests for the GSConnection
import os
import re
import StringIO
+import urllib
import xml.sax
from boto import handler
@@ -99,6 +100,11 @@ class GSBasicTest(GSTestCase):
# check to make sure content read from gcs is identical to original
self.assertEqual(s1, fp.read())
fp.close()
+ # Use generate_url to get the contents
+ url = self._conn.generate_url(900, 'GET', bucket=bucket.name, key=key_name)
+ f = urllib.urlopen(url)
+ self.assertEqual(s1, f.read())
+ f.close()
# check to make sure set_contents_from_file is working
sfp = StringIO.StringIO('foo')
k.set_contents_from_file(sfp)
diff --git a/tests/integration/kinesis/test_cert_verification.py b/tests/integration/kinesis/test_cert_verification.py
new file mode 100644
index 00000000..522778f8
--- /dev/null
+++ b/tests/integration/kinesis/test_cert_verification.py
@@ -0,0 +1,38 @@
+# Copyright (c) 2014 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Check that all of the certs on all service endpoints validate.
+"""
+import unittest
+
+from tests.integration import ServiceCertVerificationTest
+
+import boto.kinesis
+
+
+class KinesisCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
+ kinesis = True
+ regions = boto.kinesis.regions()
+
+ def sample_service_call(self, conn):
+ conn.list_streams()
diff --git a/tests/integration/rds2/__init__.py b/tests/integration/rds2/__init__.py
new file mode 100644
index 00000000..b7fe4c22
--- /dev/null
+++ b/tests/integration/rds2/__init__.py
@@ -0,0 +1,21 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
diff --git a/tests/integration/rds2/test_cert_verification.py b/tests/integration/rds2/test_cert_verification.py
new file mode 100644
index 00000000..5ad56356
--- /dev/null
+++ b/tests/integration/rds2/test_cert_verification.py
@@ -0,0 +1,39 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Check that all of the certs on all service endpoints validate.
+"""
+import unittest
+
+from tests.integration import ServiceCertVerificationTest
+
+import boto.rds2
+
+
+class RDSCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
+ rds = True
+ regions = boto.rds2.regions()
+
+ def sample_service_call(self, conn):
+ conn.describe_db_instances()
diff --git a/tests/integration/rds2/test_connection.py b/tests/integration/rds2/test_connection.py
new file mode 100644
index 00000000..3d54a9e5
--- /dev/null
+++ b/tests/integration/rds2/test_connection.py
@@ -0,0 +1,87 @@
+# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import unittest
+import time
+from boto.rds2.layer1 import RDSConnection
+
+
+class TestRDS2Connection(unittest.TestCase):
+ rds = True
+
+ def setUp(self):
+ self.conn = RDSConnection()
+ self.db_name = "test-db-%s" % str(int(time.time()))
+
+ def test_integration(self):
+ resp = self.conn.create_db_instance(
+ db_instance_identifier=self.db_name,
+ allocated_storage=5,
+ db_instance_class='db.t1.micro',
+ engine='postgres',
+ master_username='bototestuser',
+ master_user_password='testtestt3st',
+ # Try to limit the impact & test options.
+ multi_az=False,
+ backup_retention_period=0
+ )
+ self.addCleanup(
+ self.conn.delete_db_instance,
+ self.db_name,
+ skip_final_snapshot=True
+ )
+
+ # Wait for 6 minutes for it to come up.
+ time.sleep(60 * 6)
+
+ instances = self.conn.describe_db_instances(self.db_name)
+ inst = instances['DescribeDBInstancesResponse']\
+ ['DescribeDBInstancesResult']['DBInstances'][0]
+ self.assertEqual(inst['DBInstanceStatus'], 'available')
+ self.assertEqual(inst['Engine'], 'postgres')
+ self.assertEqual(inst['AllocatedStorage'], 5)
+
+ # Try renaming it.
+ resp = self.conn.modify_db_instance(
+ self.db_name,
+ allocated_storage=10,
+ apply_immediately=True
+ )
+
+ # Give it a chance to start modifying...
+ time.sleep(60)
+
+ instances = self.conn.describe_db_instances(self.db_name)
+ inst = instances['DescribeDBInstancesResponse']\
+ ['DescribeDBInstancesResult']['DBInstances'][0]
+ self.assertEqual(inst['DBInstanceStatus'], 'modifying')
+ self.assertEqual(inst['Engine'], 'postgres')
+
+ # ...then finish the remainder of 10 minutes for the change.
+ time.sleep(60 * 9)
+
+ instances = self.conn.describe_db_instances(self.db_name)
+ inst = instances['DescribeDBInstancesResponse']\
+ ['DescribeDBInstancesResult']['DBInstances'][0]
+ self.assertEqual(inst['DBInstanceStatus'], 'available')
+ self.assertEqual(inst['Engine'], 'postgres')
+ self.assertEqual(inst['AllocatedStorage'], 10)
diff --git a/tests/integration/route53/__init__.py b/tests/integration/route53/__init__.py
index fc0f80de..3953ad05 100644
--- a/tests/integration/route53/__init__.py
+++ b/tests/integration/route53/__init__.py
@@ -1,4 +1,5 @@
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2014 Tellybug, Matt Millar
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
@@ -18,3 +19,20 @@
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
+
+import time
+import unittest
+from nose.plugins.attrib import attr
+from boto.route53.connection import Route53Connection
+
+@attr(route53=True)
+class Route53TestCase(unittest.TestCase):
+ def setUp(self):
+ super(Route53TestCase, self).setUp()
+ self.conn = Route53Connection()
+ self.base_domain = 'boto-test-%s.com' % str(int(time.time()))
+ self.zone = self.conn.create_zone(self.base_domain)
+
+ def tearDown(self):
+ self.zone.delete()
+ super(Route53TestCase, self).tearDown()
diff --git a/tests/integration/route53/test_alias_resourcerecordsets.py b/tests/integration/route53/test_alias_resourcerecordsets.py
new file mode 100644
index 00000000..b5176715
--- /dev/null
+++ b/tests/integration/route53/test_alias_resourcerecordsets.py
@@ -0,0 +1,83 @@
+# Copyright (c) 2014 Netflix, Inc. Stefan Praszalowicz
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import time
+import unittest
+from boto.route53.connection import Route53Connection
+from boto.route53.record import ResourceRecordSets
+from boto.route53.exception import DNSServerError
+
+
+class TestRoute53AliasResourceRecordSets(unittest.TestCase):
+ route53 = True
+
+ def setUp(self):
+ super(TestRoute53AliasResourceRecordSets, self).setUp()
+ self.conn = Route53Connection()
+ self.base_domain = 'boto-test-%s.com' % str(int(time.time()))
+ self.zone = self.conn.create_zone(self.base_domain)
+ # a standard record to use as the target for our alias
+ self.zone.add_a('target.%s' % self.base_domain, '102.11.23.1')
+
+ def tearDown(self):
+ self.zone.delete_a('target.%s' % self.base_domain)
+ self.zone.delete()
+ super(TestRoute53AliasResourceRecordSets, self).tearDown()
+
+ def test_incomplete_add_alias_failure(self):
+ base_record = dict(name="alias.%s." % self.base_domain,
+ type="A",
+ alias_dns_name="target.%s" % self.base_domain,
+ alias_hosted_zone_id=self.zone.id,
+ identifier="boto:TestRoute53AliasResourceRecordSets")
+
+ rrs = ResourceRecordSets(self.conn, self.zone.id)
+ rrs.add_change(action="UPSERT", **base_record)
+
+ try:
+ self.assertRaises(DNSServerError, rrs.commit)
+ except:
+ # if the call somehow goes through, delete our unexpected new record before failing test
+ rrs = ResourceRecordSets(self.conn, self.zone.id)
+ rrs.add_change(action="DELETE", **base_record)
+ rrs.commit()
+ raise
+
+ def test_add_alias(self):
+ base_record = dict(name="alias.%s." % self.base_domain,
+ type="A",
+ alias_evaluate_target_health=False,
+ alias_dns_name="target.%s" % self.base_domain,
+ alias_hosted_zone_id=self.zone.id,
+ identifier="boto:TestRoute53AliasResourceRecordSets")
+
+ rrs = ResourceRecordSets(self.conn, self.zone.id)
+ rrs.add_change(action="UPSERT", **base_record)
+ rrs.commit()
+
+ rrs = ResourceRecordSets(self.conn, self.zone.id)
+ rrs.add_change(action="DELETE", **base_record)
+ rrs.commit()
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tests/integration/route53/test_cert_verification.py b/tests/integration/route53/test_cert_verification.py
index bc36a186..2c209f57 100644
--- a/tests/integration/route53/test_cert_verification.py
+++ b/tests/integration/route53/test_cert_verification.py
@@ -25,12 +25,14 @@
Check that all of the certs on all service endpoints validate.
"""
import unittest
+from nose.plugins.attrib import attr
from tests.integration import ServiceCertVerificationTest
import boto.route53
+@attr(route53=True)
class Route53CertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
route53 = True
regions = boto.route53.regions()
diff --git a/tests/integration/route53/test_health_check.py b/tests/integration/route53/test_health_check.py
new file mode 100644
index 00000000..e662da3b
--- /dev/null
+++ b/tests/integration/route53/test_health_check.py
@@ -0,0 +1,143 @@
+# Copyright (c) 2014 Tellybug, Matt Millar
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from tests.integration.route53 import Route53TestCase
+
+from boto.route53.healthcheck import HealthCheck
+from boto.route53.record import ResourceRecordSets
+
+class TestRoute53HealthCheck(Route53TestCase):
+ def test_create_health_check(self):
+ hc = HealthCheck(ip_addr="54.217.7.118", port=80, hc_type="HTTP", resource_path="/testing")
+ result = self.conn.create_health_check(hc)
+ self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'Type'], 'HTTP')
+ self.assertEquals(result[u'CreateHealthCheckResponse'][
+ u'HealthCheck'][u'HealthCheckConfig'][u'IPAddress'], '54.217.7.118')
+ self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'Port'], '80')
+ self.assertEquals(result[u'CreateHealthCheckResponse'][
+ u'HealthCheck'][u'HealthCheckConfig'][u'ResourcePath'], '/testing')
+ self.conn.delete_health_check(result['CreateHealthCheckResponse']['HealthCheck']['Id'])
+
+ def test_create_https_health_check(self):
+ hc = HealthCheck(ip_addr="54.217.7.118", port=80, hc_type="HTTPS", resource_path="/testing")
+ result = self.conn.create_health_check(hc)
+ self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'Type'], 'HTTPS')
+ self.assertEquals(result[u'CreateHealthCheckResponse'][
+ u'HealthCheck'][u'HealthCheckConfig'][u'IPAddress'], '54.217.7.118')
+ self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'Port'], '80')
+ self.assertEquals(result[u'CreateHealthCheckResponse'][
+ u'HealthCheck'][u'HealthCheckConfig'][u'ResourcePath'], '/testing')
+ self.conn.delete_health_check(result['CreateHealthCheckResponse']['HealthCheck']['Id'])
+
+
+ def test_create_and_list_health_check(self):
+ hc = HealthCheck(ip_addr="54.217.7.118", port=80, hc_type="HTTP", resource_path="/testing")
+ result1 = self.conn.create_health_check(hc)
+ hc = HealthCheck(ip_addr="54.217.7.119", port=80, hc_type="HTTP", resource_path="/testing")
+ result2 = self.conn.create_health_check(hc)
+ result = self.conn.get_list_health_checks()
+ self.assertTrue(len(result['ListHealthChecksResponse']['HealthChecks']) > 1)
+ self.conn.delete_health_check(result1['CreateHealthCheckResponse']['HealthCheck']['Id'])
+ self.conn.delete_health_check(result2['CreateHealthCheckResponse']['HealthCheck']['Id'])
+
+ def test_delete_health_check(self):
+ hc = HealthCheck(ip_addr="54.217.7.118", port=80, hc_type="HTTP", resource_path="/testing")
+ result = self.conn.create_health_check(hc)
+ hc_id = result['CreateHealthCheckResponse']['HealthCheck']['Id']
+ result = self.conn.get_list_health_checks()
+ found = False
+ for hc in result['ListHealthChecksResponse']['HealthChecks']:
+ if hc['Id'] == hc_id:
+ found = True
+ break
+ self.assertTrue(found)
+ result = self.conn.delete_health_check(hc_id)
+ result = self.conn.get_list_health_checks()
+ for hc in result['ListHealthChecksResponse']['HealthChecks']:
+ self.assertFalse(hc['Id'] == hc_id)
+
+ def test_create_health_check_string_match(self):
+ hc = HealthCheck(ip_addr="54.217.7.118", port=80, hc_type="HTTP_STR_MATCH", resource_path="/testing", string_match="test")
+ result = self.conn.create_health_check(hc)
+ self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'Type'], 'HTTP_STR_MATCH')
+ self.assertEquals(result[u'CreateHealthCheckResponse'][
+ u'HealthCheck'][u'HealthCheckConfig'][u'IPAddress'], '54.217.7.118')
+ self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'Port'], '80')
+ self.assertEquals(result[u'CreateHealthCheckResponse'][
+ u'HealthCheck'][u'HealthCheckConfig'][u'ResourcePath'], '/testing')
+ self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'SearchString'], 'test')
+ self.conn.delete_health_check(result['CreateHealthCheckResponse']['HealthCheck']['Id'])
+
+ def test_create_health_check_https_string_match(self):
+ hc = HealthCheck(ip_addr="54.217.7.118", port=80, hc_type="HTTPS_STR_MATCH", resource_path="/testing", string_match="test")
+ result = self.conn.create_health_check(hc)
+ self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'Type'], 'HTTPS_STR_MATCH')
+ self.assertEquals(result[u'CreateHealthCheckResponse'][
+ u'HealthCheck'][u'HealthCheckConfig'][u'IPAddress'], '54.217.7.118')
+ self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'Port'], '80')
+ self.assertEquals(result[u'CreateHealthCheckResponse'][
+ u'HealthCheck'][u'HealthCheckConfig'][u'ResourcePath'], '/testing')
+ self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'SearchString'], 'test')
+ self.conn.delete_health_check(result['CreateHealthCheckResponse']['HealthCheck']['Id'])
+
+ def test_create_resource_record_set(self):
+ hc = HealthCheck(ip_addr="54.217.7.118", port=80, hc_type="HTTP", resource_path="/testing")
+ result = self.conn.create_health_check(hc)
+ records = ResourceRecordSets(
+ connection=self.conn, hosted_zone_id=self.zone.id, comment='Create DNS entry for test')
+ change = records.add_change('CREATE', 'unittest.%s.' % self.base_domain, 'A', ttl=30, identifier='test',
+ weight=1, health_check=result['CreateHealthCheckResponse']['HealthCheck']['Id'])
+ change.add_value("54.217.7.118")
+ records.commit()
+
+ records = ResourceRecordSets(self.conn, self.zone.id)
+ deleted = records.add_change('DELETE', "unittest.%s." % self.base_domain, "A", ttl=30, identifier='test',
+ weight=1, health_check=result['CreateHealthCheckResponse']['HealthCheck']['Id'])
+ deleted.add_value('54.217.7.118')
+ records.commit()
+
+ def test_create_health_check_invalid_request_interval(self):
+ """Test that health checks cannot be created with an invalid
+ 'request_interval'.
+
+ """
+ with self.assertRaises(AttributeError):
+ HealthCheck(**self.health_check_params(request_interval=5))
+
+ def test_create_health_check_request_interval(self):
+ hc_params = self.health_check_params(request_interval=10)
+ hc = HealthCheck(**hc_params)
+ result = self.conn.create_health_check(hc)
+ hc_config = (result[u'CreateHealthCheckResponse']
+ [u'HealthCheck'][u'HealthCheckConfig'])
+ self.assertEquals(hc_config[u'RequestInterval'],
+ unicode(hc_params['request_interval']))
+
+ def health_check_params(self, **kwargs):
+ params = {
+ 'ip_addr': "54.217.7.118",
+ 'port': 80,
+ 'hc_type': 'HTTP',
+ 'resource_path': '/testing',
+ }
+ params.update(kwargs)
+ return params
diff --git a/tests/integration/route53/test_resourcerecordsets.py b/tests/integration/route53/test_resourcerecordsets.py
index 9c8f3b22..dcd6d1aa 100644
--- a/tests/integration/route53/test_resourcerecordsets.py
+++ b/tests/integration/route53/test_resourcerecordsets.py
@@ -20,30 +20,23 @@
# IN THE SOFTWARE.
#
+import time
import unittest
+from tests.integration.route53 import Route53TestCase
+
from boto.route53.connection import Route53Connection
from boto.route53.record import ResourceRecordSets
-
-class TestRoute53ResourceRecordSets(unittest.TestCase):
- def setUp(self):
- super(TestRoute53ResourceRecordSets, self).setUp()
- self.conn = Route53Connection()
- self.zone = self.conn.create_zone('example.com')
-
- def tearDown(self):
- self.zone.delete()
- super(TestRoute53ResourceRecordSets, self).tearDown()
-
+class TestRoute53ResourceRecordSets(Route53TestCase):
def test_add_change(self):
rrs = ResourceRecordSets(self.conn, self.zone.id)
- created = rrs.add_change("CREATE", "vpn.example.com.", "A")
+ created = rrs.add_change("CREATE", "vpn.%s." % self.base_domain, "A")
created.add_value('192.168.0.25')
rrs.commit()
rrs = ResourceRecordSets(self.conn, self.zone.id)
- deleted = rrs.add_change('DELETE', "vpn.example.com.", "A")
+ deleted = rrs.add_change('DELETE', "vpn.%s." % self.base_domain, "A")
deleted.add_value('192.168.0.25')
rrs.commit()
@@ -52,7 +45,7 @@ class TestRoute53ResourceRecordSets(unittest.TestCase):
hosts = 101
for hostid in range(hosts):
- rec = "test" + str(hostid) + ".example.com"
+ rec = "test" + str(hostid) + ".%s" % self.base_domain
created = rrs.add_change("CREATE", rec, "A")
ip = '192.168.0.' + str(hostid)
created.add_value(ip)
@@ -79,7 +72,7 @@ class TestRoute53ResourceRecordSets(unittest.TestCase):
# Cleanup individual records
rrs = ResourceRecordSets(self.conn, self.zone.id)
for hostid in range(hosts):
- rec = "test" + str(hostid) + ".example.com"
+ rec = "test" + str(hostid) + ".%s" % self.base_domain
deleted = rrs.add_change("DELETE", rec, "A")
ip = '192.168.0.' + str(hostid)
deleted.add_value(ip)
diff --git a/tests/integration/route53/test_zone.py b/tests/integration/route53/test_zone.py
index 1c4d6bef..449c93f2 100644
--- a/tests/integration/route53/test_zone.py
+++ b/tests/integration/route53/test_zone.py
@@ -22,63 +22,89 @@
# IN THE SOFTWARE.
#
+import time
import unittest
+from nose.plugins.attrib import attr
from boto.route53.connection import Route53Connection
from boto.exception import TooManyRecordsException
+@attr(route53=True)
class TestRoute53Zone(unittest.TestCase):
@classmethod
def setUpClass(self):
route53 = Route53Connection()
- zone = route53.get_zone('example.com')
+ self.base_domain = 'boto-test-%s.com' % str(int(time.time()))
+ zone = route53.get_zone(self.base_domain)
if zone is not None:
zone.delete()
- self.zone = route53.create_zone('example.com')
+ self.zone = route53.create_zone(self.base_domain)
def test_nameservers(self):
self.zone.get_nameservers()
def test_a(self):
- self.zone.add_a('example.com', '102.11.23.1', 80)
- record = self.zone.get_a('example.com')
- self.assertEquals(record.name, u'example.com.')
+ self.zone.add_a(self.base_domain, '102.11.23.1', 80)
+ record = self.zone.get_a(self.base_domain)
+ self.assertEquals(record.name, u'%s.' % self.base_domain)
self.assertEquals(record.resource_records, [u'102.11.23.1'])
self.assertEquals(record.ttl, u'80')
- self.zone.update_a('example.com', '186.143.32.2', '800')
- record = self.zone.get_a('example.com')
- self.assertEquals(record.name, u'example.com.')
+ self.zone.update_a(self.base_domain, '186.143.32.2', '800')
+ record = self.zone.get_a(self.base_domain)
+ self.assertEquals(record.name, u'%s.' % self.base_domain)
self.assertEquals(record.resource_records, [u'186.143.32.2'])
self.assertEquals(record.ttl, u'800')
def test_cname(self):
- self.zone.add_cname('www.example.com', 'webserver.example.com', 200)
- record = self.zone.get_cname('www.example.com')
- self.assertEquals(record.name, u'www.example.com.')
- self.assertEquals(record.resource_records, [u'webserver.example.com.'])
+ self.zone.add_cname(
+ 'www.%s' % self.base_domain,
+ 'webserver.%s' % self.base_domain,
+ 200
+ )
+ record = self.zone.get_cname('www.%s' % self.base_domain)
+ self.assertEquals(record.name, u'www.%s.' % self.base_domain)
+ self.assertEquals(record.resource_records, [
+ u'webserver.%s.' % self.base_domain
+ ])
self.assertEquals(record.ttl, u'200')
- self.zone.update_cname('www.example.com', 'web.example.com', 45)
- record = self.zone.get_cname('www.example.com')
- self.assertEquals(record.name, u'www.example.com.')
- self.assertEquals(record.resource_records, [u'web.example.com.'])
+ self.zone.update_cname(
+ 'www.%s' % self.base_domain,
+ 'web.%s' % self.base_domain,
+ 45
+ )
+ record = self.zone.get_cname('www.%s' % self.base_domain)
+ self.assertEquals(record.name, u'www.%s.' % self.base_domain)
+ self.assertEquals(record.resource_records, [
+ u'web.%s.' % self.base_domain
+ ])
self.assertEquals(record.ttl, u'45')
def test_mx(self):
- self.zone.add_mx('example.com',
- ['10 mx1.example.com', '20 mx2.example.com'],
- 1000)
- record = self.zone.get_mx('example.com')
+ self.zone.add_mx(
+ self.base_domain,
+ [
+ '10 mx1.%s' % self.base_domain,
+ '20 mx2.%s' % self.base_domain,
+ ],
+ 1000
+ )
+ record = self.zone.get_mx(self.base_domain)
self.assertEquals(set(record.resource_records),
- set([u'10 mx1.example.com.',
- u'20 mx2.example.com.']))
+ set([u'10 mx1.%s.' % self.base_domain,
+ u'20 mx2.%s.' % self.base_domain]))
self.assertEquals(record.ttl, u'1000')
- self.zone.update_mx('example.com',
- ['10 mail1.example.com', '20 mail2.example.com'],
- 50)
- record = self.zone.get_mx('example.com')
+ self.zone.update_mx(
+ self.base_domain,
+ [
+ '10 mail1.%s' % self.base_domain,
+ '20 mail2.%s' % self.base_domain,
+ ],
+ 50
+ )
+ record = self.zone.get_mx(self.base_domain)
self.assertEquals(set(record.resource_records),
- set([u'10 mail1.example.com.',
- '20 mail2.example.com.']))
+ set([u'10 mail1.%s.' % self.base_domain,
+ '20 mail2.%s.' % self.base_domain]))
self.assertEquals(record.ttl, u'50')
def test_get_records(self):
@@ -92,40 +118,48 @@ class TestRoute53Zone(unittest.TestCase):
route53.get_zones()
def test_identifiers_wrrs(self):
- self.zone.add_a('wrr.example.com', '1.2.3.4',
+ self.zone.add_a('wrr.%s' % self.base_domain, '1.2.3.4',
identifier=('foo', '20'))
- self.zone.add_a('wrr.example.com', '5.6.7.8',
+ self.zone.add_a('wrr.%s' % self.base_domain, '5.6.7.8',
identifier=('bar', '10'))
- wrrs = self.zone.find_records('wrr.example.com', 'A', all=True)
+ wrrs = self.zone.find_records(
+ 'wrr.%s' % self.base_domain,
+ 'A',
+ all=True
+ )
self.assertEquals(len(wrrs), 2)
- self.zone.delete_a('wrr.example.com', all=True)
+ self.zone.delete_a('wrr.%s' % self.base_domain, all=True)
def test_identifiers_lbrs(self):
- self.zone.add_a('lbr.example.com', '4.3.2.1',
+ self.zone.add_a('lbr.%s' % self.base_domain, '4.3.2.1',
identifier=('baz', 'us-east-1'))
- self.zone.add_a('lbr.example.com', '8.7.6.5',
+ self.zone.add_a('lbr.%s' % self.base_domain, '8.7.6.5',
identifier=('bam', 'us-west-1'))
- lbrs = self.zone.find_records('lbr.example.com', 'A', all=True)
+ lbrs = self.zone.find_records(
+ 'lbr.%s' % self.base_domain,
+ 'A',
+ all=True
+ )
self.assertEquals(len(lbrs), 2)
- self.zone.delete_a('lbr.example.com',
+ self.zone.delete_a('lbr.%s' % self.base_domain,
identifier=('bam', 'us-west-1'))
- self.zone.delete_a('lbr.example.com',
+ self.zone.delete_a('lbr.%s' % self.base_domain,
identifier=('baz', 'us-east-1'))
def test_toomany_exception(self):
- self.zone.add_a('exception.example.com', '4.3.2.1',
+ self.zone.add_a('exception.%s' % self.base_domain, '4.3.2.1',
identifier=('baz', 'us-east-1'))
- self.zone.add_a('exception.example.com', '8.7.6.5',
+ self.zone.add_a('exception.%s' % self.base_domain, '8.7.6.5',
identifier=('bam', 'us-west-1'))
with self.assertRaises(TooManyRecordsException):
- lbrs = self.zone.get_a('exception.example.com')
- self.zone.delete_a('exception.example.com', all=True)
+ lbrs = self.zone.get_a('exception.%s' % self.base_domain)
+ self.zone.delete_a('exception.%s' % self.base_domain, all=True)
@classmethod
def tearDownClass(self):
- self.zone.delete_a('example.com')
- self.zone.delete_cname('www.example.com')
- self.zone.delete_mx('example.com')
+ self.zone.delete_a(self.base_domain)
+ self.zone.delete_cname('www.%s' % self.base_domain)
+ self.zone.delete_mx(self.base_domain)
self.zone.delete()
if __name__ == '__main__':
diff --git a/tests/integration/sts/test_session_token.py b/tests/integration/sts/test_session_token.py
index d47071d9..99e613ff 100644
--- a/tests/integration/sts/test_session_token.py
+++ b/tests/integration/sts/test_session_token.py
@@ -33,7 +33,7 @@ from boto.sts.credentials import Credentials
from boto.s3.connection import S3Connection
-class SessionTokenTest (unittest.TestCase):
+class SessionTokenTest(unittest.TestCase):
sts = True
def test_session_token(self):
diff --git a/tests/unit/auth/test_sigv4.py b/tests/unit/auth/test_sigv4.py
index a52cf44a..a7fb0a30 100644
--- a/tests/unit/auth/test_sigv4.py
+++ b/tests/unit/auth/test_sigv4.py
@@ -76,6 +76,18 @@ class TestSigV4Handler(unittest.TestCase):
query_string = auth.canonical_query_string(request)
self.assertEqual(query_string, 'Foo.1=aaa&Foo.10=zzz')
+ def test_query_string(self):
+ auth = HmacAuthV4Handler('sns.us-east-1.amazonaws.com',
+ Mock(), self.provider)
+ params = {
+ 'Message': u'We \u2665 utf-8'.encode('utf-8'),
+ }
+ request = HTTPRequest(
+ 'POST', 'https', 'sns.us-east-1.amazonaws.com', 443,
+ '/', None, params, {}, '')
+ query_string = auth.query_string(request)
+ self.assertEqual(query_string, 'Message=We%20%E2%99%A5%20utf-8')
+
def test_canonical_uri(self):
auth = HmacAuthV4Handler('glacier.us-east-1.amazonaws.com',
Mock(), self.provider)
diff --git a/tests/unit/cloudformation/test_connection.py b/tests/unit/cloudformation/test_connection.py
index 766e6f1c..9ca5e3d3 100755..100644
--- a/tests/unit/cloudformation/test_connection.py
+++ b/tests/unit/cloudformation/test_connection.py
@@ -11,6 +11,7 @@ from mock import Mock
from tests.unit import AWSMockServiceTestCase
from boto.cloudformation.connection import CloudFormationConnection
+from boto.exception import BotoServerError
SAMPLE_TEMPLATE = r"""
@@ -108,12 +109,24 @@ class TestCloudFormationCreateStack(CloudFormationConnectionBase):
def test_create_stack_fails(self):
self.set_http_response(status_code=400, reason='Bad Request',
- body='Invalid arg.')
- with self.assertRaises(self.service_connection.ResponseError):
+ body='{"Error": {"Code": 1, "Message": "Invalid arg."}}')
+ with self.assertRaisesRegexp(self.service_connection.ResponseError,
+ 'Invalid arg.'):
api_response = self.service_connection.create_stack(
'stack_name', template_body=SAMPLE_TEMPLATE,
parameters=[('KeyName', 'myKeyName')])
+ def test_create_stack_fail_error(self):
+ self.set_http_response(status_code=400, reason='Bad Request',
+ body='{"RequestId": "abc", "Error": {"Code": 1, "Message": "Invalid arg."}}')
+ try:
+ api_response = self.service_connection.create_stack(
+ 'stack_name', template_body=SAMPLE_TEMPLATE,
+ parameters=[('KeyName', 'myKeyName')])
+ except BotoServerError, e:
+ self.assertEqual('abc', e.request_id)
+ self.assertEqual(1, e.error_code)
+ self.assertEqual('Invalid arg.', e.message)
class TestCloudFormationUpdateStack(CloudFormationConnectionBase):
def default_body(self):
@@ -569,6 +582,10 @@ class TestCloudFormationValidateTemplate(CloudFormationConnectionBase):
<Description>EC2 KeyPair</Description>
</member>
</Parameters>
+ <CapabilitiesReason>Reason</CapabilitiesReason>
+ <Capabilities>
+ <member>CAPABILITY_IAM</member>
+ </Capabilities>
</ValidateTemplateResult>
<ResponseMetadata>
<RequestId>0be7b6e8-e4a0-11e0-a5bd-9f8d5a7dbc91</RequestId>
@@ -593,6 +610,11 @@ class TestCloudFormationValidateTemplate(CloudFormationConnectionBase):
self.assertEqual(param2.no_echo, True)
self.assertEqual(param2.parameter_key, 'KeyName')
+ self.assertEqual(template.capabilities_reason, 'Reason')
+
+ self.assertEqual(len(template.capabilities), 1)
+ self.assertEqual(template.capabilities[0].value, 'CAPABILITY_IAM')
+
self.assert_request_parameters({
'Action': 'ValidateTemplate',
'TemplateBody': SAMPLE_TEMPLATE,
@@ -616,5 +638,81 @@ class TestCloudFormationCancelUpdateStack(CloudFormationConnectionBase):
})
+class TestCloudFormationEstimateTemplateCost(CloudFormationConnectionBase):
+ def default_body(self):
+ return """
+ {
+ "EstimateTemplateCostResponse": {
+ "EstimateTemplateCostResult": {
+ "Url": "http://calculator.s3.amazonaws.com/calc5.html?key=cf-2e351785-e821-450c-9d58-625e1e1ebfb6"
+ }
+ }
+ }
+ """
+
+ def test_estimate_template_cost(self):
+ self.set_http_response(status_code=200)
+ api_response = self.service_connection.estimate_template_cost(
+ template_body='{}')
+ self.assertEqual(api_response,
+ 'http://calculator.s3.amazonaws.com/calc5.html?key=cf-2e351785-e821-450c-9d58-625e1e1ebfb6')
+ self.assert_request_parameters({
+ 'Action': 'EstimateTemplateCost',
+ 'ContentType': 'JSON',
+ 'TemplateBody': '{}',
+ 'Version': '2010-05-15',
+ })
+
+
+class TestCloudFormationGetStackPolicy(CloudFormationConnectionBase):
+ def default_body(self):
+ return """
+ {
+ "GetStackPolicyResponse": {
+ "GetStackPolicyResult": {
+ "StackPolicyBody": "{...}"
+ }
+ }
+ }
+ """
+
+ def test_get_stack_policy(self):
+ self.set_http_response(status_code=200)
+ api_response = self.service_connection.get_stack_policy('stack-id')
+ self.assertEqual(api_response, '{...}')
+ self.assert_request_parameters({
+ 'Action': 'GetStackPolicy',
+ 'ContentType': 'JSON',
+ 'StackName': 'stack-id',
+ 'Version': '2010-05-15',
+ })
+
+
+class TestCloudFormationSetStackPolicy(CloudFormationConnectionBase):
+ def default_body(self):
+ return """
+ {
+ "SetStackPolicyResponse": {
+ "SetStackPolicyResult": {
+ "Some": "content"
+ }
+ }
+ }
+ """
+
+ def test_set_stack_policy(self):
+ self.set_http_response(status_code=200)
+ api_response = self.service_connection.set_stack_policy('stack-id',
+ stack_policy_body='{}')
+ self.assertEqual(api_response['Some'], 'content')
+ self.assert_request_parameters({
+ 'Action': 'SetStackPolicy',
+ 'ContentType': 'JSON',
+ 'StackName': 'stack-id',
+ 'StackPolicyBody': '{}',
+ 'Version': '2010-05-15',
+ })
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/tests/unit/cloudformation/test_stack.py b/tests/unit/cloudformation/test_stack.py
index c3bc9438..415d5092 100755..100644
--- a/tests/unit/cloudformation/test_stack.py
+++ b/tests/unit/cloudformation/test_stack.py
@@ -201,7 +201,7 @@ class TestStackParse(unittest.TestCase):
datetime.datetime(2011, 3, 10, 16, 20, 51, 575757)
)
- def test_list_stacks_time_with_millis(self):
+ def test_list_stacks_time_with_millis_again(self):
rs = boto.resultset.ResultSet([
('member', boto.cloudformation.stack.StackResourceSummary)
])
diff --git a/tests/unit/cloudfront/test_connection.py b/tests/unit/cloudfront/test_connection.py
new file mode 100644
index 00000000..1c75e444
--- /dev/null
+++ b/tests/unit/cloudfront/test_connection.py
@@ -0,0 +1,204 @@
+from tests.unit import unittest
+from tests.unit import AWSMockServiceTestCase
+
+from boto.cloudfront import CloudFrontConnection
+from boto.cloudfront.distribution import Distribution, DistributionConfig, DistributionSummary
+from boto.cloudfront.origin import CustomOrigin
+
+
+class TestCloudFrontConnection(AWSMockServiceTestCase):
+ connection_class = CloudFrontConnection
+
+ def setUp(self):
+ super(TestCloudFrontConnection, self).setUp()
+
+ def test_get_all_distributions(self):
+ body = """
+ <DistributionList xmlns="http://cloudfront.amazonaws.com/doc/2010-11-01/">
+ <Marker></Marker>
+ <MaxItems>100</MaxItems>
+ <IsTruncated>false</IsTruncated>
+ <DistributionSummary>
+ <Id>EEEEEEEEEEEEE</Id>
+ <Status>InProgress</Status>
+ <LastModifiedTime>2014-02-03T11:03:41.087Z</LastModifiedTime>
+ <DomainName>abcdef12345678.cloudfront.net</DomainName>
+ <CustomOrigin>
+ <DNSName>example.com</DNSName>
+ <HTTPPort>80</HTTPPort>
+ <HTTPSPort>443</HTTPSPort>
+ <OriginProtocolPolicy>http-only</OriginProtocolPolicy>
+ </CustomOrigin>
+ <CNAME>static.example.com</CNAME>
+ <Enabled>true</Enabled>
+ </DistributionSummary>
+ </DistributionList>
+ """
+ self.set_http_response(status_code=200, body=body)
+ response = self.service_connection.get_all_distributions()
+
+ self.assertTrue(isinstance(response, list))
+ self.assertEqual(len(response), 1)
+ self.assertTrue(isinstance(response[0], DistributionSummary))
+ self.assertEqual(response[0].id, "EEEEEEEEEEEEE")
+ self.assertEqual(response[0].domain_name, "abcdef12345678.cloudfront.net")
+ self.assertEqual(response[0].status, "InProgress")
+ self.assertEqual(response[0].cnames, ["static.example.com"])
+ self.assertEqual(response[0].enabled, True)
+ self.assertTrue(isinstance(response[0].origin, CustomOrigin))
+ self.assertEqual(response[0].origin.dns_name, "example.com")
+ self.assertEqual(response[0].origin.http_port, 80)
+ self.assertEqual(response[0].origin.https_port, 443)
+ self.assertEqual(response[0].origin.origin_protocol_policy, 'http-only')
+
+ def test_get_distribution_config(self):
+ body = """
+ <DistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2010-11-01/">
+ <CustomOrigin>
+ <DNSName>example.com</DNSName>
+ <HTTPPort>80</HTTPPort>
+ <HTTPSPort>443</HTTPSPort>
+ <OriginProtocolPolicy>http-only</OriginProtocolPolicy>
+ </CustomOrigin>
+ <CallerReference>1234567890123</CallerReference>
+ <CNAME>static.example.com</CNAME>
+ <Enabled>true</Enabled>
+ </DistributionConfig>
+ """
+
+ self.set_http_response(status_code=200, body=body, header={"Etag": "AABBCC"})
+ response = self.service_connection.get_distribution_config('EEEEEEEEEEEEE')
+
+ self.assertTrue(isinstance(response, DistributionConfig))
+ self.assertTrue(isinstance(response.origin, CustomOrigin))
+ self.assertEqual(response.origin.dns_name, "example.com")
+ self.assertEqual(response.origin.http_port, 80)
+ self.assertEqual(response.origin.https_port, 443)
+ self.assertEqual(response.origin.origin_protocol_policy, "http-only")
+ self.assertEqual(response.cnames, ["static.example.com"])
+ self.assertTrue(response.enabled)
+ self.assertEqual(response.etag, "AABBCC")
+
+ def test_set_distribution_config(self):
+ get_body = """
+ <DistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2010-11-01/">
+ <CustomOrigin>
+ <DNSName>example.com</DNSName>
+ <HTTPPort>80</HTTPPort>
+ <HTTPSPort>443</HTTPSPort>
+ <OriginProtocolPolicy>http-only</OriginProtocolPolicy>
+ </CustomOrigin>
+ <CallerReference>1234567890123</CallerReference>
+ <CNAME>static.example.com</CNAME>
+ <Enabled>true</Enabled>
+ </DistributionConfig>
+ """
+
+ put_body = """
+ <Distribution xmlns="http://cloudfront.amazonaws.com/doc/2010-11-01/">
+ <Id>EEEEEE</Id>
+ <Status>InProgress</Status>
+ <LastModifiedTime>2014-02-04T10:47:53.493Z</LastModifiedTime>
+ <InProgressInvalidationBatches>0</InProgressInvalidationBatches>
+ <DomainName>d2000000000000.cloudfront.net</DomainName>
+ <DistributionConfig>
+ <CustomOrigin>
+ <DNSName>example.com</DNSName>
+ <HTTPPort>80</HTTPPort>
+ <HTTPSPort>443</HTTPSPort>
+ <OriginProtocolPolicy>match-viewer</OriginProtocolPolicy>
+ </CustomOrigin>
+ <CallerReference>aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee</CallerReference>
+ <Comment>this is a comment</Comment>
+ <Enabled>false</Enabled>
+ </DistributionConfig>
+ </Distribution>
+ """
+
+ self.set_http_response(status_code=200, body=get_body, header={"Etag": "AA"})
+ conf = self.service_connection.get_distribution_config('EEEEEEE')
+
+ self.set_http_response(status_code=200, body=put_body, header={"Etag": "AABBCCD"})
+ conf.comment = 'this is a comment'
+ response = self.service_connection.set_distribution_config('EEEEEEE', conf.etag, conf)
+
+ self.assertEqual(response, "AABBCCD")
+
+ def test_get_distribution_info(self):
+ body = """
+ <Distribution xmlns="http://cloudfront.amazonaws.com/doc/2010-11-01/">
+ <Id>EEEEEEEEEEEEE</Id>
+ <Status>InProgress</Status>
+ <LastModifiedTime>2014-02-03T11:03:41.087Z</LastModifiedTime>
+ <InProgressInvalidationBatches>0</InProgressInvalidationBatches>
+ <DomainName>abcdef12345678.cloudfront.net</DomainName>
+ <DistributionConfig>
+ <CustomOrigin>
+ <DNSName>example.com</DNSName>
+ <HTTPPort>80</HTTPPort>
+ <HTTPSPort>443</HTTPSPort>
+ <OriginProtocolPolicy>http-only</OriginProtocolPolicy>
+ </CustomOrigin>
+ <CallerReference>1111111111111</CallerReference>
+ <CNAME>static.example.com</CNAME>
+ <Enabled>true</Enabled>
+ </DistributionConfig>
+ </Distribution>
+ """
+
+ self.set_http_response(status_code=200, body=body)
+ response = self.service_connection.get_distribution_info('EEEEEEEEEEEEE')
+
+ self.assertTrue(isinstance(response, Distribution))
+ self.assertTrue(isinstance(response.config, DistributionConfig))
+ self.assertTrue(isinstance(response.config.origin, CustomOrigin))
+ self.assertEqual(response.config.origin.dns_name, "example.com")
+ self.assertEqual(response.config.origin.http_port, 80)
+ self.assertEqual(response.config.origin.https_port, 443)
+ self.assertEqual(response.config.origin.origin_protocol_policy, "http-only")
+ self.assertEqual(response.config.cnames, ["static.example.com"])
+ self.assertTrue(response.config.enabled)
+ self.assertEqual(response.id, "EEEEEEEEEEEEE")
+ self.assertEqual(response.status, "InProgress")
+ self.assertEqual(response.domain_name, "abcdef12345678.cloudfront.net")
+ self.assertEqual(response.in_progress_invalidation_batches, 0)
+
+ def test_create_distribution(self):
+ body = """
+ <Distribution xmlns="http://cloudfront.amazonaws.com/doc/2010-11-01/">
+ <Id>EEEEEEEEEEEEEE</Id>
+ <Status>InProgress</Status>
+ <LastModifiedTime>2014-02-04T10:34:07.873Z</LastModifiedTime>
+ <InProgressInvalidationBatches>0</InProgressInvalidationBatches>
+ <DomainName>d2000000000000.cloudfront.net</DomainName>
+ <DistributionConfig>
+ <CustomOrigin>
+ <DNSName>example.com</DNSName>
+ <HTTPPort>80</HTTPPort>
+ <HTTPSPort>443</HTTPSPort>
+ <OriginProtocolPolicy>match-viewer</OriginProtocolPolicy>
+ </CustomOrigin>
+ <CallerReference>aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee</CallerReference>
+ <Comment>example.com distribution</Comment>
+ <Enabled>false</Enabled>
+ </DistributionConfig>
+ </Distribution>
+ """
+
+ self.set_http_response(status_code=201, body=body)
+ origin = CustomOrigin("example.com", origin_protocol_policy="match_viewer")
+ response = self.service_connection.create_distribution(origin, enabled=False, comment="example.com distribution")
+
+ self.assertTrue(isinstance(response, Distribution))
+ self.assertTrue(isinstance(response.config, DistributionConfig))
+ self.assertTrue(isinstance(response.config.origin, CustomOrigin))
+ self.assertEqual(response.config.origin.dns_name, "example.com")
+ self.assertEqual(response.config.origin.http_port, 80)
+ self.assertEqual(response.config.origin.https_port, 443)
+ self.assertEqual(response.config.origin.origin_protocol_policy, "match-viewer")
+ self.assertEqual(response.config.cnames, [])
+ self.assertTrue(not response.config.enabled)
+ self.assertEqual(response.id, "EEEEEEEEEEEEEE")
+ self.assertEqual(response.status, "InProgress")
+ self.assertEqual(response.domain_name, "d2000000000000.cloudfront.net")
+ self.assertEqual(response.in_progress_invalidation_batches, 0)
diff --git a/tests/unit/dynamodb2/test_table.py b/tests/unit/dynamodb2/test_table.py
index a77bba6c..52ec53dd 100644
--- a/tests/unit/dynamodb2/test_table.py
+++ b/tests/unit/dynamodb2/test_table.py
@@ -17,7 +17,6 @@ from boto.exception import JSONResponseError
FakeDynamoDBConnection = mock.create_autospec(DynamoDBConnection)
-
class SchemaFieldsTestCase(unittest.TestCase):
def test_hash_key(self):
hash_key = HashKey('hello')
@@ -299,6 +298,43 @@ class IndexFieldTestCase(unittest.TestCase):
}
})
+ def test_global_include_index_throughput(self):
+ include_index = GlobalIncludeIndex('IncludeKeys', parts=[
+ HashKey('username'),
+ RangeKey('date_joined')
+ ], includes=[
+ 'gender',
+ 'friend_count'
+ ], throughput={
+ 'read': 10,
+ 'write': 8
+ })
+
+ self.assertEqual(include_index.schema(), {
+ 'IndexName': 'IncludeKeys',
+ 'KeySchema': [
+ {
+ 'AttributeName': 'username',
+ 'KeyType': 'HASH'
+ },
+ {
+ 'AttributeName': 'date_joined',
+ 'KeyType': 'RANGE'
+ }
+ ],
+ 'Projection': {
+ 'ProjectionType': 'INCLUDE',
+ 'NonKeyAttributes': [
+ 'gender',
+ 'friend_count',
+ ]
+ },
+ 'ProvisionedThroughput': {
+ 'ReadCapacityUnits': 10,
+ 'WriteCapacityUnits': 8
+ }
+ })
+
class ItemTestCase(unittest.TestCase):
def setUp(self):
@@ -1624,7 +1660,6 @@ class TableTestCase(unittest.TestCase):
username= 'johndoe',
date_joined= 1366056668)
-
def test_put_item(self):
with mock.patch.object(
self.users.connection,
@@ -2426,7 +2461,6 @@ class TableTestCase(unittest.TestCase):
self.assertEqual(mock_scan_2.call_count, 1)
-
def test_scan_with_specific_attributes(self):
items_1 = {
'results': [
@@ -2459,7 +2493,6 @@ class TableTestCase(unittest.TestCase):
self.assertEqual(mock_query.call_count, 1)
-
def test_count(self):
expected = {
"Table": {
diff --git a/tests/unit/emr/test_connection.py b/tests/unit/emr/test_connection.py
index 9c46c7dd..227f5d68 100644
--- a/tests/unit/emr/test_connection.py
+++ b/tests/unit/emr/test_connection.py
@@ -28,7 +28,12 @@ from time import time
from tests.unit import AWSMockServiceTestCase
from boto.emr.connection import EmrConnection
-from boto.emr.emrobject import BootstrapAction, BootstrapActionList, ClusterStatus, ClusterSummaryList, ClusterSummary, ClusterTimeline, InstanceInfo, InstanceList, InstanceGroupInfo, InstanceGroup, InstanceGroupList, JobFlow, JobFlowStepList, Step, StepSummaryList
+from boto.emr.emrobject import BootstrapAction, BootstrapActionList, \
+ ClusterStatus, ClusterSummaryList, \
+ ClusterSummary, ClusterTimeline, InstanceInfo, \
+ InstanceList, InstanceGroupInfo, \
+ InstanceGroup, InstanceGroupList, JobFlow, \
+ JobFlowStepList, Step, StepSummaryList, Cluster
# These tests are just checking the basic structure of
# the Elastic MapReduce code, by picking a few calls
diff --git a/tests/unit/mws/test_connection.py b/tests/unit/mws/test_connection.py
index c23f4c26..c23f4c26 100755..100644
--- a/tests/unit/mws/test_connection.py
+++ b/tests/unit/mws/test_connection.py
diff --git a/tests/unit/mws/test_response.py b/tests/unit/mws/test_response.py
index 130bfe0d..9172aa7a 100755..100644
--- a/tests/unit/mws/test_response.py
+++ b/tests/unit/mws/test_response.py
@@ -1,4 +1,5 @@
#!/usr/bin/env python
+import unittest
from boto.mws.connection import MWSConnection
from boto.mws.response import (ResponseFactory, ResponseElement, Element,
MemberList, ElementList, SimpleList)
@@ -29,7 +30,7 @@ class TestMWSResponse(AWSMockServiceTestCase):
<Bif>Bam</Bif>
</Item>
</Test9Result></Test9Response>"""
- obj = self.issue_test('Test9', Test9Result, text)
+ obj = self.check_issue('Test9', Test9Result, text)
Item = obj._result.Item
useful = lambda x: not x[0].startswith('_')
nest = dict(filter(useful, Item.Nest.__dict__.items()))
@@ -59,7 +60,7 @@ class TestMWSResponse(AWSMockServiceTestCase):
<member><Foo>6</Foo><Foo>7</Foo></member>
</Extra>
</Test8Result></Test8Response>"""
- obj = self.issue_test('Test8', Test8Result, text)
+ obj = self.check_issue('Test8', Test8Result, text)
self.assertSequenceEqual(
map(int, obj._result.Item),
range(4),
@@ -116,7 +117,7 @@ class TestMWSResponse(AWSMockServiceTestCase):
</member>
</Item>
</Test7Result></Test7Response>"""
- obj = self.issue_test('Test7', Test7Result, text)
+ obj = self.check_issue('Test7', Test7Result, text)
item = obj._result.Item
self.assertEqual(len(item), 3)
nests = [z.Nest for z in filter(lambda x: x.Nest, item)]
@@ -151,7 +152,7 @@ class TestMWSResponse(AWSMockServiceTestCase):
<member><Value>Six</Value></member>
</Item>
</Test6Result></Test6Response>"""
- obj = self.issue_test('Test6', Test6Result, text)
+ obj = self.check_issue('Test6', Test6Result, text)
self.assertSequenceEqual(
[e.Value for e in obj._result.Item],
['One', 'Two', 'Six'],
@@ -167,7 +168,7 @@ class TestMWSResponse(AWSMockServiceTestCase):
text = """<Test5Response><Test5Result>
<Item/>
</Test5Result></Test5Response>"""
- obj = self.issue_test('Test5', Test5Result, text)
+ obj = self.check_issue('Test5', Test5Result, text)
self.assertSequenceEqual(obj._result.Item, [])
def test_parsing_missing_member_list(self):
@@ -176,7 +177,7 @@ class TestMWSResponse(AWSMockServiceTestCase):
text = """<Test4Response><Test4Result>
</Test4Result></Test4Response>"""
- obj = self.issue_test('Test4', Test4Result, text)
+ obj = self.check_issue('Test4', Test4Result, text)
self.assertSequenceEqual(obj._result.Item, [])
def test_parsing_element_lists(self):
@@ -189,7 +190,7 @@ class TestMWSResponse(AWSMockServiceTestCase):
<Item><Foo>Baz</Foo>
<Zam>Zoo</Zam></Item>
</Test1Result></Test1Response>"""
- obj = self.issue_test('Test1', Test1Result, text)
+ obj = self.check_issue('Test1', Test1Result, text)
self.assertTrue(len(obj._result.Item) == 3)
elements = lambda x: getattr(x, 'Foo', getattr(x, 'Zip', '?'))
elements = map(elements, obj._result.Item)
@@ -201,7 +202,7 @@ class TestMWSResponse(AWSMockServiceTestCase):
text = """<Test2Response><Test2Result>
</Test2Result></Test2Response>"""
- obj = self.issue_test('Test2', Test2Result, text)
+ obj = self.check_issue('Test2', Test2Result, text)
self.assertEqual(obj._result.Item, [])
def test_parsing_simple_lists(self):
@@ -213,10 +214,10 @@ class TestMWSResponse(AWSMockServiceTestCase):
<Item>Bif</Item>
<Item>Baz</Item>
</Test3Result></Test3Response>"""
- obj = self.issue_test('Test3', Test3Result, text)
+ obj = self.check_issue('Test3', Test3Result, text)
self.assertSequenceEqual(obj._result.Item, ['Bar', 'Bif', 'Baz'])
- def issue_test(self, action, klass, text):
+ def check_issue(self, action, klass, text):
cls = ResponseFactory(action, force=klass)
return self.service_connection._parse_response(cls, text)
diff --git a/tests/unit/provider/test_provider.py b/tests/unit/provider/test_provider.py
index 0162b8ab..ece21215 100644
--- a/tests/unit/provider/test_provider.py
+++ b/tests/unit/provider/test_provider.py
@@ -155,6 +155,15 @@ class TestProvider(unittest.TestCase):
if not imported:
del sys.modules['keyring']
+ def test_passed_in_values_beat_env_vars(self):
+ self.environ['AWS_ACCESS_KEY_ID'] = 'env_access_key'
+ self.environ['AWS_SECRET_ACCESS_KEY'] = 'env_secret_key'
+ self.environ['AWS_SECURITY_TOKEN'] = 'env_security_token'
+ p = provider.Provider('aws', 'access_key', 'secret_key')
+ self.assertEqual(p.access_key, 'access_key')
+ self.assertEqual(p.secret_key, 'secret_key')
+ self.assertEqual(p.security_token, None)
+
def test_env_vars_beat_config_values(self):
self.environ['AWS_ACCESS_KEY_ID'] = 'env_access_key'
self.environ['AWS_SECRET_ACCESS_KEY'] = 'env_secret_key'
diff --git a/tests/unit/rds/test_connection.py b/tests/unit/rds/test_connection.py
index 48afc9e0..fbc65b09 100644
--- a/tests/unit/rds/test_connection.py
+++ b/tests/unit/rds/test_connection.py
@@ -28,7 +28,9 @@ from boto.ec2.securitygroup import SecurityGroup
from boto.rds import RDSConnection
from boto.rds.vpcsecuritygroupmembership import VPCSecurityGroupMembership
from boto.rds.parametergroup import ParameterGroup
-from boto.rds.logfile import LogFile
+from boto.rds.logfile import LogFile, LogFileObject
+
+import xml.sax.saxutils as saxutils
class TestRDSConnection(AWSMockServiceTestCase):
connection_class = RDSConnection
@@ -599,13 +601,13 @@ class TestRDSLogFile(AWSMockServiceTestCase):
</DescribeDBLogFilesResponse>
"""
- def test_get_all_logs(self):
+ def test_get_all_logs_simple(self):
self.set_http_response(status_code=200)
- response = self.service_connection.get_all_logs()
+ response = self.service_connection.get_all_logs('db1')
self.assert_request_parameters({
'Action': 'DescribeDBLogFiles',
- 'MaxRecords': 26,
+ 'DBInstanceIdentifier': 'db1',
}, ignore_params_values=['Version'])
self.assertEqual(len(response), 6)
@@ -614,14 +616,18 @@ class TestRDSLogFile(AWSMockServiceTestCase):
self.assertEqual(response[0].last_written, '1364403600000')
self.assertEqual(response[0].size, '0')
- def test_get_all_logs_single(self):
+ def test_get_all_logs_filtered(self):
self.set_http_response(status_code=200)
- response = self.service_connection.get_all_logs('db_instance_1')
+ response = self.service_connection.get_all_logs('db_instance_1', max_records=100, marker='error/mysql-error.log', file_size=2000000, filename_contains='error', file_last_written=12345678)
self.assert_request_parameters({
'Action': 'DescribeDBLogFiles',
'DBInstanceIdentifier': 'db_instance_1',
- 'MaxRecords': 26,
+ 'MaxRecords': 100,
+ 'Marker': 'error/mysql-error.log',
+ 'FileSize': 2000000,
+ 'FilenameContains': 'error',
+ 'FileLastWritten': 12345678,
}, ignore_params_values=['Version'])
self.assertEqual(len(response), 6)
@@ -630,6 +636,104 @@ class TestRDSLogFile(AWSMockServiceTestCase):
self.assertEqual(response[0].last_written, '1364403600000')
self.assertEqual(response[0].size, '0')
+
+class TestRDSLogFileDownload(AWSMockServiceTestCase):
+ connection_class = RDSConnection
+ logfile_sample = """
+??2014-01-26 23:59:00.01 spid54 Microsoft SQL Server 2012 - 11.0.2100.60 (X64)
+
+ Feb 10 2012 19:39:15
+
+ Copyright (c) Microsoft Corporation
+
+ Web Edition (64-bit) on Windows NT 6.1 &lt;X64&gt; (Build 7601: Service Pack 1) (Hypervisor)
+
+
+
+2014-01-26 23:59:00.01 spid54 (c) Microsoft Corporation.
+
+2014-01-26 23:59:00.01 spid54 All rights reserved.
+
+2014-01-26 23:59:00.01 spid54 Server process ID is 2976.
+
+2014-01-26 23:59:00.01 spid54 System Manufacturer: 'Xen', System Model: 'HVM domU'.
+
+2014-01-26 23:59:00.01 spid54 Authentication mode is MIXED.
+
+2014-01-26 23:59:00.01 spid54 Logging SQL Server messages in file 'D:\RDSDBDATA\Log\ERROR'.
+
+2014-01-26 23:59:00.01 spid54 The service account is 'WORKGROUP\AMAZONA-NUQUUMV$'. This is an informational message; no user action is required.
+
+2014-01-26 23:59:00.01 spid54 The error log has been reinitialized. See the previous log for older entries.
+
+2014-01-27 00:00:56.42 spid25s This instance of SQL Server has been using a process ID of 2976 since 10/21/2013 2:16:50 AM (local) 10/21/2013 2:16:50 AM (UTC). This is an informational message only; no user action is required.
+
+2014-01-27 09:35:15.43 spid71 I/O is frozen on database model. No user action is required. However, if I/O is not resumed promptly, you could cancel the backup.
+
+2014-01-27 09:35:15.44 spid72 I/O is frozen on database msdb. No user action is required. However, if I/O is not resumed promptly, you could cancel the backup.
+
+2014-01-27 09:35:15.44 spid74 I/O is frozen on database rdsadmin. No user action is required. However, if I/O is not resumed promptly, you could cancel the backup.
+
+2014-01-27 09:35:15.44 spid73 I/O is frozen on database master. No user action is required. However, if I/O is not resumed promptly, you could cancel the backup.
+
+2014-01-27 09:35:25.57 spid73 I/O was resumed on database master. No user action is required.
+
+2014-01-27 09:35:25.57 spid74 I/O was resumed on database rdsadmin. No user action is required.
+
+2014-01-27 09:35:25.57 spid71 I/O was resumed on database model. No user action is required.
+
+2014-01-27 09:35:25.57 spid72 I/O was resumed on database msdb. No user action is required.
+ """
+
+ def setUp(self):
+ super(TestRDSLogFileDownload, self).setUp()
+
+ def default_body(self):
+ return """
+<DownloadDBLogFilePortionResponse xmlns="http://rds.amazonaws.com/doc/2013-09-09/">
+ <DownloadDBLogFilePortionResult>
+ <Marker>0:4485</Marker>
+ <LogFileData>%s</LogFileData>
+ <AdditionalDataPending>false</AdditionalDataPending>
+ </DownloadDBLogFilePortionResult>
+ <ResponseMetadata>
+ <RequestId>27143615-87ae-11e3-acc9-fb64b157268e</RequestId>
+ </ResponseMetadata>
+</DownloadDBLogFilePortionResponse>
+ """ % self.logfile_sample
+
+ def test_single_download(self):
+ self.set_http_response(status_code=200)
+ response = self.service_connection.get_log_file('db1', 'foo.log')
+
+ self.assertTrue(isinstance(response, LogFileObject))
+ self.assertEqual(response.marker, '0:4485')
+ self.assertEqual(response.dbinstance_id, 'db1')
+ self.assertEqual(response.log_filename, 'foo.log')
+
+ self.assertEqual(response.data, saxutils.unescape(self.logfile_sample))
+
+ self.assert_request_parameters({
+ 'Action': 'DownloadDBLogFilePortion',
+ 'DBInstanceIdentifier': 'db1',
+ 'LogFileName': 'foo.log',
+ }, ignore_params_values=['Version'])
+
+ def test_multi_args(self):
+ self.set_http_response(status_code=200)
+ response = self.service_connection.get_log_file('db1', 'foo.log', marker='0:4485', number_of_lines=10)
+
+ self.assertTrue(isinstance(response, LogFileObject))
+
+ self.assert_request_parameters({
+ 'Action': 'DownloadDBLogFilePortion',
+ 'DBInstanceIdentifier': 'db1',
+ 'Marker': '0:4485',
+ 'NumberOfLines': 10,
+ 'LogFileName': 'foo.log',
+ }, ignore_params_values=['Version'])
+
+
class TestRDSOptionGroupOptions(AWSMockServiceTestCase):
connection_class = RDSConnection
diff --git a/tests/unit/rds2/__init__.py b/tests/unit/rds2/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/unit/rds2/__init__.py
diff --git a/tests/unit/rds2/test_connection.py b/tests/unit/rds2/test_connection.py
new file mode 100644
index 00000000..85502945
--- /dev/null
+++ b/tests/unit/rds2/test_connection.py
@@ -0,0 +1,209 @@
+#!/usr/bin/env python
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from tests.unit import unittest
+from tests.unit import AWSMockServiceTestCase
+
+from boto.ec2.securitygroup import SecurityGroup
+from boto.rds2.layer1 import RDSConnection
+
+
+class TestRDS2Connection(AWSMockServiceTestCase):
+ connection_class = RDSConnection
+
+ def setUp(self):
+ super(TestRDS2Connection, self).setUp()
+
+ def default_body(self):
+ return """{
+ "DescribeDBInstancesResponse": {
+ "DescribeDBInstancesResult": {
+ "DBInstances": [{
+ "DBInstance": {
+ "Iops": 2000,
+ "BackupRetentionPeriod": 1,
+ "MultiAZ": false,
+ "DBInstanceStatus": "backing-up",
+ "DBInstanceIdentifier": "mydbinstance2",
+ "PreferredBackupWindow": "10:30-11:00",
+ "PreferredMaintenanceWindow": "wed:06:30-wed:07:00",
+ "OptionGroupMembership": {
+ "OptionGroupName": "default:mysql-5-5",
+ "Status": "in-sync"
+ },
+ "AvailabilityZone": "us-west-2b",
+ "ReadReplicaDBInstanceIdentifiers": null,
+ "Engine": "mysql",
+ "PendingModifiedValues": null,
+ "LicenseModel": "general-public-license",
+ "DBParameterGroups": [{
+ "DBParameterGroup": {
+ "ParameterApplyStatus": "in-sync",
+ "DBParameterGroupName": "default.mysql5.5"
+ }
+ }],
+ "Endpoint": {
+ "Port": 3306,
+ "Address": "mydbinstance2.c0hjqouvn9mf.us-west-2.rds.amazonaws.com"
+ },
+ "EngineVersion": "5.5.27",
+ "DBSecurityGroups": [{
+ "DBSecurityGroup": {
+ "Status": "active",
+ "DBSecurityGroupName": "default"
+ }
+ }],
+ "VpcSecurityGroups": [{
+ "VpcSecurityGroupMembership": {
+ "VpcSecurityGroupId": "sg-1",
+ "Status": "active"
+ }
+ }],
+ "DBName": "mydb2",
+ "AutoMinorVersionUpgrade": true,
+ "InstanceCreateTime": "2012-10-03T22:01:51.047Z",
+ "AllocatedStorage": 200,
+ "DBInstanceClass": "db.m1.large",
+ "MasterUsername": "awsuser",
+ "StatusInfos": [{
+ "DBInstanceStatusInfo": {
+ "Message": null,
+ "Normal": true,
+ "Status": "replicating",
+ "StatusType": "read replication"
+ }
+ }],
+ "DBSubnetGroup": {
+ "VpcId": "990524496922",
+ "SubnetGroupStatus": "Complete",
+ "DBSubnetGroupDescription": "My modified DBSubnetGroup",
+ "DBSubnetGroupName": "mydbsubnetgroup",
+ "Subnets": [{
+ "Subnet": {
+ "SubnetStatus": "Active",
+ "SubnetIdentifier": "subnet-7c5b4115",
+ "SubnetAvailabilityZone": {
+ "Name": "us-east-1c"
+ }
+ },
+ "Subnet": {
+ "SubnetStatus": "Active",
+ "SubnetIdentifier": "subnet-7b5b4112",
+ "SubnetAvailabilityZone": {
+ "Name": "us-east-1b"
+ }
+ },
+ "Subnet": {
+ "SubnetStatus": "Active",
+ "SubnetIdentifier": "subnet-3ea6bd57",
+ "SubnetAvailabilityZone": {
+ "Name": "us-east-1d"
+ }
+ }
+ }]
+ }
+ }
+ }]
+ }
+ }
+ }"""
+
+ def test_describe_db_instances(self):
+ self.set_http_response(status_code=200)
+ response = self.service_connection.describe_db_instances('instance_id')
+ self.assertEqual(len(response), 1)
+ self.assert_request_parameters({
+ 'Action': 'DescribeDBInstances',
+ 'ContentType': 'JSON',
+ 'DBInstanceIdentifier': 'instance_id',
+ }, ignore_params_values=['Version'])
+ db = response['DescribeDBInstancesResponse']\
+ ['DescribeDBInstancesResult']['DBInstances'][0]\
+ ['DBInstance']
+ self.assertEqual(db['DBInstanceIdentifier'], 'mydbinstance2')
+ self.assertEqual(db['InstanceCreateTime'], '2012-10-03T22:01:51.047Z')
+ self.assertEqual(db['Engine'], 'mysql')
+ self.assertEqual(db['DBInstanceStatus'], 'backing-up')
+ self.assertEqual(db['AllocatedStorage'], 200)
+ self.assertEqual(db['Endpoint']['Port'], 3306)
+ self.assertEqual(db['DBInstanceClass'], 'db.m1.large')
+ self.assertEqual(db['MasterUsername'], 'awsuser')
+ self.assertEqual(db['AvailabilityZone'], 'us-west-2b')
+ self.assertEqual(db['BackupRetentionPeriod'], 1)
+ self.assertEqual(db['PreferredBackupWindow'], '10:30-11:00')
+ self.assertEqual(db['PreferredMaintenanceWindow'],
+ 'wed:06:30-wed:07:00')
+ self.assertEqual(db['MultiAZ'], False)
+ self.assertEqual(db['Iops'], 2000)
+ self.assertEqual(db['PendingModifiedValues'], None)
+ self.assertEqual(
+ db['DBParameterGroups'][0]['DBParameterGroup']\
+ ['DBParameterGroupName'],
+ 'default.mysql5.5'
+ )
+ self.assertEqual(
+ db['DBSecurityGroups'][0]['DBSecurityGroup']['DBSecurityGroupName'],
+ 'default'
+ )
+ self.assertEqual(
+ db['DBSecurityGroups'][0]['DBSecurityGroup']['Status'],
+ 'active'
+ )
+ self.assertEqual(len(db['StatusInfos']), 1)
+ self.assertEqual(
+ db['StatusInfos'][0]['DBInstanceStatusInfo']['Message'],
+ None
+ )
+ self.assertEqual(
+ db['StatusInfos'][0]['DBInstanceStatusInfo']['Normal'],
+ True
+ )
+ self.assertEqual(
+ db['StatusInfos'][0]['DBInstanceStatusInfo']['Status'],
+ 'replicating'
+ )
+ self.assertEqual(
+ db['StatusInfos'][0]['DBInstanceStatusInfo']['StatusType'],
+ 'read replication'
+ )
+ self.assertEqual(
+ db['VpcSecurityGroups'][0]['VpcSecurityGroupMembership']['Status'],
+ 'active'
+ )
+ self.assertEqual(
+ db['VpcSecurityGroups'][0]['VpcSecurityGroupMembership']\
+ ['VpcSecurityGroupId'],
+ 'sg-1'
+ )
+ self.assertEqual(db['LicenseModel'], 'general-public-license')
+ self.assertEqual(db['EngineVersion'], '5.5.27')
+ self.assertEqual(db['AutoMinorVersionUpgrade'], True)
+ self.assertEqual(
+ db['DBSubnetGroup']['DBSubnetGroupName'],
+ 'mydbsubnetgroup'
+ )
+
+
+if __name__ == '__main__':
+ unittest.main()
+
diff --git a/tests/unit/route53/test_connection.py b/tests/unit/route53/test_connection.py
index caa5f022..34e803dd 100644
--- a/tests/unit/route53/test_connection.py
+++ b/tests/unit/route53/test_connection.py
@@ -28,10 +28,12 @@ from boto.route53.exception import DNSServerError
from boto.route53.record import ResourceRecordSets, Record
from boto.route53.zone import Zone
+from nose.plugins.attrib import attr
from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
+@attr(route53=True)
class TestRoute53Connection(AWSMockServiceTestCase):
connection_class = Route53Connection
@@ -260,6 +262,17 @@ class TestGetAllRRSetsRoute53(AWSMockServiceTestCase):
</ResourceRecord>
</ResourceRecords>
</ResourceRecordSet>
+ <ResourceRecordSet>
+ <Name>us-west-2.example.com.</Name>
+ <Type>A</Type>
+ <SetIdentifier>latency-example-us-west-2</SetIdentifier>
+ <Region>us-west-2</Region>
+ <AliasTarget>
+ <HostedZoneId>ABCDEFG123456</HostedZoneId>
+ <EvaluateTargetHealth>true</EvaluateTargetHealth>
+ <DNSName>example-123456.us-west-2.elb.amazonaws.com.</DNSName>
+ </AliasTarget>
+ </ResourceRecordSet>
</ResourceRecordSets>
<IsTruncated>false</IsTruncated>
<MaxItems>100</MaxItems>
@@ -280,3 +293,12 @@ class TestGetAllRRSetsRoute53(AWSMockServiceTestCase):
self.assertTrue(response[0].name, "test.example.com.")
self.assertTrue(response[0].ttl, "60")
self.assertTrue(response[0].type, "A")
+
+ latency_record = response[2]
+ self.assertEqual(latency_record.name, 'us-west-2.example.com.')
+ self.assertEqual(latency_record.type, 'A')
+ self.assertEqual(latency_record.identifier, 'latency-example-us-west-2')
+ self.assertEqual(latency_record.region, 'us-west-2')
+ self.assertEqual(latency_record.alias_hosted_zone_id, 'ABCDEFG123456')
+ self.assertEqual(latency_record.alias_evaluate_target_health, 'true')
+ self.assertEqual(latency_record.alias_dns_name, 'example-123456.us-west-2.elb.amazonaws.com.')
diff --git a/tests/unit/sns/test_connection.py b/tests/unit/sns/test_connection.py
index 3a474c3c..aee22e36 100644
--- a/tests/unit/sns/test_connection.py
+++ b/tests/unit/sns/test_connection.py
@@ -225,6 +225,17 @@ class TestSNSConnection(AWSMockServiceTestCase):
'MessageStructure': 'json',
}, ignore_params_values=['Version', 'ContentType'])
+ def test_publish_with_utf8_message(self):
+ self.set_http_response(status_code=200)
+ subject = message = u'We \u2665 utf-8'.encode('utf-8')
+ self.service_connection.publish('topic', message, subject)
+ self.assert_request_parameters({
+ 'Action': 'Publish',
+ 'TopicArn': 'topic',
+ 'Subject': subject,
+ 'Message': message,
+ }, ignore_params_values=['Version', 'ContentType'])
+
if __name__ == '__main__':
unittest.main()
diff --git a/tests/unit/sts/test_connection.py b/tests/unit/sts/test_connection.py
index 2e2fa1d7..e894a531 100644
--- a/tests/unit/sts/test_connection.py
+++ b/tests/unit/sts/test_connection.py
@@ -81,6 +81,29 @@ class TestSTSConnection(AWSMockServiceTestCase):
self.assertEqual(response.user.arn, 'arn:role')
self.assertEqual(response.user.assume_role_id, 'roleid:myrolesession')
+ def test_assume_role_with_mfa(self):
+ self.set_http_response(status_code=200)
+ response = self.service_connection.assume_role(
+ 'arn:role',
+ 'mysession',
+ mfa_serial_number='GAHT12345678',
+ mfa_token='abc123'
+ )
+ self.assert_request_parameters(
+ {'Action': 'AssumeRole',
+ 'RoleArn': 'arn:role',
+ 'RoleSessionName': 'mysession',
+ 'SerialNumber': 'GAHT12345678',
+ 'TokenCode': 'abc123'},
+ ignore_params_values=['Timestamp', 'AWSAccessKeyId',
+ 'SignatureMethod', 'SignatureVersion',
+ 'Version'])
+ self.assertEqual(response.credentials.access_key, 'accesskey')
+ self.assertEqual(response.credentials.secret_key, 'secretkey')
+ self.assertEqual(response.credentials.session_token, 'session_token')
+ self.assertEqual(response.user.arn, 'arn:role')
+ self.assertEqual(response.user.assume_role_id, 'roleid:myrolesession')
+
class TestSTSWebIdentityConnection(AWSMockServiceTestCase):
connection_class = STSConnection