author     Daniel G. Taylor <danielgtaylor@gmail.com>  2015-04-02 15:44:05 -0700
committer  Daniel G. Taylor <danielgtaylor@gmail.com>  2015-04-02 15:44:05 -0700
commit     c5aa4f54d6d2aaec3a878a506502892f361a83de (patch)
tree       9e1584537b2d611ed305fbe0f76bf22f96a877ee
parent     edfa222f35105071e5ea9a2e2e715dc541c11d1c (diff)
parent     025aa22fdacc5688de9e5c02284103d802590f2c (diff)
download   boto-c5aa4f54d6d2aaec3a878a506502892f361a83de.tar.gz

Merge branch 'release-2.37.0' (tag: 2.37.0)
-rw-r--r--   README.rst                                               20
-rwxr-xr-x   bin/glacier                                               9
-rw-r--r--   boto/__init__.py                                          4
-rw-r--r--   boto/cloudformation/connection.py                        18
-rw-r--r--   boto/cloudhsm/__init__.py                                 2
-rw-r--r--   boto/cloudtrail/exceptions.py                            28
-rw-r--r--   boto/cloudtrail/layer1.py                               202
-rw-r--r--   boto/configservice/__init__.py                            2
-rw-r--r--   boto/connection.py                                        2
-rw-r--r--   boto/dynamodb2/table.py                                 214
-rw-r--r--   boto/ec2/autoscale/__init__.py                           21
-rw-r--r--   boto/ec2/elb/__init__.py                                  6
-rw-r--r--   boto/ec2/elb/loadbalancer.py                              6
-rw-r--r--   boto/endpoints.json                                      13
-rw-r--r--   boto/pyami/config.py                                      4
-rw-r--r--   boto/swf/layer2.py                                        3
-rw-r--r--   docs/source/dynamodb2_tut.rst                           199
-rw-r--r--   docs/source/releasenotes/v2.37.0.rst                     38
-rw-r--r--   docs/source/route53_tut.rst                              16
-rw-r--r--   docs/source/s3_tut.rst                                    4
-rw-r--r--   requirements.txt                                          2
-rw-r--r--   tests/integration/cloudformation/test_connection.py     57
-rw-r--r--   tests/integration/configservice/test_configservice.py    7
-rw-r--r--   tests/integration/dynamodb2/test_highlevel.py           105
-rw-r--r--   tests/unit/cloudformation/test_connection.py             12
-rw-r--r--   tests/unit/cloudtrail/test_layer1.py                     18
-rw-r--r--   tests/unit/dynamodb2/test_table.py                      137
-rw-r--r--   tests/unit/ec2/autoscale/test_group.py                   63
-rw-r--r--   tests/unit/swf/test_layer2_domain.py                      4
-rw-r--r--   tests/unit/test_connection.py                            10
30 files changed, 993 insertions(+), 233 deletions(-)
diff --git a/README.rst b/README.rst
index 01823a31..80bf33e6 100644
--- a/README.rst
+++ b/README.rst
@@ -1,9 +1,9 @@
####
boto
####
-boto 2.36.0
+boto 2.37.0
-Released: 27-Jan-2015
+Released: 2-Apr-2015
.. image:: https://travis-ci.org/boto/boto.svg?branch=develop
:target: https://travis-ci.org/boto/boto
@@ -26,6 +26,22 @@ and the `Porting Guide`_. If you would like, you can open an issue to let
others know about your work in progress. Tests **must** pass on Python
2.6, 2.7, 3.3, and 3.4 for pull requests to be accepted.
+******
+Boto 3
+******
+The next major version of Boto is currently in developer preview and can
+be found in the `Boto 3 <https://github.com/boto/boto3#readme>`__
+repository and installed via ``pip``. It supports the latest service APIs
+and provides a high-level object-oriented interface to many services.
+
+Please try Boto 3 and
+`leave feedback <https://github.com/boto/boto3/issues>`__ with any issues,
+suggestions, and feature requests you might have.
+
+********
+Services
+********
+
At the moment, boto supports:
* Compute
diff --git a/bin/glacier b/bin/glacier
index ed0769a9..ae3b0c57 100755
--- a/bin/glacier
+++ b/bin/glacier
@@ -111,9 +111,12 @@ def upload_files(vault_name, filenames, region, access_key=None, secret_key=None
glacier_vault = layer2.get_vault(vault_name)
for filename in filenames:
if isfile(filename):
- print('Uploading %s to %s' % (filename, vault_name))
- glacier_vault.upload_archive(filename, description = basename(filename))
-
+ sys.stdout.write('Uploading %s to %s...' % (filename, vault_name))
+ sys.stdout.flush()
+ archive_id = glacier_vault.upload_archive(
+ filename,
+ description = basename(filename))
+ print(' done. Vault returned ArchiveID %s' % archive_id)
def main():
if len(sys.argv) < 2:
diff --git a/boto/__init__.py b/boto/__init__.py
index 19beaa11..9b32964f 100644
--- a/boto/__init__.py
+++ b/boto/__init__.py
@@ -38,7 +38,7 @@ import logging.config
from boto.compat import urlparse
from boto.exception import InvalidUriError
-__version__ = '2.36.0'
+__version__ = '2.37.0'
Version = __version__  # for backward compatibility
# http://bugs.python.org/issue7980
@@ -1018,7 +1018,7 @@ def connect_codedeploy(aws_access_key_id=None,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs
- )
+ )
def connect_configservice(aws_access_key_id=None,
diff --git a/boto/cloudformation/connection.py b/boto/cloudformation/connection.py
index 1407c570..ee850b30 100644
--- a/boto/cloudformation/connection.py
+++ b/boto/cloudformation/connection.py
@@ -135,7 +135,8 @@ class CloudFormationConnection(AWSQueryConnection):
:type parameters: list
:param parameters: A list of key/value tuples that specify input
- parameters for the stack.
+ parameters for the stack. A 3-tuple (key, value, bool) may be used to
+ specify the `UsePreviousValue` option.
:type disable_rollback: boolean
:param disable_rollback: Set to `True` to disable rollback of the stack
@@ -235,9 +236,17 @@ class CloudFormationConnection(AWSQueryConnection):
boto.log.warning("If both TemplateBody and TemplateURL are"
" specified, only TemplateBody will be honored by the API")
if parameters and len(parameters) > 0:
- for i, (key, value) in enumerate(parameters):
+ for i, parameter_tuple in enumerate(parameters):
+ key, value = parameter_tuple[:2]
+ use_previous = (parameter_tuple[2]
+ if len(parameter_tuple) > 2 else False)
params['Parameters.member.%d.ParameterKey' % (i + 1)] = key
- params['Parameters.member.%d.ParameterValue' % (i + 1)] = value
+ if use_previous:
+ params['Parameters.member.%d.UsePreviousValue'
+ % (i + 1)] = self.encode_bool(use_previous)
+ else:
+ params['Parameters.member.%d.ParameterValue' % (i + 1)] = value
+
if capabilities:
for i, value in enumerate(capabilities):
params['Capabilities.member.%d' % (i + 1)] = value
@@ -459,7 +468,8 @@ class CloudFormationConnection(AWSQueryConnection):
:type parameters: list
:param parameters: A list of key/value tuples that specify input
- parameters for the stack.
+ parameters for the stack. A 3-tuple (key, value, bool) may be used to
+ specify the `UsePreviousValue` option.
:type notification_arns: list
:param notification_arns: The Simple Notification Service (SNS) topic
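
As a quick illustration of the new 3-tuple form, here is a hedged sketch
against a hypothetical stack (``my-stack`` and ``template_json`` are
illustrative, mirroring the unit and integration tests further down)::

    >>> import boto.cloudformation
    >>> conn = boto.cloudformation.connect_to_region('us-east-1')
    # template_json: your existing template as a JSON string.
    # A 3-tuple (key, value, use_previous) keeps Parameter1's previous
    # value; a plain 2-tuple sets Parameter2 explicitly.
    >>> conn.update_stack('my-stack', template_body=template_json,
    ...                   parameters=[('Parameter1', '', True),
    ...                               ('Parameter2', 'updated_value')])
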
diff --git a/boto/cloudhsm/__init__.py b/boto/cloudhsm/__init__.py
index 2d075c48..b13fe3fc 100644
--- a/boto/cloudhsm/__init__.py
+++ b/boto/cloudhsm/__init__.py
@@ -30,7 +30,7 @@ def regions():
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo`
"""
- from boto.kms.layer1 import CloudHSMConnection
+ from boto.cloudhsm.layer1 import CloudHSMConnection
return get_regions('cloudhsm', connection_cls=CloudHSMConnection)
diff --git a/boto/cloudtrail/exceptions.py b/boto/cloudtrail/exceptions.py
index ac4efbd2..d2c1b735 100644
--- a/boto/cloudtrail/exceptions.py
+++ b/boto/cloudtrail/exceptions.py
@@ -88,3 +88,31 @@ class InsufficientS3BucketPolicyException(BotoServerError):
write files into the prefix.
"""
pass
+
+
+class InvalidMaxResultsException(BotoServerError):
+ pass
+
+
+class InvalidTimeRangeException(BotoServerError):
+ pass
+
+
+class InvalidLookupAttributesException(BotoServerError):
+ pass
+
+
+class InvalidCloudWatchLogsLogGroupArnException(BotoServerError):
+ pass
+
+
+class InvalidCloudWatchLogsRoleArnException(BotoServerError):
+ pass
+
+
+class CloudWatchLogsDeliveryUnavailableException(BotoServerError):
+ pass
+
+
+class InvalidNextTokenException(BotoServerError):
+ pass
diff --git a/boto/cloudtrail/layer1.py b/boto/cloudtrail/layer1.py
index 960b00da..f233f321 100644
--- a/boto/cloudtrail/layer1.py
+++ b/boto/cloudtrail/layer1.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
@@ -30,7 +30,7 @@ from boto.compat import json
class CloudTrailConnection(AWSQueryConnection):
"""
- AWS Cloud Trail
+ AWS CloudTrail
This is the CloudTrail API Reference. It provides descriptions of
actions, data types, common parameters, and common errors for
CloudTrail.
@@ -62,26 +62,33 @@ class CloudTrailConnection(AWSQueryConnection):
ResponseError = JSONResponseError
_faults = {
+ "InvalidMaxResultsException": exceptions.InvalidMaxResultsException,
"InvalidSnsTopicNameException": exceptions.InvalidSnsTopicNameException,
"InvalidS3BucketNameException": exceptions.InvalidS3BucketNameException,
"TrailAlreadyExistsException": exceptions.TrailAlreadyExistsException,
+ "InvalidTimeRangeException": exceptions.InvalidTimeRangeException,
+ "InvalidLookupAttributesException": exceptions.InvalidLookupAttributesException,
"InsufficientSnsTopicPolicyException": exceptions.InsufficientSnsTopicPolicyException,
+ "InvalidCloudWatchLogsLogGroupArnException": exceptions.InvalidCloudWatchLogsLogGroupArnException,
+ "InvalidCloudWatchLogsRoleArnException": exceptions.InvalidCloudWatchLogsRoleArnException,
"InvalidTrailNameException": exceptions.InvalidTrailNameException,
- "TrailNotProvidedException": exceptions.TrailNotProvidedException,
+ "CloudWatchLogsDeliveryUnavailableException": exceptions.CloudWatchLogsDeliveryUnavailableException,
"TrailNotFoundException": exceptions.TrailNotFoundException,
"S3BucketDoesNotExistException": exceptions.S3BucketDoesNotExistException,
+ "InvalidNextTokenException": exceptions.InvalidNextTokenException,
"InvalidS3PrefixException": exceptions.InvalidS3PrefixException,
"MaximumNumberOfTrailsExceededException": exceptions.MaximumNumberOfTrailsExceededException,
"InsufficientS3BucketPolicyException": exceptions.InsufficientS3BucketPolicyException,
}
+
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
- if 'host' not in kwargs:
+ if 'host' not in kwargs or kwargs['host'] is None:
kwargs['host'] = region.endpoint
super(CloudTrailConnection, self).__init__(**kwargs)
@@ -90,21 +97,16 @@ class CloudTrailConnection(AWSQueryConnection):
def _required_auth_capability(self):
return ['hmac-v4']
- def create_trail(self, name=None, s3_bucket_name=None,
- s3_key_prefix=None, sns_topic_name=None,
- include_global_service_events=None, trail=None):
+ def create_trail(self, name, s3_bucket_name, s3_key_prefix=None,
+ sns_topic_name=None, include_global_service_events=None,
+ cloud_watch_logs_log_group_arn=None,
+ cloud_watch_logs_role_arn=None):
"""
From the command line, use `create-subscription`.
Creates a trail that specifies the settings for delivery of
log data to an Amazon S3 bucket.
- Support for passing Trail as a parameter ends as early as
- February 25, 2014. The request and response examples in this
- topic show the use of parameters as well as a Trail object.
- Until Trail is removed, you can use either Trail or the
- parameter list.
-
:type name: string
:param name: Specifies the name of the trail.
@@ -125,26 +127,28 @@ class CloudTrailConnection(AWSQueryConnection):
publishing events from global services such as IAM to the log
files.
- :type trail: dict
- :param trail: Support for passing a Trail object in the CreateTrail or
- UpdateTrail actions will end as early as February 15, 2014. Instead
- of the Trail object and its members, use the parameters listed for
- these actions.
+ :type cloud_watch_logs_log_group_arn: string
+ :param cloud_watch_logs_log_group_arn: Specifies a log group name using
+ an Amazon Resource Name (ARN), a unique identifier that represents
+ the log group to which CloudTrail logs will be delivered. Not
+ required unless you specify CloudWatchLogsRoleArn.
+
+ :type cloud_watch_logs_role_arn: string
+ :param cloud_watch_logs_role_arn: Specifies the role for the CloudWatch
+ Logs endpoint to assume to write to a user's log group.
"""
- params = {}
- if name is not None:
- params['Name'] = name
- if s3_bucket_name is not None:
- params['S3BucketName'] = s3_bucket_name
+ params = {'Name': name, 'S3BucketName': s3_bucket_name, }
if s3_key_prefix is not None:
params['S3KeyPrefix'] = s3_key_prefix
if sns_topic_name is not None:
params['SnsTopicName'] = sns_topic_name
if include_global_service_events is not None:
params['IncludeGlobalServiceEvents'] = include_global_service_events
- if trail is not None:
- params['trail'] = trail
+ if cloud_watch_logs_log_group_arn is not None:
+ params['CloudWatchLogsLogGroupArn'] = cloud_watch_logs_log_group_arn
+ if cloud_watch_logs_role_arn is not None:
+ params['CloudWatchLogsRoleArn'] = cloud_watch_logs_role_arn
return self.make_request(action='CreateTrail',
body=json.dumps(params))
@@ -162,11 +166,11 @@ class CloudTrailConnection(AWSQueryConnection):
def describe_trails(self, trail_name_list=None):
"""
- Retrieves the settings for some or all trails associated with
- an account.
+ Retrieves settings for the trail associated with the current
+ region for your account.
:type trail_name_list: list
- :param trail_name_list: The list of trails.
+ :param trail_name_list: The trail returned.
"""
params = {}
@@ -182,49 +186,6 @@ class CloudTrailConnection(AWSQueryConnection):
errors, Amazon SNS and Amazon S3 errors, and start and stop
logging times for each trail.
- The CloudTrail API is currently undergoing revision. This
- action currently returns both new fields and fields slated for
- removal from the API. The following lists indicate the plans
- for each field:
-
- **List of Members Planned for Ongoing Support**
-
-
- + IsLogging
- + LatestDeliveryTime
- + LatestNotificationTime
- + StartLoggingTime
- + StopLoggingTime
- + LatestNotificationError
- + LatestDeliveryError
-
-
- **List of Members Scheduled for Removal**
-
-
- + **LatestDeliveryAttemptTime**: Use LatestDeliveryTime
- instead.
- + **LatestNotificationAttemptTime**: Use
- LatestNotificationTime instead.
- + **LatestDeliveryAttemptSucceeded**: No replacement. See the
- note following this list.
- + **LatestNotificationAttemptSucceeded**: No replacement. See
- the note following this list.
- + **TimeLoggingStarted**: Use StartLoggingTime instead.
- + **TimeLoggingStopped**: Use StopLoggingtime instead.
-
-
- No replacements have been created for
- LatestDeliveryAttemptSucceeded and
- LatestNotificationAttemptSucceeded . Use LatestDeliveryError
- and LatestNotificationError to evaluate success or failure of
- log delivery or notification. Empty values returned for these
- fields indicate success. An error in LatestDeliveryError
- generally indicates either a missing bucket or insufficient
- permissions to write to the bucket. Similarly, an error in
- LatestNotificationError indicates either a missing topic or
- insufficient permissions.
-
:type name: string
:param name: The name of the trail for which you are requesting the
current status.
@@ -234,6 +195,68 @@ class CloudTrailConnection(AWSQueryConnection):
return self.make_request(action='GetTrailStatus',
body=json.dumps(params))
+ def lookup_events(self, lookup_attributes=None, start_time=None,
+ end_time=None, max_results=None, next_token=None):
+ """
+ Looks up API activity events captured by CloudTrail that
+ create, update, or delete resources in your account. Events
+ for a region can be looked up for the times in which you had
+ CloudTrail turned on in that region during the last seven
+ days. Lookup supports five different attributes: time range
+ (defined by a start time and end time), user name, event name,
+ resource type, and resource name. All attributes are optional.
+ The maximum number of attributes that can be specified in any
+ one lookup request are time range and one other attribute. The
+ default number of results returned is 10, with a maximum of 50
+ possible. The response includes a token that you can use to
+ get the next page of results.
+ The rate of lookup requests is limited to one per second per
+ account. If this limit is exceeded, a throttling error occurs.
+ Events that occurred during the selected time range will not
+ be available for lookup if CloudTrail logging was not enabled
+ when the events occurred.
+
+ :type lookup_attributes: list
+ :param lookup_attributes: Contains a list of lookup attributes.
+ Currently the list can contain only one item.
+
+ :type start_time: timestamp
+ :param start_time: Specifies that only events that occur after or at
+ the specified time are returned. If the specified start time is
+ after the specified end time, an error is returned.
+
+ :type end_time: timestamp
+ :param end_time: Specifies that only events that occur before or at the
+ specified time are returned. If the specified end time is before
+ the specified start time, an error is returned.
+
+ :type max_results: integer
+ :param max_results: The number of events to return. Possible values are
+ 1 through 50. The default is 10.
+
+ :type next_token: string
+ :param next_token: The token to use to get the next page of results
+ after a previous API call. This token must be passed in with the
+ same parameters that were specified in the original call. For
+ example, if the original call specified an AttributeKey of
+ 'Username' with a value of 'root', the call with NextToken should
+ include those same parameters.
+
+ """
+ params = {}
+ if lookup_attributes is not None:
+ params['LookupAttributes'] = lookup_attributes
+ if start_time is not None:
+ params['StartTime'] = start_time
+ if end_time is not None:
+ params['EndTime'] = end_time
+ if max_results is not None:
+ params['MaxResults'] = max_results
+ if next_token is not None:
+ params['NextToken'] = next_token
+ return self.make_request(action='LookupEvents',
+ body=json.dumps(params))
+
def start_logging(self, name):
"""
Starts the recording of AWS API calls and log file delivery
@@ -265,9 +288,10 @@ class CloudTrailConnection(AWSQueryConnection):
return self.make_request(action='StopLogging',
body=json.dumps(params))
- def update_trail(self, name=None, s3_bucket_name=None,
- s3_key_prefix=None, sns_topic_name=None,
- include_global_service_events=None, trail=None):
+ def update_trail(self, name, s3_bucket_name=None, s3_key_prefix=None,
+ sns_topic_name=None, include_global_service_events=None,
+ cloud_watch_logs_log_group_arn=None,
+ cloud_watch_logs_role_arn=None):
"""
From the command line, use `update-subscription`.
@@ -278,12 +302,6 @@ class CloudTrailConnection(AWSQueryConnection):
target for CloudTrail log files, an IAM policy exists for the
bucket.
- Support for passing Trail as a parameter ends as early as
- February 25, 2014. The request and response examples in this
- topic show the use of parameters as well as a Trail object.
- Until Trail is removed, you can use either Trail or the
- parameter list.
-
:type name: string
:param name: Specifies the name of the trail.
@@ -304,16 +322,18 @@ class CloudTrailConnection(AWSQueryConnection):
publishing events from global services such as IAM to the log
files.
- :type trail: dict
- :param trail: Support for passing a Trail object in the CreateTrail or
- UpdateTrail actions will end as early as February 15, 2014. Instead
- of the Trail object and its members, use the parameters listed for
- these actions.
+ :type cloud_watch_logs_log_group_arn: string
+ :param cloud_watch_logs_log_group_arn: Specifies a log group name using
+ an Amazon Resource Name (ARN), a unique identifier that represents
+ the log group to which CloudTrail logs will be delivered. Not
+ required unless you specify CloudWatchLogsRoleArn.
+
+ :type cloud_watch_logs_role_arn: string
+ :param cloud_watch_logs_role_arn: Specifies the role for the CloudWatch
+ Logs endpoint to assume to write to a user's log group.
"""
- params = {}
- if name is not None:
- params['Name'] = name
+ params = {'Name': name, }
if s3_bucket_name is not None:
params['S3BucketName'] = s3_bucket_name
if s3_key_prefix is not None:
@@ -322,8 +342,10 @@ class CloudTrailConnection(AWSQueryConnection):
params['SnsTopicName'] = sns_topic_name
if include_global_service_events is not None:
params['IncludeGlobalServiceEvents'] = include_global_service_events
- if trail is not None:
- params['trail'] = trail
+ if cloud_watch_logs_log_group_arn is not None:
+ params['CloudWatchLogsLogGroupArn'] = cloud_watch_logs_log_group_arn
+ if cloud_watch_logs_role_arn is not None:
+ params['CloudWatchLogsRoleArn'] = cloud_watch_logs_role_arn
return self.make_request(action='UpdateTrail',
body=json.dumps(params))
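
Putting the updated CloudTrail API together, a minimal sketch (bucket and
trail names are hypothetical; the lookup-attribute dict follows the
CloudTrail ``LookupAttributes`` shape)::

    >>> import boto.cloudtrail
    >>> conn = boto.cloudtrail.connect_to_region('us-east-1')
    # Name and S3 bucket are now required positional arguments.
    >>> conn.create_trail('my-trail', 'my-log-bucket')
    # Look up recent API activity for a single user.
    >>> events = conn.lookup_events(
    ...     lookup_attributes=[{'AttributeKey': 'Username',
    ...                         'AttributeValue': 'root'}],
    ...     max_results=10)
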
diff --git a/boto/configservice/__init__.py b/boto/configservice/__init__.py
index dc2e26a9..d18f8f8e 100644
--- a/boto/configservice/__init__.py
+++ b/boto/configservice/__init__.py
@@ -30,7 +30,7 @@ def regions():
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo`
"""
- from boto.kms.layer1 import ConfigServiceConnection
+ from boto.configservice.layer1 import ConfigServiceConnection
return get_regions('configservice', connection_cls=ConfigServiceConnection)
diff --git a/boto/connection.py b/boto/connection.py
index ae948096..e1a72c4c 100644
--- a/boto/connection.py
+++ b/boto/connection.py
@@ -381,7 +381,7 @@ class HTTPRequest(object):
if 'Content-Length' not in self.headers:
if 'Transfer-Encoding' not in self.headers or \
self.headers['Transfer-Encoding'] != 'chunked':
- self.headers['Content-Length'] = len(self.body)
+ self.headers['Content-Length'] = str(len(self.body))
class HTTPResponse(http_client.HTTPResponse):
diff --git a/boto/dynamodb2/table.py b/boto/dynamodb2/table.py
index d72604e3..d02ff5c7 100644
--- a/boto/dynamodb2/table.py
+++ b/boto/dynamodb2/table.py
@@ -137,9 +137,9 @@ class Table(object):
to define the key structure of the table.
**IMPORTANT** - You should consider the usage pattern of your table
- up-front, as the schema & indexes can **NOT** be modified once the
- table is created, requiring the creation of a new table & migrating
- the data should you wish to revise it.
+ up-front, as the schema can **NOT** be modified once the table is
+ created, requiring the creation of a new table & migrating the data
+ should you wish to revise it.
**IMPORTANT** - If the table already exists in DynamoDB, additional
calls to this method will result in an error. If you just need
@@ -371,25 +371,27 @@ class Table(object):
raw_indexes = result['Table'].get('LocalSecondaryIndexes', [])
self.indexes = self._introspect_indexes(raw_indexes)
- if not self.global_indexes:
- # Build the global index information as well.
- raw_global_indexes = result['Table'].get('GlobalSecondaryIndexes', [])
- self.global_indexes = self._introspect_global_indexes(raw_global_indexes)
+ # Build the global index information as well.
+ raw_global_indexes = result['Table'].get('GlobalSecondaryIndexes', [])
+ self.global_indexes = self._introspect_global_indexes(raw_global_indexes)
# This is leaky.
return result
- def update(self, throughput, global_indexes=None):
+ def update(self, throughput=None, global_indexes=None):
"""
- Updates table attributes in DynamoDB.
+ Updates table attributes and global indexes in DynamoDB.
- Currently, the only thing you can modify about a table after it has
- been created is the throughput.
-
- Requires a ``throughput`` parameter, which should be a
+ Optionally accepts a ``throughput`` parameter, which should be a
dictionary. If provided, it should specify a ``read`` & ``write`` key,
both of which should have an integer value associated with them.
+ Optionally accepts a ``global_indexes`` parameter, which should be a
+ dictionary mapping each index name to a dict containing ``read`` &
+ ``write`` keys, both of which should have an integer value associated
+ with them. If you are writing
+ new code, please use ``Table.update_global_secondary_index``.
+
Returns ``True`` on success.
Example::
@@ -413,13 +415,17 @@ class Table(object):
... }
... })
True
-
"""
- self.throughput = throughput
- data = {
- 'ReadCapacityUnits': int(self.throughput['read']),
- 'WriteCapacityUnits': int(self.throughput['write']),
- }
+
+ data = None
+
+ if throughput:
+ self.throughput = throughput
+ data = {
+ 'ReadCapacityUnits': int(self.throughput['read']),
+ 'WriteCapacityUnits': int(self.throughput['write']),
+ }
+
gsi_data = None
if global_indexes:
@@ -436,12 +442,170 @@ class Table(object):
},
})
- self.connection.update_table(
- self.table_name,
- provisioned_throughput=data,
- global_secondary_index_updates=gsi_data
- )
- return True
+ if throughput or global_indexes:
+ self.connection.update_table(
+ self.table_name,
+ provisioned_throughput=data,
+ global_secondary_index_updates=gsi_data,
+ )
+
+ return True
+ else:
+ msg = 'You need to provide either the throughput or the ' \
+ 'global_indexes to update method'
+ boto.log.error(msg)
+
+ return False
+
+ def create_global_secondary_index(self, global_index):
+ """
+ Creates a global index in DynamoDB after the table has been created.
+
+ Requires a ``global_index`` parameter, which should be a
+ ``GlobalBaseIndexField`` subclass representing the desired index.
+
+ To update ``global_indexes`` information on the ``Table``, you'll need
+ to call ``Table.describe``.
+
+ Returns ``True`` on success.
+
+ Example::
+
+ # To create a global index
+ >>> users.create_global_secondary_index(
+ ... global_index=GlobalAllIndex(
+ ... 'TheIndexNameHere', parts=[
+ ... HashKey('requiredHashkey', data_type=STRING),
+ ... RangeKey('optionalRangeKey', data_type=STRING)
+ ... ],
+ ... throughput={
+ ... 'read': 2,
+ ... 'write': 1,
+ ... })
+ ... )
+ True
+
+ """
+
+ if global_index:
+ gsi_data = []
+ gsi_data_attr_def = []
+
+ gsi_data.append({
+ "Create": global_index.schema()
+ })
+
+ for attr_def in global_index.parts:
+ gsi_data_attr_def.append(attr_def.definition())
+
+ self.connection.update_table(
+ self.table_name,
+ global_secondary_index_updates=gsi_data,
+ attribute_definitions=gsi_data_attr_def
+ )
+
+ return True
+ else:
+ msg = 'You need to provide the global_index to ' \
+ 'create_global_secondary_index method'
+ boto.log.error(msg)
+
+ return False
+
+ def delete_global_secondary_index(self, global_index_name):
+ """
+ Deletes a global index in DynamoDB after the table has been created.
+
+ Requires a ``global_index_name`` parameter, which should be a simple
+ string of the name of the global secondary index.
+
+ To update ``global_indexes`` information on the ``Table``, you'll need
+ to call ``Table.describe``.
+
+ Returns ``True`` on success.
+
+ Example::
+
+ # To delete a global index
+ >>> users.delete_global_secondary_index('TheIndexNameHere')
+ True
+
+ """
+
+ if global_index_name:
+ gsi_data = [
+ {
+ "Delete": {
+ "IndexName": global_index_name
+ }
+ }
+ ]
+
+ self.connection.update_table(
+ self.table_name,
+ global_secondary_index_updates=gsi_data,
+ )
+
+ return True
+ else:
+ msg = 'You need to provide the global index name to ' \
+ 'delete_global_secondary_index method'
+ boto.log.error(msg)
+
+ return False
+
+ def update_global_secondary_index(self, global_indexes):
+ """
+ Updates one or more global indexes in DynamoDB after the table has been created.
+
+ Requires a ``global_indexes`` parameter, which should be a
+ dictionary mapping each index name to a dict containing ``read`` &
+ ``write`` keys, both of which should have an integer value
+ associated with them.
+
+ To update ``global_indexes`` information on the ``Table``, you'll need
+ to call ``Table.describe``.
+
+ Returns ``True`` on success.
+
+ Example::
+
+ # To update a global index
+ >>> users.update_global_secondary_index(global_indexes={
+ ... 'TheIndexNameHere': {
+ ... 'read': 15,
+ ... 'write': 5,
+ ... }
+ ... })
+ True
+
+ """
+
+ if global_indexes:
+ gsi_data = []
+
+ for gsi_name, gsi_throughput in global_indexes.items():
+ gsi_data.append({
+ "Update": {
+ "IndexName": gsi_name,
+ "ProvisionedThroughput": {
+ "ReadCapacityUnits": int(gsi_throughput['read']),
+ "WriteCapacityUnits": int(gsi_throughput['write']),
+ },
+ },
+ })
+
+ self.connection.update_table(
+ self.table_name,
+ global_secondary_index_updates=gsi_data,
+ )
+ return True
+ else:
+ msg = 'You need to provide the global indexes to ' \
+ 'update_global_secondary_index method'
+ boto.log.error(msg)
+
+ return False
def delete(self):
"""
diff --git a/boto/ec2/autoscale/__init__.py b/boto/ec2/autoscale/__init__.py
index ea6c083c..02413d9b 100644
--- a/boto/ec2/autoscale/__init__.py
+++ b/boto/ec2/autoscale/__init__.py
@@ -192,6 +192,27 @@ class AutoScaleConnection(AWSQueryConnection):
self.build_list_params(params, instance_ids, 'InstanceIds')
return self.get_status('AttachInstances', params)
+ def detach_instances(self, name, instance_ids, decrement_capacity=True):
+ """
+ Detach instances from an Auto Scaling group.
+
+ :type name: str
+ :param name: The name of the Auto Scaling group from which to detach instances.
+
+ :type instance_ids: list
+ :param instance_ids: Instance ids to be detached from the Auto Scaling group.
+
+ :type decrement_capacity: bool
+ :param decrement_capacity: Whether to decrement the size of the
+ Auto Scaling group or not.
+ """
+
+ params = {'AutoScalingGroupName': name}
+ params['ShouldDecrementDesiredCapacity'] = 'true' if decrement_capacity else 'false'
+
+ self.build_list_params(params, instance_ids, 'InstanceIds')
+ return self.get_status('DetachInstances', params)
+
def create_auto_scaling_group(self, as_group):
"""
Create auto scaling group.
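
The new ``detach_instances`` call mirrors the existing ``attach_instances``.
A hedged example with made-up names::

    >>> import boto.ec2.autoscale
    >>> conn = boto.ec2.autoscale.connect_to_region('us-east-1')
    # Detach two instances and shrink the group's desired capacity to match.
    >>> conn.detach_instances('my-asg', ['i-12345678', 'i-87654321'],
    ...                       decrement_capacity=True)
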
diff --git a/boto/ec2/elb/__init__.py b/boto/ec2/elb/__init__.py
index a1d8f185..bae45cf4 100644
--- a/boto/ec2/elb/__init__.py
+++ b/boto/ec2/elb/__init__.py
@@ -695,9 +695,9 @@ class ELBConnection(AWSQueryConnection):
def apply_security_groups_to_lb(self, name, security_groups):
"""
- Applies security groups to the load balancer.
- Applying security groups that are already registered with the
- Load Balancer has no effect.
+ Associates one or more security groups with the load balancer.
+ The provided security groups will override any currently applied
+ security groups.
:type name: string
:param name: The name of the Load Balancer
diff --git a/boto/ec2/elb/loadbalancer.py b/boto/ec2/elb/loadbalancer.py
index 3a065cf3..50154494 100644
--- a/boto/ec2/elb/loadbalancer.py
+++ b/boto/ec2/elb/loadbalancer.py
@@ -404,9 +404,9 @@ class LoadBalancer(object):
def apply_security_groups(self, security_groups):
"""
- Applies security groups to the load balancer.
- Applying security groups that are already registered with the
- Load Balancer has no effect.
+ Associates one or more security groups with the load balancer.
+ The provided security groups will override any currently applied
+ security groups.
:type security_groups: string or List of strings
:param security_groups: The name of the security group(s) to add.
diff --git a/boto/endpoints.json b/boto/endpoints.json
index 535d7685..714a6acf 100644
--- a/boto/endpoints.json
+++ b/boto/endpoints.json
@@ -105,15 +105,17 @@
"us-east-1": "cognito-sync.us-east-1.amazonaws.com"
},
"configservice": {
- "us-east-1": "config.us-east-1.amazonaws.com"
+ "us-east-1": "config.us-east-1.amazonaws.com",
+ "us-west-2": "config.us-west-2.amazonaws.com",
+ "eu-west-1": "config.eu-west-1.amazonaws.com",
+ "ap-southeast-2": "config.ap-southeast-2.amazonaws.com"
},
"datapipeline": {
"us-east-1": "datapipeline.us-east-1.amazonaws.com",
"us-west-2": "datapipeline.us-west-2.amazonaws.com",
"eu-west-1": "datapipeline.eu-west-1.amazonaws.com",
"ap-southeast-2": "datapipeline.ap-southeast-2.amazonaws.com",
- "ap-northeast-1": "datapipeline.ap-northeast-1.amazonaws.com",
- "eu-central-1": "datapipeline.eu-central-1.amazonaws.com"
+ "ap-northeast-1": "datapipeline.ap-northeast-1.amazonaws.com"
},
"directconnect": {
"ap-northeast-1": "directconnect.ap-northeast-1.amazonaws.com",
@@ -221,13 +223,15 @@
"us-east-1": "glacier.us-east-1.amazonaws.com",
"us-west-1": "glacier.us-west-1.amazonaws.com",
"us-west-2": "glacier.us-west-2.amazonaws.com",
- "eu-central-1": "glacier.eu-central-1.amazonaws.com"
+ "eu-central-1": "glacier.eu-central-1.amazonaws.com",
+ "us-gov-west-1": "glacier.us-gov-west-1.amazonaws.com"
},
"iam": {
"ap-northeast-1": "iam.amazonaws.com",
"ap-southeast-1": "iam.amazonaws.com",
"ap-southeast-2": "iam.amazonaws.com",
"cn-north-1": "iam.cn-north-1.amazonaws.com.cn",
+ "eu-central-1": "iam.amazonaws.com",
"eu-west-1": "iam.amazonaws.com",
"sa-east-1": "iam.amazonaws.com",
"us-east-1": "iam.amazonaws.com",
@@ -301,6 +305,7 @@
"ap-northeast-1": "route53.amazonaws.com",
"ap-southeast-1": "route53.amazonaws.com",
"ap-southeast-2": "route53.amazonaws.com",
+ "eu-central-1": "route53.amazonaws.com",
"eu-west-1": "route53.amazonaws.com",
"sa-east-1": "route53.amazonaws.com",
"us-east-1": "route53.amazonaws.com",
diff --git a/boto/pyami/config.py b/boto/pyami/config.py
index 37445f85..a2194898 100644
--- a/boto/pyami/config.py
+++ b/boto/pyami/config.py
@@ -42,10 +42,10 @@ if 'BOTO_CONFIG' in os.environ:
BotoConfigLocations = [expanduser(os.environ['BOTO_CONFIG'])]
# If there's a BOTO_PATH variable set, we use anything there
-# as the current configuration locations, split with colons
+# as the current configuration locations, split with os.pathsep.
elif 'BOTO_PATH' in os.environ:
BotoConfigLocations = []
- for path in os.environ['BOTO_PATH'].split(":"):
+ for path in os.environ['BOTO_PATH'].split(os.pathsep):
BotoConfigLocations.append(expanduser(path))
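
A small sketch of what the ``os.pathsep`` change buys you (paths are
hypothetical)::

    >>> import os
    # On Windows os.pathsep is ';', so the drive-letter colon in 'C:'
    # is no longer mistaken for a separator; on POSIX it remains ':'.
    >>> os.environ['BOTO_PATH'] = os.pathsep.join(
    ...     [r'C:\boto\boto.cfg', r'C:\Users\me\.boto'])
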
diff --git a/boto/swf/layer2.py b/boto/swf/layer2.py
index 5ad1c8d3..b829810b 100644
--- a/boto/swf/layer2.py
+++ b/boto/swf/layer2.py
@@ -79,6 +79,7 @@ class Domain(SWFBase):
'aws_access_key_id': self.aws_access_key_id,
'aws_secret_access_key': self.aws_secret_access_key,
'domain': self.name,
+ 'region': self.region,
})
act_objects.append(ActivityType(**act_args))
return act_objects
@@ -96,6 +97,7 @@ class Domain(SWFBase):
'aws_access_key_id': self.aws_access_key_id,
'aws_secret_access_key': self.aws_secret_access_key,
'domain': self.name,
+ 'region': self.region,
})
wf_objects.append(WorkflowType(**wf_args))
@@ -128,6 +130,7 @@ class Domain(SWFBase):
'aws_access_key_id': self.aws_access_key_id,
'aws_secret_access_key': self.aws_secret_access_key,
'domain': self.name,
+ 'region': self.region,
})
exe_objects.append(WorkflowExecution(**exe_args))
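
With the region now propagated, objects listed from a ``Domain`` share its
region rather than falling back to the default. A minimal sketch, assuming a
domain registered in ``us-west-2``::

    >>> import boto.swf
    >>> from boto.swf.layer2 import Domain
    >>> region = next(r for r in boto.swf.regions() if r.name == 'us-west-2')
    >>> dom = Domain(name='my-domain', region=region)
    # Child objects now inherit the parent domain's region.
    >>> all(wf.region is dom.region for wf in dom.workflows())
    True
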
diff --git a/docs/source/dynamodb2_tut.rst b/docs/source/dynamodb2_tut.rst
index 449890c5..0e4b5b87 100644
--- a/docs/source/dynamodb2_tut.rst
+++ b/docs/source/dynamodb2_tut.rst
@@ -37,7 +37,11 @@ Creating a New Table
--------------------
To create a new table, you need to call ``Table.create`` & specify (at a
-minimum) both the table's name as well as the key schema for the table.
+minimum) both the table's name as well as the key schema for the table::
+
+ >>> from boto.dynamodb2.fields import HashKey
+ >>> from boto.dynamodb2.table import Table
+ >>> users = Table.create('users', schema=[HashKey('username')]);
Since both the key schema and local secondary indexes can not be
modified after the table is created, you'll need to plan ahead of time how you
@@ -60,37 +64,34 @@ that can be queried on. The ``AllIndex`` duplicates all values onto the index
duplicates only the keys from the schema onto the index. The ``IncludeIndex``
lets you specify a list of fieldnames to duplicate over.
-Simple example::
-
- >>> from boto.dynamodb2.fields import HashKey
- >>> from boto.dynamodb2.table import Table
-
- # Uses your ``aws_access_key_id`` & ``aws_secret_access_key`` from either a
- # config file or environment variable & the default region.
- >>> users = Table.create('users', schema=[
- ... HashKey('username'),
- ... ])
-
A full example::
>>> import boto.dynamodb2
- >>> from boto.dynamodb2.fields import HashKey, RangeKey, KeysOnlyIndex, AllIndex
+ >>> from boto.dynamodb2.fields import HashKey, RangeKey, KeysOnlyIndex, GlobalAllIndex
>>> from boto.dynamodb2.table import Table
>>> from boto.dynamodb2.types import NUMBER
+ # Uses your ``aws_access_key_id`` & ``aws_secret_access_key`` from either a
+ # config file or environment variable & the default region.
>>> users = Table.create('users', schema=[
- ... HashKey('account_type', data_type=NUMBER),
+ ... HashKey('username'), # defaults to STRING data_type
... RangeKey('last_name'),
... ], throughput={
... 'read': 5,
... 'write': 15,
- ... }, indexes=[
- ... AllIndex('EverythingIndex', parts=[
- ... HashKey('account_type', data_type=NUMBER),
- ... ])
+ ... }, global_indexes=[
+ ... GlobalAllIndex('EverythingIndex', parts=[
+ ... HashKey('account_type'),
+ ... ],
+ ... throughput={
+ ... 'read': 1,
+ ... 'write': 1,
+ ... })
... ],
- ... # If you need to specify custom parameters, such as credentials or region, use the following:
- ... Table.create('users', connection=boto.dynamodb2.connect_to_region('us-east-1'))
+ ... # If you need to specify custom parameters, such as credentials or region,
+ ... # use the following:
+ ... # connection=boto.dynamodb2.connect_to_region('us-east-1')
+ ... )
Using an Existing Table
@@ -108,15 +109,15 @@ Lazy example::
Efficient example::
- >>> from boto.dynamodb2.fields import HashKey, RangeKey, AllIndex
+ >>> from boto.dynamodb2.fields import HashKey, RangeKey, GlobalAllIndex
>>> from boto.dynamodb2.table import Table
>>> from boto.dynamodb2.types import NUMBER
>>> users = Table('users', schema=[
- ... HashKey('account_type', data_type=NUMBER),
+ ... HashKey('username'),
... RangeKey('last_name'),
- ... ], indexes=[
- ... AllIndex('EverythingIndex', parts=[
- ... HashKey('account_type', data_type=NUMBER),
+ ... ], global_indexes=[
+ ... GlobalAllIndex('EverythingIndex', parts=[
+ ... HashKey('account_type'),
... ])
... ])
@@ -142,6 +143,7 @@ Example::
... 'username': 'johndoe',
... 'first_name': 'John',
... 'last_name': 'Doe',
+ ... 'account_type': 'standard_user',
... })
True
@@ -156,13 +158,15 @@ Example::
>>> users = Table('users')
# WARNING - This doesn't save it yet!
- >>> johndoe = Item(users, data={
- ... 'username': 'johndoe',
- ... 'first_name': 'John',
+ >>> janedoe = Item(users, data={
+ ... 'username': 'janedoe',
+ ... 'first_name': 'Jane',
... 'last_name': 'Doe',
+ ... 'account_type': 'standard_user',
... })
+
# The data now gets persisted to the server.
- >>> johndoe.save()
+ >>> janedoe.save()
True
@@ -177,13 +181,11 @@ Example::
>>> from boto.dynamodb2.table import Table
>>> users = Table('users')
- >>> johndoe = users.get_item(username='johndoe')
+ >>> johndoe = users.get_item(username='johndoe', last_name='Doe')
Once you have an ``Item`` instance, it presents a dictionary-like interface to
the data.::
- >>> johndoe = users.get_item(username='johndoe')
-
# Read a field out.
>>> johndoe['first_name']
'John'
@@ -192,7 +194,7 @@ the data.::
>>> johndoe['first_name'] = 'Johann'
# Delete data from it (DOESN'T SAVE YET!).
- >>> del johndoe['last_name']
+ >>> del johndoe['account_type']
Updating an Item
@@ -207,10 +209,13 @@ since you read the data. DynamoDB will verify the data is in the original state
and, if so, will send all of the item's data. If that expectation fails, the
call will fail::
- >>> johndoe = users.get_item(username='johndoe')
+ >>> from boto.dynamodb2.table import Table
+ >>> users = Table('users')
+
+ >>> johndoe = users.get_item(username='johndoe', last_name='Doe')
>>> johndoe['first_name'] = 'Johann'
>>> johndoe['whatever'] = "man, that's just like your opinion"
- >>> del johndoe['last_name']
+ >>> del johndoe['account_type']
# Affects all fields, even the ones not changed locally.
>>> johndoe.save()
@@ -219,10 +224,9 @@ call will fail::
The second is a full overwrite. If you can be confident your version of the
data is the most correct, you can force an overwrite of the data.::
- >>> johndoe = users.get_item(username='johndoe')
+ >>> johndoe = users.get_item(username='johndoe', last_name='Doe')
>>> johndoe['first_name'] = 'Johann'
- >>> johndoe['whatever'] = "man, that's just like your opinion"
- >>> del johndoe['last_name']
+ >>> johndoe['whatever'] = "Man, that's just like your opinion"
# Specify ``overwrite=True`` to fully replace the data.
>>> johndoe.save(overwrite=True)
@@ -232,13 +236,13 @@ The last is a partial update. If you've only modified certain fields, you
can send a partial update that only writes those fields, allowing other
(potentially changed) fields to go untouched.::
- >>> johndoe = users.get_item(username='johndoe')
+ >>> johndoe = users.get_item(username='johndoe', last_name='Doe')
>>> johndoe['first_name'] = 'Johann'
>>> johndoe['whatever'] = "man, that's just like your opinion"
- >>> del johndoe['last_name']
+ >>> del johndoe['account_type']
# Partial update, only sending/affecting the
- # ``first_name/whatever/last_name`` fields.
+ # ``first_name/whatever/account_type`` fields.
>>> johndoe.partial_save()
True
@@ -261,7 +265,7 @@ If you don't have an ``Item`` instance & you don't want to incur the
>>> from boto.dynamodb2.table import Table
>>> users = Table('users')
- >>> users.delete_item(username='johndoe')
+ >>> users.delete_item(username='johndoe', last_name='Doe')
True
@@ -276,6 +280,7 @@ Batch writing involves wrapping the calls you want batched in a context manager.
The context manager imitates the ``Table.put_item`` & ``Table.delete_item``
APIs. Getting & using the context manager looks like::
+ >>> import time
>>> from boto.dynamodb2.table import Table
>>> users = Table('users')
@@ -287,11 +292,12 @@ APIs. Getting & using the context manager looks like::
... 'date_joined': int(time.time()),
... })
... batch.put_item(data={
- ... 'username': 'alice',
- ... 'first_name': 'Alice',
+ ... 'username': 'joebloggs',
+ ... 'first_name': 'Joe',
+ ... 'last_name': 'Bloggs',
... 'date_joined': int(time.time()),
... })
- ... batch.delete_item(username=jane')
+ ... batch.delete_item(username='janedoe', last_name='Doe')
However, there are some limitations on what you can do within the context
manager.
@@ -328,16 +334,59 @@ from the operator being used to filter the value.
In terms of querying, our original schema is less than optimal. For the
following examples, we'll be using the following table setup::
- >>> users = Table.create('users', schema=[
+ >>> from boto.dynamodb2.fields import HashKey, RangeKey, GlobalAllIndex
+ >>> from boto.dynamodb2.table import Table
+ >>> from boto.dynamodb2.types import NUMBER
+ >>> import time
+ >>> users = Table.create('users2', schema=[
... HashKey('account_type'),
... RangeKey('last_name'),
- ... ], indexes=[
- ... AllIndex('DateJoinedIndex', parts=[
+ ... ], throughput={
+ ... 'read': 5,
+ ... 'write': 15,
+ ... }, global_indexes=[
+ ... GlobalAllIndex('DateJoinedIndex', parts=[
... HashKey('account_type'),
... RangeKey('date_joined', data_type=NUMBER),
- ... ]),
+ ... ],
+ ... throughput={
+ ... 'read': 1,
+ ... 'write': 1,
+ ... }),
... ])
+And the following data::
+
+ >>> with users.batch_write() as batch:
+ ... batch.put_item(data={
+ ... 'account_type': 'standard_user',
+ ... 'first_name': 'John',
+ ... 'last_name': 'Doe',
+ ... 'is_owner': True,
+ ... 'email': True,
+ ... 'date_joined': int(time.time()) - (60*60*2),
+ ... })
+ ... batch.put_item(data={
+ ... 'account_type': 'standard_user',
+ ... 'first_name': 'Jane',
+ ... 'last_name': 'Doering',
+ ... 'date_joined': int(time.time()) - 2,
+ ... })
+ ... batch.put_item(data={
+ ... 'account_type': 'standard_user',
+ ... 'first_name': 'Bob',
+ ... 'last_name': 'Doerr',
+ ... 'date_joined': int(time.time()) - (60*60*3),
+ ... })
+ ... batch.put_item(data={
+ ... 'account_type': 'super_user',
+ ... 'first_name': 'Alice',
+ ... 'last_name': 'Liddel',
+ ... 'is_owner': True,
+ ... 'email': True,
+ ... 'date_joined': int(time.time()) - 1,
+ ... })
+
When executing the query, you get an iterable back that contains your results.
These results may be spread over multiple requests as DynamoDB paginates them.
This is done transparently, but you should be aware it may take more than one
@@ -352,9 +401,9 @@ To run a query for last names starting with the letter "D"::
>>> for user in names_with_d:
... print user['first_name']
- 'Bob'
- 'Jane'
'John'
+ 'Jane'
+ 'Bob'
You can also reverse results (``reverse=True``) as well as limiting them
(``limit=2``)::
@@ -368,11 +417,11 @@ You can also reverse results (``reverse=True``) as well as limiting them
>>> for user in rev_with_d:
... print user['first_name']
- 'John'
+ 'Bob'
'Jane'
You can also run queries against the local secondary indexes. Simply provide
-the index name (``index='FirstNameIndex'``) & filter parameters against its
+the index name (``index='DateJoinedIndex'``) & filter parameters against its
fields::
# Users within the last hour.
@@ -384,7 +433,6 @@ fields::
>>> for user in recent:
... print user['first_name']
- 'Alice'
'Jane'
By default, DynamoDB can return a large amount of data per-request (up to 1Mb
@@ -397,13 +445,15 @@ specify a smaller page size via the ``max_page_size`` argument to
>>> all_users = users.query_2(
... account_type__eq='standard_user',
... date_joined__gte=0,
+ ... index='DateJoinedIndex',
... max_page_size=10
... )
# Usage is the same, but now many smaller requests are done.
- >>> for user in recent:
+ >>> for user in all_users:
... print user['first_name']
- 'Alice'
+ 'Bob'
+ 'John'
'Jane'
Finally, if you need to query on data that's not in either a key or in an
@@ -413,8 +463,9 @@ concept, this is akin to what DynamoDB does.
.. warning::
- Scans are consistent & run over the entire table, so relatively speaking,
- they're more expensive than plain queries or queries against an LSI.
+ Scans are eventually consistent & run over the entire table, so
+ relatively speaking, they're more expensive than plain queries or queries
+ against an LSI.
An example scan of all records in the table looks like::
@@ -423,14 +474,14 @@ An example scan of all records in the table looks like::
Filtering a scan looks like::
>>> owners_with_emails = users.scan(
- ... is_owner__eq=1,
+ ... is_owner__eq=True,
... email__null=False,
... )
- >>> for user in recent:
+ >>> for user in owners_with_emails:
... print user['first_name']
- 'George'
'John'
+ 'Alice'
The ``ResultSet``
@@ -445,12 +496,16 @@ Typical use is simply a standard ``for`` to iterate over the results::
>>> result_set = users.scan()
>>> for user in result_set:
... print user['first_name']
+ 'John'
+ 'Jane'
+ 'Bob'
+ 'Alice'
However, this throws away results as it fetches more data. As a result, you
-can't index it like a ``list``.
+can't index it like a ``list``::
>>> len(result_set)
- 0
+ TypeError: object of type 'ResultSet' has no len()
Because it does this, if you need to loop over your results more than once (or
do things like negative indexing, length checks, etc.), you should wrap it in
@@ -461,6 +516,8 @@ a call to ``list()``. Ex.::
# Slice it for every other user.
>>> for user in all_users[::2]:
... print user['first_name']
+ 'John'
+ 'Bob'
.. warning::
@@ -585,25 +642,21 @@ response).
Example::
>>> from boto.dynamodb2.table import Table
- >>> users = Table('users')
+ >>> users = Table('users2')
# No request yet.
>>> many_users = users.batch_get(keys=[
- {'username': 'alice'},
- {'username': 'bob'},
- {'username': 'fred'},
- {'username': 'jane'},
- {'username': 'johndoe'},
- ])
+ ... {'account_type': 'standard_user', 'last_name': 'Doe'},
+ ... {'account_type': 'standard_user', 'last_name': 'Doering'},
+ ... {'account_type': 'super_user', 'last_name': 'Liddel'},
+ ... ])
# Now the request is performed, requesting all five in one request.
>>> for user in many_users:
... print user['first_name']
'Alice'
- 'Bobby'
- 'Fred'
- 'Jane'
'John'
+ 'Jane'
Deleting a Table
diff --git a/docs/source/releasenotes/v2.37.0.rst b/docs/source/releasenotes/v2.37.0.rst
new file mode 100644
index 00000000..33638f85
--- /dev/null
+++ b/docs/source/releasenotes/v2.37.0.rst
@@ -0,0 +1,38 @@
+boto v2.37.0
+============
+
+:date: 2015/04/02
+
+This release updates AWS CloudTrail to the latest API, adds new regional
+service endpoints and fixes bugs in several services.
+
+.. note::
+
+ The CloudTrail ``create_trail`` operation no longer supports the deprecated
+ ``trail`` parameter, which has been marked for removal by the service
+ since early 2014. Instead, you now pass each trail parameter as a
+ keyword argument. Please see the
+ `reference <http://boto.readthedocs.org/en/latest/ref/cloudtrail.html#boto.cloudtrail.layer1.CloudTrailConnection.create_trail>`__
+ to help port over existing code.
+
+
+Changes
+-------
+* Update AWS CloudTrail to the latest API. (:issue:`3074`, :sha:`bccc29a`)
+* Add support for UsePreviousValue to CloudFormation UpdateStack. (:issue:`3029`, :sha:`8a8a22a`)
+* Fix BOTO_PATH to work with Windows drives (:issue:`2823`, :sha:`7ba973e`)
+* Fix division calculation in S3 docs. (:issue:`3018`, :sha:`4ffd9ba`)
+* Add Boto 3 link in README. (:issue:`3013`, :sha:`561716c`)
+* Add more regions for configservice (:issue:`3009`, :sha:`a82244f`)
+* Add ``eu-central-1`` endpoints (Frankfurt region) for IAM and Route53 (:sha:`5ff4add`)
+* Fix unit tests from hanging (:sha:`da9f9b7`)
+* Fixed wording in dynamodb tutorial (:issue:`2993`, :sha:`36cadf4`)
+* Update SWF objects to keep a consistent region name. (:issue:`2985`, :issue:`2980`, :issue:`2606`, :sha:`ce75a19`)
+* Print archive ID in glacier upload script. (:issue:`2951`, :sha:`047c7d3`)
+* Add some minor documentation for Route53 tutorial. (:issue:`2952`, :sha:`b855fb3`)
+* Add Amazon DynamoDB online indexing support on High level API (:issue:`2925`, :sha:`0621c53`)
+* Ensure Content-Length header is a string. (:issue:`2932`, :sha:`34a0f63`)
+* Correct docs around overriding SGs on ELBs (:issue:`2937`, :sha:`84d0ff9`)
+* Fix DynamoDB tests. (:sha:`616ee80`)
+* Fix region bug. (:issue:`2927`, :sha:`b1cb61e`)
+* Fix import for ``boto.cloudhsm.layer1.CloudHSMConnection``. (:issue:`2926`, :sha:`1944d35`)
diff --git a/docs/source/route53_tut.rst b/docs/source/route53_tut.rst
index f24dfc28..12e0d659 100644
--- a/docs/source/route53_tut.rst
+++ b/docs/source/route53_tut.rst
@@ -85,3 +85,19 @@ You can call the API again and ask for the current status as follows:
When the status has changed to *INSYNC*, the change has been propagated to
remote servers
+Working with Change Sets
+------------------------
+
+You can also do bulk updates using ResourceRecordSets. For example, to update
+the TTL on every record in a zone:
+
+>>> zone = conn.get_zone('example.com')
+>>> change_set = boto.route53.record.ResourceRecordSets(conn, zone.id)
+>>> for rrset in conn.get_all_rrsets(zone.id):
+... u = change_set.add_change("UPSERT", rrset.name, rrset.type, ttl=3600)
+... u.add_value(rrset.resource_records[0])
+... results = change_set.commit()
+Done
+
+In this example we update the TTL to 1 hour (3600 seconds) for all records
+returned for example.com.
+Note: this will also change the SOA and NS records, which may not be ideal
+for many users.
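
If you'd rather leave the SOA and NS records alone, a small variation on the
loop above skips them (reusing ``conn`` and ``zone`` from the example)::

    >>> change_set = boto.route53.record.ResourceRecordSets(conn, zone.id)
    >>> for rrset in conn.get_all_rrsets(zone.id):
    ...     if rrset.type in ('SOA', 'NS'):
    ...         continue  # keep zone-level records untouched
    ...     u = change_set.add_change("UPSERT", rrset.name, rrset.type,
    ...                               ttl=3600)
    ...     u.add_value(rrset.resource_records[0])
    >>> results = change_set.commit()
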
diff --git a/docs/source/s3_tut.rst b/docs/source/s3_tut.rst
index 83093bfa..23e0350d 100644
--- a/docs/source/s3_tut.rst
+++ b/docs/source/s3_tut.rst
@@ -190,12 +190,12 @@ to be taken. The example below makes use of the FileChunkIO module, so
# Use a chunk size of 50 MiB (feel free to change this)
>>> chunk_size = 52428800
- >>> chunk_count = int(math.ceil(source_size / chunk_size))
+ >>> chunk_count = int(math.ceil(source_size / float(chunk_size)))
# Send the file parts, using FileChunkIO to create a file-like object
# that points to a certain byte range within the original file. We
# set bytes to never exceed the original file size.
- >>> for i in range(chunk_count + 1):
+ >>> for i in range(chunk_count):
>>> offset = chunk_size * i
>>> bytes = min(chunk_size, source_size - offset)
>>> with FileChunkIO(source_path, 'r', offset=offset,
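
As a quick check of the corrected arithmetic: a hypothetical 125 MiB file
with 50 MiB chunks needs ``ceil(125/50) = 3`` parts, but Python 2's integer
division truncated to 2 before ``math.ceil`` ever saw a fraction (the old
``range(chunk_count + 1)`` loop papered over that with an off-by-one)::

    >>> import math
    >>> source_size = 125 * 1024 * 1024   # 125 MiB
    >>> chunk_size = 52428800             # 50 MiB
    >>> int(math.ceil(source_size / float(chunk_size)))
    3
    >>> int(math.ceil(source_size / chunk_size))  # old Python 2 behaviour
    2
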
diff --git a/requirements.txt b/requirements.txt
index 22756525..c5d83577 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,7 +3,7 @@ requests>=1.2.3,<=2.0.1
rsa==3.1.4
simplejson==3.5.2
argparse==1.2.1
-httpretty>=0.7.0
+httpretty>=0.7.0,<=0.8.6
paramiko>=1.10.0
PyYAML>=3.10
coverage==3.7.1
diff --git a/tests/integration/cloudformation/test_connection.py b/tests/integration/cloudformation/test_connection.py
index 6529cc37..4cafb0fe 100644
--- a/tests/integration/cloudformation/test_connection.py
+++ b/tests/integration/cloudformation/test_connection.py
@@ -10,6 +10,14 @@ BASIC_EC2_TEMPLATE = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "AWS CloudFormation Sample Template EC2InstanceSample",
"Parameters": {
+ "Parameter1": {
+ "Description": "Test Parameter 1",
+ "Type": "String"
+ },
+ "Parameter2": {
+ "Description": "Test Parameter 2",
+ "Type": "String"
+ }
},
"Mappings": {
"RegionMap": {
@@ -32,7 +40,14 @@ BASIC_EC2_TEMPLATE = {
]
},
"UserData": {
- "Fn::Base64": "a" * 15000
+ "Fn::Base64": {
+ "Fn::Join":[
+ "",
+ [{"Ref": "Parameter1"},
+ {"Ref": "Parameter2"}]
+ ]
+ }
+
}
}
}
@@ -102,7 +117,9 @@ class TestCloudformationConnection(unittest.TestCase):
# See https://github.com/boto/boto/issues/1037
body = self.connection.create_stack(
self.stack_name,
- template_body=json.dumps(BASIC_EC2_TEMPLATE))
+ template_body=json.dumps(BASIC_EC2_TEMPLATE),
+ parameters=[('Parameter1', 'initial_value'),
+ ('Parameter2', 'initial_value')])
self.addCleanup(self.connection.delete_stack, self.stack_name)
# A newly created stack should have events
@@ -114,9 +131,39 @@ class TestCloudformationConnection(unittest.TestCase):
self.assertEqual(None, policy)
# Our new stack should show up in the stack list
- stacks = self.connection.describe_stacks()
- self.assertEqual(self.stack_name, stacks[0].stack_name)
-
+ stacks = self.connection.describe_stacks(self.stack_name)
+ stack = stacks[0]
+ self.assertEqual(self.stack_name, stack.stack_name)
+
+ params = [(p.key, p.value) for p in stack.parameters]
+ self.assertEquals([('Parameter1', 'initial_value'),
+ ('Parameter2', 'initial_value')], params)
+
+ for _ in range(30):
+ stack.update()
+ if stack.stack_status.find("PROGRESS") == -1:
+ break
+ time.sleep(5)
+
+ body = self.connection.update_stack(
+ self.stack_name,
+ template_body=json.dumps(BASIC_EC2_TEMPLATE),
+ parameters=[('Parameter1', '', True),
+ ('Parameter2', 'updated_value')])
+
+ stacks = self.connection.describe_stacks(self.stack_name)
+ stack = stacks[0]
+ params = [(p.key, p.value) for p in stacks[0].parameters]
+ self.assertEquals([('Parameter1', 'initial_value'),
+ ('Parameter2', 'updated_value')], params)
+ # Waiting for the update to complete to unblock the delete_stack in the
+ # cleanup.
+ for _ in range(30):
+ stack.update()
+ if stack.stack_status.find("PROGRESS") == -1:
+ break
+ time.sleep(5)
+
if __name__ == '__main__':
unittest.main()
diff --git a/tests/integration/configservice/test_configservice.py b/tests/integration/configservice/test_configservice.py
index 07f6472e..4a7e2dcf 100644
--- a/tests/integration/configservice/test_configservice.py
+++ b/tests/integration/configservice/test_configservice.py
@@ -33,7 +33,12 @@ class TestConfigService(unittest.TestCase):
response = self.configservice.describe_configuration_recorders()
self.assertIn('ConfigurationRecorders', response)
- def test_handle_(self):
+ def test_handle_no_such_configuration_recorder(self):
with self.assertRaises(NoSuchConfigurationRecorderException):
self.configservice.describe_configuration_recorders(
configuration_recorder_names=['non-existant-recorder'])
+
+ def test_connect_to_non_us_east_1(self):
+ self.configservice = boto.configservice.connect_to_region('us-west-2')
+ response = self.configservice.describe_configuration_recorders()
+ self.assertIn('ConfigurationRecorders', response)
diff --git a/tests/integration/dynamodb2/test_highlevel.py b/tests/integration/dynamodb2/test_highlevel.py
index f44de368..83340078 100644
--- a/tests/integration/dynamodb2/test_highlevel.py
+++ b/tests/integration/dynamodb2/test_highlevel.py
@@ -33,7 +33,7 @@ from boto.dynamodb2.fields import (HashKey, RangeKey, KeysOnlyIndex,
GlobalAllIndex)
from boto.dynamodb2.items import Item
from boto.dynamodb2.table import Table
-from boto.dynamodb2.types import NUMBER
+from boto.dynamodb2.types import NUMBER, STRING
try:
import json
@@ -716,3 +716,106 @@ class DynamoDBv2Test(unittest.TestCase):
for rs_item in rs:
self.assertEqual(rs_item['username'], ['johndoe'])
+
+ def test_update_table_online_indexing_support(self):
+ # Create a table using gsi to test the DynamoDB online indexing support
+ # https://github.com/boto/boto/pull/2925
+ users = Table.create('online_indexing_support_users', schema=[
+ HashKey('user_id')
+ ], throughput={
+ 'read': 5,
+ 'write': 5
+ }, global_indexes=[
+ GlobalAllIndex('EmailGSIIndex', parts=[
+ HashKey('email')
+ ], throughput={
+ 'read': 2,
+ 'write': 2
+ })
+ ])
+
+ # Add this function to be called after tearDown()
+ self.addCleanup(users.delete)
+
+ # Wait for it.
+ time.sleep(60)
+
+ # Fetch fresh table desc from DynamoDB
+ users.describe()
+
+ # Assert if everything is fine so far
+ self.assertEqual(len(users.global_indexes), 1)
+ self.assertEqual(users.global_indexes[0].throughput['read'], 2)
+ self.assertEqual(users.global_indexes[0].throughput['write'], 2)
+
+ # Update a GSI throughput. it should work.
+ users.update_global_secondary_index(global_indexes={
+ 'EmailGSIIndex': {
+ 'read': 2,
+ 'write': 1,
+ }
+ })
+
+ # Wait for it.
+ time.sleep(60)
+
+ # Fetch fresh table desc from DynamoDB
+ users.describe()
+
+        # Assert that everything is as expected so far
+ self.assertEqual(len(users.global_indexes), 1)
+ self.assertEqual(users.global_indexes[0].throughput['read'], 2)
+ self.assertEqual(users.global_indexes[0].throughput['write'], 1)
+
+        # Update the GSI throughput the old-fashioned way, kept for
+        # compatibility purposes; it should work.
+ users.update(global_indexes={
+ 'EmailGSIIndex': {
+ 'read': 3,
+ 'write': 2,
+ }
+ })
+
+ # Wait for it.
+ time.sleep(60)
+
+ # Fetch fresh table desc from DynamoDB
+ users.describe()
+
+        # Assert that everything is as expected so far
+ self.assertEqual(len(users.global_indexes), 1)
+ self.assertEqual(users.global_indexes[0].throughput['read'], 3)
+ self.assertEqual(users.global_indexes[0].throughput['write'], 2)
+
+        # Delete the GSI; it should work.
+ users.delete_global_secondary_index('EmailGSIIndex')
+
+ # Wait for it.
+ time.sleep(60)
+
+ # Fetch fresh table desc from DynamoDB
+ users.describe()
+
+        # Assert that everything is as expected so far
+ self.assertEqual(len(users.global_indexes), 0)
+
+        # Create a new GSI; it should work.
+ users.create_global_secondary_index(
+ global_index=GlobalAllIndex(
+ 'AddressGSIIndex', parts=[
+ HashKey('address', data_type=STRING)
+ ], throughput={
+ 'read': 1,
+ 'write': 1,
+ })
+ )
+        # Wait for it. This operation usually takes much longer than the others.
+ time.sleep(60*10)
+
+ # Fetch fresh table desc from DynamoDB
+ users.describe()
+
+        # Assert that everything is as expected so far
+ self.assertEqual(len(users.global_indexes), 1)
+ self.assertEqual(users.global_indexes[0].throughput['read'], 1)
+ self.assertEqual(users.global_indexes[0].throughput['write'], 1)
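
Taken together, the test walks the three new online-indexing helpers on
Table. A condensed sketch against a hypothetical existing table (each call is
applied asynchronously on the service side, hence the sleeps above):

    from boto.dynamodb2.fields import HashKey, GlobalAllIndex
    from boto.dynamodb2.table import Table
    from boto.dynamodb2.types import STRING

    users = Table('online_indexing_support_users')

    # Adjust the throughput of an existing GSI in place.
    users.update_global_secondary_index(global_indexes={
        'EmailGSIIndex': {'read': 2, 'write': 1},
    })

    # Drop a GSI by name.
    users.delete_global_secondary_index('EmailGSIIndex')

    # Add a brand-new GSI to the live table.
    users.create_global_secondary_index(
        global_index=GlobalAllIndex('AddressGSIIndex', parts=[
            HashKey('address', data_type=STRING)
        ], throughput={'read': 1, 'write': 1}))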
diff --git a/tests/unit/cloudformation/test_connection.py b/tests/unit/cloudformation/test_connection.py
index f4e23f37..613e3d2a 100644
--- a/tests/unit/cloudformation/test_connection.py
+++ b/tests/unit/cloudformation/test_connection.py
@@ -134,7 +134,9 @@ class TestCloudFormationUpdateStack(CloudFormationConnectionBase):
api_response = self.service_connection.update_stack(
'stack_name', template_url='http://url',
template_body=SAMPLE_TEMPLATE,
- parameters=[('KeyName', 'myKeyName')],
+ parameters=[('KeyName', 'myKeyName'), ('KeyName2', "", True),
+ ('KeyName3', "", False), ('KeyName4', None, True),
+ ('KeyName5', "Ignore Me", True)],
tags={'TagKey': 'TagValue'},
notification_arns=['arn:notify1', 'arn:notify2'],
disable_rollback=True,
@@ -149,6 +151,14 @@ class TestCloudFormationUpdateStack(CloudFormationConnectionBase):
'NotificationARNs.member.2': 'arn:notify2',
'Parameters.member.1.ParameterKey': 'KeyName',
'Parameters.member.1.ParameterValue': 'myKeyName',
+ 'Parameters.member.2.ParameterKey': 'KeyName2',
+ 'Parameters.member.2.UsePreviousValue': 'true',
+ 'Parameters.member.3.ParameterKey': 'KeyName3',
+ 'Parameters.member.3.ParameterValue': '',
+ 'Parameters.member.4.UsePreviousValue': 'true',
+ 'Parameters.member.4.ParameterKey': 'KeyName4',
+ 'Parameters.member.5.UsePreviousValue': 'true',
+ 'Parameters.member.5.ParameterKey': 'KeyName5',
'Tags.member.1.Key': 'TagKey',
'Tags.member.1.Value': 'TagValue',
'StackName': 'stack_name',
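
Note the serialization rule these assertions encode: when a parameter tuple's
third element is true, boto emits Parameters.member.N.UsePreviousValue=true
and omits ParameterValue entirely, so KeyName4's None and KeyName5's
'Ignore Me' never reach the wire; when it is false, as for KeyName3, the
literal value is sent, empty string included.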
diff --git a/tests/unit/cloudtrail/test_layer1.py b/tests/unit/cloudtrail/test_layer1.py
index 68b97bac..10f6f702 100644
--- a/tests/unit/cloudtrail/test_layer1.py
+++ b/tests/unit/cloudtrail/test_layer1.py
@@ -25,7 +25,7 @@ class TestDescribeTrails(AWSMockServiceTestCase):
def test_describe(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.describe_trails()
-
+
self.assertEqual(1, len(api_response['trailList']))
self.assertEqual('test', api_response['trailList'][0]['Name'])
@@ -38,7 +38,7 @@ class TestDescribeTrails(AWSMockServiceTestCase):
self.set_http_response(status_code=200)
api_response = self.service_connection.describe_trails(
trail_name_list=['test'])
-
+
self.assertEqual(1, len(api_response['trailList']))
self.assertEqual('test', api_response['trailList'][0]['Name'])
@@ -67,13 +67,15 @@ class TestCreateTrail(AWSMockServiceTestCase):
def test_create(self):
self.set_http_response(status_code=200)
- trail = {'Name': 'test', 'S3BucketName': 'cloudtrail-1',
- 'SnsTopicName': 'cloudtrail-1',
- 'IncludeGlobalServiceEvents': False}
+ api_response = self.service_connection.create_trail(
+ 'test', 'cloudtrail-1', sns_topic_name='cloudtrail-1',
+ include_global_service_events=False)
- api_response = self.service_connection.create_trail(trail=trail)
-
- self.assertEqual(trail, api_response['trail'])
+ self.assertEqual('test', api_response['trail']['Name'])
+ self.assertEqual('cloudtrail-1', api_response['trail']['S3BucketName'])
+ self.assertEqual('cloudtrail-1', api_response['trail']['SnsTopicName'])
+ self.assertEqual(False,
+ api_response['trail']['IncludeGlobalServiceEvents'])
target = self.actual_request.headers['X-Amz-Target']
self.assertTrue('CreateTrail' in target)
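
The rewritten test reflects create_trail's move from a single trail dict to
discrete arguments. A minimal sketch of the new call shape, mirroring the
test (the bucket and topic names are hypothetical):

    import boto.cloudtrail

    conn = boto.cloudtrail.connect_to_region('us-east-1')
    response = conn.create_trail(
        'test',          # trail name
        'cloudtrail-1',  # S3 bucket that receives the log files
        sns_topic_name='cloudtrail-1',
        include_global_service_events=False)
    print(response['trail']['Name'])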
diff --git a/tests/unit/dynamodb2/test_table.py b/tests/unit/dynamodb2/test_table.py
index eed71395..87bdbe48 100644
--- a/tests/unit/dynamodb2/test_table.py
+++ b/tests/unit/dynamodb2/test_table.py
@@ -1593,6 +1593,143 @@ class TableTestCase(unittest.TestCase):
}
})
+ def test_create_global_secondary_index(self):
+ with mock.patch.object(
+ self.users.connection,
+ 'update_table',
+ return_value={}) as mock_update:
+ self.users.create_global_secondary_index(
+ global_index=GlobalAllIndex(
+ 'JustCreatedIndex',
+ parts=[
+ HashKey('requiredHashKey')
+ ],
+ throughput={
+ 'read': 2,
+ 'write': 2
+ }
+ )
+ )
+
+ mock_update.assert_called_once_with(
+ 'users',
+ global_secondary_index_updates=[
+ {
+ 'Create': {
+ 'IndexName': 'JustCreatedIndex',
+ 'KeySchema': [
+ {
+ 'KeyType': 'HASH',
+ 'AttributeName': 'requiredHashKey'
+ }
+ ],
+ 'Projection': {
+ 'ProjectionType': 'ALL'
+ },
+ 'ProvisionedThroughput': {
+ 'WriteCapacityUnits': 2,
+ 'ReadCapacityUnits': 2
+ }
+ }
+ }
+ ],
+ attribute_definitions=[
+ {
+ 'AttributeName': 'requiredHashKey',
+ 'AttributeType': 'S'
+ }
+ ]
+ )
+
+ def test_delete_global_secondary_index(self):
+ with mock.patch.object(
+ self.users.connection,
+ 'update_table',
+ return_value={}) as mock_update:
+ self.users.delete_global_secondary_index('RandomGSIIndex')
+
+ mock_update.assert_called_once_with(
+ 'users',
+ global_secondary_index_updates=[
+ {
+ 'Delete': {
+ 'IndexName': 'RandomGSIIndex',
+ }
+ }
+ ]
+ )
+
+ def test_update_global_secondary_index(self):
+ # Updating a single global secondary index
+ with mock.patch.object(
+ self.users.connection,
+ 'update_table',
+ return_value={}) as mock_update:
+ self.users.update_global_secondary_index(global_indexes={
+ 'A_IndexToBeUpdated': {
+ 'read': 5,
+ 'write': 5
+ }
+ })
+
+ mock_update.assert_called_once_with(
+ 'users',
+ global_secondary_index_updates=[
+ {
+ 'Update': {
+ 'IndexName': 'A_IndexToBeUpdated',
+ "ProvisionedThroughput": {
+ "ReadCapacityUnits": 5,
+ "WriteCapacityUnits": 5
+ },
+ }
+ }
+ ]
+ )
+
+ # Updating multiple global secondary indexes
+ with mock.patch.object(
+ self.users.connection,
+ 'update_table',
+ return_value={}) as mock_update:
+ self.users.update_global_secondary_index(global_indexes={
+ 'A_IndexToBeUpdated': {
+ 'read': 5,
+ 'write': 5
+ },
+ 'B_IndexToBeUpdated': {
+ 'read': 9,
+ 'write': 9
+ }
+ })
+
+ args, kwargs = mock_update.call_args
+ self.assertEqual(args, ('users',))
+ update = kwargs['global_secondary_index_updates'][:]
+ update.sort(key=lambda x: x['Update']['IndexName'])
+ self.assertDictEqual(
+ update[0],
+ {
+ 'Update': {
+ 'IndexName': 'A_IndexToBeUpdated',
+ 'ProvisionedThroughput': {
+ 'WriteCapacityUnits': 5,
+ 'ReadCapacityUnits': 5
+ }
+ }
+ })
+ self.assertDictEqual(
+ update[1],
+ {
+ 'Update': {
+ 'IndexName': 'B_IndexToBeUpdated',
+ 'ProvisionedThroughput': {
+ 'WriteCapacityUnits': 9,
+ 'ReadCapacityUnits': 9
+ }
+ }
+ })
+
def test_delete(self):
with mock.patch.object(
self.users.connection,
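
For reference, the helpers above reduce to plain UpdateTable calls. This
sketch shows the equivalent layer-1 request the first test asserts (the
connection is obtained the usual way; table and index names are the test's):

    import boto.dynamodb2

    conn = boto.dynamodb2.connect_to_region('us-east-1')
    conn.update_table(
        'users',
        global_secondary_index_updates=[{
            'Create': {
                'IndexName': 'JustCreatedIndex',
                'KeySchema': [{'KeyType': 'HASH',
                               'AttributeName': 'requiredHashKey'}],
                'Projection': {'ProjectionType': 'ALL'},
                'ProvisionedThroughput': {'ReadCapacityUnits': 2,
                                          'WriteCapacityUnits': 2},
            }
        }],
        attribute_definitions=[{'AttributeName': 'requiredHashKey',
                                'AttributeType': 'S'}])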
diff --git a/tests/unit/ec2/autoscale/test_group.py b/tests/unit/ec2/autoscale/test_group.py
index 08d672bc..a5df4589 100644
--- a/tests/unit/ec2/autoscale/test_group.py
+++ b/tests/unit/ec2/autoscale/test_group.py
@@ -634,6 +634,69 @@ class TestAttachInstances(AWSMockServiceTestCase):
}, ignore_params_values=['Version'])
+class TestDetachInstances(AWSMockServiceTestCase):
+ connection_class = AutoScaleConnection
+
+ def setUp(self):
+ super(TestDetachInstances, self).setUp()
+
+ def default_body(self):
+ return b"""
+ <DetachInstancesResponse>
+ <ResponseMetadata>
+ <RequestId>requestid</RequestId>
+ </ResponseMetadata>
+ </DetachInstancesResponse>
+ """
+
+ def test_detach_instances(self):
+ self.set_http_response(status_code=200)
+ self.service_connection.detach_instances(
+ 'autoscale',
+ ['inst2', 'inst1', 'inst4']
+ )
+ self.assert_request_parameters({
+ 'Action': 'DetachInstances',
+ 'AutoScalingGroupName': 'autoscale',
+ 'InstanceIds.member.1': 'inst2',
+ 'InstanceIds.member.2': 'inst1',
+ 'InstanceIds.member.3': 'inst4',
+ 'ShouldDecrementDesiredCapacity': 'true',
+ }, ignore_params_values=['Version'])
+
+ def test_detach_instances_with_decrement_desired_capacity(self):
+ self.set_http_response(status_code=200)
+ self.service_connection.detach_instances(
+ 'autoscale',
+ ['inst2', 'inst1', 'inst4'],
+ True
+ )
+ self.assert_request_parameters({
+ 'Action': 'DetachInstances',
+ 'AutoScalingGroupName': 'autoscale',
+ 'InstanceIds.member.1': 'inst2',
+ 'InstanceIds.member.2': 'inst1',
+ 'InstanceIds.member.3': 'inst4',
+ 'ShouldDecrementDesiredCapacity': 'true',
+ }, ignore_params_values=['Version'])
+
+ def test_detach_instances_without_decrement_desired_capacity(self):
+ self.set_http_response(status_code=200)
+ self.service_connection.detach_instances(
+ 'autoscale',
+ ['inst2', 'inst1', 'inst4'],
+ False
+ )
+ self.assert_request_parameters({
+ 'Action': 'DetachInstances',
+ 'AutoScalingGroupName': 'autoscale',
+ 'InstanceIds.member.1': 'inst2',
+ 'InstanceIds.member.2': 'inst1',
+ 'InstanceIds.member.3': 'inst4',
+ 'ShouldDecrementDesiredCapacity': 'false',
+ }, ignore_params_values=['Version'])
+
+
class TestGetAccountLimits(AWSMockServiceTestCase):
connection_class = AutoScaleConnection
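
The three cases above pin down the third argument to detach_instances, which
controls ShouldDecrementDesiredCapacity and defaults to true. A minimal
sketch (group name and instance ids are hypothetical):

    import boto.ec2.autoscale

    conn = boto.ec2.autoscale.connect_to_region('us-east-1')

    # Default behavior: detach and decrement the group's desired capacity.
    conn.detach_instances('autoscale', ['inst1', 'inst2'])

    # Explicitly keep the desired capacity unchanged.
    conn.detach_instances('autoscale', ['inst1', 'inst2'], False)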
diff --git a/tests/unit/swf/test_layer2_domain.py b/tests/unit/swf/test_layer2_domain.py
index b56cb4b1..43efc8a2 100644
--- a/tests/unit/swf/test_layer2_domain.py
+++ b/tests/unit/swf/test_layer2_domain.py
@@ -11,6 +11,7 @@ class TestDomain(unittest.TestCase):
self.domain = Domain(name='test-domain', description='My test domain')
self.domain.aws_access_key_id = 'inheritable access key'
self.domain.aws_secret_access_key = 'inheritable secret key'
+ self.domain.region = 'test-region'
def test_domain_instantiation(self):
self.assertEquals('test-domain', self.domain.name)
@@ -47,6 +48,7 @@ class TestDomain(unittest.TestCase):
for activity_type in activity_types:
self.assertIsInstance(activity_type, ActivityType)
self.assertTrue(activity_type.name in expected_names)
+ self.assertEquals(self.domain.region, activity_type.region)
def test_domain_list_workflows(self):
self.domain._swf.list_workflow_types.return_value = {
@@ -68,6 +70,7 @@ class TestDomain(unittest.TestCase):
self.assertEquals(self.domain.aws_access_key_id, workflow_type.aws_access_key_id)
self.assertEquals(self.domain.aws_secret_access_key, workflow_type.aws_secret_access_key)
self.assertEquals(self.domain.name, workflow_type.domain)
+ self.assertEquals(self.domain.region, workflow_type.region)
def test_domain_list_executions(self):
self.domain._swf.list_open_workflow_executions.return_value = {
@@ -107,6 +110,7 @@ class TestDomain(unittest.TestCase):
self.assertEquals(self.domain.aws_access_key_id, wf_execution.aws_access_key_id)
self.assertEquals(self.domain.aws_secret_access_key, wf_execution.aws_secret_access_key)
self.assertEquals(self.domain.name, wf_execution.domain)
+ self.assertEquals(self.domain.region, wf_execution.region)
if __name__ == '__main__':
unittest.main()
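
The added assertions check that a Domain's region propagates to every object
it materializes. A sketch of the behavior under test, assuming the layer-2
listing helpers (region is set directly as an attribute, as in setUp above):

    from boto.swf.layer2 import Domain

    domain = Domain(name='test-domain', description='My test domain')
    domain.region = 'us-west-2'  # hypothetical region value

    # Workflow types listed through the domain inherit its region.
    for workflow_type in domain.workflows():
        assert workflow_type.region == domain.region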
diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py
index 1ad3df6e..69e8816e 100644
--- a/tests/unit/test_connection.py
+++ b/tests/unit/test_connection.py
@@ -524,6 +524,16 @@ class TestHTTPRequest(unittest.TestCase):
{'Some-Header': 'should%20be%20url%20encoded',
'User-Agent': UserAgent})
+ def test_content_length_str(self):
+ request = HTTPRequest('PUT', 'https', 'amazon.com', 443, None,
+ None, {}, {}, 'Body')
+ mock_connection = mock.Mock()
+ request.authorize(mock_connection)
+
+        # Ensure the Content-Length header is a str. This is more explicit than
+        # relying on other code to cast the value later. (Python 2.7.0, for
+ # example, assumes headers are of type str.)
+ self.assertIsInstance(request.headers['Content-Length'], str)
if __name__ == '__main__':
unittest.main()