summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.travis.yml2
-rw-r--r--README.rst6
-rw-r--r--boto/__init__.py2
-rw-r--r--boto/auth.py53
-rw-r--r--boto/connection.py3
-rw-r--r--boto/dynamodb/layer1.py3
-rw-r--r--boto/dynamodb2/layer1.py9
-rw-r--r--boto/dynamodb2/table.py60
-rw-r--r--boto/ec2/connection.py13
-rw-r--r--boto/ec2/ec2object.py23
-rw-r--r--boto/ec2/networkinterface.py64
-rw-r--r--boto/endpoints.json7
-rw-r--r--boto/iam/connection.py2
-rw-r--r--boto/provider.py2
-rw-r--r--boto/roboto/awsqueryservice.py2
-rw-r--r--boto/route53/connection.py14
-rw-r--r--boto/route53/healthcheck.py13
-rw-r--r--boto/route53/record.py14
-rw-r--r--boto/s3/acl.py9
-rw-r--r--boto/s3/bucket.py2
-rw-r--r--boto/s3/connection.py25
-rw-r--r--boto/utils.py42
-rw-r--r--docs/source/cloudwatch_tut.rst85
-rw-r--r--docs/source/index.rst1
-rw-r--r--docs/source/ref/route53.rst29
-rw-r--r--docs/source/releasenotes/v2.30.0.rst28
-rw-r--r--docs/source/s3_tut.rst55
-rw-r--r--docs/source/sqs_tut.rst44
-rw-r--r--docs/source/swf_tut.rst148
-rw-r--r--tests/integration/route53/test_health_check.py16
-rw-r--r--tests/unit/dynamodb2/test_table.py72
-rwxr-xr-xtests/unit/ec2/test_connection.py12
-rw-r--r--tests/unit/ec2/test_ec2object.py145
-rw-r--r--tests/unit/ec2/test_networkinterface.py74
-rw-r--r--tests/unit/provider/test_provider.py2
-rw-r--r--tests/unit/route53/test_connection.py113
-rw-r--r--tests/unit/s3/test_bucket.py32
-rw-r--r--tests/unit/s3/test_connection.py43
-rw-r--r--tests/unit/utils/test_utils.py29
39 files changed, 1167 insertions, 131 deletions
diff --git a/.travis.yml b/.travis.yml
index bd45e0c5..81717069 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -3,6 +3,8 @@ python:
- "2.6"
- "2.7"
before_install:
+ - sudo apt-get update
+ - sudo apt-get --reinstall install -qq language-pack-en language-pack-de
- sudo apt-get install swig
install: pip install --allow-all-external -r requirements.txt
script: python tests/test.py unit
diff --git a/README.rst b/README.rst
index 843a8a1d..d8fbc045 100644
--- a/README.rst
+++ b/README.rst
@@ -1,9 +1,9 @@
####
boto
####
-boto 2.29.1
+boto 2.30.0
-Released: 30-May-2014
+Released: 1-Jul-2014
.. image:: https://travis-ci.org/boto/boto.png?branch=develop
:target: https://travis-ci.org/boto/boto
@@ -60,7 +60,7 @@ At the moment, boto supports:
* Monitoring
- * Amazon CloudWatch
+ * Amazon CloudWatch (EC2 Only)
* Networking
diff --git a/boto/__init__.py b/boto/__init__.py
index de578ab1..1505bc41 100644
--- a/boto/__init__.py
+++ b/boto/__init__.py
@@ -37,7 +37,7 @@ import logging.config
import urlparse
from boto.exception import InvalidUriError
-__version__ = '2.29.1'
+__version__ = '2.30.0'
Version = __version__ # for backware compatibility
# http://bugs.python.org/issue7980
diff --git a/boto/auth.py b/boto/auth.py
index 2f6c873b..d9f5c1a6 100644
--- a/boto/auth.py
+++ b/boto/auth.py
@@ -602,6 +602,11 @@ class S3HmacAuthV4Handler(HmacAuthV4Handler, AuthHandler):
if part == 's3':
# If it's by itself, the region is the previous part.
region_name = parts[-offset]
+
+ # Unless it's Vhosted classic
+ if region_name == 'amazonaws':
+ region_name = 'us-east-1'
+
break
elif part.startswith('s3-'):
region_name = self.clean_region_name(part)
@@ -666,6 +671,54 @@ class S3HmacAuthV4Handler(HmacAuthV4Handler, AuthHandler):
req = self.mangle_path_and_params(req)
return super(S3HmacAuthV4Handler, self).add_auth(req, **kwargs)
+ def presign(self, req, expires, iso_date=None):
+ """
+ Presign a request using SigV4 query params. Takes in an HTTP request
+ and an expiration time in seconds and returns a URL.
+
+ http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
+ """
+ if iso_date is None:
+ iso_date = datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%SZ')
+
+ region = self.determine_region_name(req.host)
+ service = self.determine_service_name(req.host)
+
+ params = {
+ 'X-Amz-Algorithm': 'AWS4-HMAC-SHA256',
+ 'X-Amz-Credential': '%s/%s/%s/%s/aws4_request' % (
+ self._provider.access_key,
+ iso_date[:8],
+ region,
+ service
+ ),
+ 'X-Amz-Date': iso_date,
+ 'X-Amz-Expires': expires,
+ 'X-Amz-SignedHeaders': 'host'
+ }
+
+ if self._provider.security_token:
+ params['X-Amz-Security-Token'] = self._provider.security_token
+
+ req.params.update(params)
+
+ cr = self.canonical_request(req)
+
+ # We need to replace the payload SHA with a constant
+ cr = '\n'.join(cr.split('\n')[:-1]) + '\nUNSIGNED-PAYLOAD'
+
+ # Date header is expected for string_to_sign, but unused otherwise
+ req.headers['X-Amz-Date'] = iso_date
+
+ sts = self.string_to_sign(req, cr)
+ signature = self.signature(req, sts)
+
+ # Add signature to params now that we have it
+ req.params['X-Amz-Signature'] = signature
+
+ return 'https://%s%s?%s' % (req.host, req.path,
+ urllib.urlencode(req.params))
+
class QueryAuthHandler(AuthHandler):
"""
diff --git a/boto/connection.py b/boto/connection.py
index 051007df..ebe26cdc 100644
--- a/boto/connection.py
+++ b/boto/connection.py
@@ -901,7 +901,8 @@ class AWSAuthConnection(object):
self.is_secure)
while i <= num_retries:
# Use binary exponential backoff to desynchronize client requests.
- next_sleep = random.random() * (2 ** i)
+ next_sleep = min(random.random() * (2 ** i),
+ boto.config.get('Boto', 'max_retry_delay', 60))
try:
# we now re-sign each request before it is retried
boto.log.debug('Token: %s' % self.provider.security_token)
diff --git a/boto/dynamodb/layer1.py b/boto/dynamodb/layer1.py
index 317cf433..01a49020 100644
--- a/boto/dynamodb/layer1.py
+++ b/boto/dynamodb/layer1.py
@@ -173,7 +173,8 @@ class Layer1(AWSAuthConnection):
if i == 0:
next_sleep = 0
else:
- next_sleep = 0.05 * (2 ** i)
+ next_sleep = min(0.05 * (2 ** i),
+ boto.config.get('Boto', 'max_retry_delay', 60))
return next_sleep
def list_tables(self, limit=None, start_table=None):
diff --git a/boto/dynamodb2/layer1.py b/boto/dynamodb2/layer1.py
index 9a1c4adf..360c7d08 100644
--- a/boto/dynamodb2/layer1.py
+++ b/boto/dynamodb2/layer1.py
@@ -2123,7 +2123,7 @@ class DynamoDBConnection(AWSQueryConnection):
'ProvisionedThroughputExceededException',
i
)
- next_sleep = self._exponential_time(i)
+ next_sleep = self._truncated_exponential_time(i)
i += 1
status = (msg, i, next_sleep)
if i == self.NumberRetries:
@@ -2150,12 +2150,13 @@ class DynamoDBConnection(AWSQueryConnection):
if actual_crc32 != expected_crc32:
msg = ("The calculated checksum %s did not match the expected "
"checksum %s" % (actual_crc32, expected_crc32))
- status = (msg, i + 1, self._exponential_time(i))
+ status = (msg, i + 1, self._truncated_exponential_time(i))
return status
- def _exponential_time(self, i):
+ def _truncated_exponential_time(self, i):
if i == 0:
next_sleep = 0
else:
- next_sleep = 0.05 * (2 ** i)
+ next_sleep = min(0.05 * (2 ** i),
+ boto.config.get('Boto', 'max_retry_delay', 60))
return next_sleep
diff --git a/boto/dynamodb2/table.py b/boto/dynamodb2/table.py
index 37833dd9..66e6337d 100644
--- a/boto/dynamodb2/table.py
+++ b/boto/dynamodb2/table.py
@@ -648,16 +648,35 @@ class Table(object):
self.connection.update_item(self.table_name, raw_key, item_data, **kwargs)
return True
- def delete_item(self, **kwargs):
+ def delete_item(self, expected=None, conditional_operator=None, **kwargs):
"""
- Deletes an item in DynamoDB.
+ Deletes a single item. You can perform a conditional delete operation
+ that deletes the item if it exists, or if it has an expected attribute
+ value.
+
+ Conditional deletes are useful for only deleting items if specific
+ conditions are met. If those conditions are met, DynamoDB performs
+ the delete. Otherwise, the item is not deleted.
+
+ To specify the expected attribute values of the item, you can pass a
+ dictionary of conditions to ``expected``. Each condition should follow
+ the pattern ``<attributename>__<comparison_operator>=<value_to_expect>``.
**IMPORTANT** - Be careful when using this method, there is no undo.
To specify the key of the item you'd like to get, you can specify the
key attributes as kwargs.
- Returns ``True`` on success.
+ Optionally accepts an ``expected`` parameter which is a dictionary of
+ expected attribute value conditions.
+
+ Optionally accepts a ``conditional_operator`` which applies to the
+ expected attribute value conditions:
+
+ + `AND` - If all of the conditions evaluate to true (default)
+ + `OR` - True if at least one condition evaluates to true
+
+ Returns ``True`` on success, ``False`` on failed conditional delete.
Example::
@@ -676,9 +695,21 @@ class Table(object):
... })
True
+ # Conditional delete
+ >>> users.delete_item(username='johndoe',
+ ... expected={'balance__eq': 0})
+ True
"""
+ expected = self._build_filters(expected, using=FILTER_OPERATORS)
raw_key = self._encode_keys(kwargs)
- self.connection.delete_item(self.table_name, raw_key)
+
+ try:
+ self.connection.delete_item(self.table_name, raw_key,
+ expected=expected,
+ conditional_operator=conditional_operator)
+ except exceptions.ConditionalCheckFailedException:
+ return False
+
return True
def get_key_fields(self):
@@ -969,7 +1000,8 @@ class Table(object):
return results
def query_count(self, index=None, consistent=False, conditional_operator=None,
- query_filter=None, **filter_kwargs):
+ query_filter=None, scan_index_forward=True, limit=None,
+ **filter_kwargs):
"""
Queries the exact count of matching items in a DynamoDB table.
@@ -1003,6 +1035,22 @@ class Table(object):
Returns an integer which represents the exact amount of matched
items.
+ :type scan_index_forward: boolean
+ :param scan_index_forward: Specifies ascending (true) or descending
+ (false) traversal of the index. DynamoDB returns results reflecting
+ the requested order determined by the range key. If the data type
+ is Number, the results are returned in numeric order. For String,
+ the results are returned in order of ASCII character code values.
+ For Binary, DynamoDB treats each byte of the binary data as
+ unsigned when it compares binary values.
+
+ If ScanIndexForward is not specified, the results are returned in
+ ascending order.
+
+ :type limit: integer
+ :param limit: The maximum number of items to evaluate (not necessarily
+ the number of matching items).
+
Example::
# Look for last names equal to "Doe".
@@ -1037,6 +1085,8 @@ class Table(object):
key_conditions=key_conditions,
query_filter=built_query_filter,
conditional_operator=conditional_operator,
+ limit=limit,
+ scan_index_forward=scan_index_forward,
)
return int(raw_results.get('Count', 0))
diff --git a/boto/ec2/connection.py b/boto/ec2/connection.py
index 36db8aab..6cb108f9 100644
--- a/boto/ec2/connection.py
+++ b/boto/ec2/connection.py
@@ -798,6 +798,9 @@ class EC2Connection(AWSQueryConnection):
* i2.2xlarge
* i2.4xlarge
* i2.8xlarge
+ * t2.micro
+ * t2.small
+ * t2.medium
:type placement: string
:param placement: The Availability Zone to launch the instance into.
@@ -1493,6 +1496,9 @@ class EC2Connection(AWSQueryConnection):
* i2.2xlarge
* i2.4xlarge
* i2.8xlarge
+ * t2.micro
+ * t2.small
+ * t2.medium
:type placement: string
:param placement: The availability zone in which to launch
@@ -4213,11 +4219,14 @@ class EC2Connection(AWSQueryConnection):
# Network Interface methods
- def get_all_network_interfaces(self, filters=None, dry_run=False):
+ def get_all_network_interfaces(self, network_interface_ids=None, filters=None, dry_run=False):
"""
Retrieve all of the Elastic Network Interfaces (ENI's)
associated with your account.
+ :type network_interface_ids: list
+ :param network_interface_ids: a list of strings representing ENI IDs
+
:type filters: dict
:param filters: Optional filters that can be used to limit
the results returned. Filters are provided
@@ -4235,6 +4244,8 @@ class EC2Connection(AWSQueryConnection):
:return: A list of :class:`boto.ec2.networkinterface.NetworkInterface`
"""
params = {}
+ if network_interface_ids:
+ self.build_list_params(params, network_interface_ids, 'NetworkInterfaceId')
if filters:
self.build_filter_params(params, filters)
if dry_run:
diff --git a/boto/ec2/ec2object.py b/boto/ec2/ec2object.py
index f697e664..383602e5 100644
--- a/boto/ec2/ec2object.py
+++ b/boto/ec2/ec2object.py
@@ -85,6 +85,27 @@ class TaggedEC2Object(EC2Object):
self.tags = TagSet()
self.tags[key] = value
+ def add_tags(self, tags, dry_run=False):
+ """
+ Add tags to this object. Tags are stored by AWS and can be used
+ to organize and filter resources. Adding tags involves a round-trip
+ to the EC2 service.
+
+ :type tags: dict
+ :param tags: A dictionary of key-value pairs for the tags being stored.
+ If for some tags you want only the name and no value, the
+ corresponding value for that tag name should be an empty
+ string.
+ """
+ status = self.connection.create_tags(
+ [self.id],
+ tags,
+ dry_run=dry_run
+ )
+ if self.tags is None:
+ self.tags = TagSet()
+ self.tags.update(tags)
+
def remove_tag(self, key, value=None, dry_run=False):
"""
Remove a tag from this object. Removing a tag involves a round-trip
@@ -102,7 +123,7 @@ class TaggedEC2Object(EC2Object):
NOTE: There is an important distinction between
a value of '' and a value of None.
"""
- if value:
+ if value is not None:
tags = {key : value}
else:
tags = [key]
diff --git a/boto/ec2/networkinterface.py b/boto/ec2/networkinterface.py
index b786edb6..6596439e 100644
--- a/boto/ec2/networkinterface.py
+++ b/boto/ec2/networkinterface.py
@@ -167,6 +167,70 @@ class NetworkInterface(TaggedEC2Object):
else:
setattr(self, name, value)
+ def _update(self, updated):
+ self.__dict__.update(updated.__dict__)
+
+ def update(self, validate=False, dry_run=False):
+ """
+ Update the data associated with this ENI by querying EC2.
+
+ :type validate: bool
+ :param validate: By default, if EC2 returns no data about the
+ ENI the update method returns quietly. If
+ the validate param is True, however, it will
+ raise a ValueError exception if no data is
+ returned from EC2.
+ """
+ rs = self.connection.get_all_network_interfaces(
+ [self.id],
+ dry_run=dry_run
+ )
+ if len(rs) > 0:
+ self._update(rs[0])
+ elif validate:
+ raise ValueError('%s is not a valid ENI ID' % self.id)
+ return self.status
+
+ def attach(self, instance_id, device_index, dry_run=False):
+ """
+ Attach this ENI to an EC2 instance.
+
+ :type instance_id: str
+ :param instance_id: The ID of the EC2 instance to which it will
+ be attached.
+
+ :type device_index: int
+ :param device_index: The interface number, N, on the instance (e.g. ethN)
+
+ :rtype: bool
+ :return: True if successful
+ """
+ return self.connection.attach_network_interface(
+ self.id,
+ instance_id,
+ device_index,
+ dry_run=dry_run
+ )
+
+ def detach(self, force=False, dry_run=False):
+ """
+ Detach this ENI from an EC2 instance.
+
+ :type force: bool
+ :param force: Forces detachment if the previous detachment
+ attempt did not occur cleanly.
+
+ :rtype: bool
+ :return: True if successful
+ """
+ attachment_id = getattr(self.attachment, 'id', None)
+
+ return self.connection.detach_network_interface(
+ attachment_id,
+ force,
+ dry_run=dry_run
+ )
+
def delete(self, dry_run=False):
return self.connection.delete_network_interface(
self.id,
diff --git a/boto/endpoints.json b/boto/endpoints.json
index a4681e08..27b50762 100644
--- a/boto/endpoints.json
+++ b/boto/endpoints.json
@@ -41,8 +41,11 @@
"us-west-2": "cloudsearch.us-west-2.amazonaws.com"
},
"cloudtrail": {
+ "ap-northeast-1": "cloudtrail.ap-northeast-1.amazonaws.com",
+ "ap-southeast-1": "cloudtrail.ap-southeast-1.amazonaws.com",
"ap-southeast-2": "cloudtrail.ap-southeast-2.amazonaws.com",
"eu-west-1": "cloudtrail.eu-west-1.amazonaws.com",
+ "sa-east-1": "cloudtrail.sa-east-1.amazonaws.com",
"us-east-1": "cloudtrail.us-east-1.amazonaws.com",
"us-west-1": "cloudtrail.us-west-1.amazonaws.com",
"us-west-2": "cloudtrail.us-west-2.amazonaws.com"
@@ -185,7 +188,9 @@
"us-west-2": "importexport.amazonaws.com"
},
"kinesis": {
- "us-east-1": "kinesis.us-east-1.amazonaws.com"
+ "us-east-1": "kinesis.us-east-1.amazonaws.com",
+ "us-west-2": "kinesis.us-west-2.amazonaws.com",
+ "eu-west-1": "kinesis.eu-west-1.amazonaws.com"
},
"opsworks": {
"us-east-1": "opsworks.us-east-1.amazonaws.com"
diff --git a/boto/iam/connection.py b/boto/iam/connection.py
index c6ee3007..bbb0afd9 100644
--- a/boto/iam/connection.py
+++ b/boto/iam/connection.py
@@ -1142,7 +1142,7 @@ class IAMConnection(AWSQueryConnection):
permission to assume the role.
:type path: string
- :param path: The path to the instance profile.
+ :param path: The path to the role.
"""
params = {
'RoleName': role_name,
diff --git a/boto/provider.py b/boto/provider.py
index 148d9404..70693965 100644
--- a/boto/provider.py
+++ b/boto/provider.py
@@ -191,7 +191,7 @@ class Provider(object):
# Load shared credentials file if it exists
shared_path = os.path.join(expanduser('~'), '.' + name, 'credentials')
self.shared_credentials = Config(do_load=False)
- if os.path.exists(shared_path):
+ if os.path.isfile(shared_path):
self.shared_credentials.load_from_path(shared_path)
self.get_credentials(access_key, secret_key, security_token, profile_name)
diff --git a/boto/roboto/awsqueryservice.py b/boto/roboto/awsqueryservice.py
index cb3a21d0..ab9492f4 100644
--- a/boto/roboto/awsqueryservice.py
+++ b/boto/roboto/awsqueryservice.py
@@ -48,7 +48,7 @@ class AWSQueryService(boto.connection.AWSQueryConnection):
def check_for_credential_file(self):
"""
- Checks for the existance of an AWS credential file.
+ Checks for the existence of an AWS credential file.
If the environment variable AWS_CREDENTIAL_FILE is
set and points to a file, that file will be read and
will be searched credentials.
diff --git a/boto/route53/connection.py b/boto/route53/connection.py
index 7f45c778..522dd09c 100644
--- a/boto/route53/connection.py
+++ b/boto/route53/connection.py
@@ -213,6 +213,13 @@ class Route53Connection(AWSAuthConnection):
body)
def delete_hosted_zone(self, hosted_zone_id):
+ """
+ Delete the hosted zone specified by the given id.
+
+ :type hosted_zone_id: str
+ :param hosted_zone_id: The hosted zone's id
+
+ """
uri = '/%s/hostedzone/%s' % (self.Version, hosted_zone_id)
response = self.make_request('DELETE', uri)
body = response.read()
@@ -480,6 +487,10 @@ class Route53Connection(AWSAuthConnection):
"""
Returns a list of Zone objects, one for each of the Hosted
Zones defined for the AWS account.
+
+ :rtype: list
+ :returns: A list of Zone objects.
+
"""
zones = self.get_all_hosted_zones()
return [Zone(self, zone) for zone in
@@ -519,7 +530,8 @@ class Route53Connection(AWSAuthConnection):
'PriorRequestNotComplete',
i
)
- next_sleep = random.random() * (2 ** i)
+ next_sleep = min(random.random() * (2 ** i),
+ boto.config.get('Boto', 'max_retry_delay', 60))
i += 1
status = (msg, i, next_sleep)
diff --git a/boto/route53/healthcheck.py b/boto/route53/healthcheck.py
index 9f112b7d..85d6919b 100644
--- a/boto/route53/healthcheck.py
+++ b/boto/route53/healthcheck.py
@@ -53,7 +53,7 @@ class HealthCheck(object):
POSTXMLBody = """
<HealthCheckConfig>
- <IPAddress>%(ip_addr)s</IPAddress>
+ %(ip_addr_part)s
<Port>%(port)s</Port>
<Type>%(type)s</Type>
<ResourcePath>%(resource_path)s</ResourcePath>
@@ -64,6 +64,8 @@ class HealthCheck(object):
</HealthCheckConfig>
"""
+ XMLIpAddrPart = """<IPAddress>%(ip_addr)s</IPAddress>"""
+
XMLFQDNPart = """<FullyQualifiedDomainName>%(fqdn)s</FullyQualifiedDomainName>"""
XMLStringMatchPart = """<SearchString>%(string_match)s</SearchString>"""
@@ -77,13 +79,13 @@ class HealthCheck(object):
HealthCheck object
:type ip_addr: str
- :param ip_addr: IP Address
+ :param ip_addr: Optional IP Address
:type port: int
:param port: Port to check
:type hc_type: str
- :param ip_addr: One of HTTP | HTTPS | HTTP_STR_MATCH | HTTPS_STR_MATCH | TCP
+ :param hc_type: One of HTTP | HTTPS | HTTP_STR_MATCH | HTTPS_STR_MATCH | TCP
:type resource_path: str
:param resource_path: Path to check
@@ -122,7 +124,7 @@ class HealthCheck(object):
def to_xml(self):
params = {
- 'ip_addr': self.ip_addr,
+ 'ip_addr_part': '',
'port': self.port,
'type': self.hc_type,
'resource_path': self.resource_path,
@@ -135,6 +137,9 @@ class HealthCheck(object):
if self.fqdn is not None:
params['fqdn_part'] = self.XMLFQDNPart % {'fqdn': self.fqdn}
+ if self.ip_addr:
+ params['ip_addr_part'] = self.XMLIpAddrPart % {'ip_addr': self.ip_addr}
+
if self.string_match is not None:
params['string_match_part'] = self.XMLStringMatchPart % {'string_match' : self.string_match}
diff --git a/boto/route53/record.py b/boto/route53/record.py
index a2dbe089..664739b8 100644
--- a/boto/route53/record.py
+++ b/boto/route53/record.py
@@ -122,16 +122,16 @@ class ResourceRecordSets(ResultSet):
:type alias_evaluate_target_health: Boolean
:param alias_evaluate_target_health: *Required for alias resource record sets* Indicates
- whether this Resource Record Set should respect the health status of
- any health checks associated with the ALIAS target record which it is
- linked to.
+ whether this Resource Record Set should respect the health status of
+ any health checks associated with the ALIAS target record which it is
+ linked to.
:type health_check: str
:param health_check: Health check to associate with this record
-
+
:type failover: str
:param failover: *Failover resource record sets only* Whether this is the
- primary or secondary resource record set.
+ primary or secondary resource record set.
"""
change = Record(name, type, ttl,
alias_hosted_zone_id=alias_hosted_zone_id,
@@ -213,7 +213,7 @@ class Record(object):
<SetIdentifier>%(identifier)s</SetIdentifier>
<Region>%(region)s</Region>
"""
-
+
FailoverBody = """
<SetIdentifier>%(identifier)s</SetIdentifier>
<Failover>%(failover)s</Failover>
@@ -363,6 +363,8 @@ class Record(object):
self.region = value
elif name == 'Failover':
self.failover = value
+ elif name == 'HealthCheckId':
+ self.health_check = value
def startElement(self, name, attrs, connection):
return None
diff --git a/boto/s3/acl.py b/boto/s3/acl.py
index c54ddc62..51613883 100644
--- a/boto/s3/acl.py
+++ b/boto/s3/acl.py
@@ -32,6 +32,7 @@ class Policy(object):
def __init__(self, parent=None):
self.parent = parent
+ self.namespace = None
self.acl = None
def __repr__(self):
@@ -50,6 +51,9 @@ class Policy(object):
return "<Policy: %s>" % ", ".join(grants)
def startElement(self, name, attrs, connection):
+ if name == 'AccessControlPolicy':
+ self.namespace = attrs.get('xmlns', None)
+ return None
if name == 'Owner':
self.owner = User(self)
return self.owner
@@ -68,7 +72,10 @@ class Policy(object):
setattr(self, name, value)
def to_xml(self):
- s = '<AccessControlPolicy>'
+ if self.namespace is not None:
+ s = '<AccessControlPolicy xmlns="{0}">'.format(self.namespace)
+ else:
+ s = '<AccessControlPolicy>'
s += self.owner.to_xml()
s += self.acl.to_xml()
s += '</AccessControlPolicy>'
diff --git a/boto/s3/bucket.py b/boto/s3/bucket.py
index ed409703..45c82da7 100644
--- a/boto/s3/bucket.py
+++ b/boto/s3/bucket.py
@@ -146,7 +146,7 @@ class Bucket(object):
response_headers=None, validate=True):
"""
Check to see if a particular key exists within the bucket. This
- method uses a HEAD request to check for the existance of the key.
+ method uses a HEAD request to check for the existence of the key.
Returns: An instance of a Key object or None
:param key_name: The name of the key to retrieve
diff --git a/boto/s3/connection.py b/boto/s3/connection.py
index d6b3b52f..e3ada795 100644
--- a/boto/s3/connection.py
+++ b/boto/s3/connection.py
@@ -350,9 +350,34 @@ class S3Connection(AWSAuthConnection):
return {"action": url, "fields": fields}
+ def generate_url_sigv4(self, expires_in, method, bucket='', key='',
+ headers=None, force_http=False,
+ response_headers=None, version_id=None,
+ iso_date=None):
+ path = self.calling_format.build_path_base(bucket, key)
+ auth_path = self.calling_format.build_auth_path(bucket, key)
+ host = self.calling_format.build_host(self.server_name(), bucket)
+
+ params = {}
+ if version_id is not None:
+ params['VersionId'] = version_id
+
+ http_request = self.build_base_http_request(method, path, auth_path,
+ headers=headers, host=host,
+ params=params)
+
+ return self._auth_handler.presign(http_request, expires_in,
+ iso_date=iso_date)
+
def generate_url(self, expires_in, method, bucket='', key='', headers=None,
query_auth=True, force_http=False, response_headers=None,
expires_in_absolute=False, version_id=None):
+ if self._auth_handler.capability[0] == 'hmac-v4-s3':
+ # Handle the special sigv4 case
+ return self.generate_url_sigv4(expires_in, method, bucket=bucket,
+ key=key, headers=headers, force_http=force_http,
+ response_headers=response_headers, version_id=version_id)
+
headers = headers or {}
if expires_in_absolute:
expires = int(expires_in)
diff --git a/boto/utils.py b/boto/utils.py
index 9f071744..ae900da6 100644
--- a/boto/utils.py
+++ b/boto/utils.py
@@ -61,6 +61,11 @@ import email.utils
import email.encoders
import gzip
import base64
+import threading
+import locale
+
+from contextlib import contextmanager
+
try:
from hashlib import md5
except ImportError:
@@ -232,7 +237,8 @@ def retry_url(url, retry_on_404=True, num_retries=10):
boto.log.exception('Caught exception reading instance data')
# If not on the last iteration of the loop then sleep.
if i + 1 != num_retries:
- time.sleep(2 ** i)
+ time.sleep(min(2 ** i,
+ boto.config.get('Boto', 'max_retry_delay', 60)))
boto.log.error('Unable to read instance data, giving up')
return ''
@@ -313,7 +319,9 @@ class LazyLoadMetadata(dict):
" for the '%s' try" % (i + 1))
if i + 1 != self._num_retries:
- next_sleep = random.random() * (2 ** i)
+ next_sleep = min(
+ random.random() * 2 ** i,
+ boto.config.get('Boto', 'max_retry_delay', 60))
time.sleep(next_sleep)
else:
boto.log.error('Unable to read meta data, giving up')
@@ -446,7 +454,20 @@ def get_instance_userdata(version='latest', sep=None,
ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
ISO8601_MS = '%Y-%m-%dT%H:%M:%S.%fZ'
RFC1123 = '%a, %d %b %Y %H:%M:%S %Z'
+LOCALE_LOCK = threading.Lock()
+
+@contextmanager
+def setlocale(name):
+ """
+ A context manager to set the locale in a threadsafe manner.
+ """
+ with LOCALE_LOCK:
+ saved = locale.setlocale(locale.LC_ALL)
+ try:
+ yield locale.setlocale(locale.LC_ALL, name)
+ finally:
+ locale.setlocale(locale.LC_ALL, saved)
def get_ts(ts=None):
if not ts:
@@ -455,17 +476,18 @@ def get_ts(ts=None):
def parse_ts(ts):
- ts = ts.strip()
- try:
- dt = datetime.datetime.strptime(ts, ISO8601)
- return dt
- except ValueError:
+ with setlocale('C'):
+ ts = ts.strip()
try:
- dt = datetime.datetime.strptime(ts, ISO8601_MS)
+ dt = datetime.datetime.strptime(ts, ISO8601)
return dt
except ValueError:
- dt = datetime.datetime.strptime(ts, RFC1123)
- return dt
+ try:
+ dt = datetime.datetime.strptime(ts, ISO8601_MS)
+ return dt
+ except ValueError:
+ dt = datetime.datetime.strptime(ts, RFC1123)
+ return dt
def find_class(module_name, class_name=None):
diff --git a/docs/source/cloudwatch_tut.rst b/docs/source/cloudwatch_tut.rst
index 37263a8d..027cd980 100644
--- a/docs/source/cloudwatch_tut.rst
+++ b/docs/source/cloudwatch_tut.rst
@@ -16,45 +16,39 @@ it does, you can do this::
>>> c = boto.ec2.cloudwatch.connect_to_region('us-west-2')
>>> metrics = c.list_metrics()
>>> metrics
- [Metric:NetworkIn,
- Metric:NetworkOut,
- Metric:NetworkOut(InstanceType,m1.small),
- Metric:NetworkIn(InstanceId,i-e573e68c),
- Metric:CPUUtilization(InstanceId,i-e573e68c),
- Metric:DiskWriteBytes(InstanceType,m1.small),
- Metric:DiskWriteBytes(ImageId,ami-a1ffb63),
- Metric:NetworkOut(ImageId,ami-a1ffb63),
- Metric:DiskWriteOps(InstanceType,m1.small),
- Metric:DiskReadBytes(InstanceType,m1.small),
- Metric:DiskReadOps(ImageId,ami-a1ffb63),
- Metric:CPUUtilization(InstanceType,m1.small),
- Metric:NetworkIn(ImageId,ami-a1ffb63),
- Metric:DiskReadOps(InstanceType,m1.small),
- Metric:DiskReadBytes,
+ [Metric:DiskReadBytes,
Metric:CPUUtilization,
- Metric:DiskWriteBytes(InstanceId,i-e573e68c),
- Metric:DiskWriteOps(InstanceId,i-e573e68c),
+ Metric:DiskWriteOps,
Metric:DiskWriteOps,
Metric:DiskReadOps,
- Metric:CPUUtilization(ImageId,ami-a1ffb63),
- Metric:DiskReadOps(InstanceId,i-e573e68c),
- Metric:NetworkOut(InstanceId,i-e573e68c),
- Metric:DiskReadBytes(ImageId,ami-a1ffb63),
- Metric:DiskReadBytes(InstanceId,i-e573e68c),
- Metric:DiskWriteBytes,
- Metric:NetworkIn(InstanceType,m1.small),
- Metric:DiskWriteOps(ImageId,ami-a1ffb63)]
+ Metric:DiskReadBytes,
+ Metric:DiskReadOps,
+ Metric:CPUUtilization,
+ Metric:DiskWriteOps,
+ Metric:NetworkIn,
+ Metric:NetworkOut,
+ Metric:NetworkIn,
+ Metric:DiskReadBytes,
+ Metric:DiskWriteBytes,
+ Metric:DiskWriteBytes,
+ Metric:NetworkIn,
+ Metric:NetworkIn,
+ Metric:NetworkOut,
+ Metric:NetworkOut,
+ Metric:DiskReadOps,
+ Metric:CPUUtilization,
+ Metric:DiskReadOps,
+ Metric:CPUUtilization,
+ Metric:DiskWriteBytes,
+ Metric:DiskWriteBytes,
+ Metric:DiskReadBytes,
+ Metric:NetworkOut,
+ Metric:DiskWriteOps]
+
The list_metrics call will return a list of all of the available metrics
that you can query against. Each entry in the list is a Metric object.
-As you can see from the list above, some of the metrics are generic metrics
-and some have Dimensions associated with them (e.g. InstanceType=m1.small).
-The Dimension can be used to refine your query. So, for example, I could
-query the metric Metric:CPUUtilization which would create the desired statistic
-by aggregating cpu utilization data across all sources of information available
-or I could refine that by querying the metric
-Metric:CPUUtilization(InstanceId,i-e573e68c) which would use only the data
-associated with the instance identified by the instance ID i-e573e68c.
+As you can see from the list above, some of the metrics are repeated. The repeated metrics are across different dimensions (per-instance, per-image-type, per-instance-type) which can be identified by looking at the dimensions property.
Because for this example, I'm only monitoring a single instance, the set
of metrics available to me are fairly limited. If I was monitoring many
@@ -62,12 +56,21 @@ instances, using many different instance types and AMI's and also several
load balancers, the list of available metrics would grow considerably.
Once you have the list of available metrics, you can actually
-query the CloudWatch system for that metric. Let's choose the CPU utilization
-metric for our instance.::
+query the CloudWatch system for that metric.
+Let's choose the CPU utilization metric for one of the ImageID.::
+ >>> m_image = metrics[7]
+ >>> m_image
+ Metric:CPUUtilization
+ >>> m_image.dimensions
+ {u'ImageId': [u'ami-6ac2a85a']}
+
+Let's choose another CPU utilization metric for our instance.::
- >>> m = metrics[5]
+ >>> m = metrics[20]
>>> m
- Metric:CPUUtilization(InstanceId,i-e573e68c)
+ Metric:CPUUtilization
+ >>> m.dimensions
+ {u'InstanceId': [u'i-4ca81747']}
The Metric object has a query method that lets us actually perform
the query against the collected data in CloudWatch. To call that,
@@ -87,8 +90,7 @@ values::
And Units must be one of the following::
- ['Seconds', 'Percent', 'Bytes', 'Bits', 'Count',
- 'Bytes/Second', 'Bits/Second', 'Count/Second']
+ ['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', 'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', 'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second', 'Terabits/Second', 'Count/Second', None]
The query method also takes an optional parameter, period. This
parameter controls the granularity (in seconds) of the data returned.
@@ -108,9 +110,8 @@ about that particular data point.::
>>> d = datapoints[0]
>>> d
- {u'Average': 0.0,
- u'SampleCount': 1.0,
- u'Timestamp': u'2009-05-21T19:55:00Z',
+ {u'Timestamp': datetime.datetime(2014, 6, 23, 22, 25),
+ u'Average': 20.0,
u'Unit': u'Percent'}
My server obviously isn't very busy right now!
diff --git a/docs/source/index.rst b/docs/source/index.rst
index c97d3919..c260822a 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -119,6 +119,7 @@ Release Notes
.. toctree::
:titlesonly:
+ releasenotes/v2.30.0
releasenotes/v2.29.1
releasenotes/v2.29.0
releasenotes/v2.28.0
diff --git a/docs/source/ref/route53.rst b/docs/source/ref/route53.rst
index f3a03bab..1d4af2c6 100644
--- a/docs/source/ref/route53.rst
+++ b/docs/source/ref/route53.rst
@@ -9,26 +9,47 @@ boto.route53.connection
-----------------------
.. automodule:: boto.route53.connection
- :members:
+ :members:
:undoc-members:
boto.route53.exception
----------------------
.. automodule:: boto.route53.exception
- :members:
+ :members:
+ :undoc-members:
+
+boto.route53.healthcheck
+------------------------
+
+.. automodule:: boto.route53.healthcheck
+ :members:
+ :undoc-members:
+
+boto.route53.hostedzone
+-----------------------
+
+.. automodule:: boto.route53.hostedzone
+ :members:
:undoc-members:
boto.route53.record
-------------------
.. automodule:: boto.route53.record
- :members:
+ :members:
+ :undoc-members:
+
+boto.route53.status
+-------------------
+
+.. automodule:: boto.route53.status
+ :members:
:undoc-members:
boto.route53.zone
-----------------
.. automodule:: boto.route53.zone
- :members:
+ :members:
:undoc-members:
diff --git a/docs/source/releasenotes/v2.30.0.rst b/docs/source/releasenotes/v2.30.0.rst
new file mode 100644
index 00000000..bf5ad6d0
--- /dev/null
+++ b/docs/source/releasenotes/v2.30.0.rst
@@ -0,0 +1,28 @@
+boto v2.30.0
+============
+
+:date: 2014/07/01
+
+This release adds new Amazon EC2 instance types, new regions for AWS CloudTrail and Amazon Kinesis, Amazon S3 presigning using signature version 4, and several documentation updates and bugfixes.
+
+
+Changes
+-------
+* Add EC2 T2 instance types (:sha:`544f8925cb`)
+* Add new regions for CloudTrail and Kinesis (:sha:`4d67e19914`)
+* Fixed some code formatting and typo in SQS tutorial docs. (:issue:`2332`, :sha:`08c8fed`)
+* Documentation update -- Child workflows and poll API. (:issue:`2333`, :issue:`2063`, :issue:`2064`, :sha:`4835676`)
+* DOC Tutorial update for metrics and use of dimensions property. (:issue:`2340`, :issue:`2336`, :sha:`45fda90`)
+* Let people know only EC2 supported for cloudwatch. (:issue:`2341`, :sha:`98f03e2`)
+* Add namespace to AccessControlPolicy xml representation. (:issue:`2342`, :sha:`ce07446`)
+* Make ip_addr optional in Route53 HealthCheck. (:issue:`2345`, :sha:`79c35ca`)
+* Add S3 SigV4 Presigning. (:issue:`2349`, :sha:`125c4ce`)
+* Add missing route53 autodoc. (:issue:`2343`, :sha:`6472811`)
+* Adds scan_index_forward and limit to DynamoDB table query count. (:issue:`2184`, :sha:`4b6d222`)
+* Add method TaggedEC2Object.add_tags(). (:issue:`2259`, :sha:`eea5467`)
+* Add network interface lookup to EC2. Add update/attach/detach methods to NetworkInterface object. (:issue:`2311`, :sha:`4d44530`)
+* Parse date/time in a locale independent manner. (:issue:`2317`, :issue:`2271`, :sha:`3b715e5`)
+* Add documentation for delete_hosted_zone. (:issue:`2316`, :sha:`a0fdd39`)
+* s/existance/existence/ (:issue:`2315`, :sha:`b8dfa1c`)
+* Add multipart upload section to the S3 tutorial. (:issue:`2308`, :sha:`99953d4`)
+* Only attempt shared creds load if path is a file. (:issue:`2305`, :sha:`0bffa3b`)
diff --git a/docs/source/s3_tut.rst b/docs/source/s3_tut.rst
index 9db92211..e5de8af9 100644
--- a/docs/source/s3_tut.rst
+++ b/docs/source/s3_tut.rst
@@ -161,6 +161,61 @@ exists within a bucket, you can skip the check for a key on the server.
>>> key_we_know_is_there = b.get_key('mykey', validate=False)
+Storing Large Data
+------------------
+
+At times the data you may want to store will be hundreds of megabytes or
+more in size. S3 allows you to split such files into smaller components.
+You upload each component in turn and then S3 combines them into the final
+object. While this is fairly straightforward, it requires a few extra steps
+to be taken. The example below makes use of the FileChunkIO module, so
+``pip install FileChunkIO`` if it isn't already installed.
+
+::
+
+ >>> import math, os
+ >>> import boto
+ >>> from filechunkio import FileChunkIO
+
+ # Connect to S3
+ >>> c = boto.connect_s3()
+ >>> b = c.get_bucket('mybucket')
+
+ # Get file info
+ >>> source_path = 'path/to/your/file.ext'
+ >>> source_size = os.stat(source_path).st_size
+
+ # Create a multipart upload request
+ >>> mp = b.initiate_multipart_upload(os.path.basename(source_path))
+
+ # Use a chunk size of 50 MiB (feel free to change this)
+ >>> chunk_size = 52428800
+ >>> chunk_count = int(math.ceil(source_size / chunk_size))
+
+ # Send the file parts, using FileChunkIO to create a file-like object
+ # that points to a certain byte range within the original file. We
+ # set bytes to never exceed the original file size.
+ >>> for i in range(chunk_count + 1):
+ >>> offset = chunk_size * i
+ >>> bytes = min(chunk_size, source_size - offset)
+ >>> with FileChunkIO(source_path, 'r', offset=offset,
+ bytes=bytes) as fp:
+ >>> mp.upload_part_from_file(fp, part_num=i + 1)
+
+ # Finish the upload
+ >>> mp.complete_upload()
+
+It is also possible to upload the parts in parallel using threads. The
+``s3put`` script that ships with Boto provides an example of doing so
+using a thread pool.
+
+Note that if you forget to call either ``mp.complete_upload()`` or
+``mp.cancel_upload()`` you will be left with an incomplete upload and
+charged for the storage consumed by the uploaded parts. A call to
+``bucket.get_all_multipart_uploads()`` can help to show lost multipart
+upload parts.
+
+
Accessing A Bucket
------------------
diff --git a/docs/source/sqs_tut.rst b/docs/source/sqs_tut.rst
index f86aa3e8..9b8e508c 100644
--- a/docs/source/sqs_tut.rst
+++ b/docs/source/sqs_tut.rst
@@ -22,7 +22,7 @@ The recommended method of doing this is as follows::
At this point the variable conn will point to an SQSConnection object in the
US-WEST-2 region. Bear in mind that just as any other AWS service, SQS is
region-specific. In this example, the AWS access key and AWS secret key are
-passed in to the method explicitely. Alternatively, you can set the environment
+passed in to the method explicitly. Alternatively, you can set the environment
variables:
* ``AWS_ACCESS_KEY_ID`` - Your AWS Access Key ID
@@ -116,17 +116,17 @@ values of the message that was written to the queue.
Arbitrary message attributes can be defined by setting a simple dictionary
of values on the message object::
->>> m = Message()
->>> m.message_attributes = {
- "name1": {
- "data_type": "String",
- "string_value": "I am a string"
- },
- "name2": {
- "data_type": "Number",
- "string_value": "12"
- }
-}
+ >>> m = Message()
+ >>> m.message_attributes = {
+ ... "name1": {
+ ... "data_type": "String",
+ ... "string_value": "I am a string"
+ ... },
+ ... "name2": {
+ ... "data_type": "Number",
+ ... "string_value": "12"
+ ... }
+ ... }
Note that by default, these arbitrary attributes are not returned when
you request messages from a queue. Instead, you must request them via
@@ -159,7 +159,7 @@ default boto Message object. To register your message class, you would::
where MyMessage is the class definition for your message class. Your
message class should subclass the boto Message because there is a small
-bit of Python magic happening in the __setattr__ method of the boto Message
+bit of Python magic happening in the ``__setattr__`` method of the boto Message
class.
Reading Messages
@@ -203,14 +203,14 @@ passing a num_messages parameter (defaults to 1) you can control the maximum
number of messages that will be returned by the method. To show this
feature off, first let's load up a few more messages.
->>> for i in range(1, 11):
-... m = Message()
-... m.set_body('This is message %d' % i)
-... q.write(m)
-...
->>> rs = q.get_messages(10)
->>> len(rs)
-10
+ >>> for i in range(1, 11):
+ ... m = Message()
+ ... m.set_body('This is message %d' % i)
+ ... q.write(m)
+ ...
+ >>> rs = q.get_messages(10)
+ >>> len(rs)
+ 10
Don't be alarmed if the length of the result set returned by the get_messages
call is less than 10. Sometimes it takes some time for new messages to become
@@ -275,5 +275,5 @@ messages in a queue to a local file:
>>> q.dump('messages.txt', sep='\n------------------\n')
This will read all of the messages in the queue and write the bodies of
-each of the messages to the file messages.txt. The option sep argument
+each of the messages to the file messages.txt. The optional ``sep`` argument
is a separator that will be printed between each message body in the file.
diff --git a/docs/source/swf_tut.rst b/docs/source/swf_tut.rst
index 68588265..ffbacfd2 100644
--- a/docs/source/swf_tut.rst
+++ b/docs/source/swf_tut.rst
@@ -1,5 +1,5 @@
.. swf_tut:
- :Authors: Slawek "oozie" Ligus <root@ooz.ie>
+ :Authors: Slawek "oozie" Ligus <root@ooz.ie>, Brad Morris <bradley.s.morris@gmail.com>
===============================
Amazon Simple Workflow Tutorial
@@ -60,7 +60,7 @@ Before workflows and activities can be used, they have to be registered with SWF
registerables = []
registerables.append(swf.Domain(name=DOMAIN))
- for workflow_type in ('HelloWorkflow', 'SerialWorkflow', 'ParallelWorkflow'):
+ for workflow_type in ('HelloWorkflow', 'SerialWorkflow', 'ParallelWorkflow', 'SubWorkflow'):
registerables.append(swf.WorkflowType(domain=DOMAIN, name=workflow_type, version=VERSION, task_list='default'))
for activity_type in ('HelloWorld', 'ActivityA', 'ActivityB', 'ActivityC'):
@@ -441,11 +441,11 @@ The decider schedules all activities at once and marks progress until all activi
import boto.swf.layer2 as swf
import time
-
+
SCHED_COUNT = 5
-
+
class ParallelDecider(swf.Decider):
-
+
domain = 'boto_tutorial'
task_list = 'default'
def run(self):
@@ -480,12 +480,12 @@ Again, the only bit of information a worker needs is which task list to poll.
# parallel_worker.py
import time
import boto.swf.layer2 as swf
-
+
class ParallelWorker(swf.ActivityWorker):
-
+
domain = 'boto_tutorial'
task_list = 'default'
-
+
def run(self):
"""Report current time."""
activity_task = self.poll()
@@ -517,7 +517,7 @@ Run two or more workers to see how the service partitions work execution in para
working on activity1
working on activity3
working on activity4
-
+
.. code-block:: bash
$ python -i parallel_worker.py
@@ -528,6 +528,136 @@ Run two or more workers to see how the service partitions work execution in para
As seen above, the work was partitioned between the two running workers.
+Sub-Workflows
+-------------
+
+Sometimes it's desired or necessary to break the process up into multiple workflows.
+
+Since the decider is stateless, it's up to you to determine which workflow is being used and which action
+you would like to take.
+
+.. code-block:: python
+
+ import boto.swf.layer2 as swf
+
+ class SubWorkflowDecider(swf.Decider):
+
+ domain = 'boto_tutorial'
+ task_list = 'default'
+ version = '1.0'
+
+ def run(self):
+ history = self.poll()
+ events = []
+ if 'events' in history:
+ events = history['events']
+ # Collect the entire history if there are enough events to become paginated
+ while 'nextPageToken' in history:
+ history = self.poll(next_page_token=history['nextPageToken'])
+ if 'events' in history:
+ events = events + history['events']
+
+ workflow_type = history['workflowType']['name']
+
+            # Get all of the relevant events that have happened since the last decision task was started
+ workflow_events = [e for e in events
+ if e['eventId'] > history['previousStartedEventId'] and
+ not e['eventType'].startswith('Decision')]
+
+ decisions = swf.Layer1Decisions()
+
+ for event in workflow_events:
+ last_event_type = event['eventType']
+ if last_event_type == 'WorkflowExecutionStarted':
+ if workflow_type == 'SerialWorkflow':
+ decisions.start_child_workflow_execution('SubWorkflow', self.version,
+ "subworkflow_1", task_list=self.task_list, input="sub_1")
+ elif workflow_type == 'SubWorkflow':
+ for i in range(2):
+ decisions.schedule_activity_task("activity_%d" % i, 'ActivityA', self.version, task_list='a_tasks')
+ else:
+ decisions.fail_workflow_execution(reason="Unknown workflow %s" % workflow_type)
+ break
+
+ elif last_event_type == 'ChildWorkflowExecutionCompleted':
+ decisions.schedule_activity_task("activity_2", 'ActivityB', self.version, task_list='b_tasks')
+
+ elif last_event_type == 'ActivityTaskCompleted':
+ attrs = event['activityTaskCompletedEventAttributes']
+ activity = events[attrs['scheduledEventId'] - 1]
+ activity_name = activity['activityTaskScheduledEventAttributes']['activityType']['name']
+
+ if activity_name == 'ActivityA':
+ completed_count = sum([1 for a in events if a['eventType'] == 'ActivityTaskCompleted'])
+ if completed_count == 2:
+ # Complete the child workflow
+ decisions.complete_workflow_execution()
+ elif activity_name == 'ActivityB':
+ # Complete the parent workflow
+ decisions.complete_workflow_execution()
+
+ self.complete(decisions=decisions)
+ return True
+
+Misc
+----
+
+Some of these things are not obvious by reading the API documents, so hopefully they help you
+avoid some time-consuming pitfalls.
+
+Pagination
+==========
+
+When the decider polls for new tasks, the maximum number of events it will return at a time is 100
+(configurable to a smaller number, but not larger). When running a workflow, this number gets quickly
+exceeded. If it does, the decision task will contain a key ``nextPageToken`` which can be submitted to the
+``poll()`` call to get the next page of events.
+
+.. code-block:: python
+
+ decision_task = self.poll()
+
+ events = []
+ if 'events' in decision_task:
+ events = decision_task['events']
+ while 'nextPageToken' in decision_task:
+ decision_task = self.poll(next_page_token=decision_task['nextPageToken'])
+ if 'events' in decision_task:
+ events += decision_task['events']
+
+Depending on your workflow logic, you might not need to aggregate all of the events.
+
+Decision Tasks
+==============
+
+When first running deciders and activities, it may seem that the decider gets called for every event that
+an activity triggers; however, this is not the case. More than one event can happen between decision tasks.
+The decision task will contain a key ``previousStartedEventId`` that lets you know the ``eventId`` of the
+last DecisionTaskStarted event that was processed. Your script will need to handle all of the events
+that have happened since then, not just the last activity.
+
+.. code-block:: python
+
+ workflow_events = [e for e in events if e['eventId'] > decision_task['previousStartedEventId']]
+
+You may also wish to still filter out tasks that start with 'Decision' or filter it in some other way
+that fulfills your needs. You will now have to iterate over the workflow_events list and respond to
+each event, as the list may contain multiple events.
+
+Filtering Events
+================
+
+When running many tasks in parallel, a common task is searching through the history to see how many events
+of a particular activity type started, completed, and/or failed. Some basic list comprehension makes
+this trivial.
+
+.. code-block:: python
+
+ def filter_completed_events(self, events, type):
+ completed = [e for e in events if e['eventType'] == 'ActivityTaskCompleted']
+ orig = [events[e['activityTaskCompletedEventAttributes']['scheduledEventId']-1] for e in completed]
+ return [e for e in orig if e['activityTaskScheduledEventAttributes']['activityType']['name'] == type]
+
.. _Amazon SWF API Reference: http://docs.aws.amazon.com/amazonswf/latest/apireference/Welcome.html
.. _StackOverflow questions: http://stackoverflow.com/questions/tagged/amazon-swf
.. _Miscellaneous Blog Articles: http://log.ooz.ie/search/label/SimpleWorkflow
diff --git a/tests/integration/route53/test_health_check.py b/tests/integration/route53/test_health_check.py
index ed4db5ae..ad336802 100644
--- a/tests/integration/route53/test_health_check.py
+++ b/tests/integration/route53/test_health_check.py
@@ -38,16 +38,28 @@ class TestRoute53HealthCheck(Route53TestCase):
self.conn.delete_health_check(result['CreateHealthCheckResponse']['HealthCheck']['Id'])
def test_create_https_health_check(self):
- hc = HealthCheck(ip_addr="54.217.7.118", port=80, hc_type="HTTPS", resource_path="/testing")
+ hc = HealthCheck(ip_addr="54.217.7.118", port=443, hc_type="HTTPS", resource_path="/testing")
result = self.conn.create_health_check(hc)
self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'Type'], 'HTTPS')
self.assertEquals(result[u'CreateHealthCheckResponse'][
u'HealthCheck'][u'HealthCheckConfig'][u'IPAddress'], '54.217.7.118')
- self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'Port'], '80')
+ self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'Port'], '443')
self.assertEquals(result[u'CreateHealthCheckResponse'][
u'HealthCheck'][u'HealthCheckConfig'][u'ResourcePath'], '/testing')
+ self.assertFalse('FullyQualifiedDomainName' in result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'])
self.conn.delete_health_check(result['CreateHealthCheckResponse']['HealthCheck']['Id'])
+ def test_create_https_health_check_fqdn(self):
+ hc = HealthCheck(ip_addr=None, port=443, hc_type="HTTPS", resource_path="/", fqdn="google.com")
+ result = self.conn.create_health_check(hc)
+ self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'Type'], 'HTTPS')
+ self.assertEquals(result[u'CreateHealthCheckResponse'][
+ u'HealthCheck'][u'HealthCheckConfig'][u'FullyQualifiedDomainName'], 'google.com')
+ self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'Port'], '443')
+ self.assertEquals(result[u'CreateHealthCheckResponse'][
+ u'HealthCheck'][u'HealthCheckConfig'][u'ResourcePath'], '/')
+ self.assertFalse('IPAddress' in result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'])
+ self.conn.delete_health_check(result['CreateHealthCheckResponse']['HealthCheck']['Id'])
def test_create_and_list_health_check(self):
hc = HealthCheck(ip_addr="54.217.7.118", port=80, hc_type="HTTP", resource_path="/testing")
diff --git a/tests/unit/dynamodb2/test_table.py b/tests/unit/dynamodb2/test_table.py
index af674a3e..cc807f4b 100644
--- a/tests/unit/dynamodb2/test_table.py
+++ b/tests/unit/dynamodb2/test_table.py
@@ -1,5 +1,5 @@
import mock
-import unittest
+from tests.unit import unittest
from boto.dynamodb2 import exceptions
from boto.dynamodb2.fields import (HashKey, RangeKey,
AllIndex, KeysOnlyIndex, IncludeIndex,
@@ -1774,7 +1774,40 @@ class TableTestCase(unittest.TestCase):
'date_joined': {
'N': '23456'
}
- })
+ }, expected=None, conditional_operator=None)
+
+ def test_delete_item_conditionally(self):
+ with mock.patch.object(
+ self.users.connection,
+ 'delete_item',
+ return_value={}) as mock_delete_item:
+ self.assertTrue(self.users.delete_item(expected={'balance__eq': 0},
+ username='johndoe', date_joined=23456))
+
+ mock_delete_item.assert_called_once_with('users', {
+ 'username': {
+ 'S': 'johndoe'
+ },
+ 'date_joined': {
+ 'N': '23456'
+ }
+ },
+ expected={
+ 'balance': {
+ 'ComparisonOperator': 'EQ', 'AttributeValueList': [{'N': '0'}]
+ },
+ },
+ conditional_operator=None)
+
+ def side_effect(*args, **kwargs):
+ raise exceptions.ConditionalCheckFailedException(400, '', {})
+
+ with mock.patch.object(
+ self.users.connection,
+ 'delete_item',
+ side_effect=side_effect) as mock_delete_item:
+ self.assertFalse(self.users.delete_item(expected={'balance__eq': 0},
+ username='johndoe', date_joined=23456))
def test_get_key_fields_no_schema_populated(self):
expected = {
@@ -2593,6 +2626,41 @@ class TableTestCase(unittest.TestCase):
return_value=expected) as mock_count:
self.assertEqual(self.users.count(), 5)
+ def test_query_count_simple(self):
+ expected_0 = {
+ 'Count': 0.0,
+ }
+
+ expected_1 = {
+ 'Count': 10.0,
+ }
+
+ with mock.patch.object(
+ self.users.connection,
+ 'query',
+ return_value=expected_0) as mock_query:
+ results = self.users.query_count(username__eq='notmyname')
+ self.assertTrue(isinstance(results, int))
+ self.assertEqual(results, 0)
+ self.assertEqual(mock_query.call_count, 1)
+ self.assertIn('scan_index_forward', mock_query.call_args[1])
+ self.assertEqual(True, mock_query.call_args[1]['scan_index_forward'])
+ self.assertIn('limit', mock_query.call_args[1])
+ self.assertEqual(None, mock_query.call_args[1]['limit'])
+
+ with mock.patch.object(
+ self.users.connection,
+ 'query',
+ return_value=expected_1) as mock_query:
+ results = self.users.query_count(username__gt='somename', consistent=True, scan_index_forward=False, limit=10)
+ self.assertTrue(isinstance(results, int))
+ self.assertEqual(results, 10)
+ self.assertEqual(mock_query.call_count, 1)
+ self.assertIn('scan_index_forward', mock_query.call_args[1])
+ self.assertEqual(False, mock_query.call_args[1]['scan_index_forward'])
+ self.assertIn('limit', mock_query.call_args[1])
+ self.assertEqual(10, mock_query.call_args[1]['limit'])
+
def test_private_batch_get(self):
expected = {
"ConsumedCapacity": {
diff --git a/tests/unit/ec2/test_connection.py b/tests/unit/ec2/test_connection.py
index deeb673d..d0d9734e 100755
--- a/tests/unit/ec2/test_connection.py
+++ b/tests/unit/ec2/test_connection.py
@@ -702,6 +702,18 @@ class TestGetAllNetworkInterfaces(TestEC2ConnectionBase):
</networkInterfaceSet>
</DescribeNetworkInterfacesResponse>"""
+ def test_get_all_network_interfaces(self):
+ self.set_http_response(status_code=200)
+ result = self.ec2.get_all_network_interfaces(network_interface_ids=['eni-0f62d866'])
+ self.assert_request_parameters({
+ 'Action': 'DescribeNetworkInterfaces',
+ 'NetworkInterfaceId.1': 'eni-0f62d866'},
+ ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
+ 'SignatureVersion', 'Timestamp',
+ 'Version'])
+ self.assertEqual(len(result), 1)
+ self.assertEqual(result[0].id, 'eni-0f62d866')
+
def test_attachment_has_device_index(self):
self.set_http_response(status_code=200)
parsed = self.ec2.get_all_network_interfaces()
diff --git a/tests/unit/ec2/test_ec2object.py b/tests/unit/ec2/test_ec2object.py
new file mode 100644
index 00000000..e4091bef
--- /dev/null
+++ b/tests/unit/ec2/test_ec2object.py
@@ -0,0 +1,145 @@
+#!/usr/bin/env python
+
+from tests.unit import unittest
+from tests.unit import AWSMockServiceTestCase
+
+from boto.ec2.connection import EC2Connection
+from boto.ec2.ec2object import TaggedEC2Object
+
+
+CREATE_TAGS_RESPONSE = r"""<?xml version="1.0" encoding="UTF-8"?>
+<CreateTagsResponse xmlns="http://ec2.amazonaws.com/doc/2014-05-01/">
+ <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
+ <return>true</return>
+</CreateTagsResponse>
+"""
+
+
+DELETE_TAGS_RESPONSE = r"""<?xml version="1.0" encoding="UTF-8"?>
+<DeleteTagsResponse xmlns="http://ec2.amazonaws.com/doc/2014-05-01/">
+ <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
+ <return>true</return>
+</DeleteTagsResponse>
+"""
+
+
+class TestAddTags(AWSMockServiceTestCase):
+ connection_class = EC2Connection
+
+ def default_body(self):
+ return CREATE_TAGS_RESPONSE
+
+ def test_add_tag(self):
+ self.set_http_response(status_code=200)
+ taggedEC2Object = TaggedEC2Object(self.service_connection)
+ taggedEC2Object.id = "i-abcd1234"
+ taggedEC2Object.tags["already_present_key"] = "already_present_value"
+
+ taggedEC2Object.add_tag("new_key", "new_value")
+
+ self.assert_request_parameters({
+ 'ResourceId.1': 'i-abcd1234',
+ 'Action': 'CreateTags',
+ 'Tag.1.Key': 'new_key',
+ 'Tag.1.Value': 'new_value'},
+ ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
+ 'SignatureVersion', 'Timestamp',
+ 'Version'])
+
+ self.assertEqual(taggedEC2Object.tags, {
+ "already_present_key":"already_present_value",
+ "new_key":"new_value"})
+
+ def test_add_tags(self):
+ self.set_http_response(status_code=200)
+ taggedEC2Object = TaggedEC2Object(self.service_connection)
+ taggedEC2Object.id = "i-abcd1234"
+ taggedEC2Object.tags["already_present_key"] = "already_present_value"
+
+ taggedEC2Object.add_tags({"key1":"value1", "key2":"value2"})
+
+ self.assert_request_parameters({
+ 'ResourceId.1': 'i-abcd1234',
+ 'Action': 'CreateTags',
+ 'Tag.1.Key': 'key1',
+ 'Tag.1.Value': 'value1',
+ 'Tag.2.Key': 'key2',
+ 'Tag.2.Value': 'value2'},
+ ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
+ 'SignatureVersion', 'Timestamp',
+ 'Version'])
+
+ self.assertEqual(taggedEC2Object.tags, {
+ "already_present_key":"already_present_value",
+ "key1":"value1",
+ "key2": "value2"})
+
+
+class TestRemoveTags(AWSMockServiceTestCase):
+ connection_class = EC2Connection
+
+ def default_body(self):
+ return DELETE_TAGS_RESPONSE
+
+ def test_remove_tag(self):
+ self.set_http_response(status_code=200)
+ taggedEC2Object = TaggedEC2Object(self.service_connection)
+ taggedEC2Object.id = "i-abcd1234"
+ taggedEC2Object.tags["key1"] = "value1"
+ taggedEC2Object.tags["key2"] = "value2"
+
+ taggedEC2Object.remove_tag("key1", "value1")
+
+ self.assert_request_parameters({
+ 'ResourceId.1': 'i-abcd1234',
+ 'Action': 'DeleteTags',
+ 'Tag.1.Key': 'key1',
+ 'Tag.1.Value': 'value1'},
+ ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
+ 'SignatureVersion', 'Timestamp',
+ 'Version'])
+
+ self.assertEqual(taggedEC2Object.tags, {"key2":"value2"})
+
+ def test_remove_tag_no_value(self):
+ self.set_http_response(status_code=200)
+ taggedEC2Object = TaggedEC2Object(self.service_connection)
+ taggedEC2Object.id = "i-abcd1234"
+ taggedEC2Object.tags["key1"] = "value1"
+ taggedEC2Object.tags["key2"] = "value2"
+
+ taggedEC2Object.remove_tag("key1")
+
+ self.assert_request_parameters({
+ 'ResourceId.1': 'i-abcd1234',
+ 'Action': 'DeleteTags',
+ 'Tag.1.Key': 'key1'},
+ ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
+ 'SignatureVersion', 'Timestamp',
+ 'Version'])
+
+ self.assertEqual(taggedEC2Object.tags, {"key2":"value2"})
+
+ def test_remove_tag_empty_value(self):
+ self.set_http_response(status_code=200)
+ taggedEC2Object = TaggedEC2Object(self.service_connection)
+ taggedEC2Object.id = "i-abcd1234"
+ taggedEC2Object.tags["key1"] = "value1"
+ taggedEC2Object.tags["key2"] = "value2"
+
+ taggedEC2Object.remove_tag("key1", "")
+
+ self.assert_request_parameters({
+ 'ResourceId.1': 'i-abcd1234',
+ 'Action': 'DeleteTags',
+ 'Tag.1.Key': 'key1',
+ 'Tag.1.Value': ''},
+ ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
+ 'SignatureVersion', 'Timestamp',
+ 'Version'])
+
+ self.assertEqual(taggedEC2Object.tags, {"key2":"value2"})
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tests/unit/ec2/test_networkinterface.py b/tests/unit/ec2/test_networkinterface.py
index 81fa4aef..651b6b83 100644
--- a/tests/unit/ec2/test_networkinterface.py
+++ b/tests/unit/ec2/test_networkinterface.py
@@ -21,12 +21,86 @@
# IN THE SOFTWARE.
#
+import mock
from tests.unit import unittest
from boto.exception import BotoClientError
from boto.ec2.networkinterface import NetworkInterfaceCollection
from boto.ec2.networkinterface import NetworkInterfaceSpecification
from boto.ec2.networkinterface import PrivateIPAddress
+from boto.ec2.networkinterface import Attachment, NetworkInterface
+
+
+class NetworkInterfaceTests(unittest.TestCase):
+ def setUp(self):
+
+ self.attachment = Attachment()
+ self.attachment.id = 'eni-attach-1'
+ self.attachment.instance_id = 10
+ self.attachment.status = "some status"
+ self.attachment.device_index = 100
+
+ self.eni_one = NetworkInterface()
+ self.eni_one.id = 'eni-1'
+ self.eni_one.status = "one_status"
+ self.eni_one.attachment = self.attachment
+
+ self.eni_two = NetworkInterface()
+ self.eni_two.connection = mock.Mock()
+ self.eni_two.id = 'eni-2'
+ self.eni_two.status = "two_status"
+ self.eni_two.attachment = None
+
+ def test_update_with_validate_true_raises_value_error(self):
+ self.eni_one.connection = mock.Mock()
+ self.eni_one.connection.get_all_network_interfaces.return_value = []
+ with self.assertRaisesRegexp(ValueError, "^eni-1 is not a valid ENI ID$"):
+ self.eni_one.update(True)
+
+ def test_update_with_result_set_greater_than_0_updates_dict(self):
+ self.eni_two.connection.get_all_network_interfaces.return_value = [self.eni_one]
+ self.eni_two.update()
+
+ assert all([self.eni_two.status == "one_status",
+ self.eni_two.id == 'eni-1',
+ self.eni_two.attachment == self.attachment])
+
+ def test_update_returns_status(self):
+ self.eni_one.connection = mock.Mock()
+ self.eni_one.connection.get_all_network_interfaces.return_value = [self.eni_two]
+ retval = self.eni_one.update()
+ self.assertEqual(retval, "two_status")
+
+ def test_attach_calls_attach_eni(self):
+ self.eni_one.connection = mock.Mock()
+ self.eni_one.attach("instance_id", 11)
+ self.eni_one.connection.attach_network_interface.assert_called_with(
+ 'eni-1',
+ "instance_id",
+ 11,
+ dry_run=False
+ )
+
+ def test_detach_calls_detach_network_interface(self):
+ self.eni_one.connection = mock.Mock()
+ self.eni_one.detach()
+ self.eni_one.connection.detach_network_interface.assert_called_with(
+ 'eni-attach-1',
+ False,
+ dry_run=False
+ )
+
+ def test_detach_with_no_attach_data(self):
+ self.eni_two.connection = mock.Mock()
+ self.eni_two.detach()
+ self.eni_two.connection.detach_network_interface.assert_called_with(
+ None, False, dry_run=False)
+
+ def test_detach_with_force_calls_detach_network_interface_with_force(self):
+ self.eni_one.connection = mock.Mock()
+ self.eni_one.detach(True)
+ self.eni_one.connection.detach_network_interface.assert_called_with(
+ 'eni-attach-1', True, dry_run=False)
class TestNetworkInterfaceCollection(unittest.TestCase):
diff --git a/tests/unit/provider/test_provider.py b/tests/unit/provider/test_provider.py
index b8141451..14b22ec3 100644
--- a/tests/unit/provider/test_provider.py
+++ b/tests/unit/provider/test_provider.py
@@ -402,7 +402,7 @@ class TestProvider(unittest.TestCase):
self.assertEqual(p.access_key, 'cfg_access_key')
self.assertEqual(p.secret_key, 'cfg_secret_key')
- @mock.patch('os.path.exists', return_value=True)
+ @mock.patch('os.path.isfile', return_value=True)
@mock.patch.object(provider.Config, 'load_from_path')
def test_shared_config_loading(self, load_from_path, exists):
provider.Provider('aws')
diff --git a/tests/unit/route53/test_connection.py b/tests/unit/route53/test_connection.py
index 760026f2..7888a44e 100644
--- a/tests/unit/route53/test_connection.py
+++ b/tests/unit/route53/test_connection.py
@@ -27,6 +27,7 @@ import xml.dom.minidom
from boto.exception import BotoServerError
from boto.route53.connection import Route53Connection
from boto.route53.exception import DNSServerError
+from boto.route53.healthcheck import HealthCheck
from boto.route53.record import ResourceRecordSets, Record
from boto.route53.zone import Zone
@@ -297,6 +298,18 @@ class TestGetAllRRSetsRoute53(AWSMockServiceTestCase):
</ResourceRecord>
</ResourceRecords>
</ResourceRecordSet>
+ <ResourceRecordSet>
+ <Name>us-west-2-evaluate-health-healthcheck.example.com.</Name>
+ <Type>A</Type>
+ <SetIdentifier>latency-example-us-west-2-evaluate-health-healthcheck</SetIdentifier>
+ <Region>us-west-2</Region>
+ <AliasTarget>
+ <HostedZoneId>ABCDEFG123456</HostedZoneId>
+ <EvaluateTargetHealth>true</EvaluateTargetHealth>
+ <DNSName>example-123456-evaluate-health-healthcheck.us-west-2.elb.amazonaws.com.</DNSName>
+ </AliasTarget>
+ <HealthCheckId>076a32f8-86f7-4c9e-9fa2-c163d5be67d9</HealthCheckId>
+ </ResourceRecordSet>
</ResourceRecordSets>
<IsTruncated>false</IsTruncated>
<MaxItems>100</MaxItems>
@@ -339,7 +352,7 @@ class TestGetAllRRSetsRoute53(AWSMockServiceTestCase):
self.assertEqual(no_evaluate_record.alias_dns_name, 'example-123456-no-evaluate-health.us-west-2.elb.amazonaws.com.')
no_evaluate_xml = no_evaluate_record.to_xml()
self.assertTrue('<EvaluateTargetHealth>false</EvaluateTargetHealth>' in no_evaluate_xml)
-
+
failover_record = response[4]
self.assertEqual(failover_record.name, 'failover.example.com.')
self.assertEqual(failover_record.type, 'A')
@@ -347,6 +360,96 @@ class TestGetAllRRSetsRoute53(AWSMockServiceTestCase):
self.assertEqual(failover_record.failover, 'PRIMARY')
self.assertEqual(failover_record.ttl, '60')
+ healthcheck_record = response[5]
+ self.assertEqual(healthcheck_record.health_check, '076a32f8-86f7-4c9e-9fa2-c163d5be67d9')
+ self.assertEqual(healthcheck_record.name, 'us-west-2-evaluate-health-healthcheck.example.com.')
+ self.assertEqual(healthcheck_record.identifier, 'latency-example-us-west-2-evaluate-health-healthcheck')
+ self.assertEqual(healthcheck_record.alias_dns_name, 'example-123456-evaluate-health-healthcheck.us-west-2.elb.amazonaws.com.')
+
+@attr(route53=True)
+class TestCreateHealthCheckRoute53IpAddress(AWSMockServiceTestCase):
+ connection_class = Route53Connection
+
+ def setUp(self):
+ super(TestCreateHealthCheckRoute53IpAddress, self).setUp()
+
+ def default_body(self):
+ return """
+<CreateHealthCheckResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
+ <HealthCheck>
+ <Id>34778cf8-e31e-4974-bad0-b108bd1623d3</Id>
+ <CallerReference>2fa48c8f-76ef-4253-9874-8bcb2b0d7694</CallerReference>
+ <HealthCheckConfig>
+ <IPAddress>74.125.228.81</IPAddress>
+ <Port>443</Port>
+ <Type>HTTPS_STR_MATCH</Type>
+ <SearchString>OK</SearchString>
+ <ResourcePath>/health_check</ResourcePath>
+ <RequestInterval>30</RequestInterval>
+ <FailureThreshold>3</FailureThreshold>
+ </HealthCheckConfig>
+ </HealthCheck>
+</CreateHealthCheckResponse>
+ """
+
+ def test_create_health_check_ip_address(self):
+ self.set_http_response(status_code=201)
+ hc = HealthCheck(ip_addr='74.125.228.81', port=443, hc_type='HTTPS_STR_MATCH', resource_path='/health_check', string_match='OK')
+ hc_xml = hc.to_xml()
+ self.assertFalse('<FullyQualifiedDomainName>' in hc_xml)
+ self.assertTrue('<IPAddress>' in hc_xml)
+
+ response = self.service_connection.create_health_check(hc)
+ hc_resp = response['CreateHealthCheckResponse']['HealthCheck']['HealthCheckConfig']
+ self.assertEqual(hc_resp['IPAddress'], '74.125.228.81')
+ self.assertEqual(hc_resp['ResourcePath'], '/health_check')
+ self.assertEqual(hc_resp['Type'], 'HTTPS_STR_MATCH')
+ self.assertEqual(hc_resp['Port'], '443')
+ self.assertEqual(hc_resp['ResourcePath'], '/health_check')
+ self.assertEqual(hc_resp['SearchString'], 'OK')
+ self.assertEqual(response['CreateHealthCheckResponse']['HealthCheck']['Id'], '34778cf8-e31e-4974-bad0-b108bd1623d3')
+
+@attr(route53=True)
+class TestCreateHealthCheckRoute53FQDN(AWSMockServiceTestCase):
+ connection_class = Route53Connection
+
+ def setUp(self):
+ super(TestCreateHealthCheckRoute53FQDN, self).setUp()
+
+ def default_body(self):
+ return """
+<CreateHealthCheckResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
+ <HealthCheck>
+ <Id>f9abfe10-8d2a-4bbd-8f35-796f0f8572f2</Id>
+ <CallerReference>3246ac17-b651-4295-a5c8-c132a59693d7</CallerReference>
+ <HealthCheckConfig>
+ <Port>443</Port>
+ <Type>HTTPS</Type>
+ <ResourcePath>/health_check</ResourcePath>
+ <FullyQualifiedDomainName>example.com</FullyQualifiedDomainName>
+ <RequestInterval>30</RequestInterval>
+ <FailureThreshold>3</FailureThreshold>
+ </HealthCheckConfig>
+ </HealthCheck>
+</CreateHealthCheckResponse>
+ """
+
+ def test_create_health_check_fqdn(self):
+ self.set_http_response(status_code=201)
+ hc = HealthCheck(ip_addr='', port=443, hc_type='HTTPS', resource_path='/health_check', fqdn='example.com')
+ hc_xml = hc.to_xml()
+ self.assertTrue('<FullyQualifiedDomainName>' in hc_xml)
+ self.assertFalse('<IPAddress>' in hc_xml)
+
+ response = self.service_connection.create_health_check(hc)
+ hc_resp = response['CreateHealthCheckResponse']['HealthCheck']['HealthCheckConfig']
+ self.assertEqual(hc_resp['FullyQualifiedDomainName'], 'example.com')
+ self.assertEqual(hc_resp['ResourcePath'], '/health_check')
+ self.assertEqual(hc_resp['Type'], 'HTTPS')
+ self.assertEqual(hc_resp['Port'], '443')
+ self.assertEqual(hc_resp['ResourcePath'], '/health_check')
+ self.assertEqual(response['CreateHealthCheckResponse']['HealthCheck']['Id'], 'f9abfe10-8d2a-4bbd-8f35-796f0f8572f2')
+
@attr(route53=True)
class TestChangeResourceRecordSetsRoute53(AWSMockServiceTestCase):
connection_class = Route53Connection
@@ -372,11 +475,11 @@ class TestChangeResourceRecordSetsRoute53(AWSMockServiceTestCase):
rrsets.add_change_record('CREATE', Record('wrr.example.com', 'CNAME', 60, ['cname.target'], weight=10, identifier='weight-1'))
rrsets.add_change_record('CREATE', Record('lbr.example.com', 'TXT', 60, ['text record'], region='us-west-2', identifier='region-1'))
rrsets.add_change_record('CREATE', Record('failover.example.com', 'A', 60, ['2.2.2.2'], health_check='hc-1234', failover='PRIMARY', identifier='primary'))
-
+
changes_xml = rrsets.to_xml()
-
+
# the whitespacing doesn't match exactly, so we'll pretty print and drop all new lines
- # not the best, but
+ # not the best, but
actual_xml = re.sub(r"\s*[\r\n]+", "\n", xml.dom.minidom.parseString(changes_xml).toprettyxml())
expected_xml = re.sub(r"\s*[\r\n]+", "\n", xml.dom.minidom.parseString("""
<ChangeResourceRecordSetsRequest xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
@@ -458,7 +561,7 @@ class TestChangeResourceRecordSetsRoute53(AWSMockServiceTestCase):
</ChangeBatch>
</ChangeResourceRecordSetsRequest>
""").toprettyxml())
-
+
# Note: the alias XML should not include the TTL, even if it's specified in the object model
self.assertEqual(actual_xml, expected_xml)
diff --git a/tests/unit/s3/test_bucket.py b/tests/unit/s3/test_bucket.py
index bf638511..ebb98c3f 100644
--- a/tests/unit/s3/test_bucket.py
+++ b/tests/unit/s3/test_bucket.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
from mock import patch
+import xml.dom.minidom
from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
@@ -195,3 +196,34 @@ class TestS3Bucket(AWSMockServiceTestCase):
version_id='something',
validate=False
)
+
+ def acl_policy(self):
+ return """<?xml version="1.0" encoding="UTF-8"?>
+ <AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+ <Owner>
+ <ID>owner_id</ID>
+ <DisplayName>owner_display_name</DisplayName>
+ </Owner>
+ <AccessControlList>
+ <Grant>
+ <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:type="CanonicalUser">
+ <ID>grantee_id</ID>
+ <DisplayName>grantee_display_name</DisplayName>
+ </Grantee>
+ <Permission>FULL_CONTROL</Permission>
+ </Grant>
+ </AccessControlList>
+ </AccessControlPolicy>"""
+
+ def test_bucket_acl_policy_namespace(self):
+ self.set_http_response(status_code=200)
+ bucket = self.service_connection.get_bucket('mybucket')
+
+ self.set_http_response(status_code=200, body=self.acl_policy())
+ policy = bucket.get_acl()
+
+ xml_policy = policy.to_xml()
+ document = xml.dom.minidom.parseString(xml_policy)
+ namespace = document.documentElement.namespaceURI
+ self.assertEqual(namespace, 'http://s3.amazonaws.com/doc/2006-03-01/')
diff --git a/tests/unit/s3/test_connection.py b/tests/unit/s3/test_connection.py
index ded110c4..e481c84c 100644
--- a/tests/unit/s3/test_connection.py
+++ b/tests/unit/s3/test_connection.py
@@ -93,6 +93,49 @@ class TestSigV4HostError(MockServiceWithConfigTestCase):
)
+class TestSigV4Presigned(MockServiceWithConfigTestCase):
+ connection_class = S3Connection
+
+ def test_sigv4_presign(self):
+ self.config = {
+ 's3': {
+ 'use-sigv4': True,
+ }
+ }
+
+ conn = self.connection_class(
+ aws_access_key_id='less',
+ aws_secret_access_key='more',
+ host='s3.amazonaws.com'
+ )
+
+ # Here we force an input iso_date to ensure we always get the
+ # same signature.
+ url = conn.generate_url_sigv4(86400, 'GET', bucket='examplebucket',
+ key='test.txt', iso_date='20140625T000000Z')
+
+ self.assertIn('a937f5fbc125d98ac8f04c49e0204ea1526a7b8ca058000a54c192457be05b7d', url)
+
+ def test_sigv4_presign_optional_params(self):
+ self.config = {
+ 's3': {
+ 'use-sigv4': True,
+ }
+ }
+
+ conn = self.connection_class(
+ aws_access_key_id='less',
+ aws_secret_access_key='more',
+ security_token='token',
+ host='s3.amazonaws.com'
+ )
+
+ url = conn.generate_url_sigv4(86400, 'GET', bucket='examplebucket',
+ key='test.txt', version_id=2)
+
+ self.assertIn('VersionId=2', url)
+ self.assertIn('X-Amz-Security-Token=token', url)
+
class TestUnicodeCallingFormat(AWSMockServiceTestCase):
connection_class = S3Connection
diff --git a/tests/unit/utils/test_utils.py b/tests/unit/utils/test_utils.py
index 88a6dba3..f7328e7d 100644
--- a/tests/unit/utils/test_utils.py
+++ b/tests/unit/utils/test_utils.py
@@ -24,8 +24,10 @@ try:
except ImportError:
import unittest
+import datetime
import hashlib
import hmac
+import locale
import mock
import thread
import time
@@ -259,5 +261,32 @@ class TestLazyLoadMetadata(unittest.TestCase):
'http://169.254.169.254/latest/user-data',
retry_on_404=False)
+
+class TestStringToDatetimeParsing(unittest.TestCase):
+ """ Test string to datetime parsing """
+ def setUp(self):
+ self._saved = locale.setlocale(locale.LC_ALL)
+ locale.setlocale(locale.LC_ALL, 'de_DE.UTF-8')
+
+ def tearDown(self):
+ locale.setlocale(locale.LC_ALL, self._saved)
+
+ def test_nonus_locale(self):
+ test_string = 'Thu, 15 May 2014 09:06:03 GMT'
+
+        # Default strptime should fail
+ with self.assertRaises(ValueError):
+ datetime.datetime.strptime(test_string, boto.utils.RFC1123)
+
+ # Our parser should succeed
+ result = boto.utils.parse_ts(test_string)
+
+ self.assertEqual(2014, result.year)
+ self.assertEqual(5, result.month)
+ self.assertEqual(15, result.day)
+ self.assertEqual(9, result.hour)
+ self.assertEqual(6, result.minute)
+
+
if __name__ == '__main__':
unittest.main()