author    Daniel G. Taylor <danielgtaylor@gmail.com>  2013-12-12 19:49:22 -0800
committer Daniel G. Taylor <danielgtaylor@gmail.com>  2013-12-12 19:49:22 -0800
commit    29a7fc6a6ce0851b9b85aed34a6c0087bd967482 (patch)
tree      ea709dd442b69b9c6c35ae5743188d72162da968
parent    2fcb85372e333d013790822086583ecf17fda35d (diff)
parent    d18eac82d6f543c948c380348f69d09acbf7cada (diff)

Merge branch 'release-2.20.0' (tag: 2.20.0)
-rw-r--r--  README.rst                                             |   6
-rwxr-xr-x  bin/sdbadmin                                           |   7
-rw-r--r--  boto/__init__.py                                       |  46
-rw-r--r--  boto/connection.py                                     |  10
-rw-r--r--  boto/directconnect/__init__.py                         |  66
-rw-r--r--  boto/directconnect/exceptions.py                       |  28
-rw-r--r--  boto/directconnect/layer1.py                           | 633
-rw-r--r--  boto/dynamodb2/items.py                                |   6
-rw-r--r--  boto/dynamodb2/layer1.py                               | 178
-rw-r--r--  boto/dynamodb2/table.py                                |   6
-rw-r--r--  boto/ec2/autoscale/__init__.py                         |   2
-rw-r--r--  boto/ec2/autoscale/group.py                            |   4
-rw-r--r--  boto/ec2/cloudwatch/metric.py                          |   7
-rw-r--r--  boto/ec2/connection.py                                 |  20
-rw-r--r--  boto/ec2/elb/__init__.py                               |   4
-rw-r--r--  boto/ec2/image.py                                      |   5
-rw-r--r--  boto/elasticache/layer1.py                             |  13
-rw-r--r--  boto/kinesis/__init__.py                               |  45
-rw-r--r--  boto/kinesis/exceptions.py                             |  51
-rw-r--r--  boto/kinesis/layer1.py                                 | 707
-rw-r--r--  boto/rds/__init__.py                                   |   2
-rw-r--r--  boto/rds/dbinstance.py                                 |  39
-rw-r--r--  boto/rds/dbsnapshot.py                                 |  34
-rw-r--r--  boto/utils.py                                          | 134
-rw-r--r--  docs/source/index.rst                                  |   1
-rw-r--r--  docs/source/releasenotes/v2.20.0.rst                   |  31
-rw-r--r--  setup.py                                               |   3
-rw-r--r--  tests/integration/directconnect/__init__.py            |   0
-rw-r--r--  tests/integration/directconnect/test_directconnect.py  |  40
-rw-r--r--  tests/integration/kinesis/__init__.py                  |   0
-rw-r--r--  tests/integration/kinesis/test_kinesis.py              |  84
-rw-r--r--  tests/unit/directconnect/__init__.py                   |   0
-rw-r--r--  tests/unit/directconnect/test_layer1.py                |  58
-rw-r--r--  tests/unit/dynamodb2/test_table.py                     |  11
-rw-r--r--  tests/unit/ec2/autoscale/test_group.py                 |  23
-rw-r--r--  tests/unit/elasticache/__init__.py                     |   0
-rw-r--r--  tests/unit/elasticache/test_api_interface.py           |  20
-rw-r--r--  tests/unit/rds/test_connection.py                      |  46
-rw-r--r--  tests/unit/rds/test_snapshot.py                        |  15
-rw-r--r--  tests/unit/test_connection.py                          |  32
-rw-r--r--  tests/unit/utils/test_utils.py                         |  71
41 files changed, 2313 insertions(+), 175 deletions(-)
diff --git a/README.rst b/README.rst
index d58d89a2..06bc104c 100644
--- a/README.rst
+++ b/README.rst
@@ -1,9 +1,9 @@
####
boto
####
-boto 2.19.0
+boto 2.20.0
-Released: 27-November-2013
+Released: 12-December-2013
.. image:: https://travis-ci.org/boto/boto.png?branch=develop
:target: https://travis-ci.org/boto/boto
@@ -23,6 +23,7 @@ At the moment, boto supports:
* Amazon Elastic Compute Cloud (EC2)
* Amazon Elastic Map Reduce (EMR)
* AutoScaling
+ * Amazon Kinesis
* Content Delivery
@@ -66,6 +67,7 @@ At the moment, boto supports:
* Amazon Route53
* Amazon Virtual Private Cloud (VPC)
* Elastic Load Balancing (ELB)
+ * AWS Direct Connect
* Payments and Billing
diff --git a/bin/sdbadmin b/bin/sdbadmin
index 3fbd3f44..8b072cc4 100755
--- a/bin/sdbadmin
+++ b/bin/sdbadmin
@@ -51,7 +51,7 @@ def confirm(message="Are you sure?"):
return choice and len(choice) > 0 and choice[0].lower() == "y"
-def dump_db(domain, file_name, use_json=False):
+def dump_db(domain, file_name, use_json=False, sort_attributes=False):
"""
Dump SDB domain to file
"""
@@ -59,7 +59,7 @@ def dump_db(domain, file_name, use_json=False):
if use_json:
for item in domain:
data = {"name": item.name, "attributes": item}
- print >> f, json.dumps(data)
+ print >> f, json.dumps(data, sort_keys=sort_attributes)
else:
doc = domain.to_xml(f)
@@ -113,6 +113,7 @@ if __name__ == "__main__":
parser.add_option("-a", "--all-domains", help="Operate on all domains", action="store_true", default=False, dest="all_domains")
if json:
parser.add_option("-j", "--use-json", help="Load/Store as JSON instead of XML", action="store_true", default=False, dest="json")
+ parser.add_option("-s", "--sort-attibutes", help="Sort the element attributes", action="store_true", default=False, dest="sort_attributes")
parser.add_option("-d", "--domain", help="Do functions on domain (may be more then one)", action="append", dest="domains")
parser.add_option("-f", "--file", help="Input/Output file we're operating on", dest="file_name")
parser.add_option("-r", "--region", help="Region (e.g. us-east-1[default] or eu-west-1)", default="us-east-1", dest="region_name")
@@ -170,7 +171,7 @@ if __name__ == "__main__":
file_name = options.file_name
else:
file_name = "%s.db" % domain.name
- dump_db(domain, file_name, options.json)
+ dump_db(domain, file_name, options.json, options.sort_attributes)
if options.load:
for domain in domains:
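A sketch of what the new flag does in practice, calling the script's dump_db() helper directly (domain name hypothetical); sorting attribute keys makes repeated JSON dumps of the same domain diffable:

    import boto

    sdb = boto.connect_sdb()
    domain = sdb.get_domain('mydomain')
    # Equivalent to running `sdbadmin -j -s`: JSON output with the
    # attribute keys of each item emitted in sorted order.
    dump_db(domain, 'mydomain.json', use_json=True, sort_attributes=True)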
diff --git a/boto/__init__.py b/boto/__init__.py
index 6c0ae7eb..a2b19893 100644
--- a/boto/__init__.py
+++ b/boto/__init__.py
@@ -36,7 +36,7 @@ import logging.config
import urlparse
from boto.exception import InvalidUriError
-__version__ = '2.19.0'
+__version__ = '2.20.0'
Version = __version__ # for backward compatibility
UserAgent = 'Boto/%s Python/%s %s/%s' % (
@@ -744,6 +744,50 @@ def connect_cloudtrail(aws_access_key_id=None,
)
+def connect_directconnect(aws_access_key_id=None,
+ aws_secret_access_key=None,
+ **kwargs):
+ """
+ Connect to AWS DirectConnect
+
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.directconnect.layer1.DirectConnectConnection`
+ :return: A connection to the AWS DirectConnect service
+ """
+ from boto.directconnect.layer1 import DirectConnectConnection
+ return DirectConnectConnection(
+ aws_access_key_id=aws_access_key_id,
+ aws_secret_access_key=aws_secret_access_key,
+ **kwargs
+ )
+
+def connect_kinesis(aws_access_key_id=None,
+ aws_secret_access_key=None,
+ **kwargs):
+ """
+ Connect to Amazon Kinesis
+
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.kinesis.layer1.KinesisConnection`
+ :return: A connection to the Amazon Kinesis service
+ """
+ from boto.kinesis.layer1 import KinesisConnection
+ return KinesisConnection(
+ aws_access_key_id=aws_access_key_id,
+ aws_secret_access_key=aws_secret_access_key,
+ **kwargs
+ )
+
def storage_uri(uri_str, default_scheme='file', debug=0, validate=True,
bucket_storage_uri_class=BucketStorageUri,
suppress_consec_slashes=True, is_latest=False):
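The two new top-level helpers follow the same pattern as every other boto.connect_* function. A minimal usage sketch; as usual, credentials may also come from environment variables or the boto config file:

    import boto

    # Keyword overrides (host, proxy, is_secure, ...) are passed through
    # to the underlying layer1 connection classes.
    dc = boto.connect_directconnect()
    kinesis = boto.connect_kinesis()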
diff --git a/boto/connection.py b/boto/connection.py
index 7d699eaa..a7edf6a8 100644
--- a/boto/connection.py
+++ b/boto/connection.py
@@ -840,6 +840,13 @@ class AWSAuthConnection(object):
auth = base64.encodestring(self.proxy_user + ':' + self.proxy_pass)
return {'Proxy-Authorization': 'Basic %s' % auth}
+ def set_host_header(self, request):
+ try:
+ request.headers['Host'] = \
+ self._auth_handler.host_header(self.host, request)
+ except AttributeError:
+ request.headers['Host'] = self.host.split(':', 1)[0]
+
def _mexe(self, request, sender=None, override_num_retries=None,
retry_handler=None):
"""
@@ -880,7 +887,8 @@ class AWSAuthConnection(object):
# the port info. All others should be now be up to date and
# not include the port.
if 's3' not in self._required_auth_capability():
- request.headers['Host'] = self.host.split(':', 1)[0]
+ self.set_host_header(request)
+
if callable(sender):
response = sender(connection, request.method, request.path,
request.body, request.headers)
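The try/except AttributeError in set_host_header() is duck typing: an auth handler that knows how to build the Host header (for example, one that must keep a non-default port in the signed header) exposes host_header(); any other handler falls back to the bare hostname. A toy sketch of the two shapes, with hypothetical handler classes:

    class PlainAuthHandler(object):
        # No host_header() method: set_host_header() takes the
        # AttributeError branch and uses host.split(':', 1)[0].
        pass

    class PortAwareAuthHandler(object):
        def host_header(self, host, request):
            # Hypothetical handler that keeps the port in the header.
            return host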
diff --git a/boto/directconnect/__init__.py b/boto/directconnect/__init__.py
new file mode 100644
index 00000000..0fa314ca
--- /dev/null
+++ b/boto/directconnect/__init__.py
@@ -0,0 +1,66 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.regioninfo import RegionInfo
+
+
+def regions():
+ """
+ Get all available regions for the AWS DirectConnect service.
+
+ :rtype: list
+ :return: A list of :class:`boto.regioninfo.RegionInfo`
+ """
+ from boto.directconnect.layer1 import DirectConnectConnection
+
+ return [RegionInfo(name='us-east-1',
+ endpoint='directconnect.us-east-1.amazonaws.com',
+ connection_cls=DirectConnectConnection),
+ RegionInfo(name='us-west-1',
+ endpoint='directconnect.us-west-1.amazonaws.com',
+ connection_cls=DirectConnectConnection),
+ RegionInfo(name='us-west-2',
+ endpoint='directconnect.us-west-2.amazonaws.com',
+ connection_cls=DirectConnectConnection),
+ RegionInfo(name='eu-west-1',
+ endpoint='directconnect.eu-west-1.amazonaws.com',
+ connection_cls=DirectConnectConnection),
+ RegionInfo(name='ap-southeast-1',
+ endpoint='directconnect.ap-southeast-1.amazonaws.com',
+ connection_cls=DirectConnectConnection),
+ RegionInfo(name='ap-southeast-2',
+ endpoint='directconnect.ap-southeast-2.amazonaws.com',
+ connection_cls=DirectConnectConnection),
+ RegionInfo(name='ap-northeast-1',
+ endpoint='directconnect.ap-northeast-1.amazonaws.com',
+ connection_cls=DirectConnectConnection),
+ RegionInfo(name='sa-east-1',
+ endpoint='directconnect.sa-east-1.amazonaws.com',
+ connection_cls=DirectConnectConnection),
+ ]
+
+
+def connect_to_region(region_name, **kw_params):
+ for region in regions():
+ if region.name == region_name:
+ return region.connect(**kw_params)
+ return None
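Connecting works the same as in every other per-service package; connect_to_region() returns None rather than raising for an unknown region name:

    from boto.directconnect import connect_to_region

    conn = connect_to_region('us-west-2')
    if conn is None:
        raise ValueError('unknown Direct Connect region')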
diff --git a/boto/directconnect/exceptions.py b/boto/directconnect/exceptions.py
new file mode 100644
index 00000000..e3cac9ba
--- /dev/null
+++ b/boto/directconnect/exceptions.py
@@ -0,0 +1,28 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+class DirectConnectClientException(Exception):
+ pass
+
+
+class DirectConnectServerException(Exception):
+ pass
diff --git a/boto/directconnect/layer1.py b/boto/directconnect/layer1.py
new file mode 100644
index 00000000..b6a87699
--- /dev/null
+++ b/boto/directconnect/layer1.py
@@ -0,0 +1,633 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+import boto
+from boto.connection import AWSQueryConnection
+from boto.regioninfo import RegionInfo
+from boto.exception import JSONResponseError
+from boto.directconnect import exceptions
+
+
+class DirectConnectConnection(AWSQueryConnection):
+ """
+ AWS Direct Connect makes it easy to establish a dedicated network
+ connection from your premises to Amazon Web Services (AWS). Using
+ AWS Direct Connect, you can establish private connectivity between
+ AWS and your data center, office, or colocation environment, which
+ in many cases can reduce your network costs, increase bandwidth
+ throughput, and provide a more consistent network experience than
+ Internet-based connections.
+
+ The AWS Direct Connect API Reference provides descriptions,
+ syntax, and usage examples for each of the actions and data types
+ for AWS Direct Connect. Use the following links to get started
+ using the AWS Direct Connect API Reference:
+
+
+ + `Actions`_: An alphabetical list of all AWS Direct Connect
+ actions.
+ + `Data Types`_: An alphabetical list of all AWS Direct Connect
+ data types.
+ + `Common Query Parameters`_: Parameters that all Query actions
+ can use.
+ + `Common Errors`_: Client and server errors that all actions can
+ return.
+ """
+ APIVersion = "2012-10-25"
+ DefaultRegionName = "us-east-1"
+ DefaultRegionEndpoint = "directconnect.us-east-1.amazonaws.com"
+ ServiceName = "DirectConnect"
+ TargetPrefix = "OvertureService"
+ ResponseError = JSONResponseError
+
+ _faults = {
+ "DirectConnectClientException": exceptions.DirectConnectClientException,
+ "DirectConnectServerException": exceptions.DirectConnectServerException,
+ }
+
+ def __init__(self, **kwargs):
+ region = kwargs.pop('region', None)
+ if not region:
+ region = RegionInfo(self, self.DefaultRegionName,
+ self.DefaultRegionEndpoint)
+
+ if 'host' not in kwargs:
+ kwargs['host'] = region.endpoint
+
+ AWSQueryConnection.__init__(self, **kwargs)
+ self.region = region
+
+ def _required_auth_capability(self):
+ return ['hmac-v4']
+
+ def allocate_connection_on_interconnect(self, bandwidth, connection_name,
+ owner_account, interconnect_id,
+ vlan):
+ """
+ Creates a hosted connection on an interconnect.
+
+ Allocates a VLAN number and a specified amount of bandwidth
+ for use by a hosted connection on the given interconnect.
+
+ :type bandwidth: string
+ :param bandwidth: Bandwidth of the connection.
+ Example: " 500Mbps "
+
+ Default: None
+
+ :type connection_name: string
+ :param connection_name: Name of the provisioned connection.
+ Example: " 500M Connection to AWS "
+
+ Default: None
+
+ :type owner_account: string
+ :param owner_account: Numeric account Id of the customer for whom the
+ connection will be provisioned.
+ Example: 123443215678
+
+ Default: None
+
+ :type interconnect_id: string
+ :param interconnect_id: ID of the interconnect on which the connection
+ will be provisioned.
+ Example: dxcon-456abc78
+
+ Default: None
+
+ :type vlan: integer
+ :param vlan: The dedicated VLAN provisioned to the connection.
+ Example: 101
+
+ Default: None
+
+ """
+ params = {
+ 'bandwidth': bandwidth,
+ 'connectionName': connection_name,
+ 'ownerAccount': owner_account,
+ 'interconnectId': interconnect_id,
+ 'vlan': vlan,
+ }
+ return self.make_request(action='AllocateConnectionOnInterconnect',
+ body=json.dumps(params))
+
+ def allocate_private_virtual_interface(self, connection_id,
+ owner_account,
+ new_private_virtual_interface_allocation):
+ """
+ Provisions a private virtual interface to be owned by a
+ different customer.
+
+ The owner of a connection calls this function to provision a
+ private virtual interface which will be owned by another AWS
+ customer.
+
+ Virtual interfaces created using this function must be
+ confirmed by the virtual interface owner by calling
+ ConfirmPrivateVirtualInterface. Until this step has been
+ completed, the virtual interface will be in 'Confirming'
+ state, and will not be available for handling traffic.
+
+ :type connection_id: string
+ :param connection_id: The connection ID on which the private virtual
+ interface is provisioned.
+ Default: None
+
+ :type owner_account: string
+ :param owner_account: The AWS account that will own the new private
+ virtual interface.
+ Default: None
+
+ :type new_private_virtual_interface_allocation: dict
+ :param new_private_virtual_interface_allocation: Detailed information
+ for the private virtual interface to be provisioned.
+ Default: None
+
+ """
+ params = {
+ 'connectionId': connection_id,
+ 'ownerAccount': owner_account,
+ 'newPrivateVirtualInterfaceAllocation': new_private_virtual_interface_allocation,
+ }
+ return self.make_request(action='AllocatePrivateVirtualInterface',
+ body=json.dumps(params))
+
+ def allocate_public_virtual_interface(self, connection_id, owner_account,
+ new_public_virtual_interface_allocation):
+ """
+ Provisions a public virtual interface to be owned by a
+ different customer.
+
+ The owner of a connection calls this function to provision a
+ public virtual interface which will be owned by another AWS
+ customer.
+
+ Virtual interfaces created using this function must be
+ confirmed by the virtual interface owner by calling
+ ConfirmPublicVirtualInterface. Until this step has been
+ completed, the virtual interface will be in 'Confirming'
+ state, and will not be available for handling traffic.
+
+ :type connection_id: string
+ :param connection_id: The connection ID on which the public virtual
+ interface is provisioned.
+ Default: None
+
+ :type owner_account: string
+ :param owner_account: The AWS account that will own the new public
+ virtual interface.
+ Default: None
+
+ :type new_public_virtual_interface_allocation: dict
+ :param new_public_virtual_interface_allocation: Detailed information
+ for the public virtual interface to be provisioned.
+ Default: None
+
+ """
+ params = {
+ 'connectionId': connection_id,
+ 'ownerAccount': owner_account,
+ 'newPublicVirtualInterfaceAllocation': new_public_virtual_interface_allocation,
+ }
+ return self.make_request(action='AllocatePublicVirtualInterface',
+ body=json.dumps(params))
+
+ def confirm_connection(self, connection_id):
+ """
+ Confirm the creation of a hosted connection on an
+ interconnect.
+
+ Upon creation, the hosted connection is initially in the
+ 'Ordering' state, and will remain in this state until the
+ owner calls ConfirmConnection to confirm creation of the
+ hosted connection.
+
+ :type connection_id: string
+ :param connection_id: ID of the connection.
+ Example: dxcon-fg5678gh
+
+ Default: None
+
+ """
+ params = {'connectionId': connection_id, }
+ return self.make_request(action='ConfirmConnection',
+ body=json.dumps(params))
+
+ def confirm_private_virtual_interface(self, virtual_interface_id,
+ virtual_gateway_id):
+ """
+ Accept ownership of a private virtual interface created by
+ another customer.
+
+ After the virtual interface owner calls this function, the
+ virtual interface will be created and attached to the given
+ virtual private gateway, and will be available for handling
+ traffic.
+
+ :type virtual_interface_id: string
+ :param virtual_interface_id: ID of the virtual interface.
+ Example: dxvif-123dfg56
+
+ Default: None
+
+ :type virtual_gateway_id: string
+ :param virtual_gateway_id: ID of the virtual private gateway that will
+ be attached to the virtual interface.
+ A virtual private gateway can be managed via the Amazon Virtual Private
+ Cloud (VPC) console or the `EC2 CreateVpnGateway`_ action.
+
+ Default: None
+
+ """
+ params = {
+ 'virtualInterfaceId': virtual_interface_id,
+ 'virtualGatewayId': virtual_gateway_id,
+ }
+ return self.make_request(action='ConfirmPrivateVirtualInterface',
+ body=json.dumps(params))
+
+ def confirm_public_virtual_interface(self, virtual_interface_id):
+ """
+ Accept ownership of a public virtual interface created by
+ another customer.
+
+ After the virtual interface owner calls this function, the
+ specified virtual interface will be created and made available
+ for handling traffic.
+
+ :type virtual_interface_id: string
+ :param virtual_interface_id: ID of the virtual interface.
+ Example: dxvif-123dfg56
+
+ Default: None
+
+ """
+ params = {'virtualInterfaceId': virtual_interface_id, }
+ return self.make_request(action='ConfirmPublicVirtualInterface',
+ body=json.dumps(params))
+
+ def create_connection(self, location, bandwidth, connection_name):
+ """
+ Creates a new connection between the customer network and a
+ specific AWS Direct Connect location.
+
+ A connection links your internal network to an AWS Direct
+ Connect location over a standard 1 gigabit or 10 gigabit
+ Ethernet fiber-optic cable. One end of the cable is connected
+ to your router, the other to an AWS Direct Connect router. An
+ AWS Direct Connect location provides access to Amazon Web
+ Services in the region it is associated with. You can
+ establish connections with AWS Direct Connect locations in
+ multiple regions, but a connection in one region does not
+ provide connectivity to other regions.
+
+ :type location: string
+ :param location: Where the connection is located.
+ Example: EqSV5
+
+ Default: None
+
+ :type bandwidth: string
+ :param bandwidth: Bandwidth of the connection.
+ Example: 1Gbps
+
+ Default: None
+
+ :type connection_name: string
+ :param connection_name: The name of the connection.
+ Example: " My Connection to AWS "
+
+ Default: None
+
+ """
+ params = {
+ 'location': location,
+ 'bandwidth': bandwidth,
+ 'connectionName': connection_name,
+ }
+ return self.make_request(action='CreateConnection',
+ body=json.dumps(params))
+
+ def create_interconnect(self, interconnect_name, bandwidth, location):
+ """
+ Creates a new interconnect between an AWS Direct Connect
+ partner's network and a specific AWS Direct Connect location.
+
+ An interconnect is a connection which is capable of hosting
+ other connections. The AWS Direct Connect partner can use an
+ interconnect to provide sub-1Gbps AWS Direct Connect service
+ to tier 2 customers who do not have their own connections.
+ Like a standard connection, an interconnect links the AWS
+ Direct Connect partner's network to an AWS Direct Connect
+ location over a standard 1 Gbps or 10 Gbps Ethernet fiber-
+ optic cable. One end is connected to the partner's router, the
+ other to an AWS Direct Connect router.
+
+ For each end customer, the AWS Direct Connect partner
+ provisions a connection on their interconnect by calling
+ AllocateConnectionOnInterconnect. The end customer can then
+ connect to AWS resources by creating a virtual interface on
+ their connection, using the VLAN assigned to them by the AWS
+ Direct Connect partner.
+
+ :type interconnect_name: string
+ :param interconnect_name: The name of the interconnect.
+ Example: " 1G Interconnect to AWS "
+
+ Default: None
+
+ :type bandwidth: string
+ :param bandwidth: The port bandwidth
+ Example: 1Gbps
+
+ Default: None
+
+ Available values: 1Gbps,10Gbps
+
+ :type location: string
+ :param location: Where the interconnect is located
+ Example: EqSV5
+
+ Default: None
+
+ """
+ params = {
+ 'interconnectName': interconnect_name,
+ 'bandwidth': bandwidth,
+ 'location': location,
+ }
+ return self.make_request(action='CreateInterconnect',
+ body=json.dumps(params))
+
+ def create_private_virtual_interface(self, connection_id,
+ new_private_virtual_interface):
+ """
+ Creates a new private virtual interface. A virtual interface
+ is the VLAN that transports AWS Direct Connect traffic. A
+ private virtual interface supports sending traffic to a single
+ virtual private cloud (VPC).
+
+ :type connection_id: string
+ :param connection_id: ID of the connection.
+ Example: dxcon-fg5678gh
+
+ Default: None
+
+ :type new_private_virtual_interface: dict
+ :param new_private_virtual_interface: Detailed information for the
+ private virtual interface to be created.
+ Default: None
+
+ """
+ params = {
+ 'connectionId': connection_id,
+ 'newPrivateVirtualInterface': new_private_virtual_interface,
+ }
+ return self.make_request(action='CreatePrivateVirtualInterface',
+ body=json.dumps(params))
+
+ def create_public_virtual_interface(self, connection_id,
+ new_public_virtual_interface):
+ """
+ Creates a new public virtual interface. A virtual interface is
+ the VLAN that transports AWS Direct Connect traffic. A public
+ virtual interface supports sending traffic to public services
+ of AWS such as Amazon Simple Storage Service (Amazon S3).
+
+ :type connection_id: string
+ :param connection_id: ID of the connection.
+ Example: dxcon-fg5678gh
+
+ Default: None
+
+ :type new_public_virtual_interface: dict
+ :param new_public_virtual_interface: Detailed information for the
+ public virtual interface to be created.
+ Default: None
+
+ """
+ params = {
+ 'connectionId': connection_id,
+ 'newPublicVirtualInterface': new_public_virtual_interface,
+ }
+ return self.make_request(action='CreatePublicVirtualInterface',
+ body=json.dumps(params))
+
+ def delete_connection(self, connection_id):
+ """
+ Deletes the connection.
+
+ Deleting a connection only stops the AWS Direct Connect port
+ hour and data transfer charges. You need to cancel separately
+ with the providers any services or charges for cross-connects
+ or network circuits that connect you to the AWS Direct Connect
+ location.
+
+ :type connection_id: string
+ :param connection_id: ID of the connection.
+ Example: dxcon-fg5678gh
+
+ Default: None
+
+ """
+ params = {'connectionId': connection_id, }
+ return self.make_request(action='DeleteConnection',
+ body=json.dumps(params))
+
+ def delete_interconnect(self, interconnect_id):
+ """
+ Deletes the specified interconnect.
+
+ :type interconnect_id: string
+ :param interconnect_id: The ID of the interconnect.
+ Example: dxcon-abc123
+
+ """
+ params = {'interconnectId': interconnect_id, }
+ return self.make_request(action='DeleteInterconnect',
+ body=json.dumps(params))
+
+ def delete_virtual_interface(self, virtual_interface_id):
+ """
+ Deletes a virtual interface.
+
+ :type virtual_interface_id: string
+ :param virtual_interface_id: ID of the virtual interface.
+ Example: dxvif-123dfg56
+
+ Default: None
+
+ """
+ params = {'virtualInterfaceId': virtual_interface_id, }
+ return self.make_request(action='DeleteVirtualInterface',
+ body=json.dumps(params))
+
+ def describe_connections(self, connection_id=None):
+ """
+ Displays all connections in this region.
+
+ If a connection ID is provided, the call returns only that
+ particular connection.
+
+ :type connection_id: string
+ :param connection_id: ID of the connection.
+ Example: dxcon-fg5678gh
+
+ Default: None
+
+ """
+ params = {}
+ if connection_id is not None:
+ params['connectionId'] = connection_id
+ return self.make_request(action='DescribeConnections',
+ body=json.dumps(params))
+
+ def describe_connections_on_interconnect(self, interconnect_id):
+ """
+ Return a list of connections that have been provisioned on the
+ given interconnect.
+
+ :type interconnect_id: string
+ :param interconnect_id: ID of the interconnect on which the
+ connections are provisioned.
+ Example: dxcon-abc123
+
+ Default: None
+
+ """
+ params = {'interconnectId': interconnect_id, }
+ return self.make_request(action='DescribeConnectionsOnInterconnect',
+ body=json.dumps(params))
+
+ def describe_interconnects(self, interconnect_id=None):
+ """
+ Returns a list of interconnects owned by the AWS account.
+
+ If an interconnect ID is provided, it will only return this
+ particular interconnect.
+
+ :type interconnect_id: string
+ :param interconnect_id: The ID of the interconnect.
+ Example: dxcon-abc123
+
+ """
+ params = {}
+ if interconnect_id is not None:
+ params['interconnectId'] = interconnect_id
+ return self.make_request(action='DescribeInterconnects',
+ body=json.dumps(params))
+
+ def describe_locations(self):
+ """
+ Returns the list of AWS Direct Connect locations in the
+ current AWS region. These are the locations that may be
+ selected when calling CreateConnection or CreateInterconnect.
+ """
+ params = {}
+ return self.make_request(action='DescribeLocations',
+ body=json.dumps(params))
+
+ def describe_virtual_gateways(self):
+ """
+ Returns a list of virtual private gateways owned by the AWS
+ account.
+
+ You can create one or more AWS Direct Connect private virtual
+ interfaces linking to a virtual private gateway. A virtual
+ private gateway can be managed via the Amazon Virtual Private
+ Cloud (VPC) console or the `EC2 CreateVpnGateway`_ action.
+ """
+ params = {}
+ return self.make_request(action='DescribeVirtualGateways',
+ body=json.dumps(params))
+
+ def describe_virtual_interfaces(self, connection_id=None,
+ virtual_interface_id=None):
+ """
+ Displays all virtual interfaces for an AWS account. Virtual
+ interfaces deleted fewer than 15 minutes before
+ DescribeVirtualInterfaces is called are also returned. If a
+ connection ID is included then only virtual interfaces
+ associated with this connection will be returned. If a virtual
+ interface ID is included then only a single virtual interface
+ will be returned.
+
+ A virtual interface (VLAN) transmits the traffic between the
+ AWS Direct Connect location and the customer.
+
+ If a connection ID is provided, only virtual interfaces
+ provisioned on the specified connection will be returned. If a
+ virtual interface ID is provided, only this particular virtual
+ interface will be returned.
+
+ :type connection_id: string
+ :param connection_id: ID of the connection.
+ Example: dxcon-fg5678gh
+
+ Default: None
+
+ :type virtual_interface_id: string
+ :param virtual_interface_id: ID of the virtual interface.
+ Example: dxvif-123dfg56
+
+ Default: None
+
+ """
+ params = {}
+ if connection_id is not None:
+ params['connectionId'] = connection_id
+ if virtual_interface_id is not None:
+ params['virtualInterfaceId'] = virtual_interface_id
+ return self.make_request(action='DescribeVirtualInterfaces',
+ body=json.dumps(params))
+
+ def make_request(self, action, body):
+ headers = {
+ 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
+ 'Host': self.region.endpoint,
+ 'Content-Type': 'application/x-amz-json-1.1',
+ 'Content-Length': str(len(body)),
+ }
+ http_request = self.build_base_http_request(
+ method='POST', path='/', auth_path='/', params={},
+ headers=headers, data=body)
+ response = self._mexe(http_request, sender=None,
+ override_num_retries=10)
+ response_body = response.read()
+ boto.log.debug(response_body)
+ if response.status == 200:
+ if response_body:
+ return json.loads(response_body)
+ else:
+ json_body = json.loads(response_body)
+ fault_name = json_body.get('__type', None)
+ exception_class = self._faults.get(fault_name, self.ResponseError)
+ raise exception_class(response.status, response.reason,
+ body=json_body)
+
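A short usage sketch of the new layer1 client; responses are the decoded JSON dicts from the service, and the location code below is hypothetical:

    from boto.directconnect.layer1 import DirectConnectConnection

    conn = DirectConnectConnection()  # defaults to us-east-1

    # Enumerate the Direct Connect locations and connections in the region.
    print(conn.describe_locations())
    print(conn.describe_connections())

    # Order a new 1 Gbps port at a given location.
    conn.create_connection(location='EqSV5',
                           bandwidth='1Gbps',
                           connection_name='My Connection to AWS')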
diff --git a/boto/dynamodb2/items.py b/boto/dynamodb2/items.py
index 9f076afa..257a7459 100644
--- a/boto/dynamodb2/items.py
+++ b/boto/dynamodb2/items.py
@@ -19,6 +19,9 @@ class Item(object):
This object presents a dictionary-like interface for accessing/storing
data. It also tries to intelligently track how data has changed throughout
the life of the instance, to be as efficient as possible about updates.
+
+ Empty items, or items that have no data, are considered falsey.
+
"""
def __init__(self, table, data=None, loaded=False):
"""
@@ -105,6 +108,9 @@ class Item(object):
def __contains__(self, key):
return key in self._data
+ def __nonzero__(self):
+ return bool(self._data)
+
def _determine_alterations(self):
"""
Checks the ``_orig_data`` against the ``_data`` to determine what
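A quick illustration of the new truthiness behaviour (a sketch only; Item merely stores the table reference, so None stands in for a real Table here):

    from boto.dynamodb2.items import Item

    item = Item(None, data={})
    assert not item             # an empty item is now falsey
    item['username'] = 'alice'
    assert item                 # an item holding data is truthy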
diff --git a/boto/dynamodb2/layer1.py b/boto/dynamodb2/layer1.py
index 796a6a79..0555107a 100644
--- a/boto/dynamodb2/layer1.py
+++ b/boto/dynamodb2/layer1.py
@@ -35,10 +35,9 @@ from boto.dynamodb2 import exceptions
class DynamoDBConnection(AWSQueryConnection):
"""
- Amazon DynamoDB is a fast, highly scalable, highly available,
- cost-effective non-relational database service. Amazon DynamoDB
- removes traditional scalability limitations on data storage while
- maintaining low latency and predictable performance.
+ Amazon DynamoDB **Overview**
+ This is the Amazon DynamoDB API Reference. This guide provides
+ descriptions and samples of the Amazon DynamoDB API.
"""
APIVersion = "2012-08-10"
DefaultRegionName = "us-east-1"
@@ -130,7 +129,7 @@ class DynamoDBConnection(AWSQueryConnection):
result. Requests for nonexistent items consume the minimum
read capacity units according to the type of read. For more
information, see `Capacity Units Calculations`_ in the Amazon
- DynamoDB Developer Guide .
+ DynamoDB Developer Guide.
:type request_items: map
:param request_items:
@@ -150,7 +149,9 @@ class DynamoDBConnection(AWSQueryConnection):
`False` (the default), an eventually consistent read is used.
:type return_consumed_capacity: string
- :param return_consumed_capacity:
+ :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
+ included in the response; if set to `NONE` (the default),
+ ConsumedCapacity is not included.
"""
params = {'RequestItems': request_items, }
@@ -256,7 +257,9 @@ class DynamoDBConnection(AWSQueryConnection):
match those of the schema in the table's attribute definition.
:type return_consumed_capacity: string
- :param return_consumed_capacity:
+ :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
+ included in the response; if set to `NONE` (the default),
+ ConsumedCapacity is not included.
:type return_item_collection_metrics: string
:param return_item_collection_metrics: If set to `SIZE`, statistics
@@ -274,7 +277,8 @@ class DynamoDBConnection(AWSQueryConnection):
body=json.dumps(params))
def create_table(self, attribute_definitions, table_name, key_schema,
- provisioned_throughput, local_secondary_indexes=None):
+ provisioned_throughput, local_secondary_indexes=None,
+ global_secondary_indexes=None):
"""
The CreateTable operation adds a new table to your account. In
an AWS account, table names must be unique within each region.
@@ -306,7 +310,7 @@ class DynamoDBConnection(AWSQueryConnection):
:param key_schema: Specifies the attributes that make up the primary
key for the table. The attributes in KeySchema must also be defined
in the AttributeDefinitions array. For more information, see `Data
- Model`_ in the Amazon DynamoDB Developer Guide .
+ Model`_ in the Amazon DynamoDB Developer Guide.
Each KeySchemaElement in the array is composed of:
@@ -323,7 +327,7 @@ class DynamoDBConnection(AWSQueryConnection):
KeyType of `RANGE`.
For more information, see `Specifying the Primary Key`_ in the Amazon
- DynamoDB Developer Guide .
+ DynamoDB Developer Guide.
:type local_secondary_indexes: list
:param local_secondary_indexes:
@@ -360,8 +364,15 @@ class DynamoDBConnection(AWSQueryConnection):
attribute into two different indexes, this counts as two distinct
attributes when determining the total.
+ :type global_secondary_indexes: list
+ :param global_secondary_indexes:
+
:type provisioned_throughput: dict
- :param provisioned_throughput:
+ :param provisioned_throughput: The provisioned throughput settings for
+ the specified table. The settings can be modified using the
+ UpdateTable operation.
+ For current minimum and maximum provisioned throughput values, see
+ `Limits`_ in the Amazon DynamoDB Developer Guide.
"""
params = {
@@ -372,6 +383,8 @@ class DynamoDBConnection(AWSQueryConnection):
}
if local_secondary_indexes is not None:
params['LocalSecondaryIndexes'] = local_secondary_indexes
+ if global_secondary_indexes is not None:
+ params['GlobalSecondaryIndexes'] = global_secondary_indexes
return self.make_request(action='CreateTable',
body=json.dumps(params))
@@ -459,7 +472,9 @@ class DynamoDBConnection(AWSQueryConnection):
+ `ALL_OLD` - The content of the old item is returned.
:type return_consumed_capacity: string
- :param return_consumed_capacity:
+ :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
+ included in the response; if set to `NONE` (the default),
+ ConsumedCapacity is not included.
:type return_item_collection_metrics: string
:param return_item_collection_metrics: If set to `SIZE`, statistics
@@ -496,15 +511,6 @@ class DynamoDBConnection(AWSQueryConnection):
operations, such as GetItem and PutItem , on a table in the
`DELETING` state until the table deletion is complete.
- Tables are unique among those associated with the AWS Account
- issuing the request, and the AWS region that receives the
- request (such as dynamodb.us-east-1.amazonaws.com). Each
- Amazon DynamoDB endpoint is entirely independent. For example,
- if you have two tables called "MyTable," one in dynamodb.us-
- east-1.amazonaws.com and one in dynamodb.us-
- west-1.amazonaws.com, they are completely independent and do
- not share any data; deleting one does not delete the other.
-
When you delete a table, any local secondary indexes on that
table are also deleted.
@@ -564,7 +570,9 @@ class DynamoDBConnection(AWSQueryConnection):
are used.
:type return_consumed_capacity: string
- :param return_consumed_capacity:
+ :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
+ included in the response; if set to `NONE` (the default),
+ ConsumedCapacity is not included.
"""
params = {'TableName': table_name, 'Key': key, }
@@ -582,14 +590,6 @@ class DynamoDBConnection(AWSQueryConnection):
Returns an array of all the tables associated with the current
account and endpoint.
- Each Amazon DynamoDB endpoint is entirely independent. For
- example, if you have two tables called "MyTable," one in
- dynamodb.us-east-1.amazonaws.com and one in dynamodb.us-
- west-1.amazonaws.com , they are completely independent and do
- not share any data. The ListTables operation returns all of
- the table names associated with the account making the
- request, for the endpoint that receives the request.
-
:type exclusive_start_table_name: string
:param exclusive_start_table_name: The name of the table that starts
the list. If you already ran a ListTables operation and received a
@@ -639,7 +639,7 @@ class DynamoDBConnection(AWSQueryConnection):
primary key attribute, or attributes.
For more information about using this API, see `Working with
- Items`_ in the Amazon DynamoDB Developer Guide .
+ Items`_ in the Amazon DynamoDB Developer Guide.
:type table_name: string
:param table_name: The name of the table to contain the item.
@@ -653,7 +653,7 @@ class DynamoDBConnection(AWSQueryConnection):
the table's attribute definition.
For more information about primary keys, see `Primary Key`_ in the
- Amazon DynamoDB Developer Guide .
+ Amazon DynamoDB Developer Guide.
Each element in the Item map is an AttributeValue object.
@@ -714,7 +714,9 @@ class DynamoDBConnection(AWSQueryConnection):
the content of the old item is returned.
:type return_consumed_capacity: string
- :param return_consumed_capacity:
+ :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
+ included in the response; if set to `NONE` (the default),
+ ConsumedCapacity is not included.
:type return_item_collection_metrics: string
:param return_item_collection_metrics: If set to `SIZE`, statistics
@@ -834,7 +836,7 @@ class DynamoDBConnection(AWSQueryConnection):
limit, it stops the operation and returns the matching values up to
the limit, and a LastEvaluatedKey to apply in a subsequent
operation to continue the operation. For more information see
- `Query and Scan`_ in the Amazon DynamoDB Developer Guide .
+ `Query and Scan`_ in the Amazon DynamoDB Developer Guide.
:type consistent_read: boolean
:param consistent_read: If set to `True`, then the operation uses
@@ -846,7 +848,7 @@ class DynamoDBConnection(AWSQueryConnection):
The selection criteria for the query.
For a query on a table, you can only have conditions on the table
- primary key attributes. you must specify the hash key attribute
+ primary key attributes. You must specify the hash key attribute
name and value as an `EQ` condition. You can optionally specify a
second condition, referring to the range key attribute.
@@ -878,7 +880,7 @@ class DynamoDBConnection(AWSQueryConnection):
example, equals, greater than, less than, etc. Valid comparison
operators for Query: `EQ | LE | LT | GE | GT | BEGINS_WITH |
BETWEEN` For information on specifying data types in JSON, see
- `JSON Data Format`_ in the Amazon DynamoDB Developer Guide . The
+ `JSON Data Format`_ in the Amazon DynamoDB Developer Guide. The
following are descriptions of each comparison operator.
+ `EQ` : Equal. AttributeValueList can contain only one AttributeValue
@@ -938,18 +940,16 @@ class DynamoDBConnection(AWSQueryConnection):
ascending order.
:type exclusive_start_key: map
- :param exclusive_start_key: The primary key of the item from which to
- continue an earlier operation. An earlier operation might provide
- this value as the LastEvaluatedKey if that operation was
- interrupted before completion; either because of the result set
- size or because of the setting for Limit . The LastEvaluatedKey can
- be passed back in a new request to continue the operation from that
- point.
+ :param exclusive_start_key: The primary key of the first item that this
+ operation will evaluate. Use the value that was returned for
+ LastEvaluatedKey in the previous operation.
The data type for ExclusiveStartKey must be String, Number or Binary.
No set data types are allowed.
:type return_consumed_capacity: string
- :param return_consumed_capacity:
+ :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
+ included in the response; if set to `NONE` (the default),
+ ConsumedCapacity is not included.
"""
params = {'TableName': table_name, }
@@ -994,10 +994,10 @@ class DynamoDBConnection(AWSQueryConnection):
The result set is eventually consistent.
By default, Scan operations proceed sequentially; however, for
- faster performance on large tables, applications can perform a
+ faster performance on large tables, applications can request a
parallel Scan by specifying the Segment and TotalSegments
parameters. For more information, see `Parallel Scan`_ in the
- Amazon DynamoDB Developer Guide .
+ Amazon DynamoDB Developer Guide.
:type table_name: string
:param table_name: The name of the table containing the requested
@@ -1020,7 +1020,7 @@ class DynamoDBConnection(AWSQueryConnection):
limit, it stops the operation and returns the matching values up to
the limit, and a LastEvaluatedKey to apply in a subsequent
operation to continue the operation. For more information see
- `Query and Scan`_ in the Amazon DynamoDB Developer Guide .
+ `Query and Scan`_ in the Amazon DynamoDB Developer Guide.
:type select: string
:param select: The attributes to be returned in the result. You can
@@ -1084,7 +1084,7 @@ class DynamoDBConnection(AWSQueryConnection):
operators for Scan: `EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL
| CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN` For
information on specifying data types in JSON, see `JSON Data
- Format`_ in the Amazon DynamoDB Developer Guide . The following are
+ Format`_ in the Amazon DynamoDB Developer Guide. The following are
descriptions of each comparison operator.
+ `EQ` : Equal. AttributeValueList can contain only one AttributeValue
@@ -1164,44 +1164,27 @@ class DynamoDBConnection(AWSQueryConnection):
"2", "1"]}`
:type exclusive_start_key: map
- :param exclusive_start_key: The primary key of the item from which to
- continue an earlier operation. An earlier operation might provide
- this value as the LastEvaluatedKey if that operation was
- interrupted before completion; either because of the result set
- size or because of the setting for Limit . The LastEvaluatedKey can
- be passed back in a new request to continue the operation from that
- point.
+ :param exclusive_start_key: The primary key of the first item that this
+ operation will evaluate. Use the value that was returned for
+ LastEvaluatedKey in the previous operation.
The data type for ExclusiveStartKey must be String, Number or Binary.
No set data types are allowed.
- If you are performing a parallel scan, the value of ExclusiveStartKey
- must fall into the key space of the Segment being scanned. For
- example, suppose that there are two application threads scanning a
- table using the following Scan parameters
-
-
- + Thread 0: Segment =0; TotalSegments =2
- + Thread 1: Segment =1; TotalSegments =2
-
-
- Now suppose that the Scan request for Thread 0 completed and returned a
- LastEvaluatedKey of "X". Because "X" is part of Segment 0's key
- space, it cannot be used anywhere else in the table. If Thread 1
- were to issue another Scan request with an ExclusiveStartKey of
- "X", Amazon DynamoDB would throw an InputValidationError because
- hash key "X" cannot be in Segment 1.
+ In a parallel scan, a Scan request that includes ExclusiveStartKey must
+ specify the same segment whose previous Scan returned the
+ corresponding value of LastEvaluatedKey.
:type return_consumed_capacity: string
- :param return_consumed_capacity:
+ :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
+ included in the response; if set to `NONE` (the default),
+ ConsumedCapacity is not included.
:type total_segments: integer
- :param total_segments: For parallel Scan requests, TotalSegments
- represents the total number of segments for a table that is being
- scanned. Segments are a way to logically divide a table into
- equally sized portions, for the duration of the Scan request. The
- value of TotalSegments corresponds to the number of application
- "workers" (such as threads or processes) that will perform the
- parallel Scan . For example, if you want to scan a table using four
+ :param total_segments: For a parallel Scan request, TotalSegments
+ represents the total number of segments into which the Scan
+ operation will be divided. The value of TotalSegments corresponds
+ to the number of application workers that will perform the parallel
+ scan. For example, if you want to scan a table using four
application threads, you would specify a TotalSegments value of 4.
The value for TotalSegments must be greater than or equal to 1, and
less than or equal to 4096. If you specify a TotalSegments value of
@@ -1210,15 +1193,17 @@ class DynamoDBConnection(AWSQueryConnection):
If you specify TotalSegments , you must also specify Segment .
:type segment: integer
- :param segment: For parallel Scan requests, Segment identifies an
- individual segment to be scanned by an application "worker" (such
- as a thread or a process). Each worker issues a Scan request with a
- distinct value for the segment it will scan.
+ :param segment: For a parallel Scan request, Segment identifies an
+ individual segment to be scanned by an application worker.
Segment IDs are zero-based, so the first segment is always 0. For
example, if you want to scan a table using four application
threads, the first thread would specify a Segment value of 0, the
second thread would specify 1, and so on.
+ The value of LastEvaluatedKey returned from a parallel Scan request
+ must be used as ExclusiveStartKey with the same Segment ID in a
+ subsequent Scan operation.
+
The value for Segment must be greater than or equal to 0, and less than
the value provided for TotalSegments .
@@ -1411,7 +1396,9 @@ class DynamoDBConnection(AWSQueryConnection):
returned.
:type return_consumed_capacity: string
- :param return_consumed_capacity:
+ :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
+ included in the response; if set to `NONE` (the default),
+ ConsumedCapacity is not included.
:type return_item_collection_metrics: string
:param return_item_collection_metrics: If set to `SIZE`, statistics
@@ -1434,7 +1421,8 @@ class DynamoDBConnection(AWSQueryConnection):
return self.make_request(action='UpdateItem',
body=json.dumps(params))
- def update_table(self, table_name, provisioned_throughput):
+ def update_table(self, table_name, provisioned_throughput=None,
+ global_secondary_index_updates=None):
"""
Updates the provisioned throughput for the given table.
Setting the throughput for a table helps you manage
@@ -1443,7 +1431,7 @@ class DynamoDBConnection(AWSQueryConnection):
The provisioned throughput values can be upgraded or
downgraded based on the maximums and minimums listed in the
- `Limits`_ section in the Amazon DynamoDB Developer Guide .
+ `Limits`_ section in the Amazon DynamoDB Developer Guide.
The table must be in the `ACTIVE` state for this operation to
succeed. UpdateTable is an asynchronous operation; while
@@ -1462,13 +1450,21 @@ class DynamoDBConnection(AWSQueryConnection):
:param table_name: The name of the table to be updated.
:type provisioned_throughput: dict
- :param provisioned_throughput:
+ :param provisioned_throughput: The provisioned throughput settings for
+ the specified table. The settings can be modified using the
+ UpdateTable operation.
+ For current minimum and maximum provisioned throughput values, see
+ `Limits`_ in the Amazon DynamoDB Developer Guide.
+
+ :type global_secondary_index_updates: list
+ :param global_secondary_index_updates:
"""
- params = {
- 'TableName': table_name,
- 'ProvisionedThroughput': provisioned_throughput,
- }
+ params = {'TableName': table_name, }
+ if provisioned_throughput is not None:
+ params['ProvisionedThroughput'] = provisioned_throughput
+ if global_secondary_index_updates is not None:
+ params['GlobalSecondaryIndexUpdates'] = global_secondary_index_updates
return self.make_request(action='UpdateTable',
body=json.dumps(params))
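The new global_secondary_index* parameters take the raw JSON shapes defined by the 2012-08-10 API. For example, raising the provisioned throughput on one GSI (table and index names hypothetical):

    from boto.dynamodb2.layer1 import DynamoDBConnection

    conn = DynamoDBConnection()
    conn.update_table(
        'users',
        global_secondary_index_updates=[{
            'Update': {
                'IndexName': 'LastNameIndex',
                'ProvisionedThroughput': {
                    'ReadCapacityUnits': 10,
                    'WriteCapacityUnits': 5,
                },
            },
        }])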
diff --git a/boto/dynamodb2/table.py b/boto/dynamodb2/table.py
index b8bc4730..930edeeb 100644
--- a/boto/dynamodb2/table.py
+++ b/boto/dynamodb2/table.py
@@ -424,7 +424,7 @@ class Table(object):
with boto.dynamodb. Unlike get_item, it takes hash_key and range_key first,
although you may still specify keyword arguments instead.
- Also unlike the get_item command, if the returned item has no keys
+ Also unlike the get_item command, if the returned item has no keys
(i.e., it does not exist in DynamoDB), a None result is returned, instead
of an empty key object.
@@ -668,6 +668,10 @@ class Table(object):
lookup['AttributeValueList'].append(
self._dynamizer.encode(value[1])
)
+ # Special-case the ``IN`` case
+ elif field_bits[-1] == 'in':
+ for val in value:
+ lookup['AttributeValueList'].append(self._dynamizer.encode(val))
else:
# Fix up the value for encoding, because it was built to only work
# with ``set``s.
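With the IN special case above, a filter built from an __in suffix can take a plain list of candidate values; each one is encoded separately into AttributeValueList. A sketch against a hypothetical table:

    from boto.dynamodb2.table import Table

    users = Table('users')
    for user in users.scan(username__in=['alice', 'bob', 'carol']):
        print(user['username'])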
diff --git a/boto/ec2/autoscale/__init__.py b/boto/ec2/autoscale/__init__.py
index 2a54adf1..864b5dde 100644
--- a/boto/ec2/autoscale/__init__.py
+++ b/boto/ec2/autoscale/__init__.py
@@ -785,7 +785,7 @@ class AutoScaleConnection(AWSQueryConnection):
params = {'AutoScalingGroupName': group_name,
'DesiredCapacity': desired_capacity}
if honor_cooldown:
- params['HonorCooldown'] = json.dumps('True')
+ params['HonorCooldown'] = 'true'
return self.get_status('SetDesiredCapacity', params)
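Before this fix, honor_cooldown=True serialized HonorCooldown as json.dumps('True'), i.e. the quoted string '"True"', which the service rejects; the API expects the lowercase literal. Caller code is unchanged:

    import boto.ec2.autoscale

    conn = boto.ec2.autoscale.connect_to_region('us-east-1')
    # Now sends HonorCooldown=true on the wire.
    conn.set_desired_capacity('my-asg', 4, honor_cooldown=True)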
diff --git a/boto/ec2/autoscale/group.py b/boto/ec2/autoscale/group.py
index 12fdb20d..d3646877 100644
--- a/boto/ec2/autoscale/group.py
+++ b/boto/ec2/autoscale/group.py
@@ -129,8 +129,8 @@ class AutoScalingGroup(object):
:param health_check_type: The service you want the health status from,
Amazon EC2 or Elastic Load Balancer.
- :type launch_config_name: str or LaunchConfiguration
- :param launch_config_name: Name of launch configuration (required).
+ :type launch_config: str or LaunchConfiguration
+ :param launch_config: Name of launch configuration (required).
:type load_balancers: list
:param load_balancers: List of load balancers.
diff --git a/boto/ec2/cloudwatch/metric.py b/boto/ec2/cloudwatch/metric.py
index 9c19b941..f92f282a 100644
--- a/boto/ec2/cloudwatch/metric.py
+++ b/boto/ec2/cloudwatch/metric.py
@@ -77,13 +77,6 @@ class Metric(object):
:param statistics: A list of statistics names Valid values:
Average | Sum | SampleCount | Maximum | Minimum
- :type dimensions: dict
- :param dimensions: A dictionary of dimension key/values where
- the key is the dimension name and the value
- is either a scalar value or an iterator
- of values to be associated with that
- dimension.
-
:type unit: string
:param unit: The unit for the metric. Value values are:
Seconds | Microseconds | Milliseconds | Bytes | Kilobytes |
diff --git a/boto/ec2/connection.py b/boto/ec2/connection.py
index 5a656d10..abe192c5 100644
--- a/boto/ec2/connection.py
+++ b/boto/ec2/connection.py
@@ -752,6 +752,11 @@ class EC2Connection(AWSQueryConnection):
* cc1.4xlarge
* cg1.4xlarge
* cc2.8xlarge
+ * g2.2xlarge
+ * i2.xlarge
+ * i2.2xlarge
+ * i2.4xlarge
+ * i2.8xlarge
:type placement: string
:param placement: The Availability Zone to launch the instance into.
@@ -1397,16 +1402,29 @@ class EC2Connection(AWSQueryConnection):
:type instance_type: string
:param instance_type: The type of instance to run:
+ * t1.micro
* m1.small
+ * m1.medium
* m1.large
* m1.xlarge
+ * m3.xlarge
+ * m3.2xlarge
* c1.medium
* c1.xlarge
* m2.xlarge
* m2.2xlarge
* m2.4xlarge
+ * cr1.8xlarge
+ * hi1.4xlarge
+ * hs1.8xlarge
* cc1.4xlarge
- * t1.micro
+ * cg1.4xlarge
+ * cc2.8xlarge
+ * g2.2xlarge
+ * i2.xlarge
+ * i2.2xlarge
+ * i2.4xlarge
+ * i2.8xlarge
:type placement: string
:param placement: The availability zone in which to launch
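The docstrings now list the full set of instance types the API accepts. For instance, a spot request for one of the newly documented I2 types (AMI ID hypothetical):

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')
    conn.request_spot_instances(price='0.25',
                                image_id='ami-12345678',
                                instance_type='i2.xlarge')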
diff --git a/boto/ec2/elb/__init__.py b/boto/ec2/elb/__init__.py
index d36e1372..4b6c4e24 100644
--- a/boto/ec2/elb/__init__.py
+++ b/boto/ec2/elb/__init__.py
@@ -159,7 +159,7 @@ class ELBConnection(AWSQueryConnection):
[SSLCertificateId]) where LoadBalancerPortNumber and
InstancePortNumber are integer values between 1 and 65535,
Protocol is a string containing either 'TCP', 'SSL', HTTP', or
- 'HTTPS'; SSLCertificateID is the ARN of a AWS AIM
+ 'HTTPS'; SSLCertificateID is the ARN of an AWS IAM
certificate, and must be specified when doing HTTPS.
:type subnets: list of strings
@@ -264,7 +264,7 @@ class ELBConnection(AWSQueryConnection):
[SSLCertificateId]) where LoadBalancerPortNumber and
InstancePortNumber are integer values between 1 and 65535,
Protocol is a string containing either 'TCP', 'SSL', HTTP', or
- 'HTTPS'; SSLCertificateID is the ARN of a AWS AIM
+ 'HTTPS'; SSLCertificateID is the ARN of an AWS IAM
certificate, and must be specified when doing HTTPS.
:type complex_listeners: List of tuples
diff --git a/boto/ec2/image.py b/boto/ec2/image.py
index 3e4e3171..f424b085 100644
--- a/boto/ec2/image.py
+++ b/boto/ec2/image.py
@@ -218,6 +218,11 @@ class Image(TaggedEC2Object):
* cc1.4xlarge
* cg1.4xlarge
* cc2.8xlarge
+ * g2.2xlarge
+ * i2.xlarge
+ * i2.2xlarge
+ * i2.4xlarge
+ * i2.8xlarge
:type placement: string
:param placement: The Availability Zone to launch the instance into.
diff --git a/boto/elasticache/layer1.py b/boto/elasticache/layer1.py
index f1dc3a26..fadd1f7c 100644
--- a/boto/elasticache/layer1.py
+++ b/boto/elasticache/layer1.py
@@ -99,8 +99,8 @@ class ElastiCacheConnection(AWSQueryConnection):
verb='POST',
path='/', params=params)
- def create_cache_cluster(self, cache_cluster_id, num_cache_nodes,
- cache_node_type, engine,
+ def create_cache_cluster(self, cache_cluster_id, num_cache_nodes=None,
+ cache_node_type=None, engine=None,
replication_group_id=None, engine_version=None,
cache_parameter_group_name=None,
cache_subnet_group_name=None,
@@ -244,10 +244,13 @@ class ElastiCacheConnection(AWSQueryConnection):
"""
params = {
'CacheClusterId': cache_cluster_id,
- 'NumCacheNodes': num_cache_nodes,
- 'CacheNodeType': cache_node_type,
- 'Engine': engine,
}
+ if num_cache_nodes is not None:
+ params['NumCacheNodes'] = num_cache_nodes
+ if cache_node_type is not None:
+ params['CacheNodeType'] = cache_node_type
+ if engine is not None:
+ params['Engine'] = engine
if replication_group_id is not None:
params['ReplicationGroupId'] = replication_group_id
if engine_version is not None:
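With the parameters now optional, callers no longer have to pass placeholder values they do not need. A sketch of the simple case, assuming a standalone memcached cluster (all values below are illustrative):

    import boto.elasticache

    conn = boto.elasticache.connect_to_region('us-east-1')
    # Only the cluster id is strictly required now; the other arguments
    # may be omitted, e.g. when they come from a replication group.
    conn.create_cache_cluster('my-cluster',
                              num_cache_nodes=1,
                              cache_node_type='cache.m1.small',
                              engine='memcached')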
diff --git a/boto/kinesis/__init__.py b/boto/kinesis/__init__.py
new file mode 100644
index 00000000..1c19a3b2
--- /dev/null
+++ b/boto/kinesis/__init__.py
@@ -0,0 +1,45 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.regioninfo import RegionInfo
+
+
+def regions():
+ """
+ Get all available regions for the Amazon Kinesis service.
+
+ :rtype: list
+ :return: A list of :class:`boto.regioninfo.RegionInfo`
+ """
+ from boto.kinesis.layer1 import KinesisConnection
+
+ return [RegionInfo(name='us-east-1',
+ endpoint='kinesis.us-east-1.amazonaws.com',
+ connection_cls=KinesisConnection),
+ ]
+
+
+def connect_to_region(region_name, **kw_params):
+ for region in regions():
+ if region.name == region_name:
+ return region.connect(**kw_params)
+ return None
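Usage follows the same pattern as boto's other service modules; note that `connect_to_region` returns None for an unsupported region rather than raising:

    from boto import kinesis

    conn = kinesis.connect_to_region('us-east-1')
    if conn is None:
        raise ValueError('Kinesis is not available in that region')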
diff --git a/boto/kinesis/exceptions.py b/boto/kinesis/exceptions.py
new file mode 100644
index 00000000..708f4636
--- /dev/null
+++ b/boto/kinesis/exceptions.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.exception import BotoServerError
+
+
+class ProvisionedThroughputExceededException(BotoServerError):
+ pass
+
+
+class LimitExceededException(BotoServerError):
+ pass
+
+
+class ExpiredIteratorException(BotoServerError):
+ pass
+
+
+class ResourceInUseException(BotoServerError):
+ pass
+
+
+class ResourceNotFoundException(BotoServerError):
+ pass
+
+
+class InvalidArgumentException(BotoServerError):
+ pass
+
+
+class SubscriptionRequiredException(BotoServerError):
+ pass
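These map service fault names onto Python exceptions, so callers can retry the throttling cases specifically. A minimal sketch, assuming a shard iterator obtained elsewhere (the placeholder string below is not a real iterator):

    import time

    import boto
    from boto.kinesis.exceptions import ProvisionedThroughputExceededException

    conn = boto.connect_kinesis()
    shard_iterator = '...'  # placeholder, from get_shard_iterator

    for attempt in range(5):
        try:
            result = conn.get_records(shard_iterator)
            break
        except ProvisionedThroughputExceededException:
            # Back off exponentially when the shard's read limit is hit.
            time.sleep(2 ** attempt)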
diff --git a/boto/kinesis/layer1.py b/boto/kinesis/layer1.py
new file mode 100644
index 00000000..2f486e99
--- /dev/null
+++ b/boto/kinesis/layer1.py
@@ -0,0 +1,707 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+import base64
+import boto
+
+from boto.connection import AWSQueryConnection
+from boto.regioninfo import RegionInfo
+from boto.exception import JSONResponseError
+from boto.kinesis import exceptions
+
+
+class KinesisConnection(AWSQueryConnection):
+ """
+ Amazon Kinesis Service API Reference
+ Amazon Kinesis is a managed service that scales elastically for
+ real time processing of streaming big data.
+ """
+ APIVersion = "2013-12-02"
+ DefaultRegionName = "us-east-1"
+ DefaultRegionEndpoint = "kinesis.us-east-1.amazonaws.com"
+ ServiceName = "Kinesis"
+ TargetPrefix = "Kinesis_20131202"
+ ResponseError = JSONResponseError
+
+ _faults = {
+ "ProvisionedThroughputExceededException": exceptions.ProvisionedThroughputExceededException,
+ "LimitExceededException": exceptions.LimitExceededException,
+ "ExpiredIteratorException": exceptions.ExpiredIteratorException,
+ "ResourceInUseException": exceptions.ResourceInUseException,
+ "ResourceNotFoundException": exceptions.ResourceNotFoundException,
+ "InvalidArgumentException": exceptions.InvalidArgumentException,
+ "SubscriptionRequiredException": exceptions.SubscriptionRequiredException
+ }
+
+ def __init__(self, **kwargs):
+ region = kwargs.pop('region', None)
+ if not region:
+ region = RegionInfo(self, self.DefaultRegionName,
+ self.DefaultRegionEndpoint)
+ if 'host' not in kwargs:
+ kwargs['host'] = region.endpoint
+ AWSQueryConnection.__init__(self, **kwargs)
+ self.region = region
+
+ def _required_auth_capability(self):
+ return ['hmac-v4']
+
+ def create_stream(self, stream_name, shard_count):
+ """
+ This operation adds a new Amazon Kinesis stream to your AWS
+ account. A stream captures and transports data records that
+ are continuously emitted from different data sources or
+    producers. Scale-out within an Amazon Kinesis stream is
+ explicitly supported by means of shards, which are uniquely
+ identified groups of data records in an Amazon Kinesis stream.
+
+ You specify and control the number of shards that a stream is
+ composed of. Each shard can support up to 5 read transactions
+ per second up to a maximum total of 2 MB of data read per
+ second. Each shard can support up to 1000 write transactions
+ per second up to a maximum total of 1 MB data written per
+ second. You can add shards to a stream if the amount of data
+ input increases and you can remove shards if the amount of
+ data input decreases.
+
+ The stream name identifies the stream. The name is scoped to
+ the AWS account used by the application. It is also scoped by
+ region. That is, two streams in two different accounts can
+ have the same name, and two streams in the same account, but
+ in two different regions, can have the same name.
+
+ `CreateStream` is an asynchronous operation. Upon receiving a
+ `CreateStream` request, Amazon Kinesis immediately returns and
+ sets the stream status to CREATING. After the stream is
+ created, Amazon Kinesis sets the stream status to ACTIVE. You
+ should perform read and write operations only on an ACTIVE
+ stream.
+
+ You receive a `LimitExceededException` when making a
+ `CreateStream` request if you try to do one of the following:
+
+
+ + Have more than five streams in the CREATING state at any
+ point in time.
+ + Create more shards than are authorized for your account.
+
+
+ **Note:** The default limit for an AWS account is two shards
+ per stream. If you need to create a stream with more than two
+ shards, contact AWS Support to increase the limit on your
+ account.
+
+ You can use the `DescribeStream` operation to check the stream
+ status, which is returned in `StreamStatus`.
+
+ `CreateStream` has a limit of 5 transactions per second per
+ account.
+
+ :type stream_name: string
+ :param stream_name: A name to identify the stream. The stream name is
+ scoped to the AWS account used by the application that creates the
+ stream. It is also scoped by region. That is, two streams in two
+ different AWS accounts can have the same name, and two streams in
+ the same AWS account, but in two different regions, can have the
+ same name.
+
+ :type shard_count: integer
+ :param shard_count: The number of shards that the stream will use. The
+ throughput of the stream is a function of the number of shards;
+ more shards are required for greater provisioned throughput.
+ **Note:** The default limit for an AWS account is two shards per
+ stream. If you need to create a stream with more than two shards,
+ contact AWS Support to increase the limit on your account.
+
+ """
+ params = {
+ 'StreamName': stream_name,
+ 'ShardCount': shard_count,
+ }
+ return self.make_request(action='CreateStream',
+ body=json.dumps(params))
+
+ def delete_stream(self, stream_name):
+ """
+ This operation deletes a stream and all of its shards and
+ data. You must shut down any applications that are operating
+ on the stream before you delete the stream. If an application
+ attempts to operate on a deleted stream, it will receive the
+ exception `ResourceNotFoundException`.
+
+ If the stream is in the ACTIVE state, you can delete it. After
+ a `DeleteStream` request, the specified stream is in the
+ DELETING state until Amazon Kinesis completes the deletion.
+
+ **Note:** Amazon Kinesis might continue to accept data read
+ and write operations, such as PutRecord and GetRecords, on a
+ stream in the DELETING state until the stream deletion is
+ complete.
+
+ When you delete a stream, any shards in that stream are also
+ deleted.
+
+ You can use the DescribeStream operation to check the state of
+ the stream, which is returned in `StreamStatus`.
+
+ `DeleteStream` has a limit of 5 transactions per second per
+ account.
+
+ :type stream_name: string
+ :param stream_name: The name of the stream to delete.
+
+ """
+ params = {'StreamName': stream_name, }
+ return self.make_request(action='DeleteStream',
+ body=json.dumps(params))
+
+ def describe_stream(self, stream_name, limit=None,
+ exclusive_start_shard_id=None):
+ """
+ This operation returns the following information about the
+ stream: the current status of the stream, the stream Amazon
+ Resource Name (ARN), and an array of shard objects that
+ comprise the stream. For each shard object there is
+ information about the hash key and sequence number ranges that
+ the shard spans, and the IDs of any earlier shards that played
+    a role in a MergeShards or SplitShard operation that
+ created the shard. A sequence number is the identifier
+ associated with every record ingested in the Amazon Kinesis
+ stream. The sequence number is assigned by the Amazon Kinesis
+ service when a record is put into the stream.
+
+ You can limit the number of returned shards using the `Limit`
+ parameter. The number of shards in a stream may be too large
+ to return from a single call to `DescribeStream`. You can
+ detect this by using the `HasMoreShards` flag in the returned
+ output. `HasMoreShards` is set to `True` when there is more
+ data available.
+
+ If there are more shards available, you can request more
+ shards by using the shard ID of the last shard returned by the
+ `DescribeStream` request, in the `ExclusiveStartShardId`
+ parameter in a subsequent request to `DescribeStream`.
+ `DescribeStream` is a paginated operation.
+
+ `DescribeStream` has a limit of 10 transactions per second per
+ account.
+
+ :type stream_name: string
+ :param stream_name: The name of the stream to describe.
+
+ :type limit: integer
+ :param limit: The maximum number of shards to return.
+
+ :type exclusive_start_shard_id: string
+ :param exclusive_start_shard_id: The shard ID of the shard to start
+ with for the stream description.
+
+ """
+ params = {'StreamName': stream_name, }
+ if limit is not None:
+ params['Limit'] = limit
+ if exclusive_start_shard_id is not None:
+ params['ExclusiveStartShardId'] = exclusive_start_shard_id
+ return self.make_request(action='DescribeStream',
+ body=json.dumps(params))
+
+ def get_records(self, shard_iterator, limit=None, b64_decode=True):
+ """
+ This operation returns one or more data records from a shard.
+ A `GetRecords` operation request can retrieve up to 10 MB of
+ data.
+
+ You specify a shard iterator for the shard that you want to
+ read data from in the `ShardIterator` parameter. The shard
+ iterator specifies the position in the shard from which you
+ want to start reading data records sequentially. A shard
+ iterator specifies this position using the sequence number of
+ a data record in the shard. For more information about the
+ shard iterator, see GetShardIterator.
+
+ `GetRecords` may return a partial result if the response size
+ limit is exceeded. You will get an error, but not a partial
+ result if the shard's provisioned throughput is exceeded, the
+ shard iterator has expired, or an internal processing failure
+ has occurred. Clients can request a smaller amount of data by
+ specifying a maximum number of returned records using the
+ `Limit` parameter. The `Limit` parameter can be set to an
+ integer value of up to 10,000. If you set the value to an
+ integer greater than 10,000, you will receive
+ `InvalidArgumentException`.
+
+ A new shard iterator is returned by every `GetRecords` request
+ in `NextShardIterator`, which you use in the `ShardIterator`
+ parameter of the next `GetRecords` request. When you
+ repeatedly read from an Amazon Kinesis stream use a
+ GetShardIterator request to get the first shard iterator to
+ use in your first `GetRecords` request and then use the shard
+ iterator returned in `NextShardIterator` for subsequent reads.
+
+ `GetRecords` can return `null` for the `NextShardIterator` to
+ reflect that the shard has been closed and that the requested
+ shard iterator would never have returned more data.
+
+ If no items can be processed because of insufficient
+ provisioned throughput on the shard involved in the request,
+ `GetRecords` throws `ProvisionedThroughputExceededException`.
+
+ :type shard_iterator: string
+ :param shard_iterator: The position in the shard from which you want to
+ start sequentially reading data records.
+
+ :type limit: integer
+ :param limit: The maximum number of records to return, which can be set
+ to a value of up to 10,000.
+
+ :type b64_decode: boolean
+ :param b64_decode: Decode the Base64-encoded ``Data`` field of records.
+
+ """
+ params = {'ShardIterator': shard_iterator, }
+ if limit is not None:
+ params['Limit'] = limit
+
+ response = self.make_request(action='GetRecords',
+ body=json.dumps(params))
+
+ # Base64 decode the data
+ if b64_decode:
+ for record in response.get('Records', []):
+ record['Data'] = base64.b64decode(record['Data'])
+
+ return response
+
+ def get_shard_iterator(self, stream_name, shard_id, shard_iterator_type,
+ starting_sequence_number=None):
+ """
+ This operation returns a shard iterator in `ShardIterator`.
+ The shard iterator specifies the position in the shard from
+ which you want to start reading data records sequentially. A
+ shard iterator specifies this position using the sequence
+ number of a data record in a shard. A sequence number is the
+ identifier associated with every record ingested in the Amazon
+ Kinesis stream. The sequence number is assigned by the Amazon
+ Kinesis service when a record is put into the stream.
+
+ You must specify the shard iterator type in the
+ `GetShardIterator` request. For example, you can set the
+ `ShardIteratorType` parameter to read exactly from the
+ position denoted by a specific sequence number by using the
+ AT_SEQUENCE_NUMBER shard iterator type, or right after the
+ sequence number by using the AFTER_SEQUENCE_NUMBER shard
+ iterator type, using sequence numbers returned by earlier
+ PutRecord, GetRecords or DescribeStream requests. You can
+ specify the shard iterator type TRIM_HORIZON in the request to
+ cause `ShardIterator` to point to the last untrimmed record in
+ the shard in the system, which is the oldest data record in
+ the shard. Or you can point to just after the most recent
+ record in the shard, by using the shard iterator type LATEST,
+ so that you always read the most recent data in the shard.
+
+ **Note:** Each shard iterator expires five minutes after it is
+ returned to the requester.
+
+ When you repeatedly read from an Amazon Kinesis stream use a
+    GetShardIterator request to get the first shard iterator to
+ use in your first `GetRecords` request and then use the shard
+ iterator returned by the `GetRecords` request in
+ `NextShardIterator` for subsequent reads. A new shard iterator
+ is returned by every `GetRecords` request in
+ `NextShardIterator`, which you use in the `ShardIterator`
+ parameter of the next `GetRecords` request.
+
+ If a `GetShardIterator` request is made too often, you will
+ receive a `ProvisionedThroughputExceededException`. For more
+ information about throughput limits, see the `Amazon Kinesis
+ Developer Guide`_.
+
+ `GetShardIterator` can return `null` for its `ShardIterator`
+ to indicate that the shard has been closed and that the
+ requested iterator will return no more data. A shard can be
+ closed by a SplitShard or MergeShards operation.
+
+ `GetShardIterator` has a limit of 5 transactions per second
+ per account per shard.
+
+ :type stream_name: string
+ :param stream_name: The name of the stream.
+
+ :type shard_id: string
+ :param shard_id: The shard ID of the shard to get the iterator for.
+
+ :type shard_iterator_type: string
+ :param shard_iterator_type:
+ Determines how the shard iterator is used to start reading data records
+ from the shard.
+
+ The following are the valid shard iterator types:
+
+
+ + AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted
+ by a specific sequence number.
+ + AFTER_SEQUENCE_NUMBER - Start reading right after the position
+ denoted by a specific sequence number.
+ + TRIM_HORIZON - Start reading at the last untrimmed record in the
+ shard in the system, which is the oldest data record in the shard.
+ + LATEST - Start reading just after the most recent record in the
+ shard, so that you always read the most recent data in the shard.
+
+ :type starting_sequence_number: string
+ :param starting_sequence_number: The sequence number of the data record
+        in the shard from which to start reading.
+
+ """
+ params = {
+ 'StreamName': stream_name,
+ 'ShardId': shard_id,
+ 'ShardIteratorType': shard_iterator_type,
+ }
+ if starting_sequence_number is not None:
+ params['StartingSequenceNumber'] = starting_sequence_number
+ return self.make_request(action='GetShardIterator',
+ body=json.dumps(params))
+
+ def list_streams(self, limit=None, exclusive_start_stream_name=None):
+ """
+ This operation returns an array of the names of all the
+ streams that are associated with the AWS account making the
+ `ListStreams` request. A given AWS account can have many
+ streams active at one time.
+
+ The number of streams may be too large to return from a single
+ call to `ListStreams`. You can limit the number of returned
+ streams using the `Limit` parameter. If you do not specify a
+ value for the `Limit` parameter, Amazon Kinesis uses the
+ default limit, which is currently 10.
+
+ You can detect if there are more streams available to list by
+ using the `HasMoreStreams` flag from the returned output. If
+ there are more streams available, you can request more streams
+ by using the name of the last stream returned by the
+ `ListStreams` request in the `ExclusiveStartStreamName`
+ parameter in a subsequent request to `ListStreams`. The group
+ of stream names returned by the subsequent request is then
+ added to the list. You can continue this process until all the
+ stream names have been collected in the list.
+
+ `ListStreams` has a limit of 5 transactions per second per
+ account.
+
+ :type limit: integer
+ :param limit: The maximum number of streams to list.
+
+ :type exclusive_start_stream_name: string
+ :param exclusive_start_stream_name: The name of the stream to start the
+ list with.
+
+ """
+ params = {}
+ if limit is not None:
+ params['Limit'] = limit
+ if exclusive_start_stream_name is not None:
+ params['ExclusiveStartStreamName'] = exclusive_start_stream_name
+ return self.make_request(action='ListStreams',
+ body=json.dumps(params))
+
+ def merge_shards(self, stream_name, shard_to_merge,
+ adjacent_shard_to_merge):
+ """
+ This operation merges two adjacent shards in a stream and
+ combines them into a single shard to reduce the stream's
+ capacity to ingest and transport data. Two shards are
+ considered adjacent if the union of the hash key ranges for
+    the two shards forms a contiguous set with no gaps. For
+ example, if you have two shards, one with a hash key range of
+ 276...381 and the other with a hash key range of 382...454,
+ then you could merge these two shards into a single shard that
+ would have a hash key range of 276...454. After the merge, the
+ single child shard receives data for all hash key values
+ covered by the two parent shards.
+
+ `MergeShards` is called when there is a need to reduce the
+ overall capacity of a stream because of excess capacity that
+ is not being used. The operation requires that you specify the
+ shard to be merged and the adjacent shard for a given stream.
+ For more information about merging shards, see the `Amazon
+ Kinesis Developer Guide`_.
+
+ If the stream is in the ACTIVE state, you can call
+ `MergeShards`. If a stream is in CREATING or UPDATING or
+ DELETING states, then Amazon Kinesis returns a
+ `ResourceInUseException`. If the specified stream does not
+ exist, Amazon Kinesis returns a `ResourceNotFoundException`.
+
+ You can use the DescribeStream operation to check the state of
+ the stream, which is returned in `StreamStatus`.
+
+ `MergeShards` is an asynchronous operation. Upon receiving a
+ `MergeShards` request, Amazon Kinesis immediately returns a
+ response and sets the `StreamStatus` to UPDATING. After the
+ operation is completed, Amazon Kinesis sets the `StreamStatus`
+ to ACTIVE. Read and write operations continue to work while
+ the stream is in the UPDATING state.
+
+ You use the DescribeStream operation to determine the shard
+ IDs that are specified in the `MergeShards` request.
+
+ If you try to operate on too many streams in parallel using
+ CreateStream, DeleteStream, `MergeShards` or SplitShard, you
+ will receive a `LimitExceededException`.
+
+    `MergeShards` has a limit of 5 transactions per second per
+ account.
+
+ :type stream_name: string
+ :param stream_name: The name of the stream for the merge.
+
+ :type shard_to_merge: string
+ :param shard_to_merge: The shard ID of the shard to combine with the
+ adjacent shard for the merge.
+
+ :type adjacent_shard_to_merge: string
+ :param adjacent_shard_to_merge: The shard ID of the adjacent shard for
+ the merge.
+
+ """
+ params = {
+ 'StreamName': stream_name,
+ 'ShardToMerge': shard_to_merge,
+ 'AdjacentShardToMerge': adjacent_shard_to_merge,
+ }
+ return self.make_request(action='MergeShards',
+ body=json.dumps(params))
+
+ def put_record(self, stream_name, data, partition_key,
+ explicit_hash_key=None,
+ sequence_number_for_ordering=None,
+ exclusive_minimum_sequence_number=None,
+ b64_encode=True):
+ """
+ This operation puts a data record into an Amazon Kinesis
+ stream from a producer. This operation must be called to send
+ data from the producer into the Amazon Kinesis stream for
+ real-time ingestion and subsequent processing. The `PutRecord`
+ operation requires the name of the stream that captures,
+ stores, and transports the data; a partition key; and the data
+ blob itself. The data blob could be a segment from a log file,
+ geographic/location data, website clickstream data, or any
+ other data type.
+
+ The partition key is used to distribute data across shards.
+ Amazon Kinesis segregates the data records that belong to a
+ data stream into multiple shards, using the partition key
+ associated with each data record to determine which shard a
+ given data record belongs to.
+
+ Partition keys are Unicode strings, with a maximum length
+ limit of 256 bytes. An MD5 hash function is used to map
+ partition keys to 128-bit integer values and to map associated
+ data records to shards using the hash key ranges of the
+ shards. You can override hashing the partition key to
+ determine the shard by explicitly specifying a hash value
+ using the `ExplicitHashKey` parameter. For more information,
+ see the `Amazon Kinesis Developer Guide`_.
+
+ `PutRecord` returns the shard ID of where the data record was
+ placed and the sequence number that was assigned to the data
+ record.
+
+ The `SequenceNumberForOrdering` sets the initial sequence
+ number for the partition key. Later `PutRecord` requests to
+ the same partition key (from the same client) will
+ automatically increase from `SequenceNumberForOrdering`,
+ ensuring strict sequential ordering.
+
+ If a `PutRecord` request cannot be processed because of
+ insufficient provisioned throughput on the shard involved in
+ the request, `PutRecord` throws
+ `ProvisionedThroughputExceededException`.
+
+ Data records are accessible for only 24 hours from the time
+ that they are added to an Amazon Kinesis stream.
+
+ :type stream_name: string
+ :param stream_name: The name of the stream to put the data record into.
+
+ :type data: blob
+ :param data: The data blob to put into the record, which will be Base64
+ encoded. The maximum size of the data blob is 50 kilobytes (KB).
+        Set `b64_encode` to ``False`` to disable automatic Base64 encoding.
+
+ :type partition_key: string
+ :param partition_key: Determines which shard in the stream the data
+ record is assigned to. Partition keys are Unicode strings with a
+ maximum length limit of 256 bytes. Amazon Kinesis uses the
+ partition key as input to a hash function that maps the partition
+ key and associated data to a specific shard. Specifically, an MD5
+ hash function is used to map partition keys to 128-bit integer
+ values and to map associated data records to shards. As a result of
+ this hashing mechanism, all data records with the same partition
+ key will map to the same shard within the stream.
+
+ :type explicit_hash_key: string
+ :param explicit_hash_key: The hash value used to explicitly determine
+ the shard the data record is assigned to by overriding the
+ partition key hash.
+
+ :type sequence_number_for_ordering: string
+ :param sequence_number_for_ordering: The sequence number to use as the
+ initial number for the partition key. Subsequent calls to
+ `PutRecord` from the same client and for the same partition key
+ will increase from the `SequenceNumberForOrdering` value.
+
+ :type b64_encode: boolean
+ :param b64_encode: Whether to Base64 encode `data`. Can be set to
+ ``False`` if `data` is already encoded to prevent double encoding.
+
+ """
+ params = {
+ 'StreamName': stream_name,
+ 'Data': data,
+ 'PartitionKey': partition_key,
+ }
+ if explicit_hash_key is not None:
+ params['ExplicitHashKey'] = explicit_hash_key
+ if sequence_number_for_ordering is not None:
+ params['SequenceNumberForOrdering'] = sequence_number_for_ordering
+ if b64_encode:
+ params['Data'] = base64.b64encode(params['Data'])
+ return self.make_request(action='PutRecord',
+ body=json.dumps(params))
+
+ def split_shard(self, stream_name, shard_to_split, new_starting_hash_key):
+ """
+ This operation splits a shard into two new shards in the
+ stream, to increase the stream's capacity to ingest and
+ transport data. `SplitShard` is called when there is a need to
+    increase the overall capacity of a stream because of an expected
+ increase in the volume of data records being ingested.
+
+ `SplitShard` can also be used when a given shard appears to be
+ approaching its maximum utilization, for example, when the set
+ of producers sending data into the specific shard are suddenly
+ sending more than previously anticipated. You can also call
+ the `SplitShard` operation to increase stream capacity, so
+ that more Amazon Kinesis applications can simultaneously read
+ data from the stream for real-time processing.
+
+ The `SplitShard` operation requires that you specify the shard
+ to be split and the new hash key, which is the position in the
+ shard where the shard gets split in two. In many cases, the
+ new hash key might simply be the average of the beginning and
+ ending hash key, but it can be any hash key value in the range
+ being mapped into the shard. For more information about
+ splitting shards, see the `Amazon Kinesis Developer Guide`_.
+
+ You can use the DescribeStream operation to determine the
+ shard ID and hash key values for the `ShardToSplit` and
+ `NewStartingHashKey` parameters that are specified in the
+ `SplitShard` request.
+
+ `SplitShard` is an asynchronous operation. Upon receiving a
+ `SplitShard` request, Amazon Kinesis immediately returns a
+ response and sets the stream status to UPDATING. After the
+ operation is completed, Amazon Kinesis sets the stream status
+ to ACTIVE. Read and write operations continue to work while
+ the stream is in the UPDATING state.
+
+ You can use `DescribeStream` to check the status of the
+ stream, which is returned in `StreamStatus`. If the stream is
+ in the ACTIVE state, you can call `SplitShard`. If a stream is
+ in CREATING or UPDATING or DELETING states, then Amazon
+ Kinesis returns a `ResourceInUseException`.
+
+ If the specified stream does not exist, Amazon Kinesis returns
+ a `ResourceNotFoundException`. If you try to create more
+ shards than are authorized for your account, you receive a
+ `LimitExceededException`.
+
+ **Note:** The default limit for an AWS account is two shards
+ per stream. If you need to create a stream with more than two
+ shards, contact AWS Support to increase the limit on your
+ account.
+
+ If you try to operate on too many streams in parallel using
+ CreateStream, DeleteStream, MergeShards or SplitShard, you
+ will receive a `LimitExceededException`.
+
+    `SplitShard` has a limit of 5 transactions per second per
+ account.
+
+ :type stream_name: string
+ :param stream_name: The name of the stream for the shard split.
+
+ :type shard_to_split: string
+ :param shard_to_split: The shard ID of the shard to split.
+
+ :type new_starting_hash_key: string
+ :param new_starting_hash_key: A hash key value for the starting hash
+ key of one of the child shards created by the split. The hash key
+ range for a given shard constitutes a set of ordered contiguous
+ positive integers. The value for `NewStartingHashKey` must be in
+ the range of hash keys being mapped into the shard. The
+ `NewStartingHashKey` hash key value and all higher hash key values
+ in hash key range are distributed to one of the child shards. All
+ the lower hash key values in the range are distributed to the other
+ child shard.
+
+ """
+ params = {
+ 'StreamName': stream_name,
+ 'ShardToSplit': shard_to_split,
+ 'NewStartingHashKey': new_starting_hash_key,
+ }
+ return self.make_request(action='SplitShard',
+ body=json.dumps(params))
+
+ def make_request(self, action, body):
+ headers = {
+ 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
+ 'Host': self.region.endpoint,
+ 'Content-Type': 'application/x-amz-json-1.1',
+ 'Content-Length': str(len(body)),
+ }
+ http_request = self.build_base_http_request(
+ method='POST', path='/', auth_path='/', params={},
+ headers=headers, data=body)
+ response = self._mexe(http_request, sender=None,
+ override_num_retries=10)
+ response_body = response.read()
+ boto.log.debug(response.getheaders())
+ boto.log.debug(response_body)
+ if response.status == 200:
+ if response_body:
+ return json.loads(response_body)
+ else:
+ json_body = json.loads(response_body)
+ fault_name = json_body.get('__type', None)
+ exception_class = self._faults.get(fault_name, self.ResponseError)
+ raise exception_class(response.status, response.reason,
+ body=json_body)
+
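Putting the new connection class together, an end-to-end sketch mirroring the integration test added later in this patch: create a stream, wait for it to become ACTIVE, write one record, then read it back (stream name and timings are arbitrary):

    import time

    import boto

    kinesis = boto.connect_kinesis()
    kinesis.create_stream('example', 1)

    # CreateStream is asynchronous; poll until the stream is ACTIVE.
    while True:
        description = kinesis.describe_stream('example')['StreamDescription']
        if description['StreamStatus'] == 'ACTIVE':
            break
        time.sleep(15)
    shard_id = description['Shards'][0]['ShardId']

    # put_record Base64 encodes the payload by default (b64_encode=True).
    kinesis.put_record('example', 'hello world', partition_key='pk')

    # TRIM_HORIZON starts at the oldest untrimmed record; get_records
    # Base64 decodes the Data field by default (b64_decode=True).
    iterator = kinesis.get_shard_iterator(
        'example', shard_id, 'TRIM_HORIZON')['ShardIterator']
    for record in kinesis.get_records(iterator)['Records']:
        print record['Data']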
diff --git a/boto/rds/__init__.py b/boto/rds/__init__.py
index b4c9b68c..bfb0b221 100644
--- a/boto/rds/__init__.py
+++ b/boto/rds/__init__.py
@@ -443,7 +443,7 @@ class RDSConnection(AWSQueryConnection):
# Remove any params set to None
for k, v in params.items():
- if not v: del(params[k])
+ if v is None: del(params[k])
return self.get_object('CreateDBInstance', params, DBInstance)
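The distinction matters because `not v` also drops legitimate falsey values such as 0, which is exactly the ignored backup_retention_period=0 bug fixed in this release. A pure-Python illustration:

    params = {'BackupRetentionPeriod': 0, 'Port': None}

    # Old behaviour: `not 0` is True, so the zero is wrongly dropped.
    old = dict((k, v) for k, v in params.items() if v)
    # old == {}

    # New behaviour: only None is treated as "unset".
    new = dict((k, v) for k, v in params.items() if v is not None)
    # new == {'BackupRetentionPeriod': 0}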
diff --git a/boto/rds/dbinstance.py b/boto/rds/dbinstance.py
index 043052ea..6a638510 100644
--- a/boto/rds/dbinstance.py
+++ b/boto/rds/dbinstance.py
@@ -22,6 +22,7 @@
from boto.rds.dbsecuritygroup import DBSecurityGroup
from boto.rds.parametergroup import ParameterGroup
from boto.rds.statusinfo import StatusInfo
+from boto.rds.dbsubnetgroup import DBSubnetGroup
from boto.rds.vpcsecuritygroupmembership import VPCSecurityGroupMembership
from boto.resultset import ResultSet
@@ -39,6 +40,8 @@ class DBInstance(object):
:ivar engine: The database engine being used
:ivar status: The status of the database in a string. e.g. "available"
:ivar allocated_storage: The size of the disk in gigabytes (int).
+ :ivar auto_minor_version_upgrade: Indicates that minor version patches
+ are applied automatically.
:ivar endpoint: A tuple that describes the hostname and port of
the instance. This is only available when the database is
in status "available".
@@ -75,7 +78,14 @@ class DBInstance(object):
:ivar read_replica_dbinstance_identifiers: List of read replicas
associated with this DB instance.
:ivar status_infos: The status of a Read Replica. If the instance is not
-        for a read replica, this will be blank.
+        a read replica, this will be blank.
+ :ivar character_set_name: If present, specifies the name of the character
+ set that this instance is associated with.
+ :ivar subnet_group: Specifies information on the subnet group associated
+ with the DB instance, including the name, description, and subnets
+ in the subnet group.
+ :ivar engine_version: Indicates the database engine version.
+ :ivar license_model: License model information for this DB instance.
"""
def __init__(self, connection=None, id=None):
@@ -85,6 +95,7 @@ class DBInstance(object):
self.engine = None
self.status = None
self.allocated_storage = None
+ self.auto_minor_version_upgrade = None
self.endpoint = None
self.instance_class = None
self.master_username = None
@@ -104,6 +115,10 @@ class DBInstance(object):
self._port = None
self._address = None
self.status_infos = None
+ self.character_set_name = None
+ self.subnet_group = None
+ self.engine_version = None
+ self.license_model = None
def __repr__(self):
return 'DBInstance:%s' % self.id
@@ -135,6 +150,9 @@ class DBInstance(object):
('DBInstanceStatusInfo', StatusInfo)
])
return self.status_infos
+ elif name == 'DBSubnetGroup':
+ self.subnet_group = DBSubnetGroup()
+ return self.subnet_group
return None
def endElement(self, name, value, connection):
@@ -150,6 +168,8 @@ class DBInstance(object):
self.status = value
elif name == 'AllocatedStorage':
self.allocated_storage = int(value)
+ elif name == 'AutoMinorVersionUpgrade':
+ self.auto_minor_version_upgrade = value.lower() == 'true'
elif name == 'DBInstanceClass':
self.instance_class = value
elif name == 'MasterUsername':
@@ -166,7 +186,7 @@ class DBInstance(object):
elif name == 'AvailabilityZone':
self.availability_zone = value
elif name == 'BackupRetentionPeriod':
- self.backup_retention_period = value
+ self.backup_retention_period = int(value)
elif name == 'LatestRestorableTime':
self.latest_restorable_time = value
elif name == 'PreferredMaintenanceWindow':
@@ -178,6 +198,12 @@ class DBInstance(object):
self.multi_az = True
elif name == 'Iops':
self.iops = int(value)
+ elif name == 'CharacterSetName':
+ self.character_set_name = value
+ elif name == 'EngineVersion':
+ self.engine_version = value
+ elif name == 'LicenseModel':
+ self.license_model = value
else:
setattr(self, name, value)
@@ -274,7 +300,8 @@ class DBInstance(object):
multi_az=False,
iops=None,
vpc_security_groups=None,
- apply_immediately=False):
+ apply_immediately=False,
+ new_instance_id=None):
"""
Modify this DBInstance.
@@ -317,6 +344,9 @@ class DBInstance(object):
:param apply_immediately: If true, the modifications will be
applied as soon as possible rather than waiting for the
next preferred maintenance window.
+
+ :type new_instance_id: str
+ :param new_instance_id: The new DB instance identifier.
:type backup_retention_period: int
:param backup_retention_period: The number of days for which
@@ -364,7 +394,8 @@ class DBInstance(object):
multi_az,
apply_immediately,
iops,
- vpc_security_groups)
+ vpc_security_groups,
+ new_instance_id)
class PendingModifiedValues(dict):
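The new `new_instance_id` parameter allows renaming an instance through the object API. A sketch with a placeholder instance identifier:

    import boto.rds

    conn = boto.rds.connect_to_region('us-east-1')
    instance = conn.get_all_dbinstances('mydb')[0]
    # Rename the instance immediately instead of waiting for the
    # next preferred maintenance window.
    instance.modify(new_instance_id='mydb-renamed', apply_immediately=True)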
diff --git a/boto/rds/dbsnapshot.py b/boto/rds/dbsnapshot.py
index acacd73d..16d8125b 100644
--- a/boto/rds/dbsnapshot.py
+++ b/boto/rds/dbsnapshot.py
@@ -25,8 +25,8 @@ class DBSnapshot(object):
Properties reference available from the AWS documentation at http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DBSnapshot.html
- :ivar EngineVersion: Specifies the version of the database engine
- :ivar LicenseModel: License model information for the restored DB instance
+ :ivar engine_version: Specifies the version of the database engine
+ :ivar license_model: License model information for the restored DB instance
:ivar allocated_storage: Specifies the allocated storage size in gigabytes (GB)
:ivar availability_zone: Specifies the name of the Availability Zone the DB Instance was located in at the time of the DB Snapshot
:ivar connection: boto.rds.RDSConnection associated with the current object
@@ -38,12 +38,19 @@ class DBSnapshot(object):
:ivar port: Specifies the port that the database engine was listening on at the time of the snapshot
:ivar snapshot_create_time: Provides the time (UTC) when the snapshot was taken
:ivar status: Specifies the status of this DB Snapshot. Possible values are [ available, backing-up, creating, deleted, deleting, failed, modifying, rebooting, resetting-master-credentials ]
+ :ivar iops: Specifies the Provisioned IOPS (I/O operations per second) value of the DB instance at the time of the snapshot.
+ :ivar option_group_name: Provides the option group name for the DB snapshot.
+ :ivar percent_progress: The percentage of the estimated data that has been transferred.
+ :ivar snapshot_type: Provides the type of the DB snapshot.
+ :ivar source_region: The region that the DB snapshot was created in or copied from.
+ :ivar vpc_id: Provides the Vpc Id associated with the DB snapshot.
"""
def __init__(self, connection=None, id=None):
self.connection = connection
self.id = id
self.engine = None
+ self.engine_version = None
self.snapshot_create_time = None
self.instance_create_time = None
self.port = None
@@ -53,6 +60,13 @@ class DBSnapshot(object):
self.allocated_storage = None
self.instance_id = None
self.availability_zone = None
+ self.license_model = None
+ self.iops = None
+ self.option_group_name = None
+ self.percent_progress = None
+ self.snapshot_type = None
+ self.source_region = None
+ self.vpc_id = None
def __repr__(self):
return 'DBSnapshot:%s' % self.id
@@ -63,6 +77,8 @@ class DBSnapshot(object):
def endElement(self, name, value, connection):
if name == 'Engine':
self.engine = value
+ elif name == 'EngineVersion':
+ self.engine_version = value
elif name == 'InstanceCreateTime':
self.instance_create_time = value
elif name == 'SnapshotCreateTime':
@@ -83,6 +99,20 @@ class DBSnapshot(object):
self.allocated_storage = int(value)
elif name == 'SnapshotTime':
self.time = value
+ elif name == 'LicenseModel':
+ self.license_model = value
+ elif name == 'Iops':
+ self.iops = int(value)
+ elif name == 'OptionGroupName':
+ self.option_group_name = value
+ elif name == 'PercentProgress':
+ self.percent_progress = int(value)
+ elif name == 'SnapshotType':
+ self.snapshot_type = value
+ elif name == 'SourceRegion':
+ self.source_region = value
+ elif name == 'VpcId':
+ self.vpc_id = value
else:
setattr(self, name, value)
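Once a snapshot has been fetched, the newly parsed fields are plain instance attributes; for example (the snapshot identifier is a placeholder):

    import boto.rds

    conn = boto.rds.connect_to_region('us-east-1')
    snapshot = conn.get_all_dbsnapshots(snapshot_id='mydb-snap')[0]
    print snapshot.engine_version, snapshot.license_model, snapshot.iops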
diff --git a/boto/utils.py b/boto/utils.py
index 6d89b21f..56db88e5 100644
--- a/boto/utils.py
+++ b/boto/utils.py
@@ -50,6 +50,7 @@ import logging.handlers
import boto
import boto.provider
import tempfile
+import random
import smtplib
import datetime
import re
@@ -75,6 +76,11 @@ except ImportError:
from boto.compat import json
+try:
+ from boto.compat.json import JSONDecodeError
+except ImportError:
+ JSONDecodeError = ValueError
+
# List of Query String Arguments of Interest
qsa_of_interest = ['acl', 'cors', 'defaultObjectAcl', 'location', 'logging',
'partNumber', 'policy', 'requestPayment', 'torrent',
@@ -89,7 +95,8 @@ qsa_of_interest = ['acl', 'cors', 'defaultObjectAcl', 'location', 'logging',
# having it listed here should cause no problems because
# GET bucket?storageClass is not part of the S3 API.)
'storageClass',
- # websiteConfig is a QSA for buckets in Google Cloud Storage.
+ # websiteConfig is a QSA for buckets in Google Cloud
+ # Storage.
'websiteConfig',
# compose is a QSA for objects in Google Cloud Storage.
'compose']
@@ -117,8 +124,9 @@ def canonical_string(method, path, headers, expires=None,
interesting_headers = {}
for key in headers:
lk = key.lower()
- if headers[key] != None and (lk in ['content-md5', 'content-type', 'date'] or
- lk.startswith(provider.header_prefix)):
+ if headers[key] is not None and \
+ (lk in ['content-md5', 'content-type', 'date'] or
+ lk.startswith(provider.header_prefix)):
interesting_headers[lk] = str(headers[key]).strip()
# these keys get empty strings if they don't exist
@@ -156,7 +164,7 @@ def canonical_string(method, path, headers, expires=None,
qsa = [a.split('=', 1) for a in qsa]
qsa = [unquote_v(a) for a in qsa if a[0] in qsa_of_interest]
if len(qsa) > 0:
- qsa.sort(cmp=lambda x, y:cmp(x[0], y[0]))
+ qsa.sort(cmp=lambda x, y: cmp(x[0], y[0]))
qsa = ['='.join(a) for a in qsa]
buf += '?'
buf += '&'.join(qsa)
@@ -272,15 +280,48 @@ class LazyLoadMetadata(dict):
if key in self._leaves:
resource = self._leaves[key]
- val = boto.utils.retry_url(self._url + urllib.quote(resource,
- safe="/:"),
- num_retries=self._num_retries)
- if val and val[0] == '{':
- val = json.loads(val)
+
+ for i in range(0, self._num_retries):
+ try:
+ val = boto.utils.retry_url(
+ self._url + urllib.quote(resource,
+ safe="/:"),
+ num_retries=self._num_retries)
+ if val and val[0] == '{':
+ val = json.loads(val)
+ break
+ else:
+ p = val.find('\n')
+ if p > 0:
+ val = val.split('\n')
+ break
+
+ except JSONDecodeError, e:
+ boto.log.debug(
+ "encountered '%s' exception: %s" % (
+ e.__class__.__name__, e))
+ boto.log.debug(
+ 'corrupted JSON data found: %s' % val)
+
+ except Exception, e:
+ boto.log.debug("encountered unretryable" +
+ " '%s' exception, re-raising" % (
+ e.__class__.__name__))
+ raise
+
+ boto.log.error("Caught exception reading meta data" +
+ " for the '%s' try" % (i + 1))
+
+ if i + 1 != self._num_retries:
+ next_sleep = random.random() * (2 ** i)
+ time.sleep(next_sleep)
else:
- p = val.find('\n')
- if p > 0:
- val = val.split('\n')
+                boto.log.error('Unable to read metadata, giving up')
+ boto.log.error(
+ "encountered '%s' exception: %s" % (
+ e.__class__.__name__, e))
+ raise
+
self[key] = val
elif key in self._dicts:
self[key] = LazyLoadMetadata(self._url + key + '/',
@@ -315,19 +356,28 @@ def _build_instance_metadata_url(url, version, path):
"""
Builds an EC2 metadata URL for fetching information about an instance.
- Requires the following arguments: a URL, a version and a path.
-
Example:
- >>> _build_instance_metadata_url('http://169.254.169.254', 'latest', 'meta-data')
+ >>> _build_instance_metadata_url('http://169.254.169.254', 'latest', 'meta-data/')
'http://169.254.169.254/latest/meta-data/'
+ :type url: string
+ :param url: URL to metadata service, e.g. 'http://169.254.169.254'
+
+ :type version: string
+ :param version: Version of the metadata to get, e.g. 'latest'
+
+ :type path: string
+ :param path: Path of the metadata to get, e.g. 'meta-data/'. If a trailing
+ slash is required it must be passed in with the path.
+
+ :return: The full metadata URL
"""
- return '%s/%s/%s/' % (url, version, path)
+ return '%s/%s/%s' % (url, version, path)
def get_instance_metadata(version='latest', url='http://169.254.169.254',
- data='meta-data', timeout=None, num_retries=5):
+ data='meta-data/', timeout=None, num_retries=5):
"""
Returns the instance metadata as a nested Python dictionary.
Simple values (e.g. local_hostname, hostname, etc.) will be
@@ -358,7 +408,8 @@ def get_instance_identity(version='latest', url='http://169.254.169.254',
Returns the instance identity as a nested Python dictionary.
"""
iid = {}
- base_url = _build_instance_metadata_url(url, version, 'dynamic/instance-identity')
+ base_url = _build_instance_metadata_url(url, version,
+ 'dynamic/instance-identity/')
if timeout is not None:
original = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
@@ -396,6 +447,7 @@ ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
ISO8601_MS = '%Y-%m-%dT%H:%M:%S.%fZ'
RFC1123 = '%a, %d %b %Y %H:%M:%S %Z'
+
def get_ts(ts=None):
if not ts:
ts = time.gmtime()
@@ -415,6 +467,7 @@ def parse_ts(ts):
dt = datetime.datetime.strptime(ts, RFC1123)
return dt
+
def find_class(module_name, class_name=None):
if class_name:
module_name = "%s.%s" % (module_name, class_name)
@@ -444,13 +497,13 @@ def update_dme(username, password, dme_id, ip_address):
def fetch_file(uri, file=None, username=None, password=None):
"""
- Fetch a file based on the URI provided. If you do not pass in a file pointer
- a tempfile.NamedTemporaryFile, or None if the file could not be
- retrieved is returned.
+    Fetch a file based on the URI provided.
+    If you do not pass in a file pointer, a tempfile.NamedTemporaryFile
+    is returned, or None if the file could not be retrieved.
The URI can be either an HTTP url, or "s3://bucket_name/key_name"
"""
boto.log.info('Fetching %s' % uri)
- if file == None:
+ if file is None:
file = tempfile.NamedTemporaryFile()
try:
if uri.startswith('s3://'):
@@ -495,7 +548,7 @@ class ShellCommand(object):
stderr=subprocess.PIPE,
cwd=cwd)
if(self.wait):
- while self.process.poll() == None:
+ while self.process.poll() is None:
time.sleep(1)
t = self.process.communicate()
self.log_fp.write(t[0])
@@ -504,7 +557,8 @@ class ShellCommand(object):
self.exit_code = self.process.returncode
if self.fail_fast and self.exit_code != 0:
- raise Exception("Command " + self.command + " failed with status " + self.exit_code)
+ raise Exception("Command " + self.command +
+ " failed with status " + self.exit_code)
return self.exit_code
@@ -514,12 +568,14 @@ class ShellCommand(object):
def getStatus(self):
return self.exit_code
- status = property(getStatus, setReadOnly, None, 'The exit code for the command')
+ status = property(getStatus, setReadOnly, None,
+ 'The exit code for the command')
def getOutput(self):
return self.log_fp.getvalue()
- output = property(getOutput, setReadOnly, None, 'The STDIN and STDERR output of the command')
+ output = property(getOutput, setReadOnly, None,
+                      'The STDOUT and STDERR output of the command')
class AuthSMTPHandler(logging.handlers.SMTPHandler):
@@ -565,10 +621,10 @@ class AuthSMTPHandler(logging.handlers.SMTPHandler):
smtp.login(self.username, self.password)
msg = self.format(record)
msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
- self.fromaddr,
- ','.join(self.toaddrs),
- self.getSubject(record),
- email.utils.formatdate(), msg)
+ self.fromaddr,
+ ','.join(self.toaddrs),
+ self.getSubject(record),
+ email.utils.formatdate(), msg)
smtp.sendmail(self.fromaddr, self.toaddrs, msg)
smtp.quit()
except (KeyboardInterrupt, SystemExit):
@@ -710,7 +766,7 @@ class Password(object):
"""
self.str = str
if hashfunc:
- self.hashfunc = hashfunc
+ self.hashfunc = hashfunc
def set(self, value):
self.str = self.hashfunc(value).hexdigest()
@@ -719,7 +775,7 @@ class Password(object):
return str(self.str)
def __eq__(self, other):
- if other == None:
+ if other is None:
return False
return str(self.hashfunc(other).hexdigest()) == str(self.str)
@@ -734,12 +790,14 @@ def notify(subject, body=None, html_body=None, to_string=None,
attachments=None, append_instance_id=True):
attachments = attachments or []
if append_instance_id:
- subject = "[%s] %s" % (boto.config.get_value("Instance", "instance-id"), subject)
+ subject = "[%s] %s" % (
+ boto.config.get_value("Instance", "instance-id"), subject)
if not to_string:
to_string = boto.config.get_value('Notification', 'smtp_to', None)
if to_string:
try:
- from_string = boto.config.get_value('Notification', 'smtp_from', 'boto')
+ from_string = boto.config.get_value('Notification',
+ 'smtp_from', 'boto')
msg = email.mime.multipart.MIMEMultipart()
msg['From'] = from_string
msg['Reply-To'] = from_string
@@ -759,11 +817,13 @@ def notify(subject, body=None, html_body=None, to_string=None,
for part in attachments:
msg.attach(part)
- smtp_host = boto.config.get_value('Notification', 'smtp_host', 'localhost')
+ smtp_host = boto.config.get_value('Notification',
+ 'smtp_host', 'localhost')
# Alternate port support
if boto.config.get_value("Notification", "smtp_port"):
- server = smtplib.SMTP(smtp_host, int(boto.config.get_value("Notification", "smtp_port")))
+ server = smtplib.SMTP(smtp_host, int(
+ boto.config.get_value("Notification", "smtp_port")))
else:
server = smtplib.SMTP(smtp_host)
@@ -874,7 +934,7 @@ def guess_mime_type(content, deftype):
:rtype: <type>:
:return: <description>
"""
- #Mappings recognized by cloudinit
+ # Mappings recognized by cloudinit
starts_with_mappings = {
'#include': 'text/x-include-url',
'#!': 'text/x-shellscript',
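The retry loop added above guards `get_instance_metadata` against the corrupt JSON the EC2 metadata service occasionally returns, sleeping `random.random() * 2**i` between attempts. Typical usage is unchanged:

    import boto.utils

    # num_retries now also covers corrupt JSON responses; each retry
    # waits a randomized, exponentially growing interval.
    metadata = boto.utils.get_instance_metadata(timeout=2, num_retries=3)
    identity = boto.utils.get_instance_identity(timeout=2, num_retries=3)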
diff --git a/docs/source/index.rst b/docs/source/index.rst
index d6f05db9..c6e7faa0 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -115,6 +115,7 @@ Release Notes
.. toctree::
:titlesonly:
+ releasenotes/v2.20.0
releasenotes/v2.19.0
releasenotes/v2.18.0
releasenotes/v2.17.0
diff --git a/docs/source/releasenotes/v2.20.0.rst b/docs/source/releasenotes/v2.20.0.rst
new file mode 100644
index 00000000..2700e1b6
--- /dev/null
+++ b/docs/source/releasenotes/v2.20.0.rst
@@ -0,0 +1,31 @@
+boto v2.20.0
+============
+
+:date: 2013/12/12
+
+This release adds support for Amazon Kinesis and AWS Direct Connect. Amazon EC2 gets support for new i2 instance types and is more resilient against metadata failures; Amazon DynamoDB gets support for global secondary indexes; and Amazon Relational Database Service (RDS) supports new DBInstance and DBSnapshot attributes. There are several other fixes for various services, including updated support for CloudStack and Eucalyptus.
+
+
+Features
+--------
+* Add support for Amazon Kinesis (:sha:`d0b684e`)
+* Add support for i2 instance types to EC2. (:sha:`0f5371f`)
+* Add support for DynamoDB Global Secondary Indexes (:sha:`297cacb`)
+* Add support for AWS Direct Connect. (:issue:`1894`, :sha:`3cbca26`)
+* Add option for sorting SDB dumps to sdbadmin. (:issue:`1888`, :sha:`070e4f6`)
+* Add a retry when EC2 metadata is returned as corrupt JSON. (:issue:`1883`, :issue:`1868`, :sha:`41470a0`)
+* Add some missing attributes to DBInstance and DBSnapshot. (:issue:`1880`, :sha:`2751dff`)
+
+
+Bugfixes
+--------
+* Implement nonzero for DynamoDB Item to consider empty items falsey (:issue:`1899`, :sha:`808e550`)
+* Remove `dimensions` from Metric.query() docstring. (:issue:`1901`, :sha:`ba6b8c7`)
+* Make trailing slashes for EC2 metadata URLs explicit & remove them from userdata requests. This fixes using boto for CloudStack. (:issue:`1900`, :issue:`1897`, :issue:`1856`, :sha:`5f4506e`)
+* Fix the DynamoDB 'scan in' filter to compare the same attribute types in a list rather than using an attribute set. (:issue:`1896`, :sha:`5fc59d6`)
+* Update Amazon ElastiCache parameters to be optional when creating a new cache cluster. (:issue:`1876`, :sha:`342b8df`)
+* Fix honor cooldown AutoScaling parameter serialization to prevent an exception and bad request. (:issue:`1895`, :issue:`1892`, :sha:`fc4674f`)
+* Fix ignored RDS backup_retention_period when value was 0. (:issue:`1887`, :issue:`1886`, :sha:`a19eb14`)
+* Use auth_handler to specify the host header value, including custom ports if possible, which are used by Eucalyptus. (:issue:`1862`, :sha:`ce6df03`)
+* Fix documentation of launch config in Autoscaling Group. (:issue:`1881`, :sha:`6f704d9`)
+* typo: AIM -> IAM (:issue:`1882`, :sha:`7ea2d5c`)
diff --git a/setup.py b/setup.py
index 16cbd814..840ddffd 100644
--- a/setup.py
+++ b/setup.py
@@ -74,7 +74,8 @@ setup(name = "boto",
"boto.swf", "boto.mws", "boto.cloudsearch", "boto.glacier",
"boto.beanstalk", "boto.datapipeline", "boto.elasticache",
"boto.elastictranscoder", "boto.opsworks", "boto.redshift",
- "boto.dynamodb2", "boto.support", "boto.cloudtrail"],
+ "boto.dynamodb2", "boto.support", "boto.cloudtrail",
+ "boto.directconnect", "boto.kinesis"],
package_data = {"boto.cacerts": ["cacerts.txt"]},
license = "MIT",
platforms = "Posix; MacOS X; Windows",
diff --git a/tests/integration/directconnect/__init__.py b/tests/integration/directconnect/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/integration/directconnect/__init__.py
diff --git a/tests/integration/directconnect/test_directconnect.py b/tests/integration/directconnect/test_directconnect.py
new file mode 100644
index 00000000..aa6567fc
--- /dev/null
+++ b/tests/integration/directconnect/test_directconnect.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import boto
+
+from unittest import TestCase
+
+
+class DirectConnectTest(TestCase):
+ """
+ A very basic test to make sure signatures and
+ basic calls work.
+ """
+ def test_basic(self):
+ conn = boto.connect_directconnect()
+
+ response = conn.describe_connections()
+
+ self.assertTrue(response)
+ self.assertTrue('connections' in response)
+ self.assertIsInstance(response['connections'], list)
diff --git a/tests/integration/kinesis/__init__.py b/tests/integration/kinesis/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/integration/kinesis/__init__.py
diff --git a/tests/integration/kinesis/test_kinesis.py b/tests/integration/kinesis/test_kinesis.py
new file mode 100644
index 00000000..3930f2f7
--- /dev/null
+++ b/tests/integration/kinesis/test_kinesis.py
@@ -0,0 +1,84 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import boto
+import time
+
+from unittest import TestCase
+
+
+class TimeoutError(Exception):
+ pass
+
+
+class TestKinesis(TestCase):
+ def setUp(self):
+ self.kinesis = boto.connect_kinesis()
+
+ def tearDown(self):
+ # Delete the stream even if there is a failure
+ self.kinesis.delete_stream('test')
+
+ def test_kinesis(self):
+ kinesis = self.kinesis
+
+ # Create a new stream
+ kinesis.create_stream('test', 1)
+
+ # Wait for the stream to be ready
+ tries = 0
+ while tries < 10:
+ tries += 1
+ time.sleep(15)
+ response = kinesis.describe_stream('test')
+
+ if response['StreamDescription']['StreamStatus'] == 'ACTIVE':
+ shard_id = response['StreamDescription']['Shards'][0]['ShardId']
+ break
+ else:
+ raise TimeoutError('Stream is still not active, aborting...')
+
+ # Get ready to process some data from the stream
+ response = kinesis.get_shard_iterator('test', shard_id, 'TRIM_HORIZON')
+ shard_iterator = response['ShardIterator']
+
+ # Write some data to the stream
+ data = 'Some data ...'
+ response = kinesis.put_record('test', data, data)
+
+ # Wait for the data to show up
+ tries = 0
+ while tries < 100:
+ tries += 1
+ time.sleep(1)
+
+ response = kinesis.get_records(shard_iterator)
+ shard_iterator = response['NextShardIterator']
+
+ if len(response['Records']):
+ break
+ else:
+ raise TimeoutError('No records found, aborting...')
+
+ # Read the data, which should be the same as what we wrote
+ self.assertEqual(1, len(response['Records']))
+ self.assertEqual(data, response['Records'][0]['Data'])
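One fixture detail worth noting: tearDown above calls delete_stream
unconditionally, and that call itself raises if create_stream never succeeded.
A more defensive cleanup might look like this (a sketch, not part of this
commit):

    def tearDown(self):
        # Best-effort cleanup; ignore the error if the stream was never created.
        try:
            self.kinesis.delete_stream('test')
        except Exception:
            pass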
diff --git a/tests/unit/directconnect/__init__.py b/tests/unit/directconnect/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/unit/directconnect/__init__.py
diff --git a/tests/unit/directconnect/test_layer1.py b/tests/unit/directconnect/test_layer1.py
new file mode 100644
index 00000000..c1579d12
--- /dev/null
+++ b/tests/unit/directconnect/test_layer1.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.directconnect.layer1 import DirectConnectConnection
+from tests.unit import AWSMockServiceTestCase
+
+
+class TestDescribeConnections(AWSMockServiceTestCase):
+ connection_class = DirectConnectConnection
+
+ def default_body(self):
+ return '''
+{
+ "connections": [
+ {
+ "bandwidth": "string",
+ "connectionId": "string",
+ "connectionName": "string",
+ "connectionState": "string",
+ "location": "string",
+ "ownerAccount": "string",
+ "partnerName": "string",
+ "region": "string",
+ "vlan": 1
+ }
+ ]
+}'''
+
+ def test_describe(self):
+ self.set_http_response(status_code=200)
+ api_response = self.service_connection.describe_connections()
+
+ self.assertEqual(1, len(api_response['connections']))
+ self.assertEqual('string', api_response['connections'][0]['region'])
+
+ self.assert_request_parameters({})
+
+ target = self.actual_request.headers['X-Amz-Target']
+ self.assertTrue('DescribeConnections' in target)
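Per the mock body above, describe_connections returns a plain dict whose
'connections' list uses the service's lower-camelCase JSON keys. Consuming it
is direct (a sketch reusing the fields from the mock response):

    import boto

    conn = boto.connect_directconnect()
    for c in conn.describe_connections()['connections']:
        # Keys mirror the Direct Connect JSON wire format.
        print c['connectionId'], c['region'], c['connectionState']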
diff --git a/tests/unit/dynamodb2/test_table.py b/tests/unit/dynamodb2/test_table.py
index df00a352..16b62fd8 100644
--- a/tests/unit/dynamodb2/test_table.py
+++ b/tests/unit/dynamodb2/test_table.py
@@ -653,6 +653,10 @@ class ItemTestCase(unittest.TestCase):
date_joined=12345
)
+ def test_nonzero(self):
+ self.assertTrue(self.johndoe)
+ self.assertFalse(self.create_item({}))
+
def fake_results(name, greeting='hello', exclusive_start_key=None, limit=None):
if exclusive_start_key is None:
@@ -1698,7 +1702,12 @@ class TableTestCase(unittest.TestCase):
'ComparisonOperator': 'GE',
},
'age': {
- 'AttributeValueList': [{'NS': ['32', '33', '30', '31']}],
+ 'AttributeValueList': [
+ {'N': '30'},
+ {'N': '31'},
+ {'N': '32'},
+ {'N': '33'},
+ ],
'ComparisonOperator': 'IN',
},
'last_name': {
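The fixture change above pins down the corrected wire format for IN
comparisons: each candidate value becomes its own {'N': ...} entry in
AttributeValueList rather than one {'NS': [...]} number set. The filter that
produces this request shape would be written roughly as (table and field names
hypothetical):

    from boto.dynamodb2.table import Table

    users = Table('users')
    # age__in expands to ComparisonOperator 'IN' with one
    # AttributeValueList entry per candidate value.
    results = users.scan(age__in=[30, 31, 32, 33])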
diff --git a/tests/unit/ec2/autoscale/test_group.py b/tests/unit/ec2/autoscale/test_group.py
index f8021cf0..9c907e6b 100644
--- a/tests/unit/ec2/autoscale/test_group.py
+++ b/tests/unit/ec2/autoscale/test_group.py
@@ -67,6 +67,29 @@ class TestAutoScaleGroup(AWSMockServiceTestCase):
'TerminationPolicies.member.2': 'OldestLaunchConfiguration',
}, ignore_params_values=['Version'])
+
+class TestAutoScaleGroupHonorCooldown(AWSMockServiceTestCase):
+ connection_class = AutoScaleConnection
+
+ def default_body(self):
+ return """
+ <SetDesiredCapacityResponse>
+ <ResponseMetadata>
+ <RequestId>9fb7e2db-6998-11e2-a985-57c82EXAMPLE</RequestId>
+ </ResponseMetadata>
+ </SetDesiredCapacityResponse>
+ """
+
+ def test_honor_cooldown(self):
+ self.set_http_response(status_code=200)
+ self.service_connection.set_desired_capacity('foo', 10, True)
+ self.assert_request_parameters({
+ 'Action': 'SetDesiredCapacity',
+ 'AutoScalingGroupName': 'foo',
+ 'DesiredCapacity': 10,
+ 'HonorCooldown': 'true',
+ }, ignore_params_values=['Version'])
+
class TestScheduledGroup(AWSMockServiceTestCase):
connection_class = AutoScaleConnection
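The new test pins down how honor_cooldown is serialized ('HonorCooldown':
'true'). In application code it is simply the third positional argument to
set_desired_capacity (a sketch, group name hypothetical):

    import boto.ec2.autoscale

    conn = boto.ec2.autoscale.connect_to_region('us-east-1')
    # True goes out on the wire as HonorCooldown=true.
    conn.set_desired_capacity('foo', 10, True)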
diff --git a/tests/unit/elasticache/__init__.py b/tests/unit/elasticache/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/unit/elasticache/__init__.py
diff --git a/tests/unit/elasticache/test_api_interface.py b/tests/unit/elasticache/test_api_interface.py
new file mode 100644
index 00000000..51432d38
--- /dev/null
+++ b/tests/unit/elasticache/test_api_interface.py
@@ -0,0 +1,20 @@
+from boto.elasticache.layer1 import ElastiCacheConnection
+from tests.unit import AWSMockServiceTestCase
+
+
+class TestAPIInterface(AWSMockServiceTestCase):
+ connection_class = ElastiCacheConnection
+
+ def test_required_launch_params(self):
+        """Make sure boto requires only the parameters that AWS itself requires."""
+ name = 'test_cache_cluster'
+ self.set_http_response(status_code=200, body='{}')
+ self.service_connection.create_cache_cluster(name)
+
+ self.assert_request_parameters({
+ 'Action': 'CreateCacheCluster',
+ 'CacheClusterId': name,
+ }, ignore_params_values=[
+ 'Version', 'AWSAccessKeyId', 'SignatureMethod', 'SignatureVersion',
+ 'Timestamp', 'ContentType',
+ ])
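The test above establishes that CacheClusterId is the only parameter boto
itself insists on for CreateCacheCluster; the rest are optional keyword
arguments. A minimal call (a sketch, cluster id hypothetical):

    from boto.elasticache.layer1 import ElastiCacheConnection

    conn = ElastiCacheConnection()
    # Only the cluster id is required client-side.
    conn.create_cache_cluster('test_cache_cluster')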
diff --git a/tests/unit/rds/test_connection.py b/tests/unit/rds/test_connection.py
index b8d012d1..2dea3758 100644
--- a/tests/unit/rds/test_connection.py
+++ b/tests/unit/rds/test_connection.py
@@ -95,7 +95,36 @@ class TestRDSConnection(AWSMockServiceTestCase):
<StatusType>read replication</StatusType>
</DBInstanceStatusInfo>
</StatusInfos>
- </DBInstance>
+ <DBSubnetGroup>
+ <VpcId>990524496922</VpcId>
+ <SubnetGroupStatus>Complete</SubnetGroupStatus>
+ <DBSubnetGroupDescription>My modified DBSubnetGroup</DBSubnetGroupDescription>
+ <DBSubnetGroupName>mydbsubnetgroup</DBSubnetGroupName>
+ <Subnets>
+ <Subnet>
+ <SubnetStatus>Active</SubnetStatus>
+ <SubnetIdentifier>subnet-7c5b4115</SubnetIdentifier>
+ <SubnetAvailabilityZone>
+ <Name>us-east-1c</Name>
+ </SubnetAvailabilityZone>
+ </Subnet>
+ <Subnet>
+ <SubnetStatus>Active</SubnetStatus>
+ <SubnetIdentifier>subnet-7b5b4112</SubnetIdentifier>
+ <SubnetAvailabilityZone>
+ <Name>us-east-1b</Name>
+ </SubnetAvailabilityZone>
+ </Subnet>
+ <Subnet>
+ <SubnetStatus>Active</SubnetStatus>
+ <SubnetIdentifier>subnet-3ea6bd57</SubnetIdentifier>
+ <SubnetAvailabilityZone>
+ <Name>us-east-1d</Name>
+ </SubnetAvailabilityZone>
+ </Subnet>
+ </Subnets>
+ </DBSubnetGroup>
+ </DBInstance>
</DBInstances>
</DescribeDBInstancesResult>
</DescribeDBInstancesResponse>
@@ -121,7 +150,7 @@ class TestRDSConnection(AWSMockServiceTestCase):
self.assertEqual(db.instance_class, 'db.m1.large')
self.assertEqual(db.master_username, 'awsuser')
self.assertEqual(db.availability_zone, 'us-west-2b')
- self.assertEqual(db.backup_retention_period, '1')
+ self.assertEqual(db.backup_retention_period, 1)
self.assertEqual(db.preferred_backup_window, '10:30-11:00')
self.assertEqual(db.preferred_maintenance_window,
'wed:06:30-wed:07:00')
@@ -147,6 +176,10 @@ class TestRDSConnection(AWSMockServiceTestCase):
self.assertEqual(db.status_infos[0].status_type, 'read replication')
self.assertEqual(db.vpc_security_groups[0].status, 'active')
self.assertEqual(db.vpc_security_groups[0].vpc_group, 'sg-1')
+ self.assertEqual(db.license_model, 'general-public-license')
+ self.assertEqual(db.engine_version, '5.5.27')
+ self.assertEqual(db.auto_minor_version_upgrade, True)
+ self.assertEqual(db.subnet_group.name, 'mydbsubnetgroup')
class TestRDSCCreateDBInstance(AWSMockServiceTestCase):
@@ -165,7 +198,7 @@ class TestRDSCCreateDBInstance(AWSMockServiceTestCase):
<PendingModifiedValues>
<MasterUserPassword>****</MasterUserPassword>
</PendingModifiedValues>
- <BackupRetentionPeriod>1</BackupRetentionPeriod>
+ <BackupRetentionPeriod>0</BackupRetentionPeriod>
<MultiAZ>false</MultiAZ>
<LicenseModel>general-public-license</LicenseModel>
<DBSubnetGroup>
@@ -235,12 +268,14 @@ class TestRDSCCreateDBInstance(AWSMockServiceTestCase):
'master',
'Password01',
param_group='default.mysql5.1',
- db_subnet_group_name='dbSubnetgroup01')
+ db_subnet_group_name='dbSubnetgroup01',
+ backup_retention_period=0)
self.assert_request_parameters({
'Action': 'CreateDBInstance',
'AllocatedStorage': 10,
'AutoMinorVersionUpgrade': 'true',
+ 'BackupRetentionPeriod': 0,
'DBInstanceClass': 'db.m1.large',
'DBInstanceIdentifier': 'SimCoProd01',
'DBParameterGroupName': 'default.mysql5.1',
@@ -248,7 +283,7 @@ class TestRDSCCreateDBInstance(AWSMockServiceTestCase):
'Engine': 'MySQL5.1',
'MasterUsername': 'master',
'MasterUserPassword': 'Password01',
- 'Port': 3306,
+ 'Port': 3306
}, ignore_params_values=['Version'])
self.assertEqual(db.id, 'simcoprod01')
@@ -265,6 +300,7 @@ class TestRDSCCreateDBInstance(AWSMockServiceTestCase):
'default.mysql5.1')
self.assertEqual(db.parameter_group.description, None)
self.assertEqual(db.parameter_group.engine, None)
+ self.assertEqual(db.backup_retention_period, 0)
def test_create_db_instance_param_group_instance(self):
self.set_http_response(status_code=200)
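Two behavioral changes are exercised here: backup_retention_period now parses
back as an int, and an explicit 0 (automated backups disabled) survives into
the request instead of being dropped. A sketch mirroring the test's call
(identifiers hypothetical, credentials assumed):

    import boto.rds

    conn = boto.rds.connect_to_region('us-west-2')
    # backup_retention_period=0 is now sent as BackupRetentionPeriod=0.
    db = conn.create_dbinstance(
        'simcoprod01', 10, 'db.m1.large', 'master', 'Password01',
        param_group='default.mysql5.1',
        backup_retention_period=0)
    print db.backup_retention_period  # 0 (an int, not the string '0')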
diff --git a/tests/unit/rds/test_snapshot.py b/tests/unit/rds/test_snapshot.py
index b350494f..c3c9d8a6 100644
--- a/tests/unit/rds/test_snapshot.py
+++ b/tests/unit/rds/test_snapshot.py
@@ -29,6 +29,10 @@ class TestDescribeDBSnapshots(AWSMockServiceTestCase):
<SnapshotType>manual</SnapshotType>
<MasterUsername>master</MasterUsername>
<OptionGroupName>myoptiongroupname</OptionGroupName>
+ <Iops>1000</Iops>
+ <PercentProgress>100</PercentProgress>
+ <SourceRegion>eu-west-1</SourceRegion>
+ <VpcId>myvpc</VpcId>
</DBSnapshot>
<DBSnapshot>
<Port>3306</Port>
@@ -45,6 +49,7 @@ class TestDescribeDBSnapshots(AWSMockServiceTestCase):
<SnapshotType>manual</SnapshotType>
<MasterUsername>sa</MasterUsername>
<OptionGroupName>myoptiongroupname</OptionGroupName>
+ <Iops>1000</Iops>
</DBSnapshot>
<DBSnapshot>
<Port>3306</Port>
@@ -61,6 +66,7 @@ class TestDescribeDBSnapshots(AWSMockServiceTestCase):
<SnapshotType>automated</SnapshotType>
<MasterUsername>master</MasterUsername>
<OptionGroupName>myoptiongroupname</OptionGroupName>
+ <Iops>1000</Iops>
</DBSnapshot>
</DBSnapshots>
</DescribeDBSnapshotsResult>
@@ -82,6 +88,15 @@ class TestDescribeDBSnapshots(AWSMockServiceTestCase):
self.assertEqual(response[0].id, 'mydbsnapshot')
self.assertEqual(response[0].status, 'available')
self.assertEqual(response[0].instance_id, 'simcoprod01')
+ self.assertEqual(response[0].engine_version, '5.1.50')
+ self.assertEqual(response[0].license_model, 'general-public-license')
+ self.assertEqual(response[0].iops, 1000)
+ self.assertEqual(response[0].option_group_name, 'myoptiongroupname')
+ self.assertEqual(response[0].percent_progress, 100)
+ self.assertEqual(response[0].snapshot_type, 'manual')
+ self.assertEqual(response[0].source_region, 'eu-west-1')
+ self.assertEqual(response[0].vpc_id, 'myvpc')
+
class TestCreateDBSnapshot(AWSMockServiceTestCase):
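DBSnapshot gains several parsed fields in this release (Iops, PercentProgress,
SourceRegion, VpcId, alongside the engine_version and license_model assertions
above). Reading them off a snapshot listing (a sketch):

    import boto.rds

    conn = boto.rds.connect_to_region('us-east-1')
    for snap in conn.get_all_dbsnapshots():
        # iops and percent_progress parse as ints; the rest are strings.
        print snap.id, snap.iops, snap.percent_progress, snap.source_region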
diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py
index dfb4ee27..6f3a6053 100644
--- a/tests/unit/test_connection.py
+++ b/tests/unit/test_connection.py
@@ -142,6 +142,38 @@ class TestAWSAuthConnection(unittest.TestCase):
self.assertEqual(conn.proxy_port, 8180)
del os.environ['http_proxy']
+    # This tests the proper setting of the Host header in v4 signing.
+ def test_host_header_with_nonstandard_port(self):
+ # test standard port first
+ conn = V4AuthConnection(
+ 'testhost',
+ aws_access_key_id='access_key',
+ aws_secret_access_key='secret')
+ request = conn.build_base_http_request(method='POST', path='/',
+ auth_path=None, params=None, headers=None, data='', host=None)
+ conn.set_host_header(request)
+ self.assertEqual(request.headers['Host'], 'testhost')
+
+ # next, test non-standard port
+ conn = V4AuthConnection(
+ 'testhost',
+ aws_access_key_id='access_key',
+ aws_secret_access_key='secret',
+ port=8773)
+ request = conn.build_base_http_request(method='POST', path='/',
+ auth_path=None, params=None, headers=None, data='', host=None)
+ conn.set_host_header(request)
+ self.assertEqual(request.headers['Host'], 'testhost:8773')
+
+class V4AuthConnection(AWSAuthConnection):
+ def __init__(self, host, aws_access_key_id, aws_secret_access_key, port=443):
+ AWSAuthConnection.__init__(self, host, aws_access_key_id,
+ aws_secret_access_key, port=port)
+
+ def _required_auth_capability(self):
+ return ['hmac-v4']
+
+
class TestAWSQueryConnection(unittest.TestCase):
def setUp(self):
self.region = RegionInfo(name='cc-zone-1',
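The Host header matters for SigV4 because it is folded into the signed
canonical request: if the client signs one value and sends another, the
service rejects the signature. set_host_header keeps the two consistent by
appending the port only when it is non-standard. Reusing the V4AuthConnection
helper defined above (a sketch):

    conn = V4AuthConnection('testhost', aws_access_key_id='ak',
                            aws_secret_access_key='sk', port=8773)
    request = conn.build_base_http_request(method='POST', path='/',
                                           auth_path=None, params=None,
                                           headers=None, data='', host=None)
    conn.set_host_header(request)
    print request.headers['Host']  # 'testhost:8773'; just 'testhost' on 443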
diff --git a/tests/unit/utils/test_utils.py b/tests/unit/utils/test_utils.py
index abb85353..d15fd117 100644
--- a/tests/unit/utils/test_utils.py
+++ b/tests/unit/utils/test_utils.py
@@ -19,17 +19,25 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
-import unittest
+try:
+ import unittest2 as unittest
+except ImportError:
+ import unittest
+
import hashlib
import hmac
import mock
+import boto.utils
from boto.utils import Password
from boto.utils import pythonize_name
from boto.utils import _build_instance_metadata_url
+from boto.utils import get_instance_userdata
from boto.utils import retry_url
+from boto.utils import LazyLoadMetadata
+from boto.compat import json
class TestPassword(unittest.TestCase):
"""Test basic password functionality"""
@@ -115,7 +123,7 @@ class TestBuildInstanceMetadataURL(unittest.TestCase):
self.assertEqual(_build_instance_metadata_url(
'http://169.254.169.254',
'latest',
- 'meta-data'
+ 'meta-data/'
),
'http://169.254.169.254/latest/meta-data/'
)
@@ -124,7 +132,7 @@ class TestBuildInstanceMetadataURL(unittest.TestCase):
self.assertEqual(_build_instance_metadata_url(
'http://169.254.169.254',
'latest',
- 'dynamic'
+ 'dynamic/'
),
'http://169.254.169.254/latest/dynamic/'
)
@@ -133,7 +141,7 @@ class TestBuildInstanceMetadataURL(unittest.TestCase):
self.assertEqual(_build_instance_metadata_url(
'http://169.254.169.254',
'1.0',
- 'meta-data'
+ 'meta-data/'
),
'http://169.254.169.254/1.0/meta-data/'
)
@@ -142,7 +150,7 @@ class TestBuildInstanceMetadataURL(unittest.TestCase):
self.assertEqual(_build_instance_metadata_url(
'http://10.0.1.5',
'latest',
- 'meta-data'
+ 'meta-data/'
),
'http://10.0.1.5/latest/meta-data/'
)
@@ -153,10 +161,9 @@ class TestBuildInstanceMetadataURL(unittest.TestCase):
'2013-03-22',
'user-data'
),
- 'http://10.0.1.5/2013-03-22/user-data/'
+ 'http://10.0.1.5/2013-03-22/user-data'
)
-
class TestRetryURL(unittest.TestCase):
def setUp(self):
self.urlopen_patch = mock.patch('urllib2.urlopen')
@@ -185,6 +192,56 @@ class TestRetryURL(unittest.TestCase):
response = retry_url('http://10.10.10.10/foo', num_retries=1)
self.assertEqual(response, 'no proxy response')
+class TestLazyLoadMetadata(unittest.TestCase):
+
+ def setUp(self):
+ self.retry_url_patch = mock.patch('boto.utils.retry_url')
+ boto.utils.retry_url = self.retry_url_patch.start()
+
+ def tearDown(self):
+ self.retry_url_patch.stop()
+
+ def set_normal_response(self, data):
+        # "data" is a list of return values; mock's side_effect consumes one per call.
+ fake_response = mock.Mock()
+ fake_response.side_effect = data
+ boto.utils.retry_url = fake_response
+
+ def test_meta_data_with_invalid_json_format_happened_once(self):
+        # "key_data" is stored in "self._leaves" when the
+        # LazyLoadMetadata instance is initialized.
+ key_data = "test"
+ invalid_data = '{"invalid_json_format" : true,}'
+ valid_data = '{ "%s" : {"valid_json_format": true}}' % key_data
+ url = "/".join(["http://169.254.169.254", key_data])
+ num_retries = 2
+
+ self.set_normal_response([key_data, invalid_data, valid_data])
+ response = LazyLoadMetadata(url, num_retries)
+ self.assertEqual(response.values()[0], json.loads(valid_data))
+
+ def test_meta_data_with_invalid_json_format_happened_twice(self):
+ key_data = "test"
+ invalid_data = '{"invalid_json_format" : true,}'
+ valid_data = '{ "%s" : {"valid_json_format": true}}' % key_data
+ url = "/".join(["http://169.254.169.254", key_data])
+ num_retries = 2
+
+ self.set_normal_response([key_data, invalid_data, invalid_data])
+ response = LazyLoadMetadata(url, num_retries)
+ with self.assertRaises(ValueError):
+ response.values()[0]
+
+ def test_user_data(self):
+ self.set_normal_response(['foo'])
+
+ userdata = get_instance_userdata()
+
+ self.assertEqual('foo', userdata)
+
+ boto.utils.retry_url.assert_called_with(
+ 'http://169.254.169.254/latest/user-data',
+ retry_on_404=False)
if __name__ == '__main__':
unittest.main()
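Two boto.utils behaviors are covered by the new and updated tests. First,
_build_instance_metadata_url no longer appends a trailing slash itself; the
caller passes it explicitly, so 'meta-data/' keeps its slash while 'user-data'
stays slashless. Second, LazyLoadMetadata now retries when the metadata
service returns transiently invalid JSON, surfacing ValueError only once the
retry budget is exhausted. The URL contract in brief (a sketch):

    from boto.utils import _build_instance_metadata_url

    # The trailing slash is now caller-controlled, never appended implicitly.
    _build_instance_metadata_url('http://169.254.169.254', 'latest', 'meta-data/')
    # -> 'http://169.254.169.254/latest/meta-data/'
    _build_instance_metadata_url('http://10.0.1.5', '2013-03-22', 'user-data')
    # -> 'http://10.0.1.5/2013-03-22/user-data'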