summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--README.rst4
-rw-r--r--boto/__init__.py2
-rw-r--r--boto/cloudfront/distribution.py12
-rw-r--r--boto/cloudfront/identity.py2
-rw-r--r--boto/cloudfront/invalidation.py2
-rw-r--r--boto/cloudsearch/layer1.py2
-rw-r--r--boto/connection.py10
-rw-r--r--boto/core/dictresponse.py2
-rw-r--r--boto/dynamodb/item.py6
-rw-r--r--boto/dynamodb/types.py3
-rw-r--r--boto/dynamodb2/table.py59
-rw-r--r--boto/ec2/cloudwatch/__init__.py14
-rw-r--r--boto/ec2/connection.py94
-rw-r--r--boto/ec2/elb/loadbalancer.py2
-rw-r--r--boto/ecs/__init__.py2
-rw-r--r--boto/ecs/item.py6
-rw-r--r--boto/glacier/concurrent.py2
-rw-r--r--boto/glacier/job.py29
-rw-r--r--boto/glacier/layer1.py1082
-rw-r--r--boto/glacier/vault.py41
-rw-r--r--boto/gs/key.py18
-rw-r--r--boto/gs/resumable_upload_handler.py12
-rw-r--r--boto/handler.py2
-rw-r--r--boto/jsonresponse.py2
-rw-r--r--boto/manage/cmdshell.py2
-rw-r--r--boto/manage/server.py2
-rw-r--r--boto/manage/task.py2
-rw-r--r--boto/manage/volume.py16
-rw-r--r--boto/mashups/order.py2
-rw-r--r--boto/mturk/layoutparam.py2
-rw-r--r--boto/mturk/qualification.py2
-rw-r--r--boto/mws/connection.py33
-rw-r--r--boto/provider.py26
-rw-r--r--boto/pyami/config.py2
-rw-r--r--boto/pyami/installers/ubuntu/ebs.py2
-rw-r--r--boto/rds/__init__.py10
-rw-r--r--boto/rds/dbsubnetgroup.py2
-rw-r--r--boto/rds/parametergroup.py2
-rw-r--r--boto/route53/record.py14
-rw-r--r--boto/s3/connection.py4
-rw-r--r--boto/s3/key.py22
-rw-r--r--boto/s3/resumable_download_handler.py2
-rw-r--r--boto/sdb/connection.py2
-rw-r--r--boto/sdb/db/key.py2
-rw-r--r--boto/sdb/db/manager/sdbmanager.py18
-rw-r--r--boto/sdb/db/manager/xmlmanager.py6
-rw-r--r--boto/sdb/db/model.py2
-rw-r--r--boto/sdb/db/property.py16
-rw-r--r--boto/sdb/db/query.py2
-rw-r--r--boto/sdb/db/sequence.py18
-rw-r--r--boto/sdb/item.py2
-rw-r--r--boto/sdb/queryresultset.py6
-rw-r--r--boto/services/message.py2
-rw-r--r--boto/sqs/message.py2
-rw-r--r--boto/support/layer1.py422
-rw-r--r--docs/source/index.rst1
-rw-r--r--docs/source/ref/autoscale.rst8
-rw-r--r--docs/source/releasenotes/v2.23.0.rst49
-rwxr-xr-xscripts/git-release-notes.py5
-rw-r--r--tests/integration/__init__.py20
-rw-r--r--tests/integration/dynamodb2/test_highlevel.py10
-rw-r--r--tests/unit/dynamodb/test_types.py12
-rw-r--r--tests/unit/dynamodb2/test_table.py78
-rw-r--r--tests/unit/ec2/test_connection.py45
-rw-r--r--tests/unit/glacier/test_job.py22
-rw-r--r--tests/unit/glacier/test_layer2.py17
-rwxr-xr-xtests/unit/mws/test_connection.py29
-rw-r--r--tests/unit/provider/test_provider.py38
68 files changed, 1769 insertions, 622 deletions
diff --git a/README.rst b/README.rst
index 958abfdd..e386076b 100644
--- a/README.rst
+++ b/README.rst
@@ -1,9 +1,9 @@
####
boto
####
-boto 2.22.1
+boto 2.23.0
-Released: 6-January-2014
+Released: 10-January-2014
.. image:: https://travis-ci.org/boto/boto.png?branch=develop
:target: https://travis-ci.org/boto/boto
diff --git a/boto/__init__.py b/boto/__init__.py
index d2ae4e58..a4d6c35f 100644
--- a/boto/__init__.py
+++ b/boto/__init__.py
@@ -37,7 +37,7 @@ import logging.config
import urlparse
from boto.exception import InvalidUriError
-__version__ = '2.22.1'
+__version__ = '2.23.0'
Version = __version__ # for backware compatibility
# http://bugs.python.org/issue7980
diff --git a/boto/cloudfront/distribution.py b/boto/cloudfront/distribution.py
index 9992ab6f..7664bdbf 100644
--- a/boto/cloudfront/distribution.py
+++ b/boto/cloudfront/distribution.py
@@ -350,11 +350,11 @@ class Distribution(object):
self.config.cnames, self.config.comment,
self.config.trusted_signers,
self.config.default_root_object)
- if enabled != None:
+ if enabled is not None:
new_config.enabled = enabled
- if cnames != None:
+ if cnames is not None:
new_config.cnames = cnames
- if comment != None:
+ if comment is not None:
new_config.comment = comment
self.etag = self.connection.set_distribution_config(self.id, self.etag, new_config)
self.config = new_config
@@ -730,11 +730,11 @@ class StreamingDistribution(Distribution):
self.config.cnames,
self.config.comment,
self.config.trusted_signers)
- if enabled != None:
+ if enabled is not None:
new_config.enabled = enabled
- if cnames != None:
+ if cnames is not None:
new_config.cnames = cnames
- if comment != None:
+ if comment is not None:
new_config.comment = comment
self.etag = self.connection.set_streaming_distribution_config(self.id,
self.etag,
diff --git a/boto/cloudfront/identity.py b/boto/cloudfront/identity.py
index 123773d1..de79c8ac 100644
--- a/boto/cloudfront/identity.py
+++ b/boto/cloudfront/identity.py
@@ -52,7 +52,7 @@ class OriginAccessIdentity(object):
new_config = OriginAccessIdentityConfig(self.connection,
self.config.caller_reference,
self.config.comment)
- if comment != None:
+ if comment is not None:
new_config.comment = comment
self.etag = self.connection.set_origin_identity_config(self.id, self.etag, new_config)
self.config = new_config
diff --git a/boto/cloudfront/invalidation.py b/boto/cloudfront/invalidation.py
index dcc3c4c5..91ba89d9 100644
--- a/boto/cloudfront/invalidation.py
+++ b/boto/cloudfront/invalidation.py
@@ -75,7 +75,7 @@ class InvalidationBatch(object):
def to_xml(self):
"""Get this batch as XML"""
- assert self.connection != None
+ assert self.connection is not None
s = '<?xml version="1.0" encoding="UTF-8"?>\n'
s += '<InvalidationBatch xmlns="http://cloudfront.amazonaws.com/doc/%s/">\n' % self.connection.Version
for p in self.paths:
diff --git a/boto/cloudsearch/layer1.py b/boto/cloudsearch/layer1.py
index 4ca763e9..1e0e7f99 100644
--- a/boto/cloudsearch/layer1.py
+++ b/boto/cloudsearch/layer1.py
@@ -88,7 +88,7 @@ class Layer1(AWSQueryConnection):
for p in doc_path:
inner = inner.get(p)
if not inner:
- return None if list_marker == None else []
+ return None if list_marker is None else []
if isinstance(inner, list):
return inner
else:
diff --git a/boto/connection.py b/boto/connection.py
index 592a0098..9f8cb019 100644
--- a/boto/connection.py
+++ b/boto/connection.py
@@ -434,6 +434,10 @@ class AWSAuthConnection(object):
:keyword str aws_secret_access_key: Your AWS Secret Access Key
(provided by Amazon). If none is specified, the value in your
``AWS_SECRET_ACCESS_KEY`` environmental variable is used.
+ :keyword str security_token: The security token associated with
+ temporary credentials issued by STS. Optional unless using
+ temporary credentials. If none is specified, the environment
+ variable ``AWS_SECURITY_TOKEN`` is used if defined.
:type is_secure: boolean
:param is_secure: Whether the connection is over SSL
@@ -680,7 +684,7 @@ class AWSAuthConnection(object):
self.proxy_port = self.port
self.no_proxy = os.environ.get('no_proxy', '') or os.environ.get('NO_PROXY', '')
- self.use_proxy = (self.proxy != None)
+ self.use_proxy = (self.proxy is not None)
def get_http_connection(self, host, port, is_secure):
conn = self._pool.get_http_connection(host, port, is_secure)
@@ -982,11 +986,11 @@ class AWSAuthConnection(object):
path = self.get_path(path)
if auth_path is not None:
auth_path = self.get_path(auth_path)
- if params == None:
+ if params is None:
params = {}
else:
params = params.copy()
- if headers == None:
+ if headers is None:
headers = {}
else:
headers = headers.copy()
diff --git a/boto/core/dictresponse.py b/boto/core/dictresponse.py
index 35188349..3730cf0e 100644
--- a/boto/core/dictresponse.py
+++ b/boto/core/dictresponse.py
@@ -47,7 +47,7 @@ class XmlHandler(xml.sax.ContentHandler):
def startElement(self, name, attrs):
self.current_text = ''
t = self.nodes[-1][1].startElement(name, attrs, self.connection)
- if t != None:
+ if t is not None:
if isinstance(t, tuple):
self.nodes.append(t)
else:
diff --git a/boto/dynamodb/item.py b/boto/dynamodb/item.py
index 9d929096..9dcbad06 100644
--- a/boto/dynamodb/item.py
+++ b/boto/dynamodb/item.py
@@ -41,13 +41,13 @@ class Item(dict):
self._updates = None
self._hash_key_name = self.table.schema.hash_key_name
self._range_key_name = self.table.schema.range_key_name
- if attrs == None:
+ if attrs is None:
attrs = {}
- if hash_key == None:
+ if hash_key is None:
hash_key = attrs.get(self._hash_key_name, None)
self[self._hash_key_name] = hash_key
if self._range_key_name:
- if range_key == None:
+ if range_key is None:
range_key = attrs.get(self._range_key_name, None)
self[self._range_key_name] = range_key
self._updates = {}
diff --git a/boto/dynamodb/types.py b/boto/dynamodb/types.py
index 987e0d0f..4c3270ba 100644
--- a/boto/dynamodb/types.py
+++ b/boto/dynamodb/types.py
@@ -136,6 +136,9 @@ def dynamize_value(val):
class Binary(object):
def __init__(self, value):
+ if not isinstance(value, basestring):
+ raise TypeError('Value must be a string of binary data!')
+
self.value = value
def encode(self):
diff --git a/boto/dynamodb2/table.py b/boto/dynamodb2/table.py
index 06805753..a5db6152 100644
--- a/boto/dynamodb2/table.py
+++ b/boto/dynamodb2/table.py
@@ -8,6 +8,7 @@ from boto.dynamodb2.items import Item
from boto.dynamodb2.layer1 import DynamoDBConnection
from boto.dynamodb2.results import ResultSet, BatchGetResultSet
from boto.dynamodb2.types import Dynamizer, FILTER_OPERATORS, QUERY_OPERATORS
+from boto.exception import JSONResponseError
class Table(object):
@@ -436,7 +437,7 @@ class Table(object):
return raw_key
- def get_item(self, consistent=False, **kwargs):
+ def get_item(self, consistent=False, attributes=None, **kwargs):
"""
Fetches an item (record) from a table in DynamoDB.
@@ -448,6 +449,10 @@ class Table(object):
a consistent (but more expensive) read from DynamoDB.
(Default: ``False``)
+ Optionally accepts an ``attributes`` parameter, which should be a
+ list of fieldname to fetch. (Default: ``None``, which means all fields
+ should be fetched)
+
Returns an ``Item`` instance containing all the data for that record.
Example::
@@ -480,12 +485,52 @@ class Table(object):
item_data = self.connection.get_item(
self.table_name,
raw_key,
+ attributes_to_get=attributes,
consistent_read=consistent
)
item = Item(self)
item.load(item_data)
return item
+ def has_item(self, **kwargs):
+ """
+ Return whether an item (record) exists within a table in DynamoDB.
+
+ To specify the key of the item you'd like to get, you can specify the
+ key attributes as kwargs.
+
+ Optionally accepts a ``consistent`` parameter, which should be a
+ boolean. If you provide ``True``, it will perform
+ a consistent (but more expensive) read from DynamoDB.
+ (Default: ``False``)
+
+ Optionally accepts an ``attributes`` parameter, which should be a
+ list of fieldnames to fetch. (Default: ``None``, which means all fields
+ should be fetched)
+
+ Returns ``True`` if an ``Item`` is present, ``False`` if not.
+
+ Example::
+
+ # Simple, just hash-key schema.
+ >>> users.has_item(username='johndoe')
+ True
+
+ # Complex schema, item not present.
+ >>> users.has_item(
+ ... username='johndoe',
+ ... date_joined='2014-01-07'
+ ... )
+ False
+
+ """
+ try:
+ self.get_item(**kwargs)
+ except JSONResponseError:
+ return False
+
+ return True
+
def lookup(self, *args, **kwargs):
"""
Look up an entry in DynamoDB. This is mostly backwards compatible
@@ -524,7 +569,6 @@ class Table(object):
data[self.schema[x].name] = arg
return Item(self, data=data)
-
def put_item(self, data, overwrite=False):
"""
Saves an entire item to DynamoDB.
@@ -969,7 +1013,7 @@ class Table(object):
}
def scan(self, limit=None, segment=None, total_segments=None,
- max_page_size=None, **filter_kwargs):
+ max_page_size=None, attributes=None, **filter_kwargs):
"""
Scans across all items within a DynamoDB table.
@@ -1000,6 +1044,11 @@ class Table(object):
the scan from drowning out other queries. (Default: ``None`` -
fetch as many as DynamoDB will return)
+ Optionally accepts an ``attributes`` parameter, which should be a
+ tuple. If you provide any attributes only these will be fetched
+ from DynamoDB. This uses the ``AttributesToGet`` and set's
+ ``Select`` to ``SPECIFIC_ATTRIBUTES`` API.
+
Returns a ``ResultSet``, which transparently handles the pagination of
results you get back.
@@ -1034,12 +1083,13 @@ class Table(object):
'limit': limit,
'segment': segment,
'total_segments': total_segments,
+ 'attributes': attributes,
})
results.to_call(self._scan, **kwargs)
return results
def _scan(self, limit=None, exclusive_start_key=None, segment=None,
- total_segments=None, **filter_kwargs):
+ total_segments=None, attributes=None, **filter_kwargs):
"""
The internal method that performs the actual scan. Used extensively
by ``ResultSet`` to perform each (paginated) request.
@@ -1048,6 +1098,7 @@ class Table(object):
'limit': limit,
'segment': segment,
'total_segments': total_segments,
+ 'attributes_to_get': attributes,
}
if exclusive_start_key:
diff --git a/boto/ec2/cloudwatch/__init__.py b/boto/ec2/cloudwatch/__init__.py
index 6bfcdbf3..41417570 100644
--- a/boto/ec2/cloudwatch/__init__.py
+++ b/boto/ec2/cloudwatch/__init__.py
@@ -178,11 +178,11 @@ class CloudWatchConnection(AWSQueryConnection):
metric_data['StatisticValues.Minimum'] = s['minimum']
metric_data['StatisticValues.SampleCount'] = s['samplecount']
metric_data['StatisticValues.Sum'] = s['sum']
- if value != None:
+ if value is not None:
msg = 'You supplied a value and statistics for a ' + \
'metric.Posting statistics and not value.'
boto.log.warn(msg)
- elif value != None:
+ elif value is not None:
metric_data['Value'] = v
else:
raise Exception('Must specify a value or statistics to put.')
@@ -273,9 +273,13 @@ class CloudWatchConnection(AWSQueryConnection):
pairs that will be used to filter the results. The key in
the dictionary is the name of a Dimension. The value in
the dictionary is either a scalar value of that Dimension
- name that you want to filter on, a list of values to
- filter on or None if you want all metrics with that
- Dimension name.
+ name that you want to filter on or None if you want all
+ metrics with that Dimension name. To be included in the
+ result a metric must contain all specified dimensions,
+ although the metric may contain additional dimensions beyond
+ the requested metrics. The Dimension names, and values must
+ be strings between 1 and 250 characters long. A maximum of
+ 10 dimensions are allowed.
:type metric_name: str
:param metric_name: The name of the Metric to filter against. If None,
diff --git a/boto/ec2/connection.py b/boto/ec2/connection.py
index 4fb8842d..4afe2482 100644
--- a/boto/ec2/connection.py
+++ b/boto/ec2/connection.py
@@ -1832,6 +1832,36 @@ class EC2Connection(AWSQueryConnection):
return self.get_status('AssignPrivateIpAddresses', params, verb='POST')
+ def _associate_address(self, status, instance_id=None, public_ip=None,
+ allocation_id=None, network_interface_id=None,
+ private_ip_address=None, allow_reassociation=False,
+ dry_run=False):
+ params = {}
+ if instance_id is not None:
+ params['InstanceId'] = instance_id
+ elif network_interface_id is not None:
+ params['NetworkInterfaceId'] = network_interface_id
+
+ if public_ip is not None:
+ params['PublicIp'] = public_ip
+ elif allocation_id is not None:
+ params['AllocationId'] = allocation_id
+
+ if private_ip_address is not None:
+ params['PrivateIpAddress'] = private_ip_address
+
+ if allow_reassociation:
+ params['AllowReassociation'] = 'true'
+
+ if dry_run:
+ params['DryRun'] = 'true'
+
+ if status:
+ return self.get_status('AssociateAddress', params, verb='POST')
+ else:
+ return self.get_object('AssociateAddress', params, Address,
+ verb='POST')
+
def associate_address(self, instance_id=None, public_ip=None,
allocation_id=None, network_interface_id=None,
private_ip_address=None, allow_reassociation=False,
@@ -1874,27 +1904,59 @@ class EC2Connection(AWSQueryConnection):
:rtype: bool
:return: True if successful
"""
- params = {}
- if instance_id is not None:
- params['InstanceId'] = instance_id
- elif network_interface_id is not None:
- params['NetworkInterfaceId'] = network_interface_id
+ return self._associate_address(True, instance_id=instance_id,
+ public_ip=public_ip, allocation_id=allocation_id,
+ network_interface_id=network_interface_id,
+ private_ip_address=private_ip_address,
+ allow_reassociation=allow_reassociation, dry_run=dry_run)
- if public_ip is not None:
- params['PublicIp'] = public_ip
- elif allocation_id is not None:
- params['AllocationId'] = allocation_id
+ def associate_address_object(self, instance_id=None, public_ip=None,
+ allocation_id=None, network_interface_id=None,
+ private_ip_address=None, allow_reassociation=False,
+ dry_run=False):
+ """
+ Associate an Elastic IP address with a currently running instance.
+ This requires one of ``public_ip`` or ``allocation_id`` depending
+ on if you're associating a VPC address or a plain EC2 address.
- if private_ip_address is not None:
- params['PrivateIpAddress'] = private_ip_address
+ When using an Allocation ID, make sure to pass ``None`` for ``public_ip``
+ as EC2 expects a single parameter and if ``public_ip`` is passed boto
+ will preference that instead of ``allocation_id``.
- if allow_reassociation:
- params['AllowReassociation'] = 'true'
+ :type instance_id: string
+ :param instance_id: The ID of the instance
- if dry_run:
- params['DryRun'] = 'true'
+ :type public_ip: string
+ :param public_ip: The public IP address for EC2 based allocations.
+
+ :type allocation_id: string
+ :param allocation_id: The allocation ID for a VPC-based elastic IP.
+
+ :type network_interface_id: string
+ :param network_interface_id: The network interface ID to which
+ elastic IP is to be assigned to
- return self.get_status('AssociateAddress', params, verb='POST')
+ :type private_ip_address: string
+ :param private_ip_address: The primary or secondary private IP address
+ to associate with the Elastic IP address.
+
+ :type allow_reassociation: bool
+ :param allow_reassociation: Specify this option to allow an Elastic IP
+ address that is already associated with another network interface
+ or instance to be re-associated with the specified instance or
+ interface.
+
+ :type dry_run: bool
+ :param dry_run: Set to True if the operation should not actually run.
+
+ :rtype: class:`boto.ec2.address.Address`
+ :return: The associated address instance
+ """
+ return self._associate_address(False, instance_id=instance_id,
+ public_ip=public_ip, allocation_id=allocation_id,
+ network_interface_id=network_interface_id,
+ private_ip_address=private_ip_address,
+ allow_reassociation=allow_reassociation, dry_run=dry_run)
def disassociate_address(self, public_ip=None, association_id=None,
dry_run=False):
diff --git a/boto/ec2/elb/loadbalancer.py b/boto/ec2/elb/loadbalancer.py
index ce3ee66d..f76feb15 100644
--- a/boto/ec2/elb/loadbalancer.py
+++ b/boto/ec2/elb/loadbalancer.py
@@ -324,7 +324,7 @@ class LoadBalancer(object):
listeners)
def create_listener(self, inPort, outPort=None, proto="tcp"):
- if outPort == None:
+ if outPort is None:
outPort = inPort
return self.create_listeners([(inPort, outPort, proto)])
diff --git a/boto/ecs/__init__.py b/boto/ecs/__init__.py
index f39ec5a5..96d4b670 100644
--- a/boto/ecs/__init__.py
+++ b/boto/ecs/__init__.py
@@ -66,7 +66,7 @@ class ECSConnection(AWSQueryConnection):
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
- if itemSet == None:
+ if itemSet is None:
rs = ItemSet(self, action, params, page)
else:
rs = itemSet
diff --git a/boto/ecs/item.py b/boto/ecs/item.py
index d0cdb990..4349e41e 100644
--- a/boto/ecs/item.py
+++ b/boto/ecs/item.py
@@ -110,7 +110,7 @@ class ItemSet(ResponseGroup):
def startElement(self, name, attrs, connection):
if name == "Item":
self.curItem = Item(self._connection)
- elif self.curItem != None:
+ elif self.curItem is not None:
self.curItem.startElement(name, attrs, connection)
return None
@@ -123,13 +123,13 @@ class ItemSet(ResponseGroup):
self.objs.append(self.curItem)
self._xml.write(self.curItem.to_xml())
self.curItem = None
- elif self.curItem != None:
+ elif self.curItem is not None:
self.curItem.endElement(name, value, connection)
return None
def next(self):
"""Special paging functionality"""
- if self.iter == None:
+ if self.iter is None:
self.iter = iter(self.objs)
try:
return self.iter.next()
diff --git a/boto/glacier/concurrent.py b/boto/glacier/concurrent.py
index af727ec2..dc540819 100644
--- a/boto/glacier/concurrent.py
+++ b/boto/glacier/concurrent.py
@@ -19,6 +19,8 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
+from __future__ import with_statement
+
import os
import math
import threading
diff --git a/boto/glacier/job.py b/boto/glacier/job.py
index c7401741..d26d6b40 100644
--- a/boto/glacier/job.py
+++ b/boto/glacier/job.py
@@ -97,9 +97,12 @@ class Job(object):
actual_tree_hash, response['TreeHash'], byte_range))
return response
+ def _calc_num_chunks(self, chunk_size):
+ return int(math.ceil(self.archive_size / float(chunk_size)))
+
def download_to_file(self, filename, chunk_size=DefaultPartSize,
verify_hashes=True, retry_exceptions=(socket.error,)):
- """Download an archive to a file.
+ """Download an archive to a file by name.
:type filename: str
:param filename: The name of the file where the archive
@@ -114,11 +117,33 @@ class Job(object):
the tree hashes for each downloaded chunk.
"""
- num_chunks = int(math.ceil(self.archive_size / float(chunk_size)))
+ num_chunks = self._calc_num_chunks(chunk_size)
with open(filename, 'wb') as output_file:
self._download_to_fileob(output_file, num_chunks, chunk_size,
verify_hashes, retry_exceptions)
+ def download_to_fileobj(self, output_file, chunk_size=DefaultPartSize,
+ verify_hashes=True,
+ retry_exceptions=(socket.error,)):
+ """Download an archive to a file object.
+
+ :type output_file: file
+ :param output_file: The file object where the archive
+ contents will be saved.
+
+ :type chunk_size: int
+ :param chunk_size: The chunk size to use when downloading
+ the archive.
+
+ :type verify_hashes: bool
+ :param verify_hashes: Indicates whether or not to verify
+ the tree hashes for each downloaded chunk.
+
+ """
+ num_chunks = self._calc_num_chunks(chunk_size)
+ self._download_to_fileob(output_file, num_chunks, chunk_size,
+ verify_hashes, retry_exceptions)
+
def _download_to_fileob(self, fileobj, num_chunks, chunk_size, verify_hashes,
retry_exceptions):
for i in xrange(num_chunks):
diff --git a/boto/glacier/layer1.py b/boto/glacier/layer1.py
index f6b05588..716cea37 100644
--- a/boto/glacier/layer1.py
+++ b/boto/glacier/layer1.py
@@ -33,9 +33,42 @@ from .utils import ResettingFileSender
class Layer1(AWSAuthConnection):
-
+ """
+ Amazon Glacier is a storage solution for "cold data."
+
+ Amazon Glacier is an extremely low-cost storage service that
+ provides secure, durable and easy-to-use storage for data backup
+ and archival. With Amazon Glacier, customers can store their data
+ cost effectively for months, years, or decades. Amazon Glacier
+ also enables customers to offload the administrative burdens of
+ operating and scaling storage to AWS, so they don't have to worry
+ about capacity planning, hardware provisioning, data replication,
+ hardware failure and recovery, or time-consuming hardware
+ migrations.
+
+ Amazon Glacier is a great storage choice when low storage cost is
+ paramount, your data is rarely retrieved, and retrieval latency of
+ several hours is acceptable. If your application requires fast or
+ frequent access to your data, consider using Amazon S3. For more
+ information, go to `Amazon Simple Storage Service (Amazon S3)`_.
+
+ You can store any kind of data in any format. There is no maximum
+ limit on the total amount of data you can store in Amazon Glacier.
+
+ If you are a first-time user of Amazon Glacier, we recommend that
+ you begin by reading the following sections in the Amazon Glacier
+ Developer Guide :
+
+
+ + `What is Amazon Glacier`_ - This section of the Developer Guide
+ describes the underlying data model, the operations it supports,
+ and the AWS SDKs that you can use to interact with the service.
+ + `Getting Started with Amazon Glacier`_ - The Getting Started
+ section walks you through the process of creating a vault,
+ uploading archives, creating jobs to download archives, retrieving
+ the job output, and deleting archives.
+ """
Version = '2012-06-01'
- """Glacier API version."""
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
account_id='-', is_secure=True, port=None,
@@ -87,35 +120,39 @@ class Layer1(AWSAuthConnection):
def list_vaults(self, limit=None, marker=None):
"""
- This operation lists all vaults owned by the calling user’s
+ This operation lists all vaults owned by the calling user's
account. The list returned in the response is ASCII-sorted by
vault name.
By default, this operation returns up to 1,000 items. If there
- are more vaults to list, the marker field in the response body
- contains the vault Amazon Resource Name (ARN) at which to
- continue the list with a new List Vaults request; otherwise,
- the marker field is null. In your next List Vaults request you
- set the marker parameter to the value Amazon Glacier returned
- in the responses to your previous List Vaults request. You can
- also limit the number of vaults returned in the response by
- specifying the limit parameter in the request.
-
- :type limit: int
- :param limit: The maximum number of items returned in the
- response. If you don't specify a value, the List Vaults
- operation returns up to 1,000 items.
-
- :type marker: str
- :param marker: A string used for pagination. marker specifies
- the vault ARN after which the listing of vaults should
- begin. (The vault specified by marker is not included in
- the returned list.) Get the marker value from a previous
- List Vaults response. You need to include the marker only
- if you are continuing the pagination of results started in
- a previous List Vaults request. Specifying an empty value
- ("") for the marker returns a list of vaults starting
- from the first vault.
+ are more vaults to list, the response `marker` field contains
+ the vault Amazon Resource Name (ARN) at which to continue the
+ list with a new List Vaults request; otherwise, the `marker`
+ field is `null`. To return a list of vaults that begins at a
+ specific vault, set the `marker` request parameter to the
+ vault ARN you obtained from a previous List Vaults request.
+ You can also limit the number of vaults returned in the
+ response by specifying the `limit` parameter in the request.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and underlying REST API, go to
+ `Retrieving Vault Metadata in Amazon Glacier`_ and `List
+ Vaults `_ in the Amazon Glacier Developer Guide .
+
+ :type marker: string
+ :param marker: A string used for pagination. The marker specifies the
+ vault ARN after which the listing of vaults should begin.
+
+ :type limit: string
+ :param limit: The maximum number of items returned in the response. If
+ you don't specify a value, the List Vaults operation returns up to
+ 1,000 items.
"""
params = {}
if limit:
@@ -127,18 +164,31 @@ class Layer1(AWSAuthConnection):
def describe_vault(self, vault_name):
"""
This operation returns information about a vault, including
- the vault Amazon Resource Name (ARN), the date the vault was
- created, the number of archives contained within the vault,
- and the total size of all the archives in the vault. The
- number of archives and their total size are as of the last
- vault inventory Amazon Glacier generated. Amazon Glacier
- generates vault inventories approximately daily. This means
- that if you add or remove an archive from a vault, and then
- immediately send a Describe Vault request, the response might
- not reflect the changes.
-
- :type vault_name: str
- :param vault_name: The name of the new vault
+ the vault's Amazon Resource Name (ARN), the date the vault was
+ created, the number of archives it contains, and the total
+ size of all the archives in the vault. The number of archives
+ and their total size are as of the last inventory generation.
+ This means that if you add or remove an archive from a vault,
+ and then immediately use Describe Vault, the change in
+ contents will not be immediately reflected. If you want to
+ retrieve the latest inventory of the vault, use InitiateJob.
+ Amazon Glacier generates vault inventories approximately
+ daily. For more information, see `Downloading a Vault
+ Inventory in Amazon Glacier`_.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and underlying REST API, go to
+ `Retrieving Vault Metadata in Amazon Glacier`_ and `Describe
+ Vault `_ in the Amazon Glacier Developer Guide .
+
+ :type vault_name: string
+ :param vault_name: The name of the vault.
"""
uri = 'vaults/%s' % vault_name
return self.make_request('GET', uri)
@@ -147,23 +197,34 @@ class Layer1(AWSAuthConnection):
"""
This operation creates a new vault with the specified name.
The name of the vault must be unique within a region for an
- AWS account. You can create up to 1,000 vaults per
- account. For information on creating more vaults, go to the
- Amazon Glacier product detail page.
+ AWS account. You can create up to 1,000 vaults per account. If
+ you need to create more vaults, contact Amazon Glacier.
You must use the following guidelines when naming a vault.
- Names can be between 1 and 255 characters long.
- Allowed characters are a–z, A–Z, 0–9, '_' (underscore),
- '-' (hyphen), and '.' (period).
- This operation is idempotent, you can send the same request
- multiple times and it has no further effect after the first
- time Amazon Glacier creates the specified vault.
+ + Names can be between 1 and 255 characters long.
+ + Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-'
+ (hyphen), and '.' (period).
- :type vault_name: str
- :param vault_name: The name of the new vault
+
+
+ This operation is idempotent.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and underlying REST API, go to
+ `Creating a Vault in Amazon Glacier`_ and `Create Vault `_ in
+ the Amazon Glacier Developer Guide .
+
+ :type vault_name: string
+ :param vault_name: The name of the vault.
"""
uri = 'vaults/%s' % vault_name
return self.make_request('PUT', uri, ok_responses=(201,),
@@ -172,50 +233,114 @@ class Layer1(AWSAuthConnection):
def delete_vault(self, vault_name):
"""
This operation deletes a vault. Amazon Glacier will delete a
- vault only if there are no archives in the vault as per the
+ vault only if there are no archives in the vault as of the
last inventory and there have been no writes to the vault
since the last inventory. If either of these conditions is not
satisfied, the vault deletion fails (that is, the vault is not
- removed) and Amazon Glacier returns an error.
-
- This operation is idempotent, you can send the same request
- multiple times and it has no further effect after the first
- time Amazon Glacier delete the specified vault.
-
- :type vault_name: str
- :param vault_name: The name of the new vault
+ removed) and Amazon Glacier returns an error. You can use
+ DescribeVault to return the number of archives in a vault, and
+ you can use `Initiate a Job (POST jobs)`_ to initiate a new
+ inventory retrieval for a vault. The inventory contains the
+ archive IDs you use to delete archives using `Delete Archive
+ (DELETE archive)`_.
+
+ This operation is idempotent.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and underlying REST API, go to
+ `Deleting a Vault in Amazon Glacier`_ and `Delete Vault `_ in
+ the Amazon Glacier Developer Guide .
+
+ :type vault_name: string
+ :param vault_name: The name of the vault.
"""
uri = 'vaults/%s' % vault_name
return self.make_request('DELETE', uri, ok_responses=(204,))
def get_vault_notifications(self, vault_name):
"""
- This operation retrieves the notification-configuration
- subresource set on the vault.
-
- :type vault_name: str
- :param vault_name: The name of the new vault
+ This operation retrieves the `notification-configuration`
+ subresource of the specified vault.
+
+ For information about setting a notification configuration on
+ a vault, see SetVaultNotifications. If a notification
+ configuration for a vault is not set, the operation returns a
+ `404 Not Found` error. For more information about vault
+ notifications, see `Configuring Vault Notifications in Amazon
+ Glacier`_.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and underlying REST API, go to
+ `Configuring Vault Notifications in Amazon Glacier`_ and `Get
+ Vault Notification Configuration `_ in the Amazon Glacier
+ Developer Guide .
+
+ :type vault_name: string
+ :param vault_name: The name of the vault.
"""
uri = 'vaults/%s/notification-configuration' % vault_name
return self.make_request('GET', uri)
def set_vault_notifications(self, vault_name, notification_config):
"""
- This operation retrieves the notification-configuration
- subresource set on the vault.
-
- :type vault_name: str
- :param vault_name: The name of the new vault
-
- :type notification_config: dict
- :param notification_config: A Python dictionary containing
- an SNS Topic and events for which you want Amazon Glacier
- to send notifications to the topic. Possible events are:
+ This operation configures notifications that will be sent when
+ specific events happen to a vault. By default, you don't get
+ any notifications.
+
+ To configure vault notifications, send a PUT request to the
+ `notification-configuration` subresource of the vault. The
+ request should include a JSON document that provides an Amazon
+ SNS topic and specific events for which you want Amazon
+ Glacier to send notifications to the topic.
+
+ Amazon SNS topics must grant permission to the vault to be
+ allowed to publish notifications to the topic. You can
+ configure a vault to publish a notification for the following
+ vault events:
+
+
+ + **ArchiveRetrievalCompleted** This event occurs when a job
+ that was initiated for an archive retrieval is completed
+ (InitiateJob). The status of the completed job can be
+ "Succeeded" or "Failed". The notification sent to the SNS
+ topic is the same output as returned from DescribeJob.
+ + **InventoryRetrievalCompleted** This event occurs when a job
+ that was initiated for an inventory retrieval is completed
+ (InitiateJob). The status of the completed job can be
+ "Succeeded" or "Failed". The notification sent to the SNS
+ topic is the same output as returned from DescribeJob.
+
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and underlying REST API, go to
+ `Configuring Vault Notifications in Amazon Glacier`_ and `Set
+ Vault Notification Configuration `_ in the Amazon Glacier
+ Developer Guide .
+
+ :type vault_name: string
+ :param vault_name: The name of the vault.
- * ArchiveRetrievalCompleted - occurs when a job that was
- initiated for an archive retrieval is completed.
- * InventoryRetrievalCompleted - occurs when a job that was
- initiated for an inventory retrieval is completed.
+ :type vault_notification_config: dict
+ :param vault_notification_config: Provides options for specifying
+ notification configuration.
The format of the dictionary is:
@@ -229,11 +354,27 @@ class Layer1(AWSAuthConnection):
def delete_vault_notifications(self, vault_name):
"""
- This operation deletes the notification-configuration
- subresource set on the vault.
-
- :type vault_name: str
- :param vault_name: The name of the new vault
+ This operation deletes the notification configuration set for
+ a vault. The operation is eventually consistent; that is, it
+ might take some time for Amazon Glacier to completely disable
+ the notifications and you might still receive some
+ notifications for a short time after you send the delete
+ request.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and underlying REST API, go to
+ `Configuring Vault Notifications in Amazon Glacier`_ and
+ `Delete Vault Notification Configuration `_ in the Amazon
+ Glacier Developer Guide.
+
+ :type vault_name: string
+ :param vault_name: The name of the vault.
"""
uri = 'vaults/%s/notification-configuration' % vault_name
return self.make_request('DELETE', uri, ok_responses=(204,))
@@ -243,36 +384,80 @@ class Layer1(AWSAuthConnection):
def list_jobs(self, vault_name, completed=None, status_code=None,
limit=None, marker=None):
"""
- This operation lists jobs for a vault including jobs that are
+ This operation lists jobs for a vault, including jobs that are
in-progress and jobs that have recently finished.
- :type vault_name: str
+
+ Amazon Glacier retains recently completed jobs for a period
+ before deleting them; however, it eventually removes completed
+ jobs. The output of completed jobs can be retrieved. Retaining
+ completed jobs for a period of time after they have completed
+ enables you to get a job output in the event you miss the job
+ completion notification or your first attempt to download it
+ fails. For example, suppose you start an archive retrieval job
+ to download an archive. After the job completes, you start to
+ download the archive but encounter a network error. In this
+ scenario, you can retry and download the archive while the job
+ exists.
+
+
+ To retrieve an archive or retrieve a vault inventory from
+ Amazon Glacier, you first initiate a job, and after the job
+ completes, you download the data. For an archive retrieval,
+ the output is the archive data, and for an inventory
+ retrieval, it is the inventory list. The List Job operation
+ returns a list of these jobs sorted by job initiation time.
+
+ This List Jobs operation supports pagination. By default, this
+ operation returns up to 1,000 jobs in the response. You should
+ always check the response for a `marker` at which to continue
+ the list; if there are no more items the `marker` is `null`.
+ To return a list of jobs that begins at a specific job, set
+ the `marker` request parameter to the value you obtained from
+ a previous List Jobs request. You can also limit the number of
+ jobs returned in the response by specifying the `limit`
+ parameter in the request.
+
+ Additionally, you can filter the jobs list returned by
+ specifying an optional `statuscode` (InProgress, Succeeded, or
+ Failed) and `completed` (true, false) parameter. The
+ `statuscode` allows you to specify that only jobs that match a
+ specified status are returned. The `completed` parameter
+ allows you to specify that only jobs in a specific completion
+ state are returned.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For the underlying REST API, go to `List Jobs `_
+
+ :type vault_name: string
:param vault_name: The name of the vault.
- :type completed: boolean
- :param completed: Specifies the state of the jobs to return.
- If a value of True is passed, only completed jobs will
- be returned. If a value of False is passed, only
- uncompleted jobs will be returned. If no value is
- passed, all jobs will be returned.
+ :type limit: string
+ :param limit: Specifies that the response be limited to the specified
+ number of items or fewer. If not specified, the List Jobs operation
+ returns up to 1,000 jobs.
- :type status_code: string
- :param status_code: Specifies the type of job status to return.
- Valid values are: InProgress|Succeeded|Failed. If not
- specified, jobs with all status codes are returned.
+ :type marker: string
+ :param marker: An opaque string used for pagination. This value
+ specifies the job at which the listing of jobs should begin. Get
+ the marker value from a previous List Jobs response. You need only
+ include the marker if you are continuing the pagination of results
+ started in a previous List Jobs request.
- :type limit: int
- :param limit: The maximum number of items returned in the
- response. If you don't specify a value, the List Jobs
- operation returns up to 1,000 items.
+ :type statuscode: string
+ :param statuscode: Specifies the type of job status to return. You can
+ specify the following values: "InProgress", "Succeeded", or
+ "Failed".
- :type marker: str
- :param marker: An opaque string used for pagination. marker
- specifies the job at which the listing of jobs should
- begin. Get the marker value from a previous List Jobs
- response. You need only include the marker if you are
- continuing the pagination of results started in a previous
- List Jobs request.
+ :type completed: string
+ :param completed: Specifies the state of the jobs to return. You can
+ specify `True` or `False`.
"""
params = {}
@@ -292,39 +477,154 @@ class Layer1(AWSAuthConnection):
This operation returns information about a job you previously
initiated, including the job initiation date, the user who
initiated the job, the job status code/message and the Amazon
- Simple Notification Service (Amazon SNS) topic to notify after
- Amazon Glacier completes the job.
+ SNS topic to notify after Amazon Glacier completes the job.
+ For more information about initiating a job, see InitiateJob.
+
+
+ This operation enables you to check the status of your job.
+ However, it is strongly recommended that you set up an Amazon
+ SNS topic and specify it in your initiate job request so that
+ Amazon Glacier can notify the topic after it completes the
+ job.
- :type vault_name: str
- :param vault_name: The name of the new vault
- :type job_id: str
- :param job_id: The ID of the job.
+ A job ID will not expire for at least 24 hours after Amazon
+ Glacier completes the job.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For information about the underlying REST API, go to `Working
+ with Archives in Amazon Glacier`_ in the Amazon Glacier
+ Developer Guide .
+
+ :type vault_name: string
+ :param vault_name: The name of the vault.
+
+ :type job_id: string
+ :param job_id: The ID of the job to describe.
"""
uri = 'vaults/%s/jobs/%s' % (vault_name, job_id)
return self.make_request('GET', uri, ok_responses=(200,))
def initiate_job(self, vault_name, job_data):
"""
- This operation initiates a job of the specified
- type. Retrieving an archive or a vault inventory are
- asynchronous operations that require you to initiate a job. It
- is a two-step process:
-
- * Initiate a retrieval job.
- * After the job completes, download the bytes.
-
- The retrieval is executed asynchronously. When you initiate
- a retrieval job, Amazon Glacier creates a job and returns a
- job ID in the response.
-
- :type vault_name: str
- :param vault_name: The name of the new vault
+ This operation initiates a job of the specified type. In this
+ release, you can initiate a job to retrieve either an archive
+ or a vault inventory (a list of archives in a vault).
+
+ Retrieving data from Amazon Glacier is a two-step process:
+
+
+ #. Initiate a retrieval job.
+ #. After the job completes, download the bytes.
+
+
+ The retrieval request is executed asynchronously. When you
+ initiate a retrieval job, Amazon Glacier creates a job and
+ returns a job ID in the response. When Amazon Glacier
+ completes the job, you can get the job output (archive or
+ inventory data). For information about getting job output, see
+ GetJobOutput operation.
+
+ The job must complete before you can get its output. To
+ determine when a job is complete, you have the following
+ options:
+
+
+ + **Use Amazon SNS Notification** You can specify an Amazon
+ Simple Notification Service (Amazon SNS) topic to which Amazon
+ Glacier can post a notification after the job is completed.
+ You can specify an SNS topic per job request. The notification
+ is sent only after Amazon Glacier completes the job. In
+ addition to specifying an SNS topic per job request, you can
+ configure vault notifications for a vault so that job
+ notifications are always sent. For more information, see
+ SetVaultNotifications.
+ + **Get job details** You can make a DescribeJob request to
+ obtain job status information while a job is in progress.
+ However, it is more efficient to use an Amazon SNS
+ notification to determine when a job is complete.
+
+
+
+ The information you get via notification is the same as what
+ by calling DescribeJob.
+
+
+ If for a specific event, you add both the notification
+ configuration on the vault and also specify an SNS topic in
+ your initiate job request, Amazon Glacier sends both
+ notifications. For more information, see
+ SetVaultNotifications.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ **About the Vault Inventory**
+
+ Amazon Glacier prepares an inventory for each vault
+ periodically, every 24 hours. When you initiate a job for a
+ vault inventory, Amazon Glacier returns the last inventory for
+ the vault. The inventory data you get might be up to a day or
+ two days old. Also, the initiate inventory job might take some
+ time to complete before you can download the vault inventory.
+ So you do not want to retrieve a vault inventory for each
+ vault operation. However, in some scenarios, you might find
+ the vault inventory useful. For example, when you upload an
+ archive, you can provide an archive description but not an
+ archive name. Amazon Glacier provides you a unique archive ID,
+ an opaque string of characters. So, you might maintain your
+ own database that maps archive names to their corresponding
+ Amazon Glacier assigned archive IDs. You might find the vault
+ inventory useful in the event you need to reconcile
+ information in your database with the actual vault inventory.
+
+ **About Ranged Archive Retrieval**
+
+ You can initiate an archive retrieval for the whole archive or
+ a range of the archive. In the case of ranged archive
+ retrieval, you specify a byte range to return or the whole
+ archive. The range specified must be megabyte (MB) aligned,
+ that is the range start value must be divisible by 1 MB and
+ range end value plus 1 must be divisible by 1 MB or equal the
+ end of the archive. If the ranged archive retrieval is not
+ megabyte aligned, this operation returns a 400 response.
+ Furthermore, to ensure you get checksum values for data you
+ download using Get Job Output API, the range must be tree hash
+ aligned.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and the underlying REST API, go to
+ `Initiate a Job`_ and `Downloading a Vault Inventory`_
+
+ :type account_id: string
+ :param account_id: The `AccountId` is the AWS Account ID. You can
+ specify either the AWS Account ID or optionally a '-', in which
+ case Amazon Glacier uses the AWS Account ID associated with the
+ credentials used to sign the request. If you specify your Account
+ ID, do not include hyphens in it.
+
+ :type vault_name: string
+ :param vault_name: The name of the vault.
- :type job_data: dict
- :param job_data: A Python dictionary containing the
- information about the requested job. The dictionary
- can contain the following attributes:
+ :type job_parameters: dict
+ :param job_parameters: Provides options for specifying job information.
+ The dictionary can contain the following attributes:
* ArchiveId - The ID of the archive you want to retrieve.
This field is required only if the Type is set to
@@ -340,6 +640,12 @@ class Layer1(AWSAuthConnection):
archive-retrieval|inventory-retrieval
* RetrievalByteRange - Optionally specify the range of
bytes to retrieve.
+ * InventoryRetrievalParameters: Optional job parameters
+ * Format - The output format, like "JSON"
+ * StartDate - ISO8601 starting date string
+ * EndDate - ISO8601 ending date string
+ * Limit - Maximum number of entries
+ * Marker - A unique string used for pagination
"""
uri = 'vaults/%s/jobs' % vault_name
@@ -353,27 +659,72 @@ class Layer1(AWSAuthConnection):
def get_job_output(self, vault_name, job_id, byte_range=None):
"""
This operation downloads the output of the job you initiated
- using Initiate a Job. Depending on the job type
- you specified when you initiated the job, the output will be
- either the content of an archive or a vault inventory.
-
- You can download all the job output or download a portion of
- the output by specifying a byte range. In the case of an
- archive retrieval job, depending on the byte range you
- specify, Amazon Glacier returns the checksum for the portion
- of the data. You can compute the checksum on the client and
- verify that the values match to ensure the portion you
- downloaded is the correct data.
-
- :type vault_name: str :param
- :param vault_name: The name of the new vault
+ using InitiateJob. Depending on the job type you specified
+ when you initiated the job, the output will be either the
+ content of an archive or a vault inventory.
+
+ A job ID will not expire for at least 24 hours after Amazon
+ Glacier completes the job. That is, you can download the job
+ output within the 24-hour period after Amazon Glacier
+ completes the job.
+
+ If the job output is large, then you can use the `Range`
+ request header to retrieve a portion of the output. This
+ allows you to download the entire output in smaller chunks of
+ bytes. For example, suppose you have 1 GB of job output you
+ want to download and you decide to download 128 MB chunks of
+ data at a time, which is a total of eight Get Job Output
+ requests. You use the following process to download the job
+ output:
+
+
+ #. Download a 128 MB chunk of output by specifying the
+ appropriate byte range using the `Range` header.
+ #. Along with the data, the response includes a checksum of
+ the payload. You compute the checksum of the payload on the
+ client and compare it with the checksum you received in the
+ response to ensure you received all the expected data.
+ #. Repeat steps 1 and 2 for all the eight 128 MB chunks of
+ output data, each time specifying the appropriate byte range.
+ #. After downloading all the parts of the job output, you have
+ a list of eight checksum values. Compute the tree hash of
+ these values to find the checksum of the entire output. Using
+ the Describe Job API, obtain job information of the job that
+ provided you the output. The response includes the checksum of
+ the entire archive stored in Amazon Glacier. You compare this
+ value with the checksum you computed to ensure you have
+ downloaded the entire archive content with no errors.
+
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and the underlying REST API, go to
+ `Downloading a Vault Inventory`_, `Downloading an Archive`_,
+ and `Get Job Output `_
+
+ :type account_id: string
+ :param account_id: The `AccountId` is the AWS Account ID. You can
+ specify either the AWS Account ID or optionally a '-', in which
+ case Amazon Glacier uses the AWS Account ID associated with the
+ credentials used to sign the request. If you specify your Account
+ ID, do not include hyphens in it.
+
+ :type vault_name: string
+ :param vault_name: The name of the vault.
- :type job_id: str
- :param job_id: The ID of the job.
+ :type job_id: string
+ :param job_id: The job ID whose data is downloaded.
- :type byte_range: tuple
- :param range: A tuple of integers specifying the slice (in bytes)
- of the archive you want to receive
+ :type byte_range: string
+ :param byte_range: The range of bytes to retrieve from the output. For
+ example, if you want to download the first 1,048,576 bytes, specify
+ "Range: bytes=0-1048575". By default, this operation downloads the
+ entire output.
"""
response_headers = [('x-amz-sha256-tree-hash', u'TreeHash'),
('Content-Range', u'ContentRange'),
@@ -392,13 +743,50 @@ class Layer1(AWSAuthConnection):
def upload_archive(self, vault_name, archive,
linear_hash, tree_hash, description=None):
"""
- This operation adds an archive to a vault. For a successful
- upload, your data is durably persisted. In response, Amazon
- Glacier returns the archive ID in the x-amz-archive-id header
- of the response. You should save the archive ID returned so
- that you can access the archive later.
+ This operation adds an archive to a vault. This is a
+ synchronous operation, and for a successful upload, your data
+ is durably persisted. Amazon Glacier returns the archive ID in
+ the `x-amz-archive-id` header of the response.
+
+ You must use the archive ID to access your data in Amazon
+ Glacier. After you upload an archive, you should save the
+ archive ID returned so that you can retrieve or delete the
+ archive later. Besides saving the archive ID, you can also
+ index it and give it a friendly name to allow for better
+ searching. You can also use the optional archive description
+ field to specify how the archive is referred to in an external
+ index of archives, such as you might create in Amazon
+ DynamoDB. You can also get the vault inventory to obtain a
+ list of archive IDs in a vault. For more information, see
+ InitiateJob.
+
+ You must provide a SHA256 tree hash of the data you are
+ uploading. For information about computing a SHA256 tree hash,
+ see `Computing Checksums`_.
+
+ You can optionally specify an archive description of up to
+ 1,024 printable ASCII characters. You can get the archive
+ description when you either retrieve the archive or get the
+ vault inventory. For more information, see InitiateJob. Amazon
+ Glacier does not interpret the description in any way. An
+ archive description does not need to be unique. You cannot use
+ the description to retrieve or sort the archive list.
+
+ Archives are immutable. After you upload an archive, you
+ cannot edit the archive or its description.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and underlying REST API, go to
+ `Uploading an Archive in Amazon Glacier`_ and `Upload
+ Archive`_ in the Amazon Glacier Developer Guide .
- :type vault_name: str :param
+ :type vault_name: str
:param vault_name: The name of the vault
:type archive: bytes
@@ -414,7 +802,8 @@ class Layer1(AWSAuthConnection):
tree hash, see http://goo.gl/u7chF.
:type description: str
- :param description: An optional description of the archive.
+ :param description: The optional description of the archive you
+ are uploading.
"""
response_headers = [('x-amz-archive-id', u'ArchiveId'),
('Location', u'Location'),
@@ -445,13 +834,39 @@ class Layer1(AWSAuthConnection):
def delete_archive(self, vault_name, archive_id):
"""
- This operation deletes an archive from a vault.
+ This operation deletes an archive from a vault. Subsequent
+ requests to initiate a retrieval of this archive will fail.
+ Archive retrievals that are in progress for this archive ID
+ may or may not succeed according to the following scenarios:
+
+
+ + If the archive retrieval job is actively preparing the data
+ for download when Amazon Glacier receives the delete archive
+ request, the archival retrieval operation might fail.
+ + If the archive retrieval job has successfully prepared the
+ archive for download when Amazon Glacier receives the delete
+ archive request, you will be able to download the output.
- :type vault_name: str
- :param vault_name: The name of the new vault
- :type archive_id: str
- :param archive_id: The ID for the archive to be deleted.
+ This operation is idempotent. Attempting to delete an already-
+ deleted archive does not result in an error.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and underlying REST API, go to
+ `Deleting an Archive in Amazon Glacier`_ and `Delete Archive`_
+ in the Amazon Glacier Developer Guide .
+
+ :type vault_name: string
+ :param vault_name: The name of the vault.
+
+ :type archive_id: string
+ :param archive_id: The ID of the archive to delete.
"""
uri = 'vaults/%s/archives/%s' % (vault_name, archive_id)
return self.make_request('DELETE', uri, ok_responses=(204,))
@@ -461,21 +876,65 @@ class Layer1(AWSAuthConnection):
def initiate_multipart_upload(self, vault_name, part_size,
description=None):
"""
- Initiate a multipart upload. Amazon Glacier creates a
- multipart upload resource and returns it's ID. You use this
- ID in subsequent multipart upload operations.
+ This operation initiates a multipart upload. Amazon Glacier
+ creates a multipart upload resource and returns its ID in the
+ response. The multipart upload ID is used in subsequent
+ requests to upload parts of an archive (see
+ UploadMultipartPart).
+
+ When you initiate a multipart upload, you specify the part
+ size in number of bytes. The part size must be a megabyte
+ (1024 KB) multiplied by a power of 2, for example, 1048576 (1
+ MB), 2097152 (2 MB), 4194304 (4 MB), 8388608 (8 MB), and so
+ on. The minimum allowable part size is 1 MB, and the maximum
+ is 4 GB.
+
+ Every part you upload to this resource (see
+ UploadMultipartPart), except the last one, must have the same
+ size. The last one can be the same size or smaller. For
+ example, suppose you want to upload a 16.2 MB file. If you
+ initiate the multipart upload with a part size of 4 MB, you
+ will upload four parts of 4 MB each and one part of 0.2 MB.
+
+
+ You don't need to know the size of the archive when you start
+ a multipart upload because Amazon Glacier does not require you
+ to specify the overall archive size.
+
+
+ After you complete the multipart upload, Amazon Glacier
+ removes the multipart upload resource referenced by the ID.
+ Amazon Glacier also removes the multipart upload resource if
+ you cancel the multipart upload or it may be removed if there
+ is no activity for a period of 24 hours.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and underlying REST API, go to
+ `Uploading Large Archives in Parts (Multipart Upload)`_ and
+ `Initiate Multipart Upload`_ in the Amazon Glacier Developer
+ Guide .
+
+ The part size must be a megabyte (1024 KB) multiplied by a power of
+ 2, for example, 1048576 (1 MB), 2097152 (2 MB), 4194304 (4 MB),
+ 8388608 (8 MB), and so on. The minimum allowable part size is 1 MB,
+ and the maximum is 4 GB (4096 MB).
:type vault_name: str
:param vault_name: The name of the vault.
:type description: str
- :param description: An optional description of the archive.
+ :param description: The archive description that you are uploading in
+ parts.
:type part_size: int
- :param part_size: The size of each part except the last, in bytes.
- The part size must be a multiple of 1024 KB multiplied by
- a power of 2. The minimum allowable part size is 1MB and the
- maximum is 4GB.
+ :param part_size: The size of each part except the last, in bytes. The
+ last part can be smaller than this part size.
"""
response_headers = [('x-amz-multipart-upload-id', u'UploadId'),
('Location', u'Location')]
@@ -491,24 +950,77 @@ class Layer1(AWSAuthConnection):
def complete_multipart_upload(self, vault_name, upload_id,
sha256_treehash, archive_size):
"""
- Call this to inform Amazon Glacier that all of the archive parts
- have been uploaded and Amazon Glacier can now assemble the archive
- from the uploaded parts.
+ You call this operation to inform Amazon Glacier that all the
+ archive parts have been uploaded and that Amazon Glacier can
+ now assemble the archive from the uploaded parts. After
+ assembling and saving the archive to the vault, Amazon Glacier
+ returns the URI path of the newly created archive resource.
+ Using the URI path, you can then access the archive. After you
+ upload an archive, you should save the archive ID returned to
+ retrieve the archive at a later point. You can also get the
+ vault inventory to obtain a list of archive IDs in a vault.
+ For more information, see InitiateJob.
+
+ In the request, you must include the computed SHA256 tree hash
+ of the entire archive you have uploaded. For information about
+ computing a SHA256 tree hash, see `Computing Checksums`_. On
+ the server side, Amazon Glacier also constructs the SHA256
+ tree hash of the assembled archive. If the values match,
+ Amazon Glacier saves the archive to the vault; otherwise, it
+ returns an error, and the operation fails. The ListParts
+ operation returns a list of parts uploaded for a specific
+ multipart upload. It includes checksum information for each
+ uploaded part that can be used to debug a bad checksum issue.
+
+ Additionally, Amazon Glacier also checks for any missing
+ content ranges when assembling the archive, if missing content
+ ranges are found, Amazon Glacier returns an error and the
+ operation fails.
+
+ Complete Multipart Upload is an idempotent operation. After
+ your first successful complete multipart upload, if you call
+ the operation again within a short period, the operation will
+ succeed and return the same archive ID. This is useful in the
+ event you experience a network issue that causes an aborted
+ connection or receive a 500 server error, in which case you
+ can repeat your Complete Multipart Upload request and get the
+ same archive ID without creating duplicate archives. Note,
+ however, that after the multipart upload completes, you cannot
+ call the List Parts operation and the multipart upload will
+ not appear in List Multipart Uploads response, even if
+ idempotent complete is possible.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and underlying REST API, go to
+ `Uploading Large Archives in Parts (Multipart Upload)`_ and
+ `Complete Multipart Upload`_ in the Amazon Glacier Developer
+ Guide .
+
:type vault_name: str
:param vault_name: The name of the vault.
:type upload_id: str
- :param upload_id: The unique ID associated with this upload
- operation.
+ :param upload_id: The upload ID of the multipart upload.
:type sha256_treehash: str
- :param sha256_treehash: The SHA256 tree hash of the entire
- archive. It is the tree hash of SHA256 tree hash of the
- individual parts. If the value you specify in the request
- does not match the SHA256 tree hash of the final assembled
- archive as computed by Amazon Glacier, Amazon Glacier
- returns an error and the request fails.
+ :param sha256_treehash: The SHA256 tree hash of the entire archive.
+ It is the tree hash of SHA256 tree hash of the individual parts.
+ If the value you specify in the request does not match the SHA256
+ tree hash of the final assembled archive as computed by Amazon
+ Glacier, Amazon Glacier returns an error and the request fails.
:type archive_size: int
:param archive_size: The total size, in bytes, of the entire
@@ -527,37 +1039,90 @@ class Layer1(AWSAuthConnection):
def abort_multipart_upload(self, vault_name, upload_id):
"""
- Call this to abort a multipart upload identified by the upload ID.
-
- :type vault_name: str
+ This operation aborts a multipart upload identified by the
+ upload ID.
+
+ After the Abort Multipart Upload request succeeds, you cannot
+ upload any more parts to the multipart upload or complete the
+ multipart upload. Aborting a completed upload fails. However,
+ aborting an already-aborted upload will succeed, for a short
+ time. For more information about uploading a part and
+ completing a multipart upload, see UploadMultipartPart and
+ CompleteMultipartUpload.
+
+ This operation is idempotent.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and underlying REST API, go to
+ `Working with Archives in Amazon Glacier`_ and `Abort
+ Multipart Upload`_ in the Amazon Glacier Developer Guide .
+
+ :type vault_name: string
:param vault_name: The name of the vault.
- :type upload_id: str
- :param upload_id: The unique ID associated with this upload
- operation.
+ :type upload_id: string
+ :param upload_id: The upload ID of the multipart upload to delete.
"""
uri = 'vaults/%s/multipart-uploads/%s' % (vault_name, upload_id)
return self.make_request('DELETE', uri, ok_responses=(204,))
def list_multipart_uploads(self, vault_name, limit=None, marker=None):
"""
- Lists in-progress multipart uploads for the specified vault.
-
- :type vault_name: str
+ This operation lists in-progress multipart uploads for the
+ specified vault. An in-progress multipart upload is a
+ multipart upload that has been initiated by an
+ InitiateMultipartUpload request, but has not yet been
+ completed or aborted. The list returned in the List Multipart
+ Upload response has no guaranteed order.
+
+ The List Multipart Uploads operation supports pagination. By
+ default, this operation returns up to 1,000 multipart uploads
+ in the response. You should always check the response for a
+ `marker` at which to continue the list; if there are no more
+ items the `marker` is `null`. To return a list of multipart
+ uploads that begins at a specific upload, set the `marker`
+ request parameter to the value you obtained from a previous
+ List Multipart Upload request. You can also limit the number
+ of uploads returned in the response by specifying the `limit`
+ parameter in the request.
+
+ Note the difference between this operation and listing parts
+ (ListParts). The List Multipart Uploads operation lists all
+ multipart uploads for a vault and does not require a multipart
+ upload ID. The List Parts operation requires a multipart
+ upload ID since parts are associated with a single upload.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and the underlying REST API, go to
+ `Working with Archives in Amazon Glacier`_ and `List Multipart
+        Uploads`_ in the Amazon Glacier Developer Guide.
+
+ :type vault_name: string
:param vault_name: The name of the vault.
- :type limit: int
- :param limit: The maximum number of items returned in the
- response. If you don't specify a value, the operation
- returns up to 1,000 items.
-
- :type marker: str
- :param marker: An opaque string used for pagination. marker
- specifies the item at which the listing should
- begin. Get the marker value from a previous
- response. You need only include the marker if you are
- continuing the pagination of results started in a previous
- request.
+ :type limit: string
+ :param limit: Specifies the maximum number of uploads returned in the
+ response body. If this value is not specified, the List Uploads
+ operation returns up to 1,000 uploads.
+
+ :type marker: string
+ :param marker: An opaque string used for pagination. This value
+ specifies the upload at which the listing of uploads should begin.
+ Get the marker value from a previous List Uploads response. You
+ need only include the marker if you are continuing the pagination
+ of results started in a previous List Uploads request.
"""
params = {}
if limit:
@@ -569,27 +1134,51 @@ class Layer1(AWSAuthConnection):
def list_parts(self, vault_name, upload_id, limit=None, marker=None):
"""
- Lists in-progress multipart uploads for the specified vault.
-
- :type vault_name: str
+ This operation lists the parts of an archive that have been
+ uploaded in a specific multipart upload. You can make this
+ request at any time during an in-progress multipart upload
+        before you complete the upload (see CompleteMultipartUpload).
+ List Parts returns an error for completed uploads. The list
+ returned in the List Parts response is sorted by part range.
+
+ The List Parts operation supports pagination. By default, this
+ operation returns up to 1,000 uploaded parts in the response.
+ You should always check the response for a `marker` at which
+ to continue the list; if there are no more items the `marker`
+ is `null`. To return a list of parts that begins at a specific
+ part, set the `marker` request parameter to the value you
+ obtained from a previous List Parts request. You can also
+ limit the number of parts returned in the response by
+ specifying the `limit` parameter in the request.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and the underlying REST API, go to
+ `Working with Archives in Amazon Glacier`_ and `List Parts`_
+ in the Amazon Glacier Developer Guide .
+
+ :type vault_name: string
:param vault_name: The name of the vault.
- :type upload_id: str
- :param upload_id: The unique ID associated with this upload
- operation.
+ :type upload_id: string
+ :param upload_id: The upload ID of the multipart upload.
- :type limit: int
- :param limit: The maximum number of items returned in the
- response. If you don't specify a value, the operation
- returns up to 1,000 items.
+ :type marker: string
+ :param marker: An opaque string used for pagination. This value
+ specifies the part at which the listing of parts should begin. Get
+ the marker value from the response of a previous List Parts
+ response. You need only include the marker if you are continuing
+ the pagination of results started in a previous List Parts request.
- :type marker: str
- :param marker: An opaque string used for pagination. marker
- specifies the item at which the listing should
- begin. Get the marker value from a previous
- response. You need only include the marker if you are
- continuing the pagination of results started in a previous
- request.
+ :type limit: string
+ :param limit: Specifies the maximum number of parts returned in the
+ response body. If this value is not specified, the List Parts
+            operation returns up to 1,000 parts.
"""
params = {}
if limit:
@@ -602,7 +1191,55 @@ class Layer1(AWSAuthConnection):
def upload_part(self, vault_name, upload_id, linear_hash,
tree_hash, byte_range, part_data):
"""
- Lists in-progress multipart uploads for the specified vault.
+ This operation uploads a part of an archive. You can upload
+ archive parts in any order. You can also upload them in
+ parallel. You can upload up to 10,000 parts for a multipart
+ upload.
+
+ Amazon Glacier rejects your upload part request if any of the
+ following conditions is true:
+
+
+        + **SHA256 tree hash does not match** To ensure that part data
+ is not corrupted in transmission, you compute a SHA256 tree
+ hash of the part and include it in your request. Upon
+ receiving the part data, Amazon Glacier also computes a SHA256
+ tree hash. If these hash values don't match, the operation
+ fails. For information about computing a SHA256 tree hash, see
+ `Computing Checksums`_.
+        + **Part size does not match** The size of each part except the
+ last must match the size specified in the corresponding
+ InitiateMultipartUpload request. The size of the last part
+ must be the same size as, or smaller than, the specified size.
+ If you upload a part whose size is smaller than the part size
+ you specified in your initiate multipart upload request and
+ that part is not the last part, then the upload part request
+ will succeed. However, the subsequent Complete Multipart
+ Upload request will fail.
+        + **Range does not align** The byte range value in the request
+ does not align with the part size specified in the
+ corresponding initiate request. For example, if you specify a
+ part size of 4194304 bytes (4 MB), then 0 to 4194303 bytes (4
+ MB - 1) and 4194304 (4 MB) to 8388607 (8 MB - 1) are valid
+ part ranges. However, if you set a range value of 2 MB to 6
+ MB, the range does not align with the part size and the upload
+ will fail.
+
+
+ This operation is idempotent. If you upload the same part
+ multiple times, the data included in the most recent request
+ overwrites the previously uploaded data.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and underlying REST API, go to
+ `Uploading Large Archives in Parts (Multipart Upload)`_ and
+        `Upload Part`_ in the Amazon Glacier Developer Guide.
:type vault_name: str
:param vault_name: The name of the vault.
@@ -621,8 +1258,11 @@ class Layer1(AWSAuthConnection):
operation.
:type byte_range: tuple of ints
- :param byte_range: Identfies the range of bytes in the assembled
- archive that will be uploaded in this part.
+ :param byte_range: Identifies the range of bytes in the assembled
+ archive that will be uploaded in this part. Amazon Glacier uses
+ this information to assemble the archive in the proper sequence.
+ The format of this header follows RFC 2616. An example header is
+ Content-Range:bytes 0-4194303/*.
:type part_data: bytes
:param part_data: The data to be uploaded for the part
diff --git a/boto/glacier/vault.py b/boto/glacier/vault.py
index 0186dbd3..e7d4e27d 100644
--- a/boto/glacier/vault.py
+++ b/boto/glacier/vault.py
@@ -300,7 +300,9 @@ class Vault(object):
return self.get_job(response['JobId'])
def retrieve_inventory(self, sns_topic=None,
- description=None):
+ description=None, byte_range=None,
+ start_date=None, end_date=None,
+ limit=None):
"""
         Initiate an inventory retrieval job to list the items in the
vault. You will need to wait for the notification from
@@ -315,6 +317,18 @@ class Vault(object):
sends notification when the job is completed and the output
is ready for you to download.
+ :type byte_range: str
+ :param byte_range: Range of bytes to retrieve.
+
+ :type start_date: DateTime
+ :param start_date: Beginning of the date range to query.
+
+ :type end_date: DateTime
+ :param end_date: End of the date range to query.
+
+ :type limit: int
+ :param limit: Limits the number of results returned.
+
:rtype: str
:return: The ID of the job
"""
@@ -323,6 +337,19 @@ class Vault(object):
job_data['SNSTopic'] = sns_topic
if description is not None:
job_data['Description'] = description
+ if byte_range is not None:
+ job_data['RetrievalByteRange'] = byte_range
+ if start_date is not None or end_date is not None or limit is not None:
+ rparams = {}
+
+ if start_date is not None:
+ rparams['StartDate'] = start_date.isoformat()
+ if end_date is not None:
+ rparams['EndDate'] = end_date.isoformat()
+ if limit is not None:
+ rparams['Limit'] = limit
+
+ job_data['InventoryRetrievalParameters'] = rparams
response = self.layer1.initiate_job(self.name, job_data)
return response['JobId']
@@ -340,6 +367,18 @@ class Vault(object):
sends notification when the job is completed and the output
is ready for you to download.
+ :type byte_range: str
+ :param byte_range: Range of bytes to retrieve.
+
+ :type start_date: DateTime
+ :param start_date: Beginning of the date range to query.
+
+ :type end_date: DateTime
+ :param end_date: End of the date range to query.
+
+ :type limit: int
+ :param limit: Limits the number of results returned.
+
:rtype: :class:`boto.glacier.job.Job`
:return: A Job object representing the retrieval job.
"""
diff --git a/boto/gs/key.py b/boto/gs/key.py
index 4417973b..b67e0604 100644
--- a/boto/gs/key.py
+++ b/boto/gs/key.py
@@ -219,7 +219,7 @@ class Key(S3Key):
with the stored object in the response. See
http://goo.gl/sMkcC for details.
"""
- if self.bucket != None:
+ if self.bucket is not None:
if res_download_handler:
res_download_handler.get_file(self, fp, headers, cb, num_cb,
torrent=torrent,
@@ -528,7 +528,7 @@ class Key(S3Key):
if hasattr(fp, 'name'):
self.path = fp.name
- if self.bucket != None:
+ if self.bucket is not None:
if isinstance(fp, KeyFile):
# Avoid EOF seek for KeyFile case as it's very inefficient.
key = fp.getkey()
@@ -552,12 +552,12 @@ class Key(S3Key):
fp.seek(spos)
size = self.size
- if md5 == None:
+ if md5 is None:
md5 = self.compute_md5(fp, size)
self.md5 = md5[0]
self.base64md5 = md5[1]
- if self.name == None:
+ if self.name is None:
self.name = self.md5
if not replace:
@@ -792,7 +792,7 @@ class Key(S3Key):
the acl will only be updated if its current metageneration number is
this value.
"""
- if self.bucket != None:
+ if self.bucket is not None:
self.bucket.set_acl(acl_or_str, self.name, headers=headers,
generation=generation,
if_generation=if_generation,
@@ -809,7 +809,7 @@ class Key(S3Key):
:rtype: :class:`.gs.acl.ACL`
"""
- if self.bucket != None:
+ if self.bucket is not None:
return self.bucket.get_acl(self.name, headers=headers,
generation=generation)
@@ -824,7 +824,7 @@ class Key(S3Key):
:rtype: str
"""
- if self.bucket != None:
+ if self.bucket is not None:
return self.bucket.get_xml_acl(self.name, headers=headers,
generation=generation)
@@ -852,7 +852,7 @@ class Key(S3Key):
the acl will only be updated if its current metageneration number is
this value.
"""
- if self.bucket != None:
+ if self.bucket is not None:
return self.bucket.set_xml_acl(acl_str, self.name, headers=headers,
generation=generation,
if_generation=if_generation,
@@ -883,7 +883,7 @@ class Key(S3Key):
the acl will only be updated if its current metageneration number is
this value.
"""
- if self.bucket != None:
+ if self.bucket is not None:
return self.bucket.set_canned_acl(
acl_str,
self.name,
diff --git a/boto/gs/resumable_upload_handler.py b/boto/gs/resumable_upload_handler.py
index d3d86297..d3835e3a 100644
--- a/boto/gs/resumable_upload_handler.py
+++ b/boto/gs/resumable_upload_handler.py
@@ -102,13 +102,13 @@ class ResumableUploadHandler(object):
# Ignore non-existent file (happens first time an upload
# is attempted on a file), but warn user for other errors.
if e.errno != errno.ENOENT:
- # Will restart because self.tracker_uri == None.
+ # Will restart because self.tracker_uri is None.
print('Couldn\'t read URI tracker file (%s): %s. Restarting '
'upload from scratch.' %
(self.tracker_file_name, e.strerror))
except InvalidUriError, e:
# Warn user, but proceed (will restart because
- # self.tracker_uri == None).
+ # self.tracker_uri is None).
print('Invalid tracker URI (%s) found in URI tracker file '
'(%s). Restarting upload from scratch.' %
(uri, self.tracker_file_name))
@@ -124,8 +124,9 @@ class ResumableUploadHandler(object):
return
f = None
try:
- f = open(self.tracker_file_name, 'w')
- f.write(self.tracker_uri)
+ with os.fdopen(os.open(self.tracker_file_name,
+ os.O_WRONLY | os.O_CREAT, 0600), 'w') as f:
+ f.write(self.tracker_uri)
except IOError, e:
raise ResumableUploadException(
'Couldn\'t write URI tracker file (%s): %s.\nThis can happen'
@@ -134,9 +135,6 @@ class ResumableUploadHandler(object):
'unwritable directory)' %
(self.tracker_file_name, e.strerror),
ResumableTransferDisposition.ABORT)
- finally:
- if f:
- f.close()
def _set_tracker_uri(self, uri):
"""
diff --git a/boto/handler.py b/boto/handler.py
index e11722bf..f936ee88 100644
--- a/boto/handler.py
+++ b/boto/handler.py
@@ -32,7 +32,7 @@ class XmlHandler(xml.sax.ContentHandler):
def startElement(self, name, attrs):
self.current_text = ''
new_node = self.nodes[-1][1].startElement(name, attrs, self.connection)
- if new_node != None:
+ if new_node is not None:
self.nodes.append((name, new_node))
def endElement(self, name):
diff --git a/boto/jsonresponse.py b/boto/jsonresponse.py
index 01e1f54f..5dab5af9 100644
--- a/boto/jsonresponse.py
+++ b/boto/jsonresponse.py
@@ -33,7 +33,7 @@ class XmlHandler(xml.sax.ContentHandler):
def startElement(self, name, attrs):
self.current_text = ''
t = self.nodes[-1][1].startElement(name, attrs, self.connection)
- if t != None:
+ if t is not None:
if isinstance(t, tuple):
self.nodes.append(t)
else:
diff --git a/boto/manage/cmdshell.py b/boto/manage/cmdshell.py
index 0d04d58b..0d726412 100644
--- a/boto/manage/cmdshell.py
+++ b/boto/manage/cmdshell.py
@@ -182,7 +182,7 @@ class LocalClient(object):
log_fp = StringIO.StringIO()
process = subprocess.Popen(self.command, shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- while process.poll() == None:
+ while process.poll() is None:
time.sleep(1)
t = process.communicate()
log_fp.write(t[0])
diff --git a/boto/manage/server.py b/boto/manage/server.py
index 93fec504..885db651 100644
--- a/boto/manage/server.py
+++ b/boto/manage/server.py
@@ -323,7 +323,7 @@ class Server(Model):
i = 0
elastic_ip = params.get('elastic_ip')
instances = reservation.instances
- if elastic_ip != None and instances.__len__() > 0:
+ if elastic_ip is not None and instances.__len__() > 0:
instance = instances[0]
print 'Waiting for instance to start so we can set its elastic IP address...'
# Sometimes we get a message from ec2 that says that the instance does not exist.
diff --git a/boto/manage/task.py b/boto/manage/task.py
index 8271529a..5d273c31 100644
--- a/boto/manage/task.py
+++ b/boto/manage/task.py
@@ -105,7 +105,7 @@ class Task(Model):
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
nsecs = 5
current_timeout = vtimeout
- while process.poll() == None:
+ while process.poll() is None:
boto.log.info('nsecs=%s, timeout=%s' % (nsecs, current_timeout))
if nsecs >= current_timeout:
current_timeout += vtimeout
diff --git a/boto/manage/volume.py b/boto/manage/volume.py
index 841c1247..fae9df46 100644
--- a/boto/manage/volume.py
+++ b/boto/manage/volume.py
@@ -136,7 +136,7 @@ class Volume(Model):
if size < self.size:
size = self.size
ec2 = self.get_ec2_connection()
- if self.zone_name == None or self.zone_name == '':
+ if self.zone_name is None or self.zone_name == '':
# deal with the migration case where the zone is not set in the logical volume:
current_volume = ec2.get_all_volumes([self.volume_id])[0]
self.zone_name = current_volume.zone
@@ -155,7 +155,7 @@ class Volume(Model):
def get_ec2_connection(self):
if self.server:
return self.server.ec2
- if not hasattr(self, 'ec2') or self.ec2 == None:
+ if not hasattr(self, 'ec2') or self.ec2 is None:
self.ec2 = boto.ec2.connect_to_region(self.region_name)
return self.ec2
@@ -209,7 +209,7 @@ class Volume(Model):
def detach(self, force=False):
state = self.attachment_state
- if state == 'available' or state == None or state == 'detaching':
+ if state == 'available' or state is None or state == 'detaching':
print 'already detached'
return None
ec2 = self.get_ec2_connection()
@@ -218,7 +218,7 @@ class Volume(Model):
self.put()
def checkfs(self, use_cmd=None):
- if self.server == None:
+ if self.server is None:
raise ValueError('server attribute must be set to run this command')
# detemine state of file system on volume, only works if attached
if use_cmd:
@@ -233,7 +233,7 @@ class Volume(Model):
return True
def wait(self):
- if self.server == None:
+ if self.server is None:
raise ValueError('server attribute must be set to run this command')
with closing(self.server.get_cmdshell()) as cmd:
# wait for the volume device to appear
@@ -243,7 +243,7 @@ class Volume(Model):
time.sleep(10)
def format(self):
- if self.server == None:
+ if self.server is None:
raise ValueError('server attribute must be set to run this command')
status = None
with closing(self.server.get_cmdshell()) as cmd:
@@ -253,7 +253,7 @@ class Volume(Model):
return status
def mount(self):
- if self.server == None:
+ if self.server is None:
raise ValueError('server attribute must be set to run this command')
boto.log.info('handle_mount_point')
with closing(self.server.get_cmdshell()) as cmd:
@@ -302,7 +302,7 @@ class Volume(Model):
# we need to freeze the XFS file system
try:
self.freeze()
- if self.server == None:
+ if self.server is None:
snapshot = self.get_ec2_connection().create_snapshot(self.volume_id)
else:
snapshot = self.server.ec2.create_snapshot(self.volume_id)
diff --git a/boto/mashups/order.py b/boto/mashups/order.py
index 6efdc3ec..c4deebff 100644
--- a/boto/mashups/order.py
+++ b/boto/mashups/order.py
@@ -179,7 +179,7 @@ class Order(IObject):
item.ami.id, item.groups, item.key.name)
def place(self, block=True):
- if get_domain() == None:
+ if get_domain() is None:
print 'SDB Persistence Domain not set'
domain_name = self.get_string('Specify SDB Domain')
set_domain(domain_name)
diff --git a/boto/mturk/layoutparam.py b/boto/mturk/layoutparam.py
index 781f981d..de798955 100644
--- a/boto/mturk/layoutparam.py
+++ b/boto/mturk/layoutparam.py
@@ -22,7 +22,7 @@
class LayoutParameters(object):
def __init__(self, layoutParameters=None):
- if layoutParameters == None:
+ if layoutParameters is None:
layoutParameters = []
self.layoutParameters = layoutParameters
diff --git a/boto/mturk/qualification.py b/boto/mturk/qualification.py
index 4b518c89..4fc230f9 100644
--- a/boto/mturk/qualification.py
+++ b/boto/mturk/qualification.py
@@ -22,7 +22,7 @@
class Qualifications(object):
def __init__(self, requirements=None):
- if requirements == None:
+ if requirements is None:
requirements = []
self.requirements = requirements
diff --git a/boto/mws/connection.py b/boto/mws/connection.py
index d9a20035..7c068b52 100644
--- a/boto/mws/connection.py
+++ b/boto/mws/connection.py
@@ -78,7 +78,7 @@ def http_body(field):
def decorator(func):
def wrapper(*args, **kw):
- if filter(lambda x: not x in kw, (field, 'content_type')):
+ if any([f not in kw for f in (field, 'content_type')]):
message = "{0} requires {1} and content_type arguments for " \
"building HTTP body".format(func.action, field)
raise KeyError(message)
@@ -94,16 +94,18 @@ def http_body(field):
return decorator
-def destructure_object(value, into={}, prefix=''):
+def destructure_object(value, into, prefix=''):
if isinstance(value, ResponseElement):
- for name, attr in value.__dict__.items():
+ destructure_object(value.__dict__, into, prefix=prefix)
+ elif isinstance(value, dict):
+ for name, attr in value.iteritems():
if name.startswith('_'):
continue
- destructure_object(attr, into=into, prefix=prefix + '.' + name)
- elif filter(lambda x: isinstance(value, x), (list, set, tuple)):
- for index, element in [(prefix + '.' + str(i + 1), value[i])
- for i in range(len(value))]:
- destructure_object(element, into=into, prefix=index)
+ destructure_object(attr, into, prefix=prefix + '.' + name)
+ elif any([isinstance(value, typ) for typ in (list, set, tuple)]):
+ for index, element in enumerate(value):
+ newprefix = prefix + '.' + str(index + 1)
+ destructure_object(element, into, prefix=newprefix)
elif isinstance(value, bool):
into[prefix] = str(value).lower()
else:
@@ -116,10 +118,10 @@ def structured_objects(*fields):
def wrapper(*args, **kw):
for field in filter(kw.has_key, fields):
- destructure_object(kw.pop(field), into=kw, prefix=field)
+ destructure_object(kw.pop(field), kw, prefix=field)
return func(*args, **kw)
- wrapper.__doc__ = "{0}\nObjects: {1}".format(func.__doc__,
- ', '.join(fields))
+ wrapper.__doc__ = "{0}\nObjects|dicts: {1}".format(func.__doc__,
+ ', '.join(fields))
return add_attrs_from(func, to=wrapper)
return decorator
@@ -203,7 +205,7 @@ def boolean_arguments(*fields):
def decorator(func):
def wrapper(*args, **kw):
- for field in filter(lambda x: isinstance(kw.get(x), bool), fields):
+ for field in [f for f in fields if isinstance(kw.get(f), bool)]:
kw[field] = str(kw[field]).lower()
return func(*args, **kw)
wrapper.__doc__ = "{0}\nBooleans: {1}".format(func.__doc__,
@@ -256,11 +258,13 @@ class MWSConnection(AWSQueryConnection):
def _required_auth_capability(self):
return ['mws']
- def post_request(self, path, params, cls, body='', headers={}, isXML=True):
+ def post_request(self, path, params, cls, body='', headers=None,
+ isXML=True):
"""Make a POST request, optionally with a content body,
and return the response, optionally as raw text.
Modelled off of the inherited get_object/make_request flow.
"""
+ headers = headers or {}
request = self.build_base_http_request('POST', path, None, data=body,
params=params, headers=headers,
host=self.host)
@@ -321,9 +325,10 @@ class MWSConnection(AWSQueryConnection):
@structured_lists('MarketplaceIdList.Id')
@requires(['FeedType'])
@api_action('Feeds', 15, 120)
- def submit_feed(self, path, response, headers={}, body='', **kw):
+ def submit_feed(self, path, response, headers=None, body='', **kw):
"""Uploads a feed for processing by Amazon MWS.
"""
+ headers = headers or {}
return self.post_request(path, kw, response, body=body,
headers=headers)
diff --git a/boto/provider.py b/boto/provider.py
index 6f77faaf..75400638 100644
--- a/boto/provider.py
+++ b/boto/provider.py
@@ -68,8 +68,10 @@ STORAGE_RESPONSE_ERROR = 'StorageResponseError'
class Provider(object):
CredentialMap = {
- 'aws': ('aws_access_key_id', 'aws_secret_access_key'),
- 'google': ('gs_access_key_id', 'gs_secret_access_key'),
+ 'aws': ('aws_access_key_id', 'aws_secret_access_key',
+ 'aws_security_token'),
+ 'google': ('gs_access_key_id', 'gs_secret_access_key',
+ None),
}
AclClassMap = {
@@ -176,7 +178,7 @@ class Provider(object):
self.acl_class = self.AclClassMap[self.name]
self.canned_acls = self.CannedAclsMap[self.name]
self._credential_expiry_time = None
- self.get_credentials(access_key, secret_key)
+ self.get_credentials(access_key, secret_key, security_token)
self.configure_headers()
self.configure_errors()
# Allow config file to override default host and port.
@@ -239,8 +241,9 @@ class Provider(object):
else:
return False
- def get_credentials(self, access_key=None, secret_key=None):
- access_key_name, secret_key_name = self.CredentialMap[self.name]
+ def get_credentials(self, access_key=None, secret_key=None,
+ security_token=None):
+ access_key_name, secret_key_name, security_token_name = self.CredentialMap[self.name]
if access_key is not None:
self.access_key = access_key
boto.log.debug("Using access key provided by client.")
@@ -273,6 +276,19 @@ class Provider(object):
keyring_name, self.access_key)
boto.log.debug("Using secret key found in keyring.")
+ if security_token is not None:
+ self.security_token = security_token
+ boto.log.debug("Using security token provided by client.")
+ elif security_token_name is not None:
+ if security_token_name.upper() in os.environ:
+ self.security_token = os.environ[security_token_name.upper()]
+ boto.log.debug("Using security token found in environment"
+ " variable.")
+ elif config.has_option('Credentials', security_token_name):
+ self.security_token = config.get('Credentials',
+ security_token_name)
+ boto.log.debug("Using security token found in config file.")
+
if ((self._access_key is None or self._secret_key is None) and
self.MetadataServiceSupport[self.name]):
self._populate_keys_from_metadata_server()
diff --git a/boto/pyami/config.py b/boto/pyami/config.py
index 28b6f6d8..48314e26 100644
--- a/boto/pyami/config.py
+++ b/boto/pyami/config.py
@@ -223,7 +223,7 @@ class Config(ConfigParser.SafeConfigParser):
d = json.loads(item[section])
for attr_name in d.keys():
attr_value = d[attr_name]
- if attr_value == None:
+ if attr_value is None:
attr_value = 'None'
if isinstance(attr_value, bool):
self.setbool(section, attr_name, attr_value)
diff --git a/boto/pyami/installers/ubuntu/ebs.py b/boto/pyami/installers/ubuntu/ebs.py
index 45f5dbbe..34d635fc 100644
--- a/boto/pyami/installers/ubuntu/ebs.py
+++ b/boto/pyami/installers/ubuntu/ebs.py
@@ -219,7 +219,7 @@ class EBSInstaller(Installer):
# Set up the backup cleanup script
minute = boto.config.get('EBS', 'backup_cleanup_cron_minute')
hour = boto.config.get('EBS', 'backup_cleanup_cron_hour')
- if (minute != None) and (hour != None):
+ if (minute is not None) and (hour is not None):
# Snapshot clean up can either be done via the manage module, or via the new tag based
# snapshot code, if the snapshots have been tagged with the name of the associated
# volume. Check for the presence of the new configuration flag, and use the appropriate
diff --git a/boto/rds/__init__.py b/boto/rds/__init__.py
index d08d445d..775c1acf 100644
--- a/boto/rds/__init__.py
+++ b/boto/rds/__init__.py
@@ -1384,11 +1384,11 @@ class RDSConnection(AWSQueryConnection):
:return: A list of :class:`boto.rds.dbsubnetgroup.DBSubnetGroup`
"""
params = dict()
- if name != None:
+ if name is not None:
params['DBSubnetGroupName'] = name
- if max_records != None:
+ if max_records is not None:
params['MaxRecords'] = max_records
- if marker != None:
+ if marker is not None:
params['Marker'] = marker
return self.get_list('DescribeDBSubnetGroups', params, [('DBSubnetGroup',DBSubnetGroup)])
@@ -1407,9 +1407,9 @@ class RDSConnection(AWSQueryConnection):
:return: The newly created ParameterGroup
"""
params = {'DBSubnetGroupName': name}
- if description != None:
+ if description is not None:
params['DBSubnetGroupDescription'] = description
- if subnet_ids != None:
+ if subnet_ids is not None:
self.build_list_params(params, subnet_ids, 'SubnetIds.member')
return self.get_object('ModifyDBSubnetGroup', params, DBSubnetGroup)
diff --git a/boto/rds/dbsubnetgroup.py b/boto/rds/dbsubnetgroup.py
index 4b9fc580..4f6bde89 100644
--- a/boto/rds/dbsubnetgroup.py
+++ b/boto/rds/dbsubnetgroup.py
@@ -40,7 +40,7 @@ class DBSubnetGroup(object):
self.connection = connection
self.name = name
self.description = description
- if subnet_ids != None:
+ if subnet_ids is not None:
self.subnet_ids = subnet_ids
else:
self.subnet_ids = []
diff --git a/boto/rds/parametergroup.py b/boto/rds/parametergroup.py
index 423cfc96..ade3b807 100644
--- a/boto/rds/parametergroup.py
+++ b/boto/rds/parametergroup.py
@@ -175,7 +175,7 @@ class Parameter(object):
raise TypeError('unknown type (%s)' % self.type)
def get_value(self):
- if self._value == None:
+ if self._value is None:
return self._value
if self.type == 'string':
return self._value
diff --git a/boto/route53/record.py b/boto/route53/record.py
index 17f38b94..ab436db9 100644
--- a/boto/route53/record.py
+++ b/boto/route53/record.py
@@ -218,7 +218,7 @@ class Record(object):
self.name = name
self.type = type
self.ttl = ttl
- if resource_records == None:
+ if resource_records is None:
resource_records = []
self.resource_records = resource_records
self.alias_hosted_zone_id = alias_hosted_zone_id
@@ -241,7 +241,7 @@ class Record(object):
def to_xml(self):
"""Spit this resource record set out as XML"""
- if self.alias_hosted_zone_id != None and self.alias_dns_name != None:
+ if self.alias_hosted_zone_id is not None and self.alias_dns_name is not None:
# Use alias
body = self.AliasBody % (self.alias_hosted_zone_id, self.alias_dns_name)
else:
@@ -258,10 +258,10 @@ class Record(object):
weight = ""
- if self.identifier != None and self.weight != None:
+ if self.identifier is not None and self.weight is not None:
weight = self.WRRBody % {"identifier": self.identifier, "weight":
self.weight}
- elif self.identifier != None and self.region != None:
+ elif self.identifier is not None and self.region is not None:
weight = self.RRRBody % {"identifier": self.identifier, "region":
self.region}
@@ -275,16 +275,16 @@ class Record(object):
def to_print(self):
rr = ""
- if self.alias_hosted_zone_id != None and self.alias_dns_name != None:
+ if self.alias_hosted_zone_id is not None and self.alias_dns_name is not None:
# Show alias
rr = 'ALIAS ' + self.alias_hosted_zone_id + ' ' + self.alias_dns_name
else:
# Show resource record(s)
rr = ",".join(self.resource_records)
- if self.identifier != None and self.weight != None:
+ if self.identifier is not None and self.weight is not None:
rr += ' (WRR id=%s, w=%s)' % (self.identifier, self.weight)
- elif self.identifier != None and self.region != None:
+ elif self.identifier is not None and self.region is not None:
rr += ' (LBR id=%s, region=%s)' % (self.identifier, self.region)
return rr
diff --git a/boto/s3/connection.py b/boto/s3/connection.py
index ba4e642f..b876b8f1 100644
--- a/boto/s3/connection.py
+++ b/boto/s3/connection.py
@@ -270,9 +270,9 @@ class S3Connection(AWSAuthConnection):
"""
- if fields == None:
+ if fields is None:
fields = []
- if conditions == None:
+ if conditions is None:
conditions = []
expiration = time.gmtime(int(time.time() + expires_in))
diff --git a/boto/s3/key.py b/boto/s3/key.py
index 0849584d..5c9f076f 100644
--- a/boto/s3/key.py
+++ b/boto/s3/key.py
@@ -258,7 +258,7 @@ class Key(object):
with the stored object in the response. See
http://goo.gl/EWOPb for details.
"""
- if self.resp == None:
+ if self.resp is None:
self.mode = 'r'
provider = self.bucket.connection.provider
@@ -538,19 +538,19 @@ class Key(object):
# convenience methods for setting/getting ACL
def set_acl(self, acl_str, headers=None):
- if self.bucket != None:
+ if self.bucket is not None:
self.bucket.set_acl(acl_str, self.name, headers=headers)
def get_acl(self, headers=None):
- if self.bucket != None:
+ if self.bucket is not None:
return self.bucket.get_acl(self.name, headers=headers)
def get_xml_acl(self, headers=None):
- if self.bucket != None:
+ if self.bucket is not None:
return self.bucket.get_xml_acl(self.name, headers=headers)
def set_xml_acl(self, acl_str, headers=None):
- if self.bucket != None:
+ if self.bucket is not None:
return self.bucket.set_xml_acl(acl_str, self.name, headers=headers)
def set_canned_acl(self, acl_str, headers=None):
@@ -882,7 +882,7 @@ class Key(object):
'Content-Type', headers)
elif self.path:
self.content_type = mimetypes.guess_type(self.path)[0]
- if self.content_type == None:
+ if self.content_type is None:
self.content_type = self.DefaultContentType
headers['Content-Type'] = self.content_type
else:
@@ -1060,7 +1060,7 @@ class Key(object):
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
- if self.bucket != None:
+ if self.bucket is not None:
if not replace:
if self.bucket.lookup(self.name):
return
@@ -1194,7 +1194,7 @@ class Key(object):
# What if different providers provide different classes?
if hasattr(fp, 'name'):
self.path = fp.name
- if self.bucket != None:
+ if self.bucket is not None:
if not md5 and provider.supports_chunked_transfer():
# defer md5 calculation to on the fly and
# we don't know anything about size yet.
@@ -1233,7 +1233,7 @@ class Key(object):
self.md5 = md5[0]
self.base64md5 = md5[1]
- if self.name == None:
+ if self.name is None:
self.name = self.md5
if not replace:
if self.bucket.lookup(self.name):
@@ -1574,7 +1574,7 @@ class Key(object):
with the stored object in the response. See
http://goo.gl/EWOPb for details.
"""
- if self.bucket != None:
+ if self.bucket is not None:
if res_download_handler:
res_download_handler.get_file(self, fp, headers, cb, num_cb,
torrent=torrent,
@@ -1641,7 +1641,7 @@ class Key(object):
os.remove(filename)
raise
# if last_modified date was sent from s3, try to set file's timestamp
- if self.last_modified != None:
+ if self.last_modified is not None:
try:
modified_tuple = rfc822.parsedate_tz(self.last_modified)
modified_stamp = int(rfc822.mktime_tz(modified_tuple))
diff --git a/boto/s3/resumable_download_handler.py b/boto/s3/resumable_download_handler.py
index cf182791..56e0ce3e 100644
--- a/boto/s3/resumable_download_handler.py
+++ b/boto/s3/resumable_download_handler.py
@@ -140,7 +140,7 @@ class ResumableDownloadHandler(object):
# is attempted on an object), but warn user for other errors.
if e.errno != errno.ENOENT:
# Will restart because
- # self.etag_value_for_current_download == None.
+ # self.etag_value_for_current_download is None.
print('Couldn\'t read URI tracker file (%s): %s. Restarting '
'download from scratch.' %
(self.tracker_file_name, e.strerror))
diff --git a/boto/sdb/connection.py b/boto/sdb/connection.py
index 04212df8..bfbce921 100644
--- a/boto/sdb/connection.py
+++ b/boto/sdb/connection.py
@@ -493,7 +493,7 @@ class SDBConnection(AWSQueryConnection):
response = self.make_request('GetAttributes', params)
body = response.read()
if response.status == 200:
- if item == None:
+ if item is None:
item = self.item_cls(domain, item_name)
h = handler.XmlHandler(item, self)
xml.sax.parseString(body, h)
diff --git a/boto/sdb/db/key.py b/boto/sdb/db/key.py
index f630d398..6ac47a68 100644
--- a/boto/sdb/db/key.py
+++ b/boto/sdb/db/key.py
@@ -50,7 +50,7 @@ class Key(object):
return self.id
def has_id_or_name(self):
- return self.id != None
+ return self.id is not None
def parent(self):
raise NotImplementedError("Key parents are not currently supported")
diff --git a/boto/sdb/db/manager/sdbmanager.py b/boto/sdb/db/manager/sdbmanager.py
index 23183a0b..2613ff08 100644
--- a/boto/sdb/db/manager/sdbmanager.py
+++ b/boto/sdb/db/manager/sdbmanager.py
@@ -107,7 +107,7 @@ class SDBConverter(object):
def encode_map(self, prop, value):
import urllib
- if value == None:
+ if value is None:
return None
if not isinstance(value, dict):
raise ValueError('Expected a dict value, got %s' % type(value))
@@ -117,7 +117,7 @@ class SDBConverter(object):
if self.model_class in item_type.mro():
item_type = self.model_class
encoded_value = self.encode(item_type, value[key])
- if encoded_value != None:
+ if encoded_value is not None:
new_value.append('%s:%s' % (urllib.quote(key), encoded_value))
return new_value
@@ -136,7 +136,7 @@ class SDBConverter(object):
item_type = getattr(prop, "item_type")
dec_val = {}
for val in value:
- if val != None:
+ if val is not None:
k, v = self.decode_map_element(item_type, val)
try:
k = int(k)
@@ -351,7 +351,7 @@ class SDBConverter(object):
else:
raise SDBPersistenceError("Invalid Blob ID: %s" % value.id)
- if value.value != None:
+ if value.value is not None:
key.set_contents_from_string(value.value)
return value.id
@@ -415,7 +415,7 @@ class SDBManager(object):
self.converter = SDBConverter(self)
self._sdb = None
self._domain = None
- if consistent == None and hasattr(cls, "__consistent__"):
+ if consistent is None and hasattr(cls, "__consistent__"):
consistent = cls.__consistent__
self.consistent = consistent
@@ -456,7 +456,7 @@ class SDBManager(object):
yield obj
def encode_value(self, prop, value):
- if value == None:
+ if value is None:
return None
if not prop:
return str(value)
@@ -544,7 +544,7 @@ class SDBManager(object):
name = 'itemName()'
if name != "itemName()":
name = '`%s`' % name
- if val == None:
+ if val is None:
if op in ('is', '='):
return "%(name)s is null" % {"name": name}
elif op in ('is not', '!='):
@@ -585,7 +585,7 @@ class SDBManager(object):
query = "WHERE %s AND `__type__` = '%s'" % (filters, cls.__name__)
if order_by in ["__id__", "itemName()"]:
query += " ORDER BY itemName() %s" % order_by_method
- elif order_by != None:
+ elif order_by is not None:
query += " ORDER BY `%s` %s" % (order_by, order_by_method)
return query
@@ -667,7 +667,7 @@ class SDBManager(object):
value = self.encode_value(property, value)
if value == []:
value = None
- if value == None:
+ if value is None:
del_attrs.append(property.name)
continue
attrs[property.name] = value
diff --git a/boto/sdb/db/manager/xmlmanager.py b/boto/sdb/db/manager/xmlmanager.py
index cfaa6280..2cfcd132 100644
--- a/boto/sdb/db/manager/xmlmanager.py
+++ b/boto/sdb/db/manager/xmlmanager.py
@@ -147,7 +147,7 @@ class XMLConverter(object):
def encode_reference(self, value):
if isinstance(value, basestring):
return value
- if value == None:
+ if value is None:
return ''
else:
val_node = self.manager.doc.createElement("object")
@@ -296,7 +296,7 @@ class XMLManager(object):
prop = obj.find_property(prop_name)
value = self.decode_value(prop, prop_node)
value = prop.make_value_from_datastore(value)
- if value != None:
+ if value is not None:
try:
setattr(obj, prop.name, value)
except:
@@ -321,7 +321,7 @@ class XMLManager(object):
prop = cls.find_property(prop_name)
value = self.decode_value(prop, prop_node)
value = prop.make_value_from_datastore(value)
- if value != None:
+ if value is not None:
props[prop.name] = value
return (cls, props, id)
diff --git a/boto/sdb/db/model.py b/boto/sdb/db/model.py
index 3d9a6b5a..9e589d52 100644
--- a/boto/sdb/db/model.py
+++ b/boto/sdb/db/model.py
@@ -270,7 +270,7 @@ class Model(object):
return cls
for sc in cls.__sub_classes__:
r = sc.find_subclass(name)
- if r != None:
+ if r is not None:
return r
class Expando(Model):
diff --git a/boto/sdb/db/property.py b/boto/sdb/db/property.py
index 35bce3d1..7488c2c3 100644
--- a/boto/sdb/db/property.py
+++ b/boto/sdb/db/property.py
@@ -85,7 +85,7 @@ class Property(object):
return self.default
def validate(self, value):
- if self.required and value == None:
+ if self.required and value is None:
raise ValueError('%s is a required property' % self.name)
if self.choices and value and not value in self.choices:
raise ValueError('%s not a valid choice for %s.%s' % (value, self.model_class.__name__, self.name))
@@ -111,7 +111,7 @@ class Property(object):
def validate_string(value):
- if value == None:
+ if value is None:
return
elif isinstance(value, basestring):
if len(value) > 1024:
@@ -328,7 +328,7 @@ class IntegerProperty(Property):
return value is None
def __set__(self, obj, value):
- if value == "" or value == None:
+ if value == "" or value is None:
value = 0
return super(IntegerProperty, self).__set__(obj, value)
@@ -408,7 +408,7 @@ class DateTimeProperty(Property):
return super(DateTimeProperty, self).default_value()
def validate(self, value):
- if value == None:
+ if value is None:
return
if isinstance(value, datetime.date):
return value
@@ -441,7 +441,7 @@ class DateProperty(Property):
def validate(self, value):
value = super(DateProperty, self).validate(value)
- if value == None:
+ if value is None:
return
if not isinstance(value, self.data_type):
raise TypeError('Validation Error, expecting %s, got %s' % (self.data_type, type(value)))
@@ -501,7 +501,7 @@ class ReferenceProperty(Property):
def __set__(self, obj, value):
"""Don't allow this object to be associated to itself
This causes bad things to happen"""
- if value != None and (obj.id == value or (hasattr(value, "id") and obj.id == value.id)):
+ if value is not None and (obj.id == value or (hasattr(value, "id") and obj.id == value.id)):
raise ValueError("Can not associate an object with itself!")
return super(ReferenceProperty, self).__set__(obj, value)
@@ -533,7 +533,7 @@ class ReferenceProperty(Property):
def validate(self, value):
if self.validator:
self.validator(value)
- if self.required and value == None:
+ if self.required and value is None:
raise ValueError('%s is a required property' % self.name)
if value == self.default_value():
return
@@ -658,7 +658,7 @@ class ListProperty(Property):
item_type = self.item_type
if isinstance(value, item_type):
value = [value]
- elif value == None: # Override to allow them to set this to "None" to remove everything
+ elif value is None: # Override to allow them to set this to "None" to remove everything
value = []
return super(ListProperty, self).__set__(obj, value)
diff --git a/boto/sdb/db/query.py b/boto/sdb/db/query.py
index 31b71aa0..bd1a41dd 100644
--- a/boto/sdb/db/query.py
+++ b/boto/sdb/db/query.py
@@ -39,7 +39,7 @@ class Query(object):
return iter(self.manager.query(self))
def next(self):
- if self.__local_iter__ == None:
+ if self.__local_iter__ is None:
self.__local_iter__ = self.__iter__()
return self.__local_iter__.next()
diff --git a/boto/sdb/db/sequence.py b/boto/sdb/db/sequence.py
index fed7da3f..70540c52 100644
--- a/boto/sdb/db/sequence.py
+++ b/boto/sdb/db/sequence.py
@@ -59,7 +59,7 @@ class SequenceGenerator(object):
# If they pass us in a string that's not at least
# the lenght of our sequence, then return the
# first element in our sequence
- if val == None or len(val) < self.sequence_length:
+ if val is None or len(val) < self.sequence_length:
return self.sequence_string[0]
last_value = val[-self.sequence_length:]
if (not self.rollover) and (last_value == self.last_item):
@@ -79,21 +79,21 @@ class SequenceGenerator(object):
# Simple Sequence Functions
#
def increment_by_one(cv=None, lv=None):
- if cv == None:
+ if cv is None:
return 0
return cv + 1
def double(cv=None, lv=None):
- if cv == None:
+ if cv is None:
return 1
return cv * 2
def fib(cv=1, lv=0):
"""The fibonacci sequence, this incrementer uses the
last value"""
- if cv == None:
+ if cv is None:
cv = 1
- if lv == None:
+ if lv is None:
lv = 0
return cv + lv
@@ -136,10 +136,10 @@ class Sequence(object):
self.last_value = None
self.domain_name = domain_name
self.id = id
- if init_val == None:
+ if init_val is None:
init_val = fnc(init_val)
- if self.id == None:
+ if self.id is None:
import uuid
self.id = str(uuid.uuid4())
@@ -162,7 +162,7 @@ class Sequence(object):
expected_value = []
new_val = {}
new_val['timestamp'] = now
- if self._value != None:
+ if self._value is not None:
new_val['last_value'] = self._value
expected_value = ['current_value', str(self._value)]
new_val['current_value'] = val
@@ -184,7 +184,7 @@ class Sequence(object):
self.timestamp = val['timestamp']
if 'current_value' in val:
self._value = self.item_type(val['current_value'])
- if "last_value" in val and val['last_value'] != None:
+ if "last_value" in val and val['last_value'] is not None:
self.last_value = self.item_type(val['last_value'])
return self._value
diff --git a/boto/sdb/item.py b/boto/sdb/item.py
index 999c7f0b..a742d80c 100644
--- a/boto/sdb/item.py
+++ b/boto/sdb/item.py
@@ -123,7 +123,7 @@ class Item(dict):
if replace:
del_attrs = []
for name in self:
- if self[name] == None:
+ if self[name] is None:
del_attrs.append(name)
if len(del_attrs) > 0:
self.domain.delete_attributes(self.name, del_attrs)
diff --git a/boto/sdb/queryresultset.py b/boto/sdb/queryresultset.py
index f943949f..9ff0ae2f 100644
--- a/boto/sdb/queryresultset.py
+++ b/boto/sdb/queryresultset.py
@@ -33,7 +33,7 @@ def query_lister(domain, query='', max_items=None, attr_names=None):
yield item
num_results += 1
next_token = rs.next_token
- more_results = next_token != None
+ more_results = next_token is not None
class QueryResultSet(object):
@@ -59,7 +59,7 @@ def select_lister(domain, query='', max_items=None):
yield item
num_results += 1
next_token = rs.next_token
- more_results = next_token != None
+ more_results = next_token is not None
class SelectResultSet(object):
@@ -86,7 +86,7 @@ class SelectResultSet(object):
self.next_token = rs.next_token
if self.max_items and num_results >= self.max_items:
raise StopIteration
- more_results = self.next_token != None
+ more_results = self.next_token is not None
def next(self):
return self.__iter__().next()
diff --git a/boto/services/message.py b/boto/services/message.py
index 79f6d19f..31f37019 100644
--- a/boto/services/message.py
+++ b/boto/services/message.py
@@ -34,7 +34,7 @@ class ServiceMessage(MHMessage):
self['OriginalLocation'] = t[0]
self['OriginalFileName'] = t[1]
mime_type = mimetypes.guess_type(t[1])[0]
- if mime_type == None:
+ if mime_type is None:
mime_type = 'application/octet-stream'
self['Content-Type'] = mime_type
s = os.stat(key.path)
diff --git a/boto/sqs/message.py b/boto/sqs/message.py
index a0364712..ce7976c1 100644
--- a/boto/sqs/message.py
+++ b/boto/sqs/message.py
@@ -177,7 +177,7 @@ class MHMessage(Message):
"""
def __init__(self, queue=None, body=None, xml_attrs=None):
- if body == None or body == '':
+ if body is None or body == '':
body = {}
super(MHMessage, self).__init__(queue, body)
diff --git a/boto/support/layer1.py b/boto/support/layer1.py
index 3b52efd9..c4e18da0 100644
--- a/boto/support/layer1.py
+++ b/boto/support/layer1.py
@@ -42,24 +42,24 @@ class SupportConnection(AWSQueryConnection):
format.
The AWS Support service also exposes a set of `Trusted Advisor`_
- features. You can retrieve a list of checks you can run on your
- resources, specify checks to run and refresh, and check the status
- of checks you have submitted.
+ features. You can retrieve a list of checks and their
+ descriptions, get check results, specify checks to refresh, and
+ get the refresh status of checks.
The following list describes the AWS Support case management
actions:
+ **Service names, issue categories, and available severity
- levels. **The actions `DescribeServices`_ and
- `DescribeSeverityLevels`_ enable you to obtain AWS service names,
- service codes, service categories, and problem severity levels.
- You use these values when you call the `CreateCase`_ action.
- + **Case Creation, case details, and case resolution**. The
- actions `CreateCase`_, `DescribeCases`_, and `ResolveCase`_ enable
- you to create AWS Support cases, retrieve them, and resolve them.
- + **Case communication**. The actions `DescribeCommunications`_
- and `AddCommunicationToCase`_ enable you to retrieve and add
+ levels. **The actions DescribeServices and DescribeSeverityLevels
+ enable you to obtain AWS service names, service codes, service
+ categories, and problem severity levels. You use these values when
+ you call the CreateCase action.
+ + **Case creation, case details, and case resolution.** The
+ actions CreateCase, DescribeCases, and ResolveCase enable you to
+ create AWS Support cases, retrieve them, and resolve them.
+ + **Case communication.** The actions DescribeCommunications and
+ AddCommunicationToCase enable you to retrieve and add
communication to AWS Support cases.
@@ -67,26 +67,26 @@ class SupportConnection(AWSQueryConnection):
Support service for Trusted Advisor:
- + `DescribeTrustedAdvisorChecks`_ returns the list of checks that
- you can run against your AWS resources.
+ + DescribeTrustedAdvisorChecks returns the list of checks that run
+ against your AWS resources.
+ Using the CheckId for a specific check returned by
DescribeTrustedAdvisorChecks, you can call
- `DescribeTrustedAdvisorCheckResult`_ and obtain a new result for the check you specified.
- + Using `DescribeTrustedAdvisorCheckSummaries`_, you can get
- summaries for a set of Trusted Advisor checks.
- + `RefreshTrustedAdvisorCheck`_ enables you to request that
- Trusted Advisor run the check again.
- + `DescribeTrustedAdvisorCheckRefreshStatuses`_ gets statuses on
- the checks you are running.
+ DescribeTrustedAdvisorCheckResult to obtain the results for the
+ check you specified.
+ + DescribeTrustedAdvisorCheckSummaries returns summarized results
+ for one or more Trusted Advisor checks.
+ + RefreshTrustedAdvisorCheck requests that Trusted Advisor rerun a
+ specified check.
+ + DescribeTrustedAdvisorCheckRefreshStatuses reports the refresh
+ status of one or more checks.
- For authentication of requests, the AWS Support uses `Signature
+ For authentication of requests, AWS Support uses `Signature
Version 4 Signing Process`_.
- See the AWS Support `Developer Guide`_ for information about how
- to use this service to manage create and manage your support
- cases, and how to call Trusted Advisor for results of checks on
- your resources.
+ See the AWS Support `User Guide`_ for information about how to use
+ this service to create and manage your support cases, and how to
+ call Trusted Advisor for results of checks on your resources.
"""
APIVersion = "2013-04-15"
DefaultRegionName = "us-east-1"
@@ -117,32 +117,30 @@ class SupportConnection(AWSQueryConnection):
def add_communication_to_case(self, communication_body, case_id=None,
cc_email_addresses=None):
"""
- This action adds additional customer communication to an AWS
- Support case. You use the CaseId value to identify the case to
- which you want to add communication. You can list a set of
- email addresses to copy on the communication using the
- CcEmailAddresses value. The CommunicationBody value contains
- the text of the communication.
-
- This action's response indicates the success or failure of the
- request.
+ Adds additional customer communication to an AWS Support case.
+ You use the `CaseId` value to identify the case to add
+ communication to. You can list a set of email addresses to
+ copy on the communication using the `CcEmailAddresses` value.
+ The `CommunicationBody` value contains the text of the
+ communication.
+
+ The response indicates the success or failure of the request.
- This action implements a subset of the behavior on the AWS
+ This operation implements a subset of the behavior on the AWS
Support `Your Support Cases`_ web form.
:type case_id: string
- :param case_id: String that indicates the AWS Support caseID requested
- or returned in the call. The caseID is an alphanumeric string
- formatted as shown in this example CaseId:
- case-12345678910-2013-c4c1d2bf33c5cf47
+ :param case_id: The AWS Support case ID requested or returned in the
+ call. The case ID is an alphanumeric string formatted as shown in
+ this example: case-12345678910-2013-c4c1d2bf33c5cf47
:type communication_body: string
- :param communication_body: Represents the body of an email
- communication added to the support case.
+ :param communication_body: The body of an email communication to add to
+ the support case.
:type cc_email_addresses: list
- :param cc_email_addresses: Represents any email addresses contained in
- the CC line of an email added to the support case.
+ :param cc_email_addresses: The email addresses in the CC line of an
+ email to be added to the support case.
"""
params = {'communicationBody': communication_body, }
@@ -157,89 +155,89 @@ class SupportConnection(AWSQueryConnection):
severity_code=None, category_code=None,
cc_email_addresses=None, language=None, issue_type=None):
"""
- Creates a new case in the AWS Support Center. This action is
- modeled on the behavior of the AWS Support Center `Open a new
- case`_ page. Its parameters require you to specify the
+ Creates a new case in the AWS Support Center. This operation
+ is modeled on the behavior of the AWS Support Center `Open a
+ new case`_ page. Its parameters require you to specify the
following information:
- #. **ServiceCode.** Represents a code for an AWS service. You
- obtain the ServiceCode by calling `DescribeServices`_.
- #. **CategoryCode**. Represents a category for the service
- defined for the ServiceCode value. You also obtain the
- cateogory code for a service by calling `DescribeServices`_.
- Each AWS service defines its own set of category codes.
- #. **SeverityCode**. Represents a value that specifies the
- urgency of the case, and the time interval in which your
- service level agreement specifies a response from AWS Support.
- You obtain the SeverityCode by calling
- `DescribeSeverityLevels`_.
- #. **Subject**. Represents the **Subject** field on the AWS
+ #. **ServiceCode.** The code for an AWS service. You obtain
+ the `ServiceCode` by calling DescribeServices.
+ #. **CategoryCode.** The category for the service defined for
+ the `ServiceCode` value. You also obtain the category code for
+ a service by calling DescribeServices. Each AWS service
+ defines its own set of category codes.
+ #. **SeverityCode.** A value that indicates the urgency of the
+ case, which in turn determines the response time according to
+ your service level agreement with AWS Support. You obtain the
+ SeverityCode by calling DescribeSeverityLevels.
+ #. **Subject.** The **Subject** field on the AWS Support
+ Center `Open a new case`_ page.
+ #. **CommunicationBody.** The **Description** field on the AWS
Support Center `Open a new case`_ page.
- #. **CommunicationBody**. Represents the **Description** field
- on the AWS Support Center `Open a new case`_ page.
- #. **Language**. Specifies the human language in which AWS
- Support handles the case. The API currently supports English
- and Japanese.
- #. **CcEmailAddresses**. Represents the AWS Support Center
- **CC** field on the `Open a new case`_ page. You can list
- email addresses to be copied on any correspondence about the
- case. The account that opens the case is already identified by
- passing the AWS Credentials in the HTTP POST method or in a
- method or function call from one of the programming languages
- supported by an `AWS SDK`_.
- #. **IssueType**. Indicates the type of issue for the case.
- You can specify either "customer-service" or "technical." If
- you do not indicate a value, this parameter defaults to
- "technical."
+ #. **Language.** The human language in which AWS Support
+ handles the case. English and Japanese are currently
+ supported.
+ #. **CcEmailAddresses.** The AWS Support Center **CC** field
+ on the `Open a new case`_ page. You can list email addresses
+ to be copied on any correspondence about the case. The account
+ that opens the case is already identified by passing the AWS
+ Credentials in the HTTP POST method or in a method or function
+ call from one of the programming languages supported by an
+ `AWS SDK`_.
+ #. **IssueType.** The type of issue for the case. You can
+ specify either "customer-service" or "technical." If you do
+ not indicate a value, the default is "technical."
+
The AWS Support API does not currently support the ability to
add attachments to cases. You can, however, call
- `AddCommunicationToCase`_ to add information to an open case.
+ AddCommunicationToCase to add information to an open case.
+
- A successful `CreateCase`_ request returns an AWS Support case
- number. Case numbers are used by `DescribeCases`_ request to
- retrieve existing AWS Support support cases.
+ A successful CreateCase request returns an AWS Support case
+ number. Case numbers are used by the DescribeCases action to
+ retrieve existing AWS Support cases.
:type subject: string
- :param subject: Title of the AWS Support case.
+ :param subject: The title of the AWS Support case.
:type service_code: string
- :param service_code: Code for the AWS service returned by the call to
- `DescribeServices`_.
+ :param service_code: The code for the AWS service returned by the call
+ to DescribeServices.
:type severity_code: string
:param severity_code:
- Code for the severity level returned by the call to
- `DescribeSeverityLevels`_.
+ The code for the severity level returned by the call to
+ DescribeSeverityLevels.
+
The availability of severity levels depends on each customer's support
subscription. In other words, your subscription may not necessarily
require the urgent level of response time.
:type category_code: string
- :param category_code: Specifies the category of problem for the AWS
- Support case.
+ :param category_code: The category of problem for the AWS Support case.
:type communication_body: string
- :param communication_body: Parameter that represents the communication
- body text when you create an AWS Support case by calling
- `CreateCase`_.
+ :param communication_body: The communication body text when you create
+ an AWS Support case by calling CreateCase.
:type cc_email_addresses: list
- :param cc_email_addresses: List of email addresses that AWS Support
+ :param cc_email_addresses: A list of email addresses that AWS Support
copies on case correspondence.
:type language: string
- :param language: Specifies the ISO 639-1 code for the language in which
- AWS provides support. AWS Support currently supports English and
- Japanese, for which the codes are en and ja , respectively.
- Language parameters must be passed explicitly for operations that
- take them.
+ :param language: The ISO 639-1 code for the language in which AWS
+ provides support. AWS Support currently supports English ("en") and
+ Japanese ("ja"). Language parameters must be passed explicitly for
+ operations that take them.
:type issue_type: string
- :param issue_type: Field passed as a parameter in a `CreateCase`_ call.
+ :param issue_type: The type of issue for the case. You can specify
+ either "customer-service" or "technical." If you do not indicate a
+ value, the default is "technical."
"""
params = {
@@ -266,51 +264,51 @@ class SupportConnection(AWSQueryConnection):
include_resolved_cases=None, next_token=None,
max_results=None, language=None):
"""
- This action returns a list of cases that you specify by
- passing one or more CaseIds. In addition, you can filter the
- cases by date by setting values for the AfterTime and
- BeforeTime request parameters.
+ Returns a list of cases that you specify by passing one or
+ more case IDs. In addition, you can filter the cases by date
+ by setting values for the `AfterTime` and `BeforeTime` request
+ parameters.
+
The response returns the following in JSON format:
- #. One or more `CaseDetails`_ data types.
- #. One or more NextToken objects, strings that specifies where
- to paginate the returned records represented by CaseDetails .
+
+ #. One or more CaseDetails data types.
+ #. One or more `NextToken` values, which specify where to
+ paginate the returned records represented by the `CaseDetails`
+ objects.
:type case_id_list: list
- :param case_id_list: A list of Strings comprising ID numbers for
- support cases you want returned. The maximum number of cases is
- 100.
+ :param case_id_list: A list of ID numbers of the support cases you want
+ returned. The maximum number of cases is 100.
:type display_id: string
- :param display_id: String that corresponds to the ID value displayed
- for a case in the AWS Support Center user interface.
+ :param display_id: The ID displayed for a case in the AWS Support
+ Center user interface.
:type after_time: string
- :param after_time: Start date for a filtered date search on support
+ :param after_time: The start date for a filtered date search on support
case communications.
:type before_time: string
- :param before_time: End date for a filtered date search on support case
- communications.
+ :param before_time: The end date for a filtered date search on support
+ case communications.
:type include_resolved_cases: boolean
- :param include_resolved_cases: Boolean that indicates whether or not
- resolved support cases should be listed in the `DescribeCases`_
- search.
+ :param include_resolved_cases: Specifies whether resolved support cases
+ should be included in the DescribeCases results.
:type next_token: string
- :param next_token: Defines a resumption point for pagination.
+ :param next_token: A resumption point for pagination.
:type max_results: integer
- :param max_results: Integer that sets the maximum number of results to
- return before paginating.
+ :param max_results: The maximum number of results to return before
+ paginating.
:type language: string
- :param language: Specifies the ISO 639-1 code for the language in which
- AWS provides support. AWS Support currently supports English and
- Japanese, for which the codes are en and ja , respectively.
- Language parameters must be passed explicitly for operations that
- take them.
+ :param language: The ISO 639-1 code for the language in which AWS
+ provides support. AWS Support currently supports English ("en") and
+ Japanese ("ja"). Language parameters must be passed explicitly for
+ operations that take them.
"""
params = {}
@@ -337,36 +335,35 @@ class SupportConnection(AWSQueryConnection):
after_time=None, next_token=None,
max_results=None):
"""
- This action returns communications regarding the support case.
- You can use the AfterTime and BeforeTime parameters to filter
- by date. The CaseId parameter enables you to identify a
- specific case by its CaseId number.
+ Returns communications regarding the support case. You can use
+ the `AfterTime` and `BeforeTime` parameters to filter by date.
+ The `CaseId` parameter enables you to identify a specific case
+ by its `CaseId` value.
- The MaxResults and NextToken parameters enable you to control
- the pagination of the result set. Set MaxResults to the number
- of cases you want displayed on each page, and use NextToken to
- specify the resumption of pagination.
+ The `MaxResults` and `NextToken` parameters enable you to
+ control the pagination of the result set. Set `MaxResults` to
+ the number of cases you want displayed on each page, and use
+ `NextToken` to specify the resumption of pagination.
:type case_id: string
- :param case_id: String that indicates the AWS Support caseID requested
- or returned in the call. The caseID is an alphanumeric string
- formatted as shown in this example CaseId:
- case-12345678910-2013-c4c1d2bf33c5cf47
+ :param case_id: The AWS Support case ID requested or returned in the
+ call. The case ID is an alphanumeric string formatted as shown in
+        this example: case-12345678910-2013-c4c1d2bf33c5cf47
:type before_time: string
- :param before_time: End date for a filtered date search on support case
- communications.
+ :param before_time: The end date for a filtered date search on support
+ case communications.
:type after_time: string
- :param after_time: Start date for a filtered date search on support
+ :param after_time: The start date for a filtered date search on support
case communications.
:type next_token: string
- :param next_token: Defines a resumption point for pagination.
+ :param next_token: A resumption point for pagination.
:type max_results: integer
- :param max_results: Integer that sets the maximum number of results to
- return before paginating.
+ :param max_results: The maximum number of results to return before
+ paginating.
"""
params = {'caseId': case_id, }
@@ -385,7 +382,7 @@ class SupportConnection(AWSQueryConnection):
"""
Returns the current list of AWS services and a list of service
categories that applies to each one. You then use service
- names and categories in your `CreateCase`_ requests. Each AWS
+ names and categories in your CreateCase requests. Each AWS
service has its own set of categories.
The service codes and category codes correspond to the values
@@ -399,15 +396,14 @@ class SupportConnection(AWSQueryConnection):
category codes.
:type service_code_list: list
- :param service_code_list: List in JSON format of service codes
+ :param service_code_list: A JSON-formatted list of service codes
available for AWS services.
:type language: string
- :param language: Specifies the ISO 639-1 code for the language in which
- AWS provides support. AWS Support currently supports English and
- Japanese, for which the codes are en and ja , respectively.
- Language parameters must be passed explicitly for operations that
- take them.
+ :param language: The ISO 639-1 code for the language in which AWS
+ provides support. AWS Support currently supports English ("en") and
+ Japanese ("ja"). Language parameters must be passed explicitly for
+ operations that take them.
"""
params = {}
@@ -420,17 +416,16 @@ class SupportConnection(AWSQueryConnection):
def describe_severity_levels(self, language=None):
"""
- This action returns the list of severity levels that you can
- assign to an AWS Support case. The severity level for a case
- is also a field in the `CaseDetails`_ data type included in
- any `CreateCase`_ request.
+ Returns the list of severity levels that you can assign to an
+ AWS Support case. The severity level for a case is also a
+ field in the CaseDetails data type included in any CreateCase
+ request.
:type language: string
- :param language: Specifies the ISO 639-1 code for the language in which
- AWS provides support. AWS Support currently supports English and
- Japanese, for which the codes are en and ja , respectively.
- Language parameters must be passed explicitly for operations that
- take them.
+ :param language: The ISO 639-1 code for the language in which AWS
+ provides support. AWS Support currently supports English ("en") and
+ Japanese ("ja"). Language parameters must be passed explicitly for
+ operations that take them.
"""
params = {}
@@ -441,13 +436,12 @@ class SupportConnection(AWSQueryConnection):
def describe_trusted_advisor_check_refresh_statuses(self, check_ids):
"""
- Returns the status of all refresh requests Trusted Advisor
- checks called using `RefreshTrustedAdvisorCheck`_.
+ Returns the refresh status of the Trusted Advisor checks that
+ have the specified check IDs. Check IDs can be obtained by
+ calling DescribeTrustedAdvisorChecks.
:type check_ids: list
- :param check_ids: List of the CheckId values for the Trusted Advisor
- checks for which you want to refresh the status. You obtain the
- CheckId values by calling `DescribeTrustedAdvisorChecks`_.
+ :param check_ids: The IDs of the Trusted Advisor checks.
"""
params = {'checkIds': check_ids, }
@@ -456,41 +450,35 @@ class SupportConnection(AWSQueryConnection):
def describe_trusted_advisor_check_result(self, check_id, language=None):
"""
- This action responds with the results of a Trusted Advisor
- check. Once you have obtained the list of available Trusted
- Advisor checks by calling `DescribeTrustedAdvisorChecks`_, you
- specify the CheckId for the check you want to retrieve from
- AWS Support.
+ Returns the results of the Trusted Advisor check that has the
+ specified check ID. Check IDs can be obtained by calling
+ DescribeTrustedAdvisorChecks.
- The response for this action contains a JSON-formatted
- `TrustedAdvisorCheckResult`_ object
- , which is a container for the following three objects:
+ The response contains a TrustedAdvisorCheckResult object,
+ which contains these three objects:
+ + TrustedAdvisorCategorySpecificSummary
+ + TrustedAdvisorResourceDetail
+ + TrustedAdvisorResourcesSummary
- #. `TrustedAdvisorCategorySpecificSummary`_
- #. `TrustedAdvisorResourceDetail`_
- #. `TrustedAdvisorResourcesSummary`_
+ In addition, the response contains these fields:
- In addition, the response contains the following fields:
-
- #. **Status**. Overall status of the check.
- #. **Timestamp**. Time at which Trusted Advisor last ran the
- check.
- #. **CheckId**. Unique identifier for the specific check
- returned by the request.
+ + **Status.** The alert status of the check: "ok" (green),
+ "warning" (yellow), "error" (red), or "not_available".
+ + **Timestamp.** The time of the last refresh of the check.
+ + **CheckId.** The unique identifier for the check.
:type check_id: string
- :param check_id:
+ :param check_id: The unique identifier for the Trusted Advisor check.
:type language: string
- :param language: Specifies the ISO 639-1 code for the language in which
- AWS provides support. AWS Support currently supports English and
- Japanese, for which the codes are en and ja , respectively.
- Language parameters must be passed explicitly for operations that
- take them.
+ :param language: The ISO 639-1 code for the language in which AWS
+ provides support. AWS Support currently supports English ("en") and
+ Japanese ("ja"). Language parameters must be passed explicitly for
+ operations that take them.
"""
params = {'checkId': check_id, }
@@ -501,17 +489,15 @@ class SupportConnection(AWSQueryConnection):
def describe_trusted_advisor_check_summaries(self, check_ids):
"""
- This action enables you to get the latest summaries for
- Trusted Advisor checks that you specify in your request. You
- submit the list of Trusted Advisor checks for which you want
- summaries. You obtain these CheckIds by submitting a
- `DescribeTrustedAdvisorChecks`_ request.
+ Returns the summaries of the results of the Trusted Advisor
+ checks that have the specified check IDs. Check IDs can be
+ obtained by calling DescribeTrustedAdvisorChecks.
- The response body contains an array of
- `TrustedAdvisorCheckSummary`_ objects.
+ The response contains an array of TrustedAdvisorCheckSummary
+ objects.
:type check_ids: list
- :param check_ids: Unique identifier for a Trusted Advisor check.
+ :param check_ids: The IDs of the Trusted Advisor checks.
"""
params = {'checkIds': check_ids, }
@@ -520,18 +506,17 @@ class SupportConnection(AWSQueryConnection):
def describe_trusted_advisor_checks(self, language):
"""
- This action enables you to get a list of the available Trusted
- Advisor checks. You must specify a language code. English
- ("en") and Japanese ("jp") are currently supported. The
- response contains a list of `TrustedAdvisorCheckDescription`_
- objects.
+ Returns information about all available Trusted Advisor
+ checks, including name, ID, category, description, and
+ metadata. You must specify a language code; English ("en") and
+ Japanese ("ja") are currently supported. The response contains
+ a TrustedAdvisorCheckDescription for each check.
:type language: string
- :param language: Specifies the ISO 639-1 code for the language in which
- AWS provides support. AWS Support currently supports English and
- Japanese, for which the codes are en and ja , respectively.
- Language parameters must be passed explicitly for operations that
- take them.
+ :param language: The ISO 639-1 code for the language in which AWS
+ provides support. AWS Support currently supports English ("en") and
+ Japanese ("ja"). Language parameters must be passed explicitly for
+ operations that take them.
"""
params = {'language': language, }
@@ -540,14 +525,22 @@ class SupportConnection(AWSQueryConnection):
def refresh_trusted_advisor_check(self, check_id):
"""
- This action enables you to query the service to request a
- refresh for a specific Trusted Advisor check. Your request
- body contains a CheckId for which you are querying. The
- response body contains a `RefreshTrustedAdvisorCheckResult`_
- object containing Status and TimeUntilNextRefresh fields.
+ Requests a refresh of the Trusted Advisor check that has the
+ specified check ID. Check IDs can be obtained by calling
+ DescribeTrustedAdvisorChecks.
+
+ The response contains a RefreshTrustedAdvisorCheckResult
+ object, which contains these fields:
+
+
+ + **Status.** The refresh status of the check: "none",
+ "enqueued", "processing", "success", or "abandoned".
+ + **MillisUntilNextRefreshable.** The amount of time, in
+ milliseconds, until the check is eligible for refresh.
+ + **CheckId.** The unique identifier for the check.
:type check_id: string
- :param check_id:
+ :param check_id: The unique identifier for the Trusted Advisor check.
"""
params = {'checkId': check_id, }
@@ -556,15 +549,14 @@ class SupportConnection(AWSQueryConnection):
def resolve_case(self, case_id=None):
"""
- Takes a CaseId and returns the initial state of the case along
- with the state of the case after the call to `ResolveCase`_
+ Takes a `CaseId` and returns the initial state of the case
+ along with the state of the case after the call to ResolveCase
completed.
:type case_id: string
- :param case_id: String that indicates the AWS Support caseID requested
- or returned in the call. The caseID is an alphanumeric string
- formatted as shown in this example CaseId:
- case-12345678910-2013-c4c1d2bf33c5cf47
+ :param case_id: The AWS Support case ID requested or returned in the
+ call. The case ID is an alphanumeric string formatted as shown in
+        this example: case-12345678910-2013-c4c1d2bf33c5cf47
"""
params = {}
diff --git a/docs/source/index.rst b/docs/source/index.rst
index be44098b..b8d842f7 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -116,6 +116,7 @@ Release Notes
.. toctree::
:titlesonly:
+ releasenotes/v2.23.0
releasenotes/v2.22.1
releasenotes/v2.22.0
releasenotes/v2.21.2
diff --git a/docs/source/ref/autoscale.rst b/docs/source/ref/autoscale.rst
index f3728067..aed3d526 100644
--- a/docs/source/ref/autoscale.rst
+++ b/docs/source/ref/autoscale.rst
@@ -60,3 +60,11 @@ boto.ec2.autoscale.scheduled
.. automodule:: boto.ec2.autoscale.scheduled
:members:
:undoc-members:
+
+
+boto.ec2.autoscale.tag
+----------------------------
+
+.. automodule:: boto.ec2.autoscale.tag
+ :members:
+ :undoc-members:
diff --git a/docs/source/releasenotes/v2.23.0.rst b/docs/source/releasenotes/v2.23.0.rst
new file mode 100644
index 00000000..f30473d5
--- /dev/null
+++ b/docs/source/releasenotes/v2.23.0.rst
@@ -0,0 +1,49 @@
+boto v2.23.0
+============
+
+:date: 2014/01/10
+
+This release adds new pagination & date range filtering to Amazon Glacier, more
+support for selecting specific attributes within Amazon DynamoDB, security
+tokens from environment/config variables & many bugfixes/small improvements.
+
+
+Features
+--------
+
+* Added pagination & date range filtering to Glacier inventory options.
+ (:issue:`1977`, :sha:`402a305`)
+* Added the ability to select the specific attributes to fetch in the ``scan``
+ & ``get_item`` calls within DynamoDB v2. (:issue:`1945`, :issue:`1972`,
+ :sha:`f6451fb` & :sha:`96cd413`)
+* Allow getting a security token from either an environment or configuration
+  variable. (TODO: add the missing issue/sha references)
+* Ported the ``has_item`` call from the original DynamoDB (v1) module to
+ DynamoDB v2. (:issue:`1973`, :issue:`1822`, :sha:`f96e9e3`)
+* Added an ``associate_address_object`` method to EC2. (:issue:`1967`,
+ :issue:`1874`, :issue:`1893`, :sha:`dd6180c`)
+* Added a ``download_to_fileobj`` method to Glacier, similar to the S3 call
+ of the same name. (:issue:`1960`, :issue:`1941`, :sha:`67266e5`)
+* Added support for arbitrary ``dict`` inputs to MWS. (:issue:`1966`,
+ :sha:`46f193f`)
+
+
+Bugfixes
+--------
+
+* Made the usage of ``is/is not`` more consistent. (:issue:`1930`,
+ :sha:`8597c54`)
+* Imported ``with_statement`` for old Python versions (:issue:`1975`,
+ :sha:`a53a574`)
+* Changed the ``Binary`` data object within DynamoDB to throw an error if an
+ invalid data type is used. (:issue:`1963`, :issue:`1956`, :sha:`e5d30c8`)
+* Altered the integration tests to avoid connection errors to certain regions.
+ (:sha:`2555b8a`)
+* Changed the GCS resumable upload handler to save tracker files with protection
+ 0600. (:sha:`7cb344c`)
+* Documentation:
+
+ * Clarified documentation around the ``list_metrics`` call in
+ CloudFormation. (:issue:`1962`, :sha:`c996a72`)
+ * Added ``Tag`` to the Autoscale API docs. (:issue:`1964`, :sha:`31118d9`)
+ * Updated the AWS Support documentation to the latest. (:sha:`29f9264`)
diff --git a/scripts/git-release-notes.py b/scripts/git-release-notes.py
index 5e4c3acb..3079f845 100755
--- a/scripts/git-release-notes.py
+++ b/scripts/git-release-notes.py
@@ -51,8 +51,11 @@ removals = [
changes = ''
for commit, message in commit_list:
append = []
+ issues = set()
for issue in ISSUE.findall(message):
- append.append(':issue:`{issue}`'.format(issue=issue))
+ if issue not in issues:
+ append.append(':issue:`{issue}`'.format(issue=issue))
+ issues.add(issue)
append.append(':sha:`{commit}`'.format(commit=commit))
append = ' (' + ', '.join(append) + ')'
diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py
index 310043f2..b571d9a0 100644
--- a/tests/integration/__init__.py
+++ b/tests/integration/__init__.py
@@ -42,21 +42,21 @@ class ServiceCertVerificationTest(object):
self.assertTrue(len(self.regions) > 0)
for region in self.regions:
+ special_access_required = False
+
+ for snippet in ('gov', 'cn-'):
+ if snippet in region.name:
+ special_access_required = True
+ break
+
try:
c = region.connect()
self.sample_service_call(c)
- except (socket.gaierror, httplib.BadStatusLine):
+ except:
# This is bad (because the SSL cert failed). Re-raise the
# exception.
- raise
- except:
- if 'gov' in region.name:
- # Ignore it. GovCloud accounts require special permission
- # to use.
- continue
-
- # Anything else is bad. Re-raise.
- raise
+ if not special_access_required:
+ raise
def sample_service_call(self, conn):
"""
diff --git a/tests/integration/dynamodb2/test_highlevel.py b/tests/integration/dynamodb2/test_highlevel.py
index 580d306f..40ddd573 100644
--- a/tests/integration/dynamodb2/test_highlevel.py
+++ b/tests/integration/dynamodb2/test_highlevel.py
@@ -324,6 +324,16 @@ class DynamoDBv2Test(unittest.TestCase):
})
self.assertTrue(penny_created)
+ # Test attributes.
+ mau5 = users.get_item(
+ username='mau5',
+ friend_count=2,
+ attributes=['username', 'first_name']
+ )
+ self.assertEqual(mau5['username'], 'mau5')
+ self.assertEqual(mau5['first_name'], 'dead')
+ self.assertTrue('last_name' not in mau5)
+
def test_unprocessed_batch_writes(self):
# Create a very limited table w/ low throughput.
users = Table.create('slow_users', schema=[
diff --git a/tests/unit/dynamodb/test_types.py b/tests/unit/dynamodb/test_types.py
index 99ffa6d9..9ef0c986 100644
--- a/tests/unit/dynamodb/test_types.py
+++ b/tests/unit/dynamodb/test_types.py
@@ -78,5 +78,17 @@ class TestDynamizer(unittest.TestCase):
self.assertEqual(dynamizer.decode({'NS': ['1.1', '2.2', '3.3']}),
set([1.1, 2.2, 3.3]))
+
+class TestBinary(unittest.TestCase):
+ def test_bad_input(self):
+ with self.assertRaises(TypeError):
+ data = types.Binary(1)
+
+ def test_good_input(self):
+ data = types.Binary(chr(1))
+
+ self.assertEqual('\x01', str(data))
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/tests/unit/dynamodb2/test_table.py b/tests/unit/dynamodb2/test_table.py
index d6631ffe..b7b40b95 100644
--- a/tests/unit/dynamodb2/test_table.py
+++ b/tests/unit/dynamodb2/test_table.py
@@ -11,6 +11,7 @@ from boto.dynamodb2.results import ResultSet, BatchGetResultSet
from boto.dynamodb2.table import Table
from boto.dynamodb2.types import (STRING, NUMBER,
FILTER_OPERATORS, QUERY_OPERATORS)
+from boto.exception import JSONResponseError
FakeDynamoDBConnection = mock.create_autospec(DynamoDBConnection)
@@ -1497,7 +1498,46 @@ class TableTestCase(unittest.TestCase):
mock_get_item.assert_called_once_with('users', {
'username': {'S': 'johndoe'}
- }, consistent_read=False)
+ }, consistent_read=False, attributes_to_get=None)
+
+ with mock.patch.object(
+ self.users.connection,
+ 'get_item',
+ return_value=expected) as mock_get_item:
+ item = self.users.get_item(username='johndoe', attributes=[
+ 'username',
+ 'first_name',
+ ])
+
+ mock_get_item.assert_called_once_with('users', {
+ 'username': {'S': 'johndoe'}
+ }, consistent_read=False, attributes_to_get=['username', 'first_name'])
+
+ def test_has_item(self):
+ expected = {
+ 'Item': {
+ 'username': {'S': 'johndoe'},
+ 'first_name': {'S': 'John'},
+ 'last_name': {'S': 'Doe'},
+ 'date_joined': {'N': '1366056668'},
+ 'friend_count': {'N': '3'},
+ 'friends': {'SS': ['alice', 'bob', 'jane']},
+ }
+ }
+
+ with mock.patch.object(
+ self.users.connection,
+ 'get_item',
+ return_value=expected) as mock_get_item:
+ found = self.users.has_item(username='johndoe')
+ self.assertTrue(found)
+
+ with mock.patch.object(
+ self.users.connection,
+ 'get_item') as mock_get_item:
+ mock_get_item.side_effect = JSONResponseError("Nope.", None, None)
+ found = self.users.has_item(username='mrsmith')
+ self.assertFalse(found)
def test_lookup_hash(self):
"""Tests the "lookup" function with just a hash key"""
@@ -2162,6 +2202,7 @@ class TableTestCase(unittest.TestCase):
},
limit=2,
segment=None,
+ attributes_to_get=None,
total_segments=None
)
@@ -2204,6 +2245,7 @@ class TableTestCase(unittest.TestCase):
},
},
segment=None,
+ attributes_to_get=None,
total_segments=None
)
@@ -2356,6 +2398,40 @@ class TableTestCase(unittest.TestCase):
self.assertEqual(mock_scan_2.call_count, 1)
+
+ def test_scan_with_specific_attributes(self):
+ items_1 = {
+ 'results': [
+ Item(self.users, data={
+ 'username': 'johndoe',
+ }),
+ Item(self.users, data={
+ 'username': 'jane',
+ }),
+ ],
+ 'last_key': 'jane',
+ }
+
+ results = self.users.scan(attributes=['username'])
+ self.assertTrue(isinstance(results, ResultSet))
+ self.assertEqual(len(results._results), 0)
+ self.assertEqual(results.the_callable, self.users._scan)
+
+ with mock.patch.object(
+ results,
+ 'the_callable',
+ return_value=items_1) as mock_query:
+ res_1 = results.next()
+ # Now it should be populated.
+ self.assertEqual(len(results._results), 2)
+ self.assertEqual(res_1['username'], 'johndoe')
+ self.assertEqual(res_1.keys(), ['username'])
+ res_2 = results.next()
+ self.assertEqual(res_2['username'], 'jane')
+
+ self.assertEqual(mock_query.call_count, 1)
+
+
def test_count(self):
expected = {
"Table": {
diff --git a/tests/unit/ec2/test_connection.py b/tests/unit/ec2/test_connection.py
index 3c50e911..4ebece28 100644
--- a/tests/unit/ec2/test_connection.py
+++ b/tests/unit/ec2/test_connection.py
@@ -1355,5 +1355,50 @@ class TestSignatureAlteration(TestEC2ConnectionBase):
)
+class TestAssociateAddress(TestEC2ConnectionBase):
+ def default_body(self):
+ return """
+ <AssociateAddressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
+ <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
+ <return>true</return>
+ <associationId>eipassoc-fc5ca095</associationId>
+ </AssociateAddressResponse>
+ """
+
+ def test_associate_address(self):
+ self.set_http_response(status_code=200)
+ result = self.ec2.associate_address(instance_id='i-1234',
+ public_ip='192.0.2.1')
+ self.assertEqual(True, result)
+
+ def test_associate_address_object(self):
+ self.set_http_response(status_code=200)
+ result = self.ec2.associate_address_object(instance_id='i-1234',
+ public_ip='192.0.2.1')
+ self.assertEqual('eipassoc-fc5ca095', result.association_id)
+
+
+class TestAssociateAddressFail(TestEC2ConnectionBase):
+ def default_body(self):
+ return """
+ <Response>
+ <Errors>
+ <Error>
+ <Code>InvalidInstanceID.NotFound</Code>
+ <Message>The instance ID 'i-4cbc822a' does not exist</Message>
+ </Error>
+ </Errors>
+ <RequestID>ea966190-f9aa-478e-9ede-cb5432daacc0</RequestID>
+ <StatusCode>Failure</StatusCode>
+ </Response>
+ """
+
+ def test_associate_address(self):
+ self.set_http_response(status_code=200)
+ result = self.ec2.associate_address(instance_id='i-1234',
+ public_ip='192.0.2.1')
+ self.assertEqual(False, result)
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/tests/unit/glacier/test_job.py b/tests/unit/glacier/test_job.py
index 277fb853..5f77beb6 100644
--- a/tests/unit/glacier/test_job.py
+++ b/tests/unit/glacier/test_job.py
@@ -19,6 +19,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
+from StringIO import StringIO
from tests.unit import unittest
import mock
@@ -55,6 +56,27 @@ class TestJob(unittest.TestCase):
# With validate_checksum set to False, this call succeeds.
self.job.get_output(byte_range=(1, 1024), validate_checksum=False)
+ def test_download_to_fileobj(self):
+ http_response=mock.Mock(read=mock.Mock(return_value='xyz'))
+ response = GlacierResponse(http_response, None)
+ response['TreeHash'] = 'tree_hash'
+ self.api.get_job_output.return_value = response
+ fileobj = StringIO()
+ self.job.archive_size = 3
+ with mock.patch('boto.glacier.job.tree_hash_from_str') as t:
+ t.return_value = 'tree_hash'
+ self.job.download_to_fileobj(fileobj)
+ fileobj.seek(0)
+ self.assertEqual(http_response.read.return_value, fileobj.read())
+
+ def test_calc_num_chunks(self):
+ self.job.archive_size = 0
+ self.assertEqual(self.job._calc_num_chunks(self.job.DefaultPartSize), 0)
+ self.job.archive_size = 1
+ self.assertEqual(self.job._calc_num_chunks(self.job.DefaultPartSize), 1)
+ self.job.archive_size = self.job.DefaultPartSize + 1
+ self.assertEqual(self.job._calc_num_chunks(self.job.DefaultPartSize), 2)
+
if __name__ == '__main__':
unittest.main()
diff --git a/tests/unit/glacier/test_layer2.py b/tests/unit/glacier/test_layer2.py
index 854904e0..0c696880 100644
--- a/tests/unit/glacier/test_layer2.py
+++ b/tests/unit/glacier/test_layer2.py
@@ -33,6 +33,8 @@ from boto.glacier.vault import Job
from StringIO import StringIO
+from datetime import datetime
+
# Some fixture data from the Glacier docs
FIXTURE_VAULT = {
"CreationDate" : "2012-02-20T17:01:45.198Z",
@@ -208,6 +210,21 @@ class TestVault(GlacierLayer2Base):
self.mock_layer1.delete_archive.assert_called_with("examplevault",
"archive")
+ def test_initiate_job(self):
+ self.mock_layer1.initiate_job.return_value = {'JobId': 'job-id'}
+ self.vault.retrieve_inventory(start_date=datetime(2014, 01, 01),
+ end_date=datetime(2014, 01, 02),
+ limit=100)
+ self.mock_layer1.initiate_job.assert_called_with(
+ 'examplevault', {
+ 'Type': 'inventory-retrieval',
+ 'InventoryRetrievalParameters': {
+ 'StartDate': '2014-01-01T00:00:00',
+ 'EndDate': '2014-01-02T00:00:00',
+ 'Limit': 100
+ }
+ })
+
def test_get_job(self):
self.mock_layer1.describe_job.return_value = FIXTURE_ARCHIVE_JOB
job = self.vault.get_job(
diff --git a/tests/unit/mws/test_connection.py b/tests/unit/mws/test_connection.py
index 58f43ad1..c23f4c26 100755
--- a/tests/unit/mws/test_connection.py
+++ b/tests/unit/mws/test_connection.py
@@ -20,7 +20,8 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
-from boto.mws.connection import MWSConnection, api_call_map
+from boto.mws.connection import MWSConnection, api_call_map, destructure_object
+from boto.mws.response import ResponseElement
from tests.unit import AWSMockServiceTestCase
@@ -48,6 +49,29 @@ doc/2009-01-01/">
</ResponseMetadata>
</GetFeedSubmissionListResponse>"""
+ def test_destructure_object(self):
+ # Test that parsing of user input to Amazon input works.
+ response = ResponseElement(name='Prefix')
+ response.C = 'four'
+ response.D = 'five'
+ inputs = [
+ ('A', 'B'), ['B', 'A'], set(['C']),
+ False, 'String', {'A': 'one', 'B': 'two'},
+ response,
+ ]
+ outputs = [
+ {'Prefix.1': 'A', 'Prefix.2': 'B'},
+ {'Prefix.1': 'B', 'Prefix.2': 'A'},
+ {'Prefix.1': 'C'},
+ {'Prefix': 'false'}, {'Prefix': 'String'},
+ {'Prefix.A': 'one', 'Prefix.B': 'two'},
+ {'Prefix.C': 'four', 'Prefix.D': 'five'},
+ ]
+ for user, amazon in zip(inputs, outputs):
+ result = {}
+ destructure_object(user, result, prefix='Prefix')
+ self.assertEqual(result, amazon)
+
def test_built_api_call_map(self):
# Ensure that the map is populated.
# It starts empty, but the decorators should add to it as they're
@@ -63,7 +87,8 @@ doc/2009-01-01/">
func = self.service_connection.method_for('GetFeedSubmissionList')
# Ensure the right name was found.
self.assertTrue(callable(func))
- self.assertEqual(func, self.service_connection.get_feed_submission_list)
+ ideal = self.service_connection.get_feed_submission_list
+ self.assertEqual(func, ideal)
# Check a non-existent action.
func = self.service_connection.method_for('NotHereNorThere')
diff --git a/tests/unit/provider/test_provider.py b/tests/unit/provider/test_provider.py
index 363f247d..20fd0efe 100644
--- a/tests/unit/provider/test_provider.py
+++ b/tests/unit/provider/test_provider.py
@@ -71,6 +71,15 @@ class TestProvider(unittest.TestCase):
self.assertEqual(p.secret_key, 'env_secret_key')
self.assertIsNone(p.security_token)
+ def test_environment_variable_aws_security_token(self):
+ self.environ['AWS_ACCESS_KEY_ID'] = 'env_access_key'
+ self.environ['AWS_SECRET_ACCESS_KEY'] = 'env_secret_key'
+ self.environ['AWS_SECURITY_TOKEN'] = 'env_security_token'
+ p = provider.Provider('aws')
+ self.assertEqual(p.access_key, 'env_access_key')
+ self.assertEqual(p.secret_key, 'env_secret_key')
+ self.assertEqual(p.security_token, 'env_security_token')
+
def test_config_values_are_used(self):
self.config = {
'Credentials': {
@@ -83,6 +92,19 @@ class TestProvider(unittest.TestCase):
self.assertEqual(p.secret_key, 'cfg_secret_key')
self.assertIsNone(p.security_token)
+ def test_config_value_security_token_is_used(self):
+ self.config = {
+ 'Credentials': {
+ 'aws_access_key_id': 'cfg_access_key',
+ 'aws_secret_access_key': 'cfg_secret_key',
+ 'aws_security_token': 'cfg_security_token',
+ }
+ }
+ p = provider.Provider('aws')
+ self.assertEqual(p.access_key, 'cfg_access_key')
+ self.assertEqual(p.secret_key, 'cfg_secret_key')
+ self.assertEqual(p.security_token, 'cfg_security_token')
+
def test_keyring_is_used(self):
self.config = {
'Credentials': {
@@ -124,6 +146,22 @@ class TestProvider(unittest.TestCase):
self.assertEqual(p.secret_key, 'env_secret_key')
self.assertIsNone(p.security_token)
+ def test_env_vars_security_token_beats_config_values(self):
+ self.environ['AWS_ACCESS_KEY_ID'] = 'env_access_key'
+ self.environ['AWS_SECRET_ACCESS_KEY'] = 'env_secret_key'
+ self.environ['AWS_SECURITY_TOKEN'] = 'env_security_token'
+ self.config = {
+ 'Credentials': {
+ 'aws_access_key_id': 'cfg_access_key',
+ 'aws_secret_access_key': 'cfg_secret_key',
+ 'aws_security_token': 'cfg_security_token',
+ }
+ }
+ p = provider.Provider('aws')
+ self.assertEqual(p.access_key, 'env_access_key')
+ self.assertEqual(p.secret_key, 'env_secret_key')
+ self.assertEqual(p.security_token, 'env_security_token')
+
def test_metadata_server_credentials(self):
self.get_instance_metadata.return_value = INSTANCE_CONFIG
p = provider.Provider('aws')