summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDaniel Lindsley <daniel@toastdriven.com>2013-07-08 22:15:08 -0700
committerDaniel Lindsley <daniel@toastdriven.com>2013-07-08 22:15:08 -0700
commitf3fc2c57c85c6664d7ef67e3c4a806fe746ea8a1 (patch)
treed3846190a80bca7e1e99e7ef4b157e003c1d6ddd
parent25be2b7de7065a32439fa13d0e88fc30b4788381 (diff)
parenta49e719bd4221986845355ce013afea717acf01a (diff)
downloadboto-f3fc2c57c85c6664d7ef67e3c4a806fe746ea8a1.tar.gz
Merge branch 'release-2.9.7'2.9.7
* release-2.9.7: (23 commits) Bumping version to 2.9.7 A bit more release notes cleanup. Added release notes for v2.9.7. Updated Elastic Transcoder for VBR, max frame rate & watermark changes. Fixed more Cloudformation timestamp changes. Added tests for more Cloudformation timestamp changes. and make sure we simulate the behavior of the actual ws call never return more than 'limit' results from a query Fixed an error in the ``get_key_pair`` docstring. Corrected some imports in the DDBv2 tutorial. Fixed an error in the S3 tutorial examples. Fix for CF resource timestamps with milliseconds Make upload header handling case-insensitive. Fix error in docs for get_all_tags. Return more information about gs key generations for parallel composite uploads. Removed StorageUri parse check for lone ':' (interferes with using filenames containing :) Backfilled the release notes from the wiki. Added a note about considering DynamoDB. Updated RDS to use SigV4. Small test cleanup from the previous merge. ...
-rw-r--r--README.rst4
-rw-r--r--boto/__init__.py14
-rw-r--r--boto/cloudformation/stack.py27
-rw-r--r--boto/connection.py2
-rw-r--r--boto/dynamodb2/results.py6
-rw-r--r--boto/dynamodb2/table.py72
-rw-r--r--boto/ec2/connection.py8
-rw-r--r--boto/elastictranscoder/layer1.py228
-rw-r--r--boto/gs/key.py6
-rw-r--r--boto/gs/resumable_upload_handler.py1
-rw-r--r--boto/rds/__init__.py2
-rw-r--r--boto/s3/key.py26
-rwxr-xr-xboto/storage_uri.py4
-rw-r--r--boto/utils.py21
-rw-r--r--docs/source/dynamodb2_tut.rst4
-rw-r--r--docs/source/index.rst20
-rw-r--r--docs/source/releasenotes/v2.0.0.rst135
-rw-r--r--docs/source/releasenotes/v2.0b1.rst14
-rw-r--r--docs/source/releasenotes/v2.1.0.rst115
-rw-r--r--docs/source/releasenotes/v2.1.1.rst7
-rw-r--r--docs/source/releasenotes/v2.2.0.rst89
-rw-r--r--docs/source/releasenotes/v2.2.1.rst6
-rw-r--r--docs/source/releasenotes/v2.2.2.rst31
-rw-r--r--docs/source/releasenotes/v2.3.0.rst47
-rw-r--r--docs/source/releasenotes/v2.4.0.rst60
-rw-r--r--docs/source/releasenotes/v2.5.0.rst39
-rw-r--r--docs/source/releasenotes/v2.5.1.rst6
-rw-r--r--docs/source/releasenotes/v2.5.2.rst9
-rw-r--r--docs/source/releasenotes/v2.6.0.rst101
-rw-r--r--docs/source/releasenotes/v2.7.0.rst91
-rw-r--r--docs/source/releasenotes/v2.8.0.rst45
-rw-r--r--docs/source/releasenotes/v2.9.0.rst56
-rw-r--r--docs/source/releasenotes/v2.9.7.rst40
-rw-r--r--docs/source/s3_tut.rst2
-rw-r--r--docs/source/simpledb_tut.rst37
-rw-r--r--tests/integration/dynamodb2/test_highlevel.py10
-rw-r--r--tests/unit/cloudformation/test_stack.py154
-rw-r--r--tests/unit/dynamodb2/test_table.py81
-rw-r--r--tests/unit/rds/test_connection.py3
-rw-r--r--tests/unit/s3/test_uri.py7
40 files changed, 1511 insertions, 119 deletions
diff --git a/README.rst b/README.rst
index 98e92ccf..13c67d8f 100644
--- a/README.rst
+++ b/README.rst
@@ -1,9 +1,9 @@
####
boto
####
-boto 2.9.6
+boto 2.9.7
-Released: 18-June-2013
+Released: 08-July-2013
.. image:: https://travis-ci.org/boto/boto.png?branch=develop
:target: https://travis-ci.org/boto/boto
diff --git a/boto/__init__.py b/boto/__init__.py
index c32e4968..033d24b8 100644
--- a/boto/__init__.py
+++ b/boto/__init__.py
@@ -36,7 +36,7 @@ import logging.config
import urlparse
from boto.exception import InvalidUriError
-__version__ = '2.9.6'
+__version__ = '2.9.7'
Version = __version__ # for backware compatibility
UserAgent = 'Boto/%s (%s)' % (__version__, sys.platform)
@@ -765,18 +765,6 @@ def storage_uri(uri_str, default_scheme='file', debug=0, validate=True,
# (the latter includes an optional host/net location part).
end_scheme_idx = uri_str.find('://')
if end_scheme_idx == -1:
- # Check for common error: user specifies gs:bucket instead
- # of gs://bucket. Some URI parsers allow this, but it can cause
- # confusion for callers, so we don't.
- colon_pos = uri_str.find(':')
- if colon_pos != -1:
- # Allow Windows path names including drive letter (C: etc.)
- drive_char = uri_str[0].lower()
- if not (platform.system().lower().startswith('windows')
- and colon_pos == 1
- and drive_char >= 'a' and drive_char <= 'z'):
- raise InvalidUriError('"%s" contains ":" instead of "://"' %
- uri_str)
scheme = default_scheme.lower()
path = uri_str
else:
diff --git a/boto/cloudformation/stack.py b/boto/cloudformation/stack.py
index 289e18f4..5d35e891 100644
--- a/boto/cloudformation/stack.py
+++ b/boto/cloudformation/stack.py
@@ -126,9 +126,15 @@ class StackSummary(object):
elif name == 'StackName':
self.stack_name = value
elif name == 'CreationTime':
- self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
+ try:
+ self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
+ except ValueError:
+ self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
elif name == "DeletionTime":
- self.deletion_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
+ try:
+ self.deletion_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
+ except ValueError:
+ self.deletion_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
elif name == 'TemplateDescription':
self.template_description = value
elif name == "member":
@@ -271,7 +277,10 @@ class StackResource(object):
elif name == "StackName":
self.stack_name = value
elif name == "Timestamp":
- self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
+ try:
+ self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
+ except ValueError:
+ self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
else:
setattr(self, name, value)
@@ -295,8 +304,16 @@ class StackResourceSummary(object):
def endElement(self, name, value, connection):
if name == "LastUpdatedTimestamp":
- self.last_updated_timestamp = datetime.strptime(value,
- '%Y-%m-%dT%H:%M:%SZ')
+ try:
+ self.last_updated_timestamp = datetime.strptime(
+ value,
+ '%Y-%m-%dT%H:%M:%SZ'
+ )
+ except ValueError:
+ self.last_updated_timestamp = datetime.strptime(
+ value,
+ '%Y-%m-%dT%H:%M:%S.%fZ'
+ )
elif name == "LogicalResourceId":
self.logical_resource_id = value
elif name == "PhysicalResourceId":
diff --git a/boto/connection.py b/boto/connection.py
index 1f7392c1..375a9ca7 100644
--- a/boto/connection.py
+++ b/boto/connection.py
@@ -120,7 +120,7 @@ class HostConnectionPool(object):
Thread Safety:
- This class is used only fram ConnectionPool while it's mutex
+ This class is used only from ConnectionPool while it's mutex
is held.
"""
diff --git a/boto/dynamodb2/results.py b/boto/dynamodb2/results.py
index bcd855c3..23f64046 100644
--- a/boto/dynamodb2/results.py
+++ b/boto/dynamodb2/results.py
@@ -120,7 +120,11 @@ class ResultSet(object):
# Decrease the limit, if it's present.
if self.call_kwargs.get('limit'):
self.call_kwargs['limit'] -= len(results['results'])
-
+ # and if limit hits zero, we don't have any more
+ # results to look for
+ if 0 == self.call_kwargs['limit']:
+ self._results_left = False
+
class BatchGetResultSet(ResultSet):
def __init__(self, *args, **kwargs):
diff --git a/boto/dynamodb2/table.py b/boto/dynamodb2/table.py
index 36f918ed..c1de437d 100644
--- a/boto/dynamodb2/table.py
+++ b/boto/dynamodb2/table.py
@@ -644,7 +644,7 @@ class Table(object):
return filters
def query(self, limit=None, index=None, reverse=False, consistent=False,
- **filter_kwargs):
+ attributes=None, **filter_kwargs):
"""
Queries for a set of matching items in a DynamoDB table.
@@ -674,6 +674,11 @@ class Table(object):
the data (more expensive). (Default: ``False`` - use eventually
consistent reads)
+    Optionally accepts an ``attributes`` parameter, which should be a
+    tuple. If you provide any attributes, only these will be fetched
+    from DynamoDB. This uses the ``AttributesToGet`` API parameter and
+    sets ``Select`` to ``SPECIFIC_ATTRIBUTES``.
+
Returns a ``ResultSet``, which transparently handles the pagination of
results you get back.
@@ -719,6 +724,11 @@ class Table(object):
"You must specify more than one key to filter on."
)
+ if attributes is not None:
+ select = 'SPECIFIC_ATTRIBUTES'
+ else:
+ select = None
+
results = ResultSet()
kwargs = filter_kwargs.copy()
kwargs.update({
@@ -726,12 +736,68 @@ class Table(object):
'index': index,
'reverse': reverse,
'consistent': consistent,
+ 'select': select,
+ 'attributes_to_get': attributes
})
results.to_call(self._query, **kwargs)
return results
+ def query_count(self, index=None, consistent=False, **filter_kwargs):
+ """
+ Queries the exact count of matching items in a DynamoDB table.
+
+ Queries can be performed against a hash key, a hash+range key or
+ against any data stored in your local secondary indexes.
+
+ To specify the filters of the items you'd like to get, you can specify
+ the filters as kwargs. Each filter kwarg should follow the pattern
+ ``<fieldname>__<filter_operation>=<value_to_look_for>``.
+
+ Optionally accepts an ``index`` parameter, which should be a string of
+ name of the local secondary index you want to query against.
+ (Default: ``None``)
+
+ Optionally accepts a ``consistent`` parameter, which should be a
+ boolean. If you provide ``True``, it will force a consistent read of
+ the data (more expensive). (Default: ``False`` - use eventually
+ consistent reads)
+
+    Returns an integer which represents the exact number of matched
+    items.
+
+ Example::
+
+ # Look for last names equal to "Doe".
+ >>> users.query_count(last_name__eq='Doe')
+ 5
+
+ # Use an LSI & a consistent read.
+ >>> users.query_count(
+ ... date_joined__gte=1236451000,
+ ... owner__eq=1,
+ ... index='DateJoinedIndex',
+ ... consistent=True
+ ... )
+ 2
+
+ """
+ key_conditions = self._build_filters(
+ filter_kwargs,
+ using=QUERY_OPERATORS
+ )
+
+ raw_results = self.connection.query(
+ self.table_name,
+ index_name=index,
+ consistent_read=consistent,
+ select='COUNT',
+ key_conditions=key_conditions,
+ )
+ return int(raw_results.get('Count', 0))
+
def _query(self, limit=None, index=None, reverse=False, consistent=False,
- exclusive_start_key=None, **filter_kwargs):
+ exclusive_start_key=None, select=None, attributes_to_get=None,
+ **filter_kwargs):
"""
The internal method that performs the actual queries. Used extensively
by ``ResultSet`` to perform each (paginated) request.
@@ -741,6 +807,8 @@ class Table(object):
'index_name': index,
'scan_index_forward': reverse,
'consistent_read': consistent,
+ 'select': select,
+ 'attributes_to_get': attributes_to_get
}
if exclusive_start_key:
diff --git a/boto/ec2/connection.py b/boto/ec2/connection.py
index 7752d230..3d6b0205 100644
--- a/boto/ec2/connection.py
+++ b/boto/ec2/connection.py
@@ -2144,8 +2144,8 @@ class EC2Connection(AWSQueryConnection):
"""
Convenience method to retrieve a specific keypair (KeyPair).
- :type image_id: string
- :param image_id: the ID of the Image to retrieve
+ :type keyname: string
+ :param keyname: The name of the keypair to retrieve
:rtype: :class:`boto.ec2.keypair.KeyPair`
:return: The KeyPair specified or None if it is not found
@@ -3233,8 +3233,8 @@ class EC2Connection(AWSQueryConnection):
being performed. Check the EC2 API guide
for details.
- :rtype: dict
- :return: A dictionary containing metadata tags
+ :rtype: list
+ :return: A list of :class:`boto.ec2.tag.Tag` objects
"""
params = {}
if filters:
diff --git a/boto/elastictranscoder/layer1.py b/boto/elastictranscoder/layer1.py
index 0a225109..d741530a 100644
--- a/boto/elastictranscoder/layer1.py
+++ b/boto/elastictranscoder/layer1.py
@@ -63,15 +63,14 @@ class ElasticTranscoderConnection(AWSAuthConnection):
def cancel_job(self, id=None):
"""
- To cancel a job, send a DELETE request to the
- `/2012-09-25/jobs/ [jobId] ` resource.
+ The CancelJob operation cancels an unfinished job.
You can only cancel a job that has a status of `Submitted`. To
prevent a pipeline from starting to process a job while you're
getting the job identifier, use UpdatePipelineStatus to
temporarily pause the pipeline.
:type id: string
- :param id: The identifier of the job that you want to delete.
+ :param id: The identifier of the job that you want to cancel.
To get a list of the jobs (including their `jobId`) that have a status
of `Submitted`, use the ListJobsByStatus API action.
@@ -82,9 +81,6 @@ class ElasticTranscoderConnection(AWSAuthConnection):
def create_job(self, pipeline_id=None, input_name=None, output=None,
outputs=None, output_key_prefix=None, playlists=None):
"""
- To create a job, send a POST request to the `/2012-09-25/jobs`
- resource.
-
When you create a job, Elastic Transcoder returns JSON data
that includes the values that you specified plus information
about the job that is created.
@@ -107,7 +103,7 @@ class ElasticTranscoderConnection(AWSAuthConnection):
information about the file that is being transcoded.
:type output: dict
- :param output:
+ :param output: The `CreateJobOutput` structure.
:type outputs: list
:param outputs: A section of the request body that provides information
@@ -149,8 +145,8 @@ class ElasticTranscoderConnection(AWSAuthConnection):
output_bucket=None, role=None, notifications=None,
content_config=None, thumbnail_config=None):
"""
- To create a pipeline, send a POST request to the
- `2012-09-25/pipelines` resource.
+ The CreatePipeline operation creates a pipeline with settings
+ that you specify.
:type name: string
:param name: The name of the pipeline. We recommend that the name be
@@ -364,10 +360,10 @@ class ElasticTranscoderConnection(AWSAuthConnection):
def create_preset(self, name=None, description=None, container=None,
video=None, audio=None, thumbnails=None):
"""
- To create a preset, send a POST request to the
- `/2012-09-25/presets` resource.
- Elastic Transcoder checks the settings that you specify to
- ensure that they meet Elastic Transcoder requirements and to
+ The CreatePreset operation creates a preset with settings that
+ you specify.
+ Elastic Transcoder checks the CreatePreset settings to ensure
+ that they meet Elastic Transcoder requirements and to
determine whether they comply with H.264 standards. If your
settings are not valid for Elastic Transcoder, Elastic
Transcoder returns an HTTP 400 response (
@@ -426,8 +422,7 @@ class ElasticTranscoderConnection(AWSAuthConnection):
def delete_pipeline(self, id=None):
"""
- To delete a pipeline, send a DELETE request to the
- `/2012-09-25/pipelines/ [pipelineId] ` resource.
+ The DeletePipeline operation removes a pipeline.
You can only delete a pipeline that has never been used or
that is not currently in use (doesn't contain any active
@@ -443,10 +438,11 @@ class ElasticTranscoderConnection(AWSAuthConnection):
def delete_preset(self, id=None):
"""
- To delete a preset, send a DELETE request to the
- `/2012-09-25/presets/ [presetId] ` resource.
+ The DeletePreset operation removes a preset that you've added
+ in an AWS region.
- If the preset has been used, you cannot delete it.
+ You can't delete the default presets that are included with
+ Elastic Transcoder.
:type id: string
:param id: The identifier of the preset for which you want to get
@@ -459,9 +455,8 @@ class ElasticTranscoderConnection(AWSAuthConnection):
def list_jobs_by_pipeline(self, pipeline_id=None, ascending=None,
page_token=None):
"""
- To get a list of the jobs currently in a pipeline, send a GET
- request to the `/2012-09-25/jobsByPipeline/ [pipelineId] `
- resource.
+ The ListJobsByPipeline operation gets a list of the jobs
+ currently in a pipeline.
Elastic Transcoder returns all of the jobs currently in the
specified pipeline. The response body contains one element for
@@ -496,11 +491,7 @@ class ElasticTranscoderConnection(AWSAuthConnection):
def list_jobs_by_status(self, status=None, ascending=None,
page_token=None):
"""
- To get a list of the jobs that have a specified status, send a
- GET request to the `/2012-09-25/jobsByStatus/ [status] `
- resource.
-
- Elastic Transcoder returns all of the jobs that have the
+ The ListJobsByStatus operation gets a list of jobs that have a
specified status. The response body contains one element for
each job that satisfies the search criteria.
@@ -534,9 +525,8 @@ class ElasticTranscoderConnection(AWSAuthConnection):
def list_pipelines(self):
"""
- To get a list of the pipelines associated with the current AWS
- account, send a GET request to the `/2012-09-25/pipelines`
- resource.
+ The ListPipelines operation gets a list of the pipelines
+ associated with the current AWS account.
"""
@@ -545,9 +535,9 @@ class ElasticTranscoderConnection(AWSAuthConnection):
def list_presets(self):
"""
- To get a list of all presets associated with the current AWS
- account, send a GET request to the `/2012-09-25/presets`
- resource.
+ The ListPresets operation gets a list of the default presets
+ included with Elastic Transcoder and the presets that you've
+ added in an AWS region.
"""
@@ -556,8 +546,8 @@ class ElasticTranscoderConnection(AWSAuthConnection):
def read_job(self, id=None):
"""
- To get detailed information about a job, send a GET request to
- the `/2012-09-25/jobs/ [jobId] ` resource.
+ The ReadJob operation returns detailed information about a
+ job.
:type id: string
:param id: The identifier of the job for which you want to get detailed
@@ -569,9 +559,8 @@ class ElasticTranscoderConnection(AWSAuthConnection):
def read_pipeline(self, id=None):
"""
- To get detailed information about a pipeline, send a GET
- request to the `/2012-09-25/pipelines/ [pipelineId] `
- resource.
+ The ReadPipeline operation gets detailed information about a
+ pipeline.
:type id: string
:param id: The identifier of the pipeline to read.
@@ -582,8 +571,8 @@ class ElasticTranscoderConnection(AWSAuthConnection):
def read_preset(self, id=None):
"""
- To get detailed information about a preset, send a GET request
- to the `/2012-09-25/presets/ [presetId] ` resource.
+ The ReadPreset operation gets detailed information about a
+ preset.
:type id: string
:param id: The identifier of the preset for which you want to get
@@ -596,9 +585,8 @@ class ElasticTranscoderConnection(AWSAuthConnection):
def test_role(self, role=None, input_bucket=None, output_bucket=None,
topics=None):
"""
- To test the IAM role that's used by Elastic Transcoder to
- create the pipeline, send a POST request to the
- `/2012-09-25/roleTests` resource.
+ The TestRole operation tests the IAM role used to create the
+ pipeline.
The `TestRole` action lets you determine whether the IAM role
you are using has sufficient permissions to let Elastic
@@ -644,28 +632,161 @@ class ElasticTranscoderConnection(AWSAuthConnection):
notifications=None, content_config=None,
thumbnail_config=None):
"""
-
+ Use the `UpdatePipeline` operation to update settings for a
+ pipeline. When you change pipeline settings, your changes take
+ effect immediately. Jobs that you have already submitted and
+ that Elastic Transcoder has not started to process are
+ affected in addition to jobs that you submit after you change
+ settings.
:type id: string
- :param id:
+ :param id: The ID of the pipeline that you want to update.
:type name: string
- :param name:
+ :param name: The name of the pipeline. We recommend that the name be
+ unique within the AWS account, but uniqueness is not enforced.
+ Constraints: Maximum 40 characters
:type input_bucket: string
- :param input_bucket:
+ :param input_bucket: The Amazon S3 bucket in which you saved the media
+ files that you want to transcode and the graphics that you want to
+ use as watermarks.
:type role: string
- :param role:
+ :param role: The IAM Amazon Resource Name (ARN) for the role that you
+ want Elastic Transcoder to use to transcode jobs for this pipeline.
:type notifications: dict
:param notifications:
+ The Amazon Simple Notification Service (Amazon SNS) topic or topics to
+ notify in order to report job status.
+ To receive notifications, you must also subscribe to the new topic in
+ the Amazon SNS console.
:type content_config: dict
:param content_config:
+ The optional `ContentConfig` object specifies information about the
+ Amazon S3 bucket in which you want Elastic Transcoder to save
+ transcoded files and playlists: which bucket to use, which users
+ you want to have access to the files, the type of access you want
+ users to have, and the storage class that you want to assign to the
+ files.
+
+ If you specify values for `ContentConfig`, you must also specify values
+ for `ThumbnailConfig`.
+
+ If you specify values for `ContentConfig` and `ThumbnailConfig`, omit
+ the `OutputBucket` object.
+
+
+ + **Bucket**: The Amazon S3 bucket in which you want Elastic Transcoder
+ to save transcoded files and playlists.
+ + **Permissions** (Optional): The Permissions object specifies which
+ users you want to have access to transcoded files and the type of
+ access you want them to have. You can grant permissions to a
+ maximum of 30 users and/or predefined Amazon S3 groups.
+ + **Grantee Type**: Specify the type of value that appears in the
+ `Grantee` object:
+
+ + **Canonical**: The value in the `Grantee` object is either the
+ canonical user ID for an AWS account or an origin access identity
+ for an Amazon CloudFront distribution. For more information about
+ canonical user IDs, see Access Control List (ACL) Overview in the
+ Amazon Simple Storage Service Developer Guide. For more information
+ about using CloudFront origin access identities to require that
+ users use CloudFront URLs instead of Amazon S3 URLs, see Using an
+ Origin Access Identity to Restrict Access to Your Amazon S3
+ Content. A canonical user ID is not the same as an AWS account
+ number.
+ + **Email**: The value in the `Grantee` object is the registered email
+ address of an AWS account.
+ + **Group**: The value in the `Grantee` object is one of the following
+ predefined Amazon S3 groups: `AllUsers`, `AuthenticatedUsers`, or
+ `LogDelivery`.
+
+ + **Grantee**: The AWS user or group that you want to have access to
+ transcoded files and playlists. To identify the user or group, you
+ can specify the canonical user ID for an AWS account, an origin
+ access identity for a CloudFront distribution, the registered email
+ address of an AWS account, or a predefined Amazon S3 group
+ + **Access**: The permission that you want to give to the AWS user that
+ you specified in `Grantee`. Permissions are granted on the files
+ that Elastic Transcoder adds to the bucket, including playlists and
+ video files. Valid values include:
+
+ + `READ`: The grantee can read the objects and metadata for objects
+ that Elastic Transcoder adds to the Amazon S3 bucket.
+ + `READ_ACP`: The grantee can read the object ACL for objects that
+ Elastic Transcoder adds to the Amazon S3 bucket.
+ + `WRITE_ACP`: The grantee can write the ACL for the objects that
+ Elastic Transcoder adds to the Amazon S3 bucket.
+ + `FULL_CONTROL`: The grantee has `READ`, `READ_ACP`, and `WRITE_ACP`
+ permissions for the objects that Elastic Transcoder adds to the
+ Amazon S3 bucket.
+
+ + **StorageClass**: The Amazon S3 storage class, `Standard` or
+ `ReducedRedundancy`, that you want Elastic Transcoder to assign to
+ the video files and playlists that it stores in your Amazon S3
+ bucket.
:type thumbnail_config: dict
:param thumbnail_config:
+ The `ThumbnailConfig` object specifies several values, including the
+ Amazon S3 bucket in which you want Elastic Transcoder to save
+ thumbnail files, which users you want to have access to the files,
+ the type of access you want users to have, and the storage class
+ that you want to assign to the files.
+
+ If you specify values for `ContentConfig`, you must also specify values
+ for `ThumbnailConfig` even if you don't want to create thumbnails.
+
+ If you specify values for `ContentConfig` and `ThumbnailConfig`, omit
+ the `OutputBucket` object.
+
+
+ + **Bucket**: The Amazon S3 bucket in which you want Elastic Transcoder
+ to save thumbnail files.
+ + **Permissions** (Optional): The `Permissions` object specifies which
+ users and/or predefined Amazon S3 groups you want to have access to
+ thumbnail files, and the type of access you want them to have. You
+ can grant permissions to a maximum of 30 users and/or predefined
+ Amazon S3 groups.
+ + **GranteeType**: Specify the type of value that appears in the
+ Grantee object:
+
+ + **Canonical**: The value in the `Grantee` object is either the
+ canonical user ID for an AWS account or an origin access identity
+ for an Amazon CloudFront distribution. A canonical user ID is not
+ the same as an AWS account number.
+ + **Email**: The value in the `Grantee` object is the registered email
+ address of an AWS account.
+ + **Group**: The value in the `Grantee` object is one of the following
+ predefined Amazon S3 groups: `AllUsers`, `AuthenticatedUsers`, or
+ `LogDelivery`.
+
+ + **Grantee**: The AWS user or group that you want to have access to
+ thumbnail files. To identify the user or group, you can specify the
+ canonical user ID for an AWS account, an origin access identity for
+ a CloudFront distribution, the registered email address of an AWS
+ account, or a predefined Amazon S3 group.
+ + **Access**: The permission that you want to give to the AWS user that
+ you specified in `Grantee`. Permissions are granted on the
+ thumbnail files that Elastic Transcoder adds to the bucket. Valid
+ values include:
+
+ + `READ`: The grantee can read the thumbnails and metadata for objects
+ that Elastic Transcoder adds to the Amazon S3 bucket.
+ + `READ_ACP`: The grantee can read the object ACL for thumbnails that
+ Elastic Transcoder adds to the Amazon S3 bucket.
+ + `WRITE_ACP`: The grantee can write the ACL for the thumbnails that
+ Elastic Transcoder adds to the Amazon S3 bucket.
+ + `FULL_CONTROL`: The grantee has `READ`, `READ_ACP`, and `WRITE_ACP`
+ permissions for the thumbnails that Elastic Transcoder adds to the
+ Amazon S3 bucket.
+
+ + **StorageClass**: The Amazon S3 storage class, `Standard` or
+ `ReducedRedundancy`, that you want Elastic Transcoder to assign to
+ the thumbnails that it stores in your Amazon S3 bucket.
"""
uri = '/2012-09-25/pipelines/{0}'.format(id)
@@ -687,9 +808,9 @@ class ElasticTranscoderConnection(AWSAuthConnection):
def update_pipeline_notifications(self, id=None, notifications=None):
"""
- To update Amazon Simple Notification Service (Amazon SNS)
- notifications for a pipeline, send a POST request to the
- `/2012-09-25/pipelines/ [pipelineId] /notifications` resource.
+ With the UpdatePipelineNotifications operation, you can update
+ Amazon Simple Notification Service (Amazon SNS) notifications
+ for a pipeline.
When you update notifications for a pipeline, Elastic
Transcoder returns the values that you specified in the
@@ -734,10 +855,9 @@ class ElasticTranscoderConnection(AWSAuthConnection):
def update_pipeline_status(self, id=None, status=None):
"""
- To pause or reactivate a pipeline, so the pipeline stops or
- restarts processing jobs, update the status for the pipeline.
- Send a POST request to the `/2012-09-25/pipelines/
- [pipelineId] /status` resource.
+ The UpdatePipelineStatus operation pauses or reactivates a
+ pipeline, so that the pipeline stops or restarts the
+ processing of jobs.
Changing the pipeline status is useful if you want to cancel
one or more jobs. You can't cancel jobs after Elastic
diff --git a/boto/gs/key.py b/boto/gs/key.py
index 1ced4ce9..41ad0569 100644
--- a/boto/gs/key.py
+++ b/boto/gs/key.py
@@ -117,6 +117,8 @@ class Key(S3Key):
self.cloud_hashes[alg] = binascii.a2b_base64(b64_digest)
elif key == 'x-goog-component-count':
self.component_count = int(value)
+ elif key == 'x-goog-generation':
+ self.generation = value
def open_read(self, headers=None, query_args='',
override_num_retries=None, response_headers=None):
@@ -920,3 +922,7 @@ class Key(S3Key):
if resp.status < 200 or resp.status > 299:
raise self.bucket.connection.provider.storage_response_error(
resp.status, resp.reason, resp.read())
+
+ # Return the generation so that the result URI can be built with this
+ # for automatic parallel uploads.
+ return resp.getheader('x-goog-generation')
diff --git a/boto/gs/resumable_upload_handler.py b/boto/gs/resumable_upload_handler.py
index 57ae7548..e1b74347 100644
--- a/boto/gs/resumable_upload_handler.py
+++ b/boto/gs/resumable_upload_handler.py
@@ -664,6 +664,7 @@ class ResumableUploadHandler(object):
# Upload succeeded, so remove the tracker file (if have one).
self._remove_tracker_file()
self._check_final_md5(key, etag)
+ key.generation = self.generation
if debug >= 1:
print 'Resumable upload complete.'
return
diff --git a/boto/rds/__init__.py b/boto/rds/__init__.py
index d81a6bbb..5a4305f2 100644
--- a/boto/rds/__init__.py
+++ b/boto/rds/__init__.py
@@ -103,7 +103,7 @@ class RDSConnection(AWSQueryConnection):
validate_certs=validate_certs)
def _required_auth_capability(self):
- return ['rds']
+ return ['hmac-v4']
# DB Instance methods
diff --git a/boto/s3/key.py b/boto/s3/key.py
index 7fead3a5..9c7b4b27 100644
--- a/boto/s3/key.py
+++ b/boto/s3/key.py
@@ -40,6 +40,8 @@ from boto.s3.keyfile import KeyFile
from boto.s3.user import User
from boto import UserAgent
from boto.utils import compute_md5
+from boto.utils import find_matching_headers
+from boto.utils import merge_headers_by_name
try:
from hashlib import md5
except ImportError:
@@ -836,23 +838,31 @@ class Key(object):
headers = {}
else:
headers = headers.copy()
+ # Overwrite user-supplied user-agent.
+ for header in find_matching_headers('User-Agent', headers):
+ del headers[header]
headers['User-Agent'] = UserAgent
if self.storage_class != 'STANDARD':
headers[provider.storage_class_header] = self.storage_class
- if 'Content-Encoding' in headers:
- self.content_encoding = headers['Content-Encoding']
- if 'Content-Language' in headers:
- self.content_encoding = headers['Content-Language']
- if 'Content-Type' in headers:
+ if find_matching_headers('Content-Encoding', headers):
+ self.content_encoding = merge_headers_by_name(
+ 'Content-Encoding', headers)
+ if find_matching_headers('Content-Language', headers):
+ self.content_language = merge_headers_by_name(
+ 'Content-Language', headers)
+ content_type_headers = find_matching_headers('Content-Type', headers)
+ if content_type_headers:
# Some use cases need to suppress sending of the Content-Type
# header and depend on the receiving server to set the content
# type. This can be achieved by setting headers['Content-Type']
# to None when calling this method.
- if headers['Content-Type'] is None:
+ if (len(content_type_headers) == 1 and
+ headers[content_type_headers[0]] is None):
# Delete null Content-Type value to skip sending that header.
- del headers['Content-Type']
+ del headers[content_type_headers[0]]
else:
- self.content_type = headers['Content-Type']
+ self.content_type = merge_headers_by_name(
+ 'Content-Type', headers)
elif self.path:
self.content_type = mimetypes.guess_type(self.path)[0]
if self.content_type == None:
diff --git a/boto/storage_uri.py b/boto/storage_uri.py
index 9a6b2bfa..f046fb15 100755
--- a/boto/storage_uri.py
+++ b/boto/storage_uri.py
@@ -765,8 +765,10 @@ class BucketStorageUri(StorageUri):
for suri in components:
component_keys.append(suri.new_key())
component_keys[-1].generation = suri.generation
- self.new_key().compose(
+ self.generation = self.new_key().compose(
component_keys, content_type=content_type, headers=headers)
+ self._build_uri_strings()
+ return self
def exists(self, headers=None):
"""Returns True if the object exists or False if it doesn't"""
diff --git a/boto/utils.py b/boto/utils.py
index 97fdd2df..6d89b21f 100644
--- a/boto/utils.py
+++ b/boto/utils.py
@@ -944,3 +944,24 @@ def compute_hash(fp, buf_size=8192, size=None, hash_algorithm=md5):
data_size = fp.tell() - spos
fp.seek(spos)
return (hex_digest, base64_digest, data_size)
+
+
+def find_matching_headers(name, headers):
+ """
+ Takes a specific header name and a dict of headers {"name": "value"}.
+ Returns a list of matching header names, case-insensitive.
+
+ """
+ return [h for h in headers if h.lower() == name.lower()]
+
+
+def merge_headers_by_name(name, headers):
+ """
+ Takes a specific header name and a dict of headers {"name": "value"}.
+ Returns a string of all header values, comma-separated, that match the
+ input header name, case-insensitive.
+
+ """
+ matching_headers = find_matching_headers(name, headers)
+ return ','.join(str(headers[h]) for h in matching_headers
+ if headers[h] is not None)
diff --git a/docs/source/dynamodb2_tut.rst b/docs/source/dynamodb2_tut.rst
index 15d9dbd7..b6e98118 100644
--- a/docs/source/dynamodb2_tut.rst
+++ b/docs/source/dynamodb2_tut.rst
@@ -73,7 +73,7 @@ Simple example::
A full example::
- >>> from boto.dynamodb2.fields import HashKey, RangeKey, KeysOnlyIndex
+ >>> from boto.dynamodb2.fields import HashKey, RangeKey, KeysOnlyIndex, AllIndex
>>> from boto.dynamodb2.layer1 import DynamoDBConnection
>>> from boto.dynamodb2.table import Table
>>> from boto.dynamodb2.types import NUMBER
@@ -112,7 +112,9 @@ Lazy example::
Efficient example::
+ >>> from boto.dynamodb2.fields import HashKey, RangeKey, AllIndex
>>> from boto.dynamodb2.table import Table
+ >>> from boto.dynamodb2.types import NUMBER
>>> users = Table('users', schema=[
... HashKey('account_type', data_type=NUMBER),
... RangeKey('last_name'),
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 252f14a6..dedc0d69 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -33,12 +33,12 @@ Currently Supported Services
* **Database**
- * :doc:`SimpleDB <simpledb_tut>` -- (:doc:`API Reference <ref/sdb>`)
* :doc:`DynamoDB2 <dynamodb2_tut>` -- (:doc:`API Reference <ref/dynamodb2>`) -- (:doc:`Migration Guide from v1 <migrations/dynamodb_v1_to_v2>`)
* :doc:`DynamoDB <dynamodb_tut>` -- (:doc:`API Reference <ref/dynamodb>`)
* :doc:`Relational Data Services (RDS) <rds_tut>` -- (:doc:`API Reference <ref/rds>`)
* ElastiCache -- (:doc:`API Reference <ref/elasticache>`)
* Redshift -- (:doc:`API Reference <ref/redshift>`)
+ * :doc:`SimpleDB <simpledb_tut>` -- (:doc:`API Reference <ref/sdb>`)
* **Deployment and Management**
@@ -111,11 +111,29 @@ Release Notes
.. toctree::
:titlesonly:
+ releasenotes/v2.9.7
+ releasenotes/v2.9.6
releasenotes/v2.9.5
releasenotes/v2.9.4
releasenotes/v2.9.3
releasenotes/v2.9.2
releasenotes/v2.9.1
+ releasenotes/v2.9.0
+ releasenotes/v2.8.0
+ releasenotes/v2.7.0
+ releasenotes/v2.6.0
+ releasenotes/v2.5.2
+ releasenotes/v2.5.1
+ releasenotes/v2.5.0
+ releasenotes/v2.4.0
+ releasenotes/v2.3.0
+ releasenotes/v2.2.2
+ releasenotes/v2.2.1
+ releasenotes/v2.2.0
+ releasenotes/v2.1.1
+ releasenotes/v2.1.0
+ releasenotes/v2.0.0
+ releasenotes/v2.0b1
.. toctree::
diff --git a/docs/source/releasenotes/v2.0.0.rst b/docs/source/releasenotes/v2.0.0.rst
new file mode 100644
index 00000000..47e563a0
--- /dev/null
+++ b/docs/source/releasenotes/v2.0.0.rst
@@ -0,0 +1,135 @@
+==========================
+Release Notes for boto 2.0
+==========================
+
+Highlights
+==========
+
+There have been many, many changes since the 2.0b4 release. This overview highlights some of those changes.
+
+* Fix connection pooling bug: don't close before reading.
+* Added AddInstanceGroup and ModifyInstanceGroup to boto.emr
+* Merge pull request #246 from chetan/multipart_s3put
+AddInstanceGroupsResponse class to boto.emr.emrobject.
+* Removed extra print statement
+* Merge pull request #244 from ryansb/master
+* Added add_instance_groups function to boto.emr.connection. Built some helper methods for it, and added AddInstanceGroupsResponse class to boto.emr.emrobject.
+* Added a new class, InstanceGroup, with just a __init__ and __repr__.
+* Adding support for GetLoginProfile request to IAM. Removing commented lines in connection.py. Fixes GoogleCode issue 532.
+* Fixed issue #195
+* Added correct sax reader for boto.emr.emrobject.BootstrapAction
+* Fixed a typo bug in ConsoleOutput sax parsing and some PEP8 cleanup in connection.py.
+* Added initial support for generating a registration url for the aws marketplace
+* Fix add_record and del_record to support multiple values, like change_record does
+* Add support to accept SecurityGroupId as a parameter for ec2 run instances. This is required to create EC2 instances under VPC security groups
+* Added support for aliases to the add_change method of ResourceRecordSets.
+* Resign each request in a retry situation. Some services are starting to incorporate replay detection algorithms and the boto approach of simply re-trying the original request triggers them. Also a small bug fix to roboto and added a delay in the ec2 test to wait for consistency.
+* Fixed a problem with InstanceMonitoring parameter of LaunchConfigurations for autoscale module.
+* Route 53 Alias Resource Record Sets
+* Fixed App Engine support
+* Fixed incorrect host on App Engine
+* Fixed issue 199 on github.
+* First pass at put_metric_data
+* Changed boto.s3.Bucket.set_acl_xml() to ISO-8859-1 encode the Unicode ACL text before sending over HTTP connection.
+* Added GetQualificationScore for mturk.
+* Added UpdateQualificationScore for mturk
+* import_key_pair base64 fix
+* Fixes for ses send_email method better handling of exceptions
+* Add optional support for SSL server certificate validation.
+* Specify a reasonable socket timeout for httplib
+* Support for ap-northeast-1 region
+* Close issue #153
+* Close issue #154
+* we must POST autoscale user-data, not GET. otherwise an HTTP 505 error is returned from AWS. see: http://groups.google.com/group/boto-dev/browse_thread/thread/d5eb79c97ea8eecf?pli=1
+* autoscale userdata needs to be base64 encoded.
+* Use the unversioned streaming jar symlink provided by EMR
+* Updated lss3 to allow for prefix based listing (more like actual ls)
+* Deal with the groupSet element that appears in the instanceSet element in the DescribeInstances response.
+* Add a change_record command to bin/route53
+* Incorporating a patch from AWS to allow security groups to be tagged.
+* Fixed an issue with extra headers in generated URLs. Fixes http://code.google.com/p/boto/issues/detail?id=499
+* Incorporating a patch to handle obscure bug in apache/fastcgi. See http://goo.gl/0Tdax.
+* Reorganizing the existing test code. Part of a long-term project to completely revamp and improve boto tests.
+* Fixed an invalid parameter bug (ECS) #102
+* Adding initial cut at s3 website support.
+
+Stats
+=====
+
+* 465 commits since boto 2.0b4
+* 70 authors
+* 111 Pull requests from 64 different authors
+
+Contributors (in order of last commits)
+=======================================
+
+* Mitch Garnaat
+* Chris Moyer
+* Garrett Holmstrom
+* Justin Riley
+* Steve Johnson
+* Sean Talts
+* Brian Beach
+* Ryan Brown
+* Chetan Sarva
+* spenczar
+* Jonathan Drosdeck
+* garnaat
+* Nathaniel Moseley
+* Bradley Ayers
+* jibs
+* Kenneth Falck
+* chirag
+* Sean O'Connor
+* Scott Moser
+* Vineeth Pillai
+* Greg Taylor
+* root
+* darktable
+* flipkin
+* brimcfadden
+* Samuel Lucidi
+* Terence Honles
+* Mike Schwartz
+* Waldemar Kornewald
+* Lucas Hrabovsky
+* thaDude
+* Vinicius Ruan Cainelli
+* David Marin
+* Stanislav Ievlev
+* Victor Trac
+* Dan Fairs
+* David Pisoni
+* Matt Robenolt
+* Matt Billenstein
+* rgrp
+* vikalp
+* Christoph Kern
+* Gabriel Monroy
+* Ben Burry
+* Hinnerk
+* Jann Kleen
+* Louis R. Marascio
+* Matt Singleton
+* David Park
+* Nick Tarleton
+* Cory Mintz
+* Robert Mela
+* rlotun
+* John Walsh
+* Keith Fitzgerald
+* Pierre Riteau
+* ryancustommade
+* Fabian Topfstedt
+* Michael Thompson
+* sanbornm
+* Seth Golub
+* Jon Colverson
+* Steve Howard
+* Roberto Gaiser
+* James Downs
+* Gleicon Moraes
+* Blake Maltby
+* Mac Morgan
+* Rytis Sileika
+* winhamwr
diff --git a/docs/source/releasenotes/v2.0b1.rst b/docs/source/releasenotes/v2.0b1.rst
new file mode 100644
index 00000000..aefd9023
--- /dev/null
+++ b/docs/source/releasenotes/v2.0b1.rst
@@ -0,0 +1,14 @@
+===============================
+Major changes for release 2.0b1
+===============================
+
+* Support for versioning in S3
+* Support for MFA Delete in S3
+* Support for Elastic Map Reduce
+* Support for Simple Notification Service
+* Support for Google Storage
+* Support for Consistent Reads and Conditional Puts in SimpleDB
+* Significant updates and improvements to Mechanical Turk (mturk) module
+* Support for Windows Bundle Tasks in EC2
+* Support for Reduced Redundancy Storage (RRS) in S3
+* Support for Cluster Computing instances and Placement Groups in EC2 \ No newline at end of file
diff --git a/docs/source/releasenotes/v2.1.0.rst b/docs/source/releasenotes/v2.1.0.rst
new file mode 100644
index 00000000..8c294aca
--- /dev/null
+++ b/docs/source/releasenotes/v2.1.0.rst
@@ -0,0 +1,115 @@
+===========
+boto v2.1.0
+===========
+
+The 2.1.0 release of boto is now available on `PyPI`_ and `Google Code`_.
+
+.. _`PyPI`: http://pypi.python.org/pypi/boto
+.. _`Google Code`: http://code.google.com/p/boto/downloads/
+
+You can view a list of issues that have been closed in this release at
+https://github.com/boto/boto/issues?milestone=4&state=closed.
+
+You can get a comprehensive list of all commits made between the 2.0 release
+and the 2.1.0 release at https://github.com/boto/boto/compare/033457f30d...a0a1fd54ef.
+
+Some highlights of this release:
+
+* Server-side encryption now supported in S3.
+* Better support for VPC in EC2.
+* Support for combiner in StreamingStep for EMR.
+* Support for CloudFormations.
+* Support for streaming uploads to Google Storage.
+* Support for generating signed URL's in CloudFront.
+* MTurk connection now uses HTTPS by default, like all other Connection objects.
+* You can now PUT multiple data points to CloudWatch in one call.
+* CloudWatch Dimension object now correctly supports multiple values for same
+ dimension name.
+* Lots of documentation fixes/additions
+
+There were 235 commits in this release from 35 different authors. The authors
+are listed below, in no particular order:
+
+* Erick Fejta
+* Joel Barciauskas
+* Matthew Tai
+* Hyunjung Park
+* Mitch Garnaat
+* Victor Trac
+* Andy Grimm
+* ZerothAngel
+* Dan Lecocq
+* jmallen
+* Greg Taylor
+* Brian Grossman
+* Marc Brinkmann
+* Hunter Blanks
+* Steve Johnson
+* Keith Fitzgerald
+* Kamil Klimkiewicz
+* Eddie Hebert
+* garnaat
+* Samuel Lucidi
+* Kazuhiro Ogura
+* David Arthur
+* Michael Budde
+* Vineeth Pillai
+* Trevor Pounds
+* Mike Schwartz
+* Ryan Brown
+* Mark
+* Chetan Sarva
+* Dan Callahan
+* INADA Naoki
+* Mitchell Hashimoto
+* Chris Moyer
+* Riobard
+* Ted Romer
+* Justin Riley
+* Brian Beach
+* Simon Ratner
+
+We processed 60 pull requests for this release from 40 different contributors. Here are the github user id's for all of the pull request authors:
+
+* jtriley
+* mbr
+* jbarciauskas
+* hyunjung
+* bugi
+* ryansb
+* gtaylor
+* ehazlett
+* secretmike
+* riobard
+* simonratner
+* irskep
+* sanbornm
+* methane
+* jumping
+* mansam
+* miGlanz
+* dlecocq
+* fdr
+* mitchellh
+* ehebert
+* memory
+* hblanks
+* mbudde
+* ZerothAngel
+* goura
+* natedub
+* tpounds
+* bwbeach
+* mumrah
+* chetan
+* jmallen
+* a13m
+* mtai
+* fejta
+* jibs
+* callahad
+* vineethrp
+* JDrosdeck
+* gholms
+
+If you are trying to reconcile that data (i.e. 35 different authors and 40 users with pull requests), well so am I. I'm just reporting on the data that I get from the Github api 8^)
diff --git a/docs/source/releasenotes/v2.1.1.rst b/docs/source/releasenotes/v2.1.1.rst
new file mode 100644
index 00000000..981d7261
--- /dev/null
+++ b/docs/source/releasenotes/v2.1.1.rst
@@ -0,0 +1,7 @@
+===========
+boto v2.1.1
+===========
+
+The 2.1.1 release fixes one serious issue with the RDS module.
+
+https://github.com/boto/boto/issues/382 \ No newline at end of file
diff --git a/docs/source/releasenotes/v2.2.0.rst b/docs/source/releasenotes/v2.2.0.rst
new file mode 100644
index 00000000..0fefd171
--- /dev/null
+++ b/docs/source/releasenotes/v2.2.0.rst
@@ -0,0 +1,89 @@
+===========
+boto v2.2.0
+===========
+
+The 2.2.0 release of boto is now available on `PyPI`_.
+
+.. _`PyPI`: http://pypi.python.org/pypi/boto
+
+You can view a list of issues that have been closed in this release at
+https://github.com/boto/boto/issues?milestone=5&state=closed.
+
+You can get a comprehensive list of all commits made between the 2.1.1 release
+and the 2.2.0 release at
+https://github.com/boto/boto/compare/fa0d6a1e49c8468abbe2c99cdc9f5fd8fd19f8f8...26c8eb108873bf8ce1b9d96d642eea2beef78c77.
+
+Some highlights of this release:
+
+* Support for Amazon DynamoDB service.
+* Support for S3 Object Lifecycle (Expiration).
+* Allow anonymous request for S3.
+* Support for creating Load Balancers in VPC.
+* Support for multi-dimension metrics in CloudWatch.
+* Support for Elastic Network Interfaces in EC2.
+* Support for Amazon S3 Multi-Delete capability.
+* Support for new AMI version and overriding of parameters in EMR.
+* Support for SendMessageBatch request in SQS.
+* Support for DescribeInstanceStatus request in EC2.
+* Many, many improvements and additions to API documentation and Tutorials.
+ Special thanks to Greg Taylor for all of the Sphinx cleanups and new docs.
+
+There were 336 commits in this release from 40 different authors. The authors
+are listed below, in no particular order:
+
+* Garrett Holmstrom
+* mLewisLogic
+* Warren Turkal
+* Nathan Binkert
+* Scott Moser
+* Jeremy Edberg
+* najeira
+* Marc Cohen
+* Jim Browne
+* Mitch Garnaat
+* David Ormsbee
+* Blake Maltby
+* Thomas O'Dowd
+* Victor Trac
+* David Marin
+* Greg Taylor
+* rdodev
+* Jonathan Sabo
+* rdoci
+* Mike Schwartz
+* l33twolf
+* Keith Fitzgerald
+* Oleksandr Gituliar
+* Jason Allum
+* Ilya Volodarsky
+* Rajesh
+* Felipe Reyes
+* Andy Grimm
+* Seth Davis
+* Dave King
+* andy
+* Chris Moyer
+* ruben
+* Spike Gronim
+* Daniel Norberg
+* Justin Riley
+* Milan Cermak timtebeek
+* unknown
+* Yotam Gingold
+* Brian Oldfield
+
+We processed 21 pull requests for this release from 40 different contributors.
+Here are the github user id's for all of the pull request authors:
+
+* milancermak
+* jsabo
+* gituliar
+* rdodev
+* marccohen
+* tpodowd
+* trun
+* jallum
+* binkert
+* ormsbee
+* timtebeek
+
diff --git a/docs/source/releasenotes/v2.2.1.rst b/docs/source/releasenotes/v2.2.1.rst
new file mode 100644
index 00000000..5d122040
--- /dev/null
+++ b/docs/source/releasenotes/v2.2.1.rst
@@ -0,0 +1,6 @@
+===========
+boto v2.2.1
+===========
+
+The 2.2.1 release fixes a packaging problem that was causing problems when
+installing via pip. \ No newline at end of file
diff --git a/docs/source/releasenotes/v2.2.2.rst b/docs/source/releasenotes/v2.2.2.rst
new file mode 100644
index 00000000..993dc483
--- /dev/null
+++ b/docs/source/releasenotes/v2.2.2.rst
@@ -0,0 +1,31 @@
+===========
+boto v2.2.2
+===========
+
+The 2.2.2 release of boto is now available on `PyPI`_.
+
+.. _`PyPI`: http://pypi.python.org/pypi/boto
+
+You can view a list of issues that have been closed in this release at
+https://github.com/boto/boto/issues?milestone=8&state=closed.
+
+You can get a comprehensive list of all commits made between the 2.2.1 release
+and the 2.2.2 release at https://github.com/boto/boto/compare/2.2.1...2.2.2.
+
+This is a bugfix release.
+
+There were 71 commits in this release from 11 different authors. The authors
+are listed below, in no particular order:
+
+* aficionado
+* jimbrowne
+* rdodev
+* milancermak
+* garnaat
+* kopertop
+* samuraisam
+* tpodowd
+* psa
+* mfschwartz
+* gtaylor
+
diff --git a/docs/source/releasenotes/v2.3.0.rst b/docs/source/releasenotes/v2.3.0.rst
new file mode 100644
index 00000000..1ec69cbd
--- /dev/null
+++ b/docs/source/releasenotes/v2.3.0.rst
@@ -0,0 +1,47 @@
+===========
+boto v2.3.0
+===========
+
+The 2.3.0 release of boto is now available on `PyPI`_.
+
+.. _`PyPI`: http://pypi.python.org/pypi/boto
+
+You can view a list of issues that have been closed in this release at
+https://github.com/boto/boto/issues?milestone=6&state=closed.
+
+You can get a comprehensive list of all commits made between the 2.2.2 release
+and the 2.3.0 release at https://github.com/boto/boto/compare/2.2.2...2.3.0.
+
+This release includes initial support for Amazon Simple Workflow Service.
+
+The API version of the FPS module was updated to 2010-08-28.
+
+This release also includes many bug fixes and improvements in the Amazon
+DynamoDB module. One change of particular note is the behavior of the
+``new_item`` method of the ``Table`` object. See http://readthedocs.org/docs/boto/en/2.3.0/ref/dynamodb.html#module-boto.dynamodb.table
+for more details.
+
+There were 109 commits in this release from 21 different authors.
+The authors are listed below, in no particular order:
+
+* theju
+* garnaat
+* rdodev
+* mfschwartz
+* kopertop
+* tpodowd
+* gtaylor
+* kachok
+* croach
+* tmorgan
+* Erick Fejta
+* dherbst
+* marccohen
+* Arif Amirani
+* yuzeh
+* Roguelazer
+* awblocker
+* blinsay
+* Peter Broadwell
+* tierney
+* georgekola
diff --git a/docs/source/releasenotes/v2.4.0.rst b/docs/source/releasenotes/v2.4.0.rst
new file mode 100644
index 00000000..5fb7d3db
--- /dev/null
+++ b/docs/source/releasenotes/v2.4.0.rst
@@ -0,0 +1,60 @@
+===========
+boto v2.4.0
+===========
+
+The 2.4.0 release of boto is now available on `PyPI`_.
+
+.. _`PyPI`: http://pypi.python.org/pypi/boto
+
+You can get a comprehensive list of all commits made between the 2.3.0 release
+and the 2.4.0 release at https://github.com/boto/boto/compare/2.3.0...2.4.0.
+
+This release includes:
+
+* Initial support for Amazon Cloudsearch Service.
+* Support for Amazon's Marketplace Web Service.
+* Latency-based routing for Route53
+* Support for new domain verification features of SES.
+* A full rewrite of the FPS module.
+* Support for BatchWriteItem in DynamoDB.
+* Additional EMR steps for installing and running Pig scripts.
+* Support for additional batch operations in SQS.
+* Better support for VPC group-ids.
+* Many, many bugfixes from the community. Thanks for the reports and pull
+ requests!
+
+There were 175 commits in this release from 32 different authors. The authors
+are listed below, in no particular order:
+
+* estebistec
+* tpodowd
+* Max Noel
+* garnaat
+* mfschwartz
+* jtriley
+* akoumjian
+* jreese
+* mulka
+* Nuutti Kotivuori
+* mboersma
+* ryansb
+* dampier
+* crschmidt
+* nithint
+* sievlev
+* eckamm
+* imlucas
+* disruptek
+* trevorsummerssmith
+* tmorgan
+* evanworley
+* iandanforth
+* oozie
+* aedeph
+* alexanderdean
+* abrinsmead
+* dlecocq
+* bsimpson63
+* jamesls
+* cosmin
+* gtaylor
diff --git a/docs/source/releasenotes/v2.5.0.rst b/docs/source/releasenotes/v2.5.0.rst
new file mode 100644
index 00000000..5fb95c28
--- /dev/null
+++ b/docs/source/releasenotes/v2.5.0.rst
@@ -0,0 +1,39 @@
+===========
+boto v2.5.0
+===========
+
+The 2.5.0 release of boto is now available on `PyPI`_.
+
+.. _`PyPI`: http://pypi.python.org/pypi/boto
+
+You can get a comprehensive list of all commits made between the 2.4.1 release
+and the 2.5.0 release at https://github.com/boto/boto/compare/2.4.1...2.5.0.
+
+This release includes:
+
+* Support for IAM Roles for EC2 Instances
+* Added support for Capabilities in CloudFormation
+* Spot instances in autoscaling groups
+* Internal ELB's
+* Added tenancy option to run_instances
+
+There were 77 commits in this release from 18 different authors. The authors
+are listed below, in no particular order:
+
+* jimbrowne
+* cosmin
+* gtaylor
+* garnaat
+* brianjaystanley
+* jamesls
+* trevorsummerssmith
+* Bryan Donlan
+* davidmarble
+* jtriley
+* rdodev
+* toby
+* tpodowd
+* srs81
+* mfschwartz
+* rdegges
+* gholms
diff --git a/docs/source/releasenotes/v2.5.1.rst b/docs/source/releasenotes/v2.5.1.rst
new file mode 100644
index 00000000..db747bd3
--- /dev/null
+++ b/docs/source/releasenotes/v2.5.1.rst
@@ -0,0 +1,6 @@
+===========
+boto v2.5.1
+===========
+
+Release 2.5.1 is a bugfix release. It fixes the following critical issues:
+* :issue:`819`
diff --git a/docs/source/releasenotes/v2.5.2.rst b/docs/source/releasenotes/v2.5.2.rst
new file mode 100644
index 00000000..66d6d718
--- /dev/null
+++ b/docs/source/releasenotes/v2.5.2.rst
@@ -0,0 +1,9 @@
+===========
+boto v2.5.2
+===========
+
+Release 2.5.2 is a bugfix release. It fixes the following critical issues:
+* :issue:`830`
+
+This issue only affects you if you are using DynamoDB on an EC2 instance with
+IAM Roles. \ No newline at end of file
diff --git a/docs/source/releasenotes/v2.6.0.rst b/docs/source/releasenotes/v2.6.0.rst
new file mode 100644
index 00000000..124da3df
--- /dev/null
+++ b/docs/source/releasenotes/v2.6.0.rst
@@ -0,0 +1,101 @@
+===========
+boto v2.6.0
+===========
+
+The 2.6.0 release of boto is now available on `PyPI`_.
+
+.. _`PyPI`: http://pypi.python.org/pypi/boto
+
+You can get a comprehensive list of all commits made between the 2.5.2 release
+and the 2.6.0 release at https://github.com/boto/boto/compare/2.5.2...2.6.0.
+
+This release includes:
+
+* Support for Amazon Glacier
+* Support for AWS Elastic Beanstalk
+* CORS support for Amazon S3
+* Support for Reserved Instances Resale in Amazon EC2
+* Support for IAM Roles
+
+SSL Certificate Verification
+============================
+
+In addition, this release of boto changes the default behavior with respect to
+SSL certificate verification. Our friends at Google contributed code to boto
+well over a year ago that implemented SSL certificate verification. At the
+time, we felt the most prudent course of action was to make this feature an
+opt-in but we always felt that at some time in the future we would enable cert
+verification as the default behavior. Well, that time is now!
+
+However, in implementing this change, we came across a bug in Python for all
+versions prior to 2.7.3 (see http://bugs.python.org/issue13034 for details).
+The net result of this bug is that Python is able to check only the commonName
+in the SSL cert for verification purposes. Any subjectAltNames are ignored in
+large SSL keys. So, in addition to enabling verification as the default
+behavior we also changed some of the service endpoints in boto to match the
+commonName in the SSL certificate.
+
+If you want to disable verification for any reason (not advised, btw) you can
+still do so by editing your boto config file (see
+https://gist.github.com/3762068) or you can override it by passing
+`validate_certs=False` to the Connection class constructor or the `connect_*`
+function.
+
+Commits
+=======
+
+There were 440 commits in this release from 53 different authors. The authors are listed below, in alphabetical order:
+
+* acorley
+* acrefoot
+* aedeph
+* allardhoeve
+* almost
+* awatts
+* buzztroll
+* cadams
+* cbednarski
+* cosmin
+* dangra
+* darjus-amzn
+* disruptek
+* djw
+* garnaat
+* gertjanol
+* gimbel0893
+* gochist
+* graphaelli
+* gtaylor
+* gz
+* hardys
+* jamesls
+* jijojv
+* jimbrowne
+* jtlebigot
+* jtriley
+* kopertop
+* kotnik
+* marknca
+* mark_nunnikhoven
+* mfschwartz
+* moliware
+* NeilW
+* nkvoll
+* nsitarz
+* ohe
+* pasieronen
+* patricklucas
+* pfig
+* rajivnavada
+* reversefold
+* robie
+* scott
+* shawnps
+* smoser
+* sopel
+* staer
+* tedder
+* yamatt
+* Yossi
+* yovadia12
+* zachhuff386 \ No newline at end of file
diff --git a/docs/source/releasenotes/v2.7.0.rst b/docs/source/releasenotes/v2.7.0.rst
new file mode 100644
index 00000000..633cacda
--- /dev/null
+++ b/docs/source/releasenotes/v2.7.0.rst
@@ -0,0 +1,91 @@
+===========
+boto v2.7.0
+===========
+
+The 2.7.0 release of boto is now available on `PyPI`_.
+
+.. _`PyPI`: http://pypi.python.org/pypi/boto
+
+You can get a comprehensive list of all commits made between the 2.6.0 release
+and the 2.7.0 release at https://github.com/boto/boto/compare/2.6.0...2.7.0.
+
+This release includes:
+
+* Added support for AWS Data Pipeline - :sha:`999902`
+* Integrated Slick53 into Route53 module - :issue:`1186`
+* Add ability to use Decimal for DynamoDB numeric types - :issue:`1183`
+* Query/Scan Count/ScannedCount support and TableGenerator improvements -
+ :issue:`1181`
+* Added support for keyring in config files - :issue:`1157`
+* Add concurrent downloader to glacier - :issue:`1106`
+* Add support for tagged RDS DBInstances - :issue:`1050`
+* Updating RDS API Version to 2012-09-17 - :issue:`1033`
+* Added support for provisioned IOPS for RDS - :issue:`1028`
+* Add ability to set SQS Notifications in Mechanical Turk - :issue:`1018`
+
+Commits
+=======
+
+There were 447 commits in this release from 60 different authors. The authors
+are listed below, in alphabetical order:
+
+* acrefoot
+* Alex Schoof
+* Andy Davidoff
+* anoopj
+* Benoit Dubertret
+* bobveznat
+* dahlia
+* dangra
+* disruptek
+* dmcritchie
+* emtrane
+* focus
+* fsouza
+* g2harris
+* garnaat
+* georgegoh
+* georgesequeira
+* GitsMcGee
+* glance-
+* gtaylor
+* hashbackup
+* hinnerk
+* hoov
+* isaacbowen
+* jamesls
+* JerryKwan
+* jimfulton
+* jimbrowne
+* jorourke
+* jterrace
+* jtriley
+* katzj
+* kennu
+* kevinburke
+* khagler
+* Kodiologist
+* kopertop
+* kotnik
+* Leftium
+* lpetc
+* marknca
+* matthewandrews
+* mfschwartz
+* mikek
+* mkmt
+* mleonhard
+* mraposa
+* oozie
+* phunter
+* potix2
+* Rafael Cunha de Almeida
+* reinhillmann
+* reversefold
+* Robie Basak
+* seandst
+* siroken3
+* staer
+* tpodowd
+* vladimir-sol
+* yovadia12
diff --git a/docs/source/releasenotes/v2.8.0.rst b/docs/source/releasenotes/v2.8.0.rst
new file mode 100644
index 00000000..6c91dedb
--- /dev/null
+++ b/docs/source/releasenotes/v2.8.0.rst
@@ -0,0 +1,45 @@
+===========
+boto v2.8.0
+===========
+
+The 2.8.0 release of boto is now available on `PyPI`_.
+
+.. _`PyPI`: http://pypi.python.org/pypi/boto
+
+You can get a comprehensive list of all commits made between the 2.7.0 release
+and the 2.8.0 release at https://github.com/boto/boto/compare/2.7.0...2.8.0.
+
+This release includes:
+
+* Added support for Amazon Elasticache
+* Added support for Amazon Elastic Transcoding Service
+
+As well as numerous bug fixes and improvements.
+
+Commits
+=======
+
+There were 115 commits in this release from 21 different authors. The authors
+are listed below, in alphabetical order:
+
+* conorbranagan
+* dkavanagh
+* gaige
+* garnaat
+* halfaleague
+* jamesls
+* jjhooper
+* jordansissel
+* jterrace
+* Kodiologist
+* kopertop
+* mfschwartz
+* nathan11g
+* pasc
+* phobologic
+* schworer
+* seandst
+* SirAlvarex
+* Yaniv Ovadia
+* yig
+* yovadia12
diff --git a/docs/source/releasenotes/v2.9.0.rst b/docs/source/releasenotes/v2.9.0.rst
new file mode 100644
index 00000000..8550fc5d
--- /dev/null
+++ b/docs/source/releasenotes/v2.9.0.rst
@@ -0,0 +1,56 @@
+===========
+boto v2.9.0
+===========
+
+The 2.9.0 release of boto is now available on `PyPI`_.
+
+.. _`PyPI`: http://pypi.python.org/pypi/boto
+
+You can get a comprehensive list of all commits made between the 2.8.0 release
+and the 2.9.0 release at https://github.com/boto/boto/compare/2.8.0...2.9.0.
+
+This release includes:
+
+* Support for Amazon Redshift
+* Support for Amazon DynamoDB's new API
+* Support for AWS Opsworks
+* Add `copy_image` to EC2 (AMI copy)
+* Add `describe_account_attributes` and `describe_vpc_attribute`, and
+ `modify_vpc_attribute` operations to EC2.
+
+There were 240 commits made by 34 different authors:
+
+* g2harris
+* Michael Barrett
+* Pascal Hakim
+* James Saryerwinnie
+* Mitch Garnaat
+* ChangMin Jeon
+* Mike Schwartz
+* Jeremy Katz
+* Alex Schoof
+* reinhillmann
+* Travis Hobrla
+* Zach Wilt
+* Daniel Lindsley
+* ksacry
+* Michael Wirth
+* Eric Smalling
+* pingwin
+* Chris Moyer
+* Olivier Hervieu
+* Iuri de Silvio
+* Joe Sondow
+* Max Noel
+* Nate
+* Chris Moyer
+* Lars Otten
+* Nathan Grigg
+* Rein Hillmann
+* Øyvind Saltvik
+* Rayson HO
+* Martin Matusiak
+* Royce Remer
+* Jeff Terrace
+* Yaniv Ovadia
+* Eduardo S. Klein
diff --git a/docs/source/releasenotes/v2.9.7.rst b/docs/source/releasenotes/v2.9.7.rst
new file mode 100644
index 00000000..13e684a3
--- /dev/null
+++ b/docs/source/releasenotes/v2.9.7.rst
@@ -0,0 +1,40 @@
+boto v2.9.7
+===========
+
+:date: 2013/07/08
+
+This release is primarily a bugfix release, but also includes support for
+Elastic Transcoder updates (variable bit rate, max frame rate & watermark
+features).
+
+
+Features
+--------
+
+* Added support for selecting specific attributes in DynamoDB v2.
+ (:issue:`1567`, :sha:`d9e5c2`)
+* Added support for variable bit rate, max frame rate & watermark features in
+ Elastic Transcoder. (:sha:`3791c9`)
+
+
+Bugfixes
+--------
+
+* Altered RDS to now use SigV4. (:sha:`be1633`)
+* Removed parsing check in ``StorageUri``. (:sha:`21bc8f`)
+* More information returned about GS key generation. (:issue:`1571`,
+ :sha:`6d5e3a`)
+* Upload handling headers now case-insensitive. (:issue:`1575`, :sha:`60383d`)
+* Several CloudFormation timestamp updates. (:issue:`1582`, :issue:`1583`,
+ :issue:`1588`, :sha:`0a23d34`, :sha:`6d4209`)
+* Corrected a bug in how limits are handled in DynamoDB v2. (:issue:`1590`,
+ :sha:`710a62`)
+* Several documentation improvements/fixes:
+
+ * Typo in ``boto.connection`` fixed. (:issue:`1569`, :sha:`cf39fd`)
+ * All previous release notes added to the docs. (:sha:`165596`)
+ * Corrected error in ``get_all_tags`` docs. (:sha:`4bca5d`)
+ * Corrected a typo in the S3 tutorial. (:sha:`f0cef8`)
+ * Corrected several import errors in the DDBv2 tutorial. (:sha:`5401a3`)
+ * Fixed an error in the ``get_key_pair`` docstring. (:issue:`1590`,
+ :sha:`a9cb8d`)
diff --git a/docs/source/s3_tut.rst b/docs/source/s3_tut.rst
index fc75e108..2b40306a 100644
--- a/docs/source/s3_tut.rst
+++ b/docs/source/s3_tut.rst
@@ -117,7 +117,7 @@ this worked, quit out of the interpreter and start it up again. Then::
>>> import boto
>>> c = boto.connect_s3()
- >>> b = c.create_bucket('mybucket') # substitute your bucket name here
+ >>> b = c.get_bucket('mybucket') # substitute your bucket name here
>>> from boto.s3.key import Key
>>> k = Key(b)
>>> k.key = 'foobar'
diff --git a/docs/source/simpledb_tut.rst b/docs/source/simpledb_tut.rst
index 98cabfe0..6ecc087f 100644
--- a/docs/source/simpledb_tut.rst
+++ b/docs/source/simpledb_tut.rst
@@ -3,11 +3,18 @@
============================================
An Introduction to boto's SimpleDB interface
============================================
+
This tutorial focuses on the boto interface to AWS' SimpleDB_. This tutorial
assumes that you have boto already downloaded and installed.
.. _SimpleDB: http://aws.amazon.com/simpledb/
+.. note::
+
+ If you're starting a new application, you might want to consider using
+ :doc:`DynamoDB2 <dynamodb2_tut>` instead, as it has a more comprehensive
+ feature set & has guaranteed performance throughput levels.
+
Creating a Connection
---------------------
The first step in accessing SimpleDB is to create a connection to the service.
@@ -40,7 +47,7 @@ Creating new domains is a fairly straight forward operation. To do so, you can p
Domain:test-domain
>>>
-Please note that SimpleDB, unlike its newest sibling DynamoDB, is truly and completely schema-less.
+Please note that SimpleDB, unlike its newest sibling DynamoDB, is truly and completely schema-less.
Thus, there's no need specify domain keys or ranges.
Listing All Domains
@@ -67,8 +74,8 @@ If you wish to retrieve a specific domain whose name is known, you can do so as
>>>
The get_domain call has an optional validate parameter, which defaults to True. This will make sure to raise
-an exception if the domain you are looking for doesn't exist. If you set it to false, it will return a
-:py:class:`Domain <boto.sdb.domain.Domain>` object blindly regardless of its existence.
+an exception if the domain you are looking for doesn't exist. If you set it to false, it will return a
+:py:class:`Domain <boto.sdb.domain.Domain>` object blindly regardless of its existence.
Getting Domain Metadata
------------------------
@@ -79,8 +86,8 @@ To this end, boto offers a simple and convenient way to do so as shown below::
>>> domain_meta
<boto.sdb.domain.DomainMetaData instance at 0x23cd440>
>>> dir(domain_meta)
- ['BoxUsage', 'DomainMetadataResponse', 'DomainMetadataResult', 'RequestId', 'ResponseMetadata',
- '__doc__', '__init__', '__module__', 'attr_name_count', 'attr_names_size', 'attr_value_count', 'attr_values_size',
+ ['BoxUsage', 'DomainMetadataResponse', 'DomainMetadataResult', 'RequestId', 'ResponseMetadata',
+ '__doc__', '__init__', '__module__', 'attr_name_count', 'attr_names_size', 'attr_value_count', 'attr_values_size',
'domain', 'endElement', 'item_count', 'item_names_size', 'startElement', 'timestamp']
>>> domain_meta.item_count
0
@@ -92,8 +99,8 @@ can retrieve the domain metadata via its name (string).
Adding Items (and attributes)
-----------------------------
Once you have your domain setup, presumably, you'll want to start adding items to it.
-In its most straight forward form, you need to provide a name for the item -- think of it
-as a record id -- and a collection of the attributes you want to store in the item (often a Dictionary-like object).
+In its most straight forward form, you need to provide a name for the item -- think of it
+as a record id -- and a collection of the attributes you want to store in the item (often a Dictionary-like object).
So, adding an item to a domain looks as follows::
>>> item_name = 'ABC_123'
@@ -112,7 +119,7 @@ Now let's check if it worked::
Batch Adding Items (and attributes)
-----------------------------------
-You can also add a number of items at the same time in a similar fashion. All you have to provide to the batch_put_attributes() method
+You can also add a number of items at the same time in a similar fashion. All you have to provide to the batch_put_attributes() method
is a Dictionary-like object with your items and their respective attributes, as follows::
>>> items = {'item1':{'attr1':'val1'},'item2':{'attr2':'val2'}}
@@ -127,8 +134,8 @@ Now, let's check the item count once again::
3
>>>
-A few words of warning: both batch_put_attributes() and put_item(), by default, will overwrite the values of the attributes if both
-the item and attribute already exist. If the item exists, but not the attributes, it will append the new attributes to the
+A few words of warning: both batch_put_attributes() and put_item(), by default, will overwrite the values of the attributes if both
+the item and attribute already exist. If the item exists, but not the attributes, it will append the new attributes to the
attribute list of that item. If you do not wish these methods to behave in that manner, simply supply them with a 'replace=False'
parameter.
@@ -141,7 +148,7 @@ To retrieve an item along with its attributes is a fairly straight forward opera
{u'attr1': u'val1'}
>>>
-Since SimpleDB works in an "eventual consistency" manner, we can also request a forced consistent read (though this will
+Since SimpleDB works in an "eventual consistency" manner, we can also request a forced consistent read (though this will
invariably adversely affect read performance). The way to accomplish that is as shown below::
>>> dom.get_item('item1', consistent_read=True)
@@ -150,14 +157,14 @@ invariably adversely affect read performance). The way to accomplish that is as
Retrieving One or More Items
----------------------------
-Another way to retrieve items is through boto's select() method. This method, at the bare minimum, requires a standard SQL select query string
+Another way to retrieve items is through boto's select() method. This method, at the bare minimum, requires a standard SQL select query string
and you would do something along the lines of::
>>> query = 'select * from `test-domain` where attr1="val1"'
>>> rs = dom.select(query)
>>> for j in rs:
... print 'o hai'
- ...
+ ...
o hai
>>>
@@ -174,13 +181,13 @@ The easiest way to modify an item's attributes is by manipulating the item's att
Deleting Items (and its attributes)
-----------------------------------
-Deleting an item is a very simple operation. All you are required to provide is either the name of the item or an item object to the
+Deleting an item is a very simple operation. All you are required to provide is either the name of the item or an item object to the
delete_item() method, boto will take care of the rest::
>>>dom.delete_item(item)
>>>True
-
+
Deleting Domains
-----------------------------------
diff --git a/tests/integration/dynamodb2/test_highlevel.py b/tests/integration/dynamodb2/test_highlevel.py
index a02046b2..eed46efa 100644
--- a/tests/integration/dynamodb2/test_highlevel.py
+++ b/tests/integration/dynamodb2/test_highlevel.py
@@ -184,11 +184,14 @@ class DynamoDBv2Test(unittest.TestCase):
username__eq='johndoe',
last_name__eq='Doe',
index='LastNameIndex',
+ attributes=('username',),
reverse=True
)
for res in results:
self.assertTrue(res['username'] in ['johndoe',])
+ self.assertEqual(res.keys(), ['username'])
+
# Test the strongly consistent query.
c_results = users.query(
@@ -246,6 +249,13 @@ class DynamoDBv2Test(unittest.TestCase):
# Test count, but in a weak fashion. Because lag time.
self.assertTrue(users.count() > -1)
+ # Test query count
+ count = users.query_count(
+ username__eq='bob',
+ )
+
+ self.assertEqual(count, 1)
+
# Test without LSIs (describe calls shouldn't fail).
admins = Table.create('admins', schema=[
HashKey('username')
diff --git a/tests/unit/cloudformation/test_stack.py b/tests/unit/cloudformation/test_stack.py
index 54d2dc90..0f39184e 100644
--- a/tests/unit/cloudformation/test_stack.py
+++ b/tests/unit/cloudformation/test_stack.py
@@ -52,25 +52,171 @@ SAMPLE_XML = r"""
</DescribeStacksResponse>
"""
+DESCRIBE_STACK_RESOURCE_XML = r"""
+<DescribeStackResourcesResult>
+ <StackResources>
+ <member>
+ <StackId>arn:aws:cloudformation:us-east-1:123456789:stack/MyStack/aaf549a0-a413-11df-adb3-5081b3858e83</StackId>
+ <StackName>MyStack</StackName>
+ <LogicalResourceId>MyDBInstance</LogicalResourceId>
+ <PhysicalResourceId>MyStack_DB1</PhysicalResourceId>
+ <ResourceType>AWS::DBInstance</ResourceType>
+ <Timestamp>2010-07-27T22:27:28Z</Timestamp>
+ <ResourceStatus>CREATE_COMPLETE</ResourceStatus>
+ </member>
+ <member>
+ <StackId>arn:aws:cloudformation:us-east-1:123456789:stack/MyStack/aaf549a0-a413-11df-adb3-5081b3858e83</StackId>
+ <StackName>MyStack</StackName>
+ <LogicalResourceId>MyAutoScalingGroup</LogicalResourceId>
+ <PhysicalResourceId>MyStack_ASG1</PhysicalResourceId>
+ <ResourceType>AWS::AutoScalingGroup</ResourceType>
+ <Timestamp>2010-07-27T22:28:28.123456Z</Timestamp>
+ <ResourceStatus>CREATE_IN_PROGRESS</ResourceStatus>
+ </member>
+ </StackResources>
+</DescribeStackResourcesResult>
+"""
+
+LIST_STACKS_XML = r"""
+<ListStacksResponse>
+ <ListStacksResult>
+ <StackSummaries>
+ <member>
+ <StackId>
+ arn:aws:cloudformation:us-east-1:1234567:stack/TestCreate1/aaaaa
+ </StackId>
+ <StackStatus>CREATE_IN_PROGRESS</StackStatus>
+ <StackName>vpc1</StackName>
+ <CreationTime>2011-05-23T15:47:44Z</CreationTime>
+ <TemplateDescription>
+ Creates one EC2 instance and a load balancer.
+ </TemplateDescription>
+ </member>
+ <member>
+ <StackId>
+ arn:aws:cloudformation:us-east-1:1234567:stack/TestDelete2/bbbbb
+ </StackId>
+ <StackStatus>DELETE_COMPLETE</StackStatus>
+ <DeletionTime>2011-03-10T16:20:51.575757Z</DeletionTime>
+ <StackName>WP1</StackName>
+ <CreationTime>2011-03-05T19:57:58.161616Z</CreationTime>
+ <TemplateDescription>
+ A simple basic Cloudformation Template.
+ </TemplateDescription>
+ </member>
+ </StackSummaries>
+ </ListStacksResult>
+</ListStacksResponse>
+"""
+
+LIST_STACK_RESOURCES_XML = r"""
+<ListStackResourcesResponse>
+ <ListStackResourcesResult>
+ <StackResourceSummaries>
+ <member>
+ <ResourceStatus>CREATE_COMPLETE</ResourceStatus>
+ <LogicalResourceId>DBSecurityGroup</LogicalResourceId>
+ <LastUpdatedTimestamp>2011-06-21T20:15:58Z</LastUpdatedTimestamp>
+ <PhysicalResourceId>gmarcteststack-dbsecuritygroup-1s5m0ez5lkk6w</PhysicalResourceId>
+ <ResourceType>AWS::RDS::DBSecurityGroup</ResourceType>
+ </member>
+ <member>
+ <ResourceStatus>CREATE_COMPLETE</ResourceStatus>
+ <LogicalResourceId>SampleDB</LogicalResourceId>
+ <LastUpdatedTimestamp>2011-06-21T20:25:57.875643Z</LastUpdatedTimestamp>
+ <PhysicalResourceId>MyStack-sampledb-ycwhk1v830lx</PhysicalResourceId>
+ <ResourceType>AWS::RDS::DBInstance</ResourceType>
+ </member>
+ </StackResourceSummaries>
+ </ListStackResourcesResult>
+ <ResponseMetadata>
+ <RequestId>2d06e36c-ac1d-11e0-a958-f9382b6eb86b</RequestId>
+ </ResponseMetadata>
+</ListStackResourcesResponse>
+"""
+
class TestStackParse(unittest.TestCase):
def test_parse_tags(self):
- rs = boto.resultset.ResultSet([('member', boto.cloudformation.stack.Stack)])
+ rs = boto.resultset.ResultSet([
+ ('member', boto.cloudformation.stack.Stack)
+ ])
h = boto.handler.XmlHandler(rs, None)
xml.sax.parseString(SAMPLE_XML, h)
tags = rs[0].tags
self.assertEqual(tags, {u'key0': u'value0', u'key1': u'value1'})
- def test_creation_time_with_millis(self):
+ def test_event_creation_time_with_millis(self):
millis_xml = SAMPLE_XML.replace(
"<CreationTime>2013-01-10T05:04:56Z</CreationTime>",
"<CreationTime>2013-01-10T05:04:56.102342Z</CreationTime>"
)
- rs = boto.resultset.ResultSet([('member', boto.cloudformation.stack.Stack)])
+ rs = boto.resultset.ResultSet([
+ ('member', boto.cloudformation.stack.Stack)
+ ])
h = boto.handler.XmlHandler(rs, None)
xml.sax.parseString(millis_xml, h)
creation_time = rs[0].creation_time
- self.assertEqual(creation_time, datetime.datetime(2013, 1, 10, 5, 4, 56, 102342))
+ self.assertEqual(
+ creation_time,
+ datetime.datetime(2013, 1, 10, 5, 4, 56, 102342)
+ )
+
+ def test_resource_time_with_millis(self):
+ rs = boto.resultset.ResultSet([
+ ('member', boto.cloudformation.stack.StackResource)
+ ])
+ h = boto.handler.XmlHandler(rs, None)
+ xml.sax.parseString(DESCRIBE_STACK_RESOURCE_XML, h)
+ timestamp_1 = rs[0].timestamp
+ self.assertEqual(
+ timestamp_1,
+ datetime.datetime(2010, 7, 27, 22, 27, 28)
+ )
+ timestamp_2 = rs[1].timestamp
+ self.assertEqual(
+ timestamp_2,
+ datetime.datetime(2010, 7, 27, 22, 28, 28, 123456)
+ )
+
+ def test_list_stacks_time_with_millis(self):
+ rs = boto.resultset.ResultSet([
+ ('member', boto.cloudformation.stack.StackSummary)
+ ])
+ h = boto.handler.XmlHandler(rs, None)
+ xml.sax.parseString(LIST_STACKS_XML, h)
+ timestamp_1 = rs[0].creation_time
+ self.assertEqual(
+ timestamp_1,
+ datetime.datetime(2011, 5, 23, 15, 47, 44)
+ )
+ timestamp_2 = rs[1].creation_time
+ self.assertEqual(
+ timestamp_2,
+ datetime.datetime(2011, 3, 5, 19, 57, 58, 161616)
+ )
+ timestamp_3 = rs[1].deletion_time
+ self.assertEqual(
+ timestamp_3,
+ datetime.datetime(2011, 3, 10, 16, 20, 51, 575757)
+ )
+
+    def test_list_stack_resources_time_with_millis(self):
+ rs = boto.resultset.ResultSet([
+ ('member', boto.cloudformation.stack.StackResourceSummary)
+ ])
+ h = boto.handler.XmlHandler(rs, None)
+ xml.sax.parseString(LIST_STACK_RESOURCES_XML, h)
+ timestamp_1 = rs[0].last_updated_timestamp
+ self.assertEqual(
+ timestamp_1,
+ datetime.datetime(2011, 6, 21, 20, 15, 58)
+ )
+ timestamp_2 = rs[1].last_updated_timestamp
+ self.assertEqual(
+ timestamp_2,
+ datetime.datetime(2011, 6, 21, 20, 25, 57, 875643)
+ )
if __name__ == '__main__':
unittest.main()
diff --git a/tests/unit/dynamodb2/test_table.py b/tests/unit/dynamodb2/test_table.py
index fe7e5b95..791fdf59 100644
--- a/tests/unit/dynamodb2/test_table.py
+++ b/tests/unit/dynamodb2/test_table.py
@@ -590,6 +590,9 @@ def fake_results(name, greeting='hello', exclusive_start_key=None, limit=None):
if exclusive_start_key is None:
exclusive_start_key = -1
+ if limit == 0:
+ raise Exception("Web Service Returns '400 Bad Request'")
+
end_cap = 13
results = []
start_key = exclusive_start_key + 1
@@ -598,6 +601,10 @@ def fake_results(name, greeting='hello', exclusive_start_key=None, limit=None):
if i < end_cap:
results.append("%s %s #%s" % (greeting, name, i))
+ # Don't return more than limit results
+ if limit < len(results):
+ results = results[:limit]
+
retval = {
'results': results,
}
@@ -681,6 +688,37 @@ class ResultSetTestCase(unittest.TestCase):
self.assertRaises(StopIteration, self.results.next)
self.assertEqual(self.results.call_kwargs['limit'], 7)
+ def test_limit_smaller_than_first_page(self):
+ results = ResultSet()
+ results.to_call(fake_results, 'john', greeting='Hello', limit=2)
+ self.assertEqual(results.next(), 'Hello john #0')
+ self.assertEqual(results.next(), 'Hello john #1')
+ self.assertRaises(StopIteration, results.next)
+
+ def test_limit_equals_page(self):
+ results = ResultSet()
+ results.to_call(fake_results, 'john', greeting='Hello', limit=5)
+ # First page
+ self.assertEqual(results.next(), 'Hello john #0')
+ self.assertEqual(results.next(), 'Hello john #1')
+ self.assertEqual(results.next(), 'Hello john #2')
+ self.assertEqual(results.next(), 'Hello john #3')
+ self.assertEqual(results.next(), 'Hello john #4')
+ self.assertRaises(StopIteration, results.next)
+
+ def test_limit_greater_than_page(self):
+ results = ResultSet()
+ results.to_call(fake_results, 'john', greeting='Hello', limit=6)
+ # First page
+ self.assertEqual(results.next(), 'Hello john #0')
+ self.assertEqual(results.next(), 'Hello john #1')
+ self.assertEqual(results.next(), 'Hello john #2')
+ self.assertEqual(results.next(), 'Hello john #3')
+ self.assertEqual(results.next(), 'Hello john #4')
+ # Second page
+ self.assertEqual(results.next(), 'Hello john #5')
+ self.assertRaises(StopIteration, results.next)
+
def test_iteration_noresults(self):
def none(limit=10):
return {
@@ -1527,15 +1565,17 @@ class TableTestCase(unittest.TestCase):
mock_query.assert_called_once_with('users',
consistent_read=False,
- index_name=None,
scan_index_forward=True,
+ index_name=None,
+ attributes_to_get=None,
limit=4,
key_conditions={
'username': {
'AttributeValueList': [{'S': 'aaa'}, {'S': 'mmm'}],
'ComparisonOperator': 'BETWEEN',
}
- }
+ },
+ select=None
)
# Now alter the expected.
@@ -1571,6 +1611,7 @@ class TableTestCase(unittest.TestCase):
}
},
index_name=None,
+ attributes_to_get=None,
scan_index_forward=True,
limit=4,
exclusive_start_key={
@@ -1578,7 +1619,8 @@ class TableTestCase(unittest.TestCase):
'S': 'adam',
},
},
- consistent_read=True
+ consistent_read=True,
+ select=None
)
def test_private_scan(self):
@@ -1742,6 +1784,39 @@ class TableTestCase(unittest.TestCase):
self.assertEqual(mock_query_2.call_count, 1)
+ def test_query_with_specific_attributes(self):
+ items_1 = {
+ 'results': [
+ Item(self.users, data={
+ 'username': 'johndoe',
+ }),
+ Item(self.users, data={
+ 'username': 'jane',
+ }),
+ ],
+ 'last_key': 'jane',
+ }
+
+ results = self.users.query(last_name__eq='Doe',
+ attributes=['username'])
+ self.assertTrue(isinstance(results, ResultSet))
+ self.assertEqual(len(results._results), 0)
+ self.assertEqual(results.the_callable, self.users._query)
+
+ with mock.patch.object(
+ results,
+ 'the_callable',
+ return_value=items_1) as mock_query:
+ res_1 = results.next()
+ # Now it should be populated.
+ self.assertEqual(len(results._results), 2)
+ self.assertEqual(res_1['username'], 'johndoe')
+ self.assertEqual(res_1.keys(), ['username'])
+ res_2 = results.next()
+ self.assertEqual(res_2['username'], 'jane')
+
+ self.assertEqual(mock_query.call_count, 1)
+
def test_scan(self):
items_1 = {
'results': [
diff --git a/tests/unit/rds/test_connection.py b/tests/unit/rds/test_connection.py
index 0d4bff83..ff3000fc 100644
--- a/tests/unit/rds/test_connection.py
+++ b/tests/unit/rds/test_connection.py
@@ -91,8 +91,7 @@ class TestRDSConnection(AWSMockServiceTestCase):
self.assert_request_parameters({
'Action': 'DescribeDBInstances',
'DBInstanceIdentifier': 'instance_id',
- }, ignore_params_values=['AWSAccessKeyId', 'Timestamp', 'Version',
- 'SignatureVersion', 'SignatureMethod'])
+ }, ignore_params_values=['Version'])
db = response[0]
self.assertEqual(db.id, 'mydbinstance2')
self.assertEqual(db.create_time, '2012-10-03T22:01:51.047Z')
diff --git a/tests/unit/s3/test_uri.py b/tests/unit/s3/test_uri.py
index ab682191..c2b3faf7 100644
--- a/tests/unit/s3/test_uri.py
+++ b/tests/unit/s3/test_uri.py
@@ -244,6 +244,13 @@ class UriTest(unittest.TestCase):
self.assertEqual(uri.is_stream(), False)
self.assertEqual(uri.is_version_specific, False)
+ def test_file_containing_colon(self):
+ uri_str = 'abc:def'
+ uri = boto.storage_uri(uri_str, validate=False,
+ suppress_consec_slashes=False)
+ self.assertEqual('file', uri.scheme)
+ self.assertEqual('file://%s' % uri_str, uri.uri)
+
def test_invalid_scheme(self):
uri_str = 'mars://bucket/object'
try: